diff --git a/config/data.py b/config/data.py
index 8f770b4..5463995 100644
--- a/config/data.py
+++ b/config/data.py
@@ -16,6 +16,20 @@
NOTIF_POS_KEY = "notif_pos"
NOTIF_POS_DEFAULT = "Top"
+# AI API Keys
+AI_OPENAI_KEY = "ai_openai_key"
+AI_GEMINI_KEY = "ai_gemini_key"
+AI_CLAUDE_KEY = "ai_claude_key"
+AI_GROK_KEY = "ai_grok_key"
+AI_DEEPSEEK_KEY = "ai_deepseek_key"
+
+# AI Model Selections
+AI_OPENAI_MODEL = "ai_openai_model"
+AI_GEMINI_MODEL = "ai_gemini_model"
+AI_CLAUDE_MODEL = "ai_claude_model"
+AI_GROK_MODEL = "ai_grok_model"
+AI_DEEPSEEK_MODEL = "ai_deepseek_model"
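+# The constants above are the config.json keys; the loaded values live in
+# AI_*_API_KEY and AI_*_MODEL_NAME (set when the config is read below).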
+
CACHE_DIR = str(GLib.get_user_cache_dir()) + f"/{APP_NAME}"
USERNAME = os.getlogin()
@@ -106,6 +120,20 @@ def load_config():
METRICS_SMALL_VISIBLE = config.get(
"metrics_small_visible", {"cpu": True, "ram": True, "disk": True, "gpu": True}
)
+
+ # AI API Keys
+ AI_OPENAI_API_KEY = config.get(AI_OPENAI_KEY, "")
+ AI_GEMINI_API_KEY = config.get(AI_GEMINI_KEY, "")
+ AI_CLAUDE_API_KEY = config.get(AI_CLAUDE_KEY, "")
+ AI_GROK_API_KEY = config.get(AI_GROK_KEY, "")
+ AI_DEEPSEEK_API_KEY = config.get(AI_DEEPSEEK_KEY, "")
+
+    # AI Model Selections (fall back to a sensible default per provider)
+    AI_OPENAI_MODEL_NAME = config.get(AI_OPENAI_MODEL, "gpt-3.5-turbo")
+    AI_GEMINI_MODEL_NAME = config.get(AI_GEMINI_MODEL, "gemini-1.5-pro")
+    AI_CLAUDE_MODEL_NAME = config.get(AI_CLAUDE_MODEL, "claude-3-sonnet-20240229")
+    AI_GROK_MODEL_NAME = config.get(AI_GROK_MODEL, "grok-beta")
+    AI_DEEPSEEK_MODEL_NAME = config.get(AI_DEEPSEEK_MODEL, "deepseek-chat")
else:
WALLPAPERS_DIR = WALLPAPERS_DIR_DEFAULT
BAR_POSITION = "Left"
@@ -126,6 +154,7 @@ def load_config():
PANEL_POSITION = PANEL_POSITION_DEFAULT
NOTIF_POS = NOTIF_POS_DEFAULT
+    # AI defaults when no config file is present (mirrors the defaults above)
+    AI_OPENAI_API_KEY = ""
+    AI_GEMINI_API_KEY = ""
+    AI_CLAUDE_API_KEY = ""
+    AI_GROK_API_KEY = ""
+    AI_DEEPSEEK_API_KEY = ""
+    AI_OPENAI_MODEL_NAME = "gpt-3.5-turbo"
+    AI_GEMINI_MODEL_NAME = "gemini-1.5-pro"
+    AI_CLAUDE_MODEL_NAME = "claude-3-sonnet-20240229"
+    AI_GROK_MODEL_NAME = "grok-beta"
+    AI_DEEPSEEK_MODEL_NAME = "deepseek-chat"
+
BAR_COMPONENTS_VISIBILITY = {
"button_apps": True,
"systray": True,
diff --git a/config/settings_constants.py b/config/settings_constants.py
index bd93306..a9c8dca 100644
--- a/config/settings_constants.py
+++ b/config/settings_constants.py
@@ -101,4 +101,6 @@
},
"limited_apps_history": ["Spotify"],
"history_ignored_apps": ["Hyprshot"],
+    "prefix_ai": "SUPER SHIFT",
+    "suffix_ai": "A",
}
diff --git a/config/settings_gui.py b/config/settings_gui.py
index 5b37962..c03af4b 100644
--- a/config/settings_gui.py
+++ b/config/settings_gui.py
@@ -63,6 +63,7 @@ def __init__(self, show_lock_checkbox: bool, show_idle_checkbox: bool, **kwargs)
self.key_bindings_tab_content = self.create_key_bindings_tab()
self.appearance_tab_content = self.create_appearance_tab()
self.system_tab_content = self.create_system_tab()
+ self.ai_tab_content = self.create_ai_tab()
self.about_tab_content = self.create_about_tab()
self.tab_stack.add_titled(
@@ -72,6 +73,7 @@ def __init__(self, show_lock_checkbox: bool, show_idle_checkbox: bool, **kwargs)
self.appearance_tab_content, "appearance", "Appearance"
)
self.tab_stack.add_titled(self.system_tab_content, "system", "System")
+ self.tab_stack.add_titled(self.ai_tab_content, "ai", "AI")
self.tab_stack.add_titled(self.about_tab_content, "about", "About")
tab_switcher = Gtk.StackSwitcher()
@@ -153,6 +155,7 @@ def create_key_bindings_tab(self):
"prefix_restart_inspector",
"suffix_restart_inspector",
),
+        ("AI", "prefix_ai", "suffix_ai"),
]
for i, (label_text, prefix_key, suffix_key) in enumerate(bindings):
@@ -878,6 +881,132 @@ def _add_disk_entry_widget(self, path):
self.disk_entries.add(bar)
self.disk_entries.show_all()
+
+ def create_ai_tab(self):
+ scrolled_window = ScrolledWindow(
+ h_scrollbar_policy="never",
+ v_scrollbar_policy="automatic",
+ h_expand=True,
+ v_expand=True,
+ propagate_width=False,
+ propagate_height=False
+ )
+
+ main_vbox = Box(orientation="v", spacing=15, style="margin: 20px;")
+ scrolled_window.add(main_vbox)
+
+ # Title
+ title_label = Label(markup="AI API Keys", h_align="start", style="font-size: 1.2em; margin-bottom: 10px;")
+ main_vbox.add(title_label)
+
+ # Description
+ desc_label = Label(
+            label="Enter an API key for each AI service you want to use. Leave a field empty if you don't have a key for that service.",
+ h_align="start",
+ style="margin-bottom: 20px; color: #888;"
+ )
+ main_vbox.add(desc_label)
+
+ # API Keys Grid
+ api_grid = Gtk.Grid()
+ api_grid.set_column_spacing(15)
+ api_grid.set_row_spacing(10)
+ api_grid.set_margin_start(5)
+ api_grid.set_margin_end(5)
+
+ # Headers
+ model_label = Label(markup="AI Service", h_align="start")
+ key_label = Label(markup="API Key", h_align="start")
+ model_select_label = Label(markup="Model", h_align="start")
+ api_grid.attach(model_label, 0, 0, 1, 1)
+ api_grid.attach(key_label, 1, 0, 1, 1)
+ api_grid.attach(model_select_label, 2, 0, 1, 1)
+
+        # Import current API keys, config keys, and selected model names
+        from .data import (
+            AI_OPENAI_API_KEY, AI_GEMINI_API_KEY, AI_CLAUDE_API_KEY,
+            AI_GROK_API_KEY, AI_DEEPSEEK_API_KEY,
+            AI_OPENAI_KEY, AI_GEMINI_KEY, AI_CLAUDE_KEY,
+            AI_GROK_KEY, AI_DEEPSEEK_KEY,
+            AI_OPENAI_MODEL, AI_GEMINI_MODEL, AI_CLAUDE_MODEL,
+            AI_GROK_MODEL, AI_DEEPSEEK_MODEL,
+            AI_OPENAI_MODEL_NAME, AI_GEMINI_MODEL_NAME, AI_CLAUDE_MODEL_NAME,
+            AI_GROK_MODEL_NAME, AI_DEEPSEEK_MODEL_NAME,
+        )
+
+        # API key entries and model combo boxes, keyed for saving in on_accept
+        self.ai_entries = []
+        self.ai_model_combos = {}
+
+        # Map each model config key to the currently selected model name
+        model_values = {
+            AI_OPENAI_MODEL: AI_OPENAI_MODEL_NAME,
+            AI_GEMINI_MODEL: AI_GEMINI_MODEL_NAME,
+            AI_CLAUDE_MODEL: AI_CLAUDE_MODEL_NAME,
+            AI_GROK_MODEL: AI_GROK_MODEL_NAME,
+            AI_DEEPSEEK_MODEL: AI_DEEPSEEK_MODEL_NAME,
+        }
+
+ # Define available models for each service
+ ai_services = [
+ ("Chat GPT (OpenAI)", AI_OPENAI_KEY, AI_OPENAI_API_KEY, AI_OPENAI_MODEL, [
+ "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "gpt-4-turbo", "gpt-4o", "gpt-4o-mini"
+ ]),
+ ("Gemini (Google)", AI_GEMINI_KEY, AI_GEMINI_API_KEY, AI_GEMINI_MODEL, [
+ "gemini-2.5-flash", "gemini-2.5-pro", "gemini-2.0-flash", "gemini-2.0-flash-exp",
+ "gemini-1.5-pro", "gemini-1.5-flash", "gemini-1.5-pro-latest", "gemini-1.5-flash-latest",
+ "gemini-1.0-pro", "gemini-1.0-pro-vision"
+ ]),
+ ("Claude (Anthropic)", AI_CLAUDE_KEY, AI_CLAUDE_API_KEY, AI_CLAUDE_MODEL, [
+ "claude-3-sonnet-20240229", "claude-3-opus-20240229", "claude-3-haiku-20240307",
+ "claude-3-5-sonnet-20241022", "claude-3-5-haiku-20241022"
+ ]),
+ ("Grok (xAI)", AI_GROK_KEY, AI_GROK_API_KEY, AI_GROK_MODEL, [
+ "grok-beta", "grok-2"
+ ]),
+ ("Deepseek", AI_DEEPSEEK_KEY, AI_DEEPSEEK_API_KEY, AI_DEEPSEEK_MODEL, [
+ "deepseek-chat", "deepseek-coder", "deepseek-llm-7b-chat"
+ ]),
+ ]
+
+ for i, (service_name, key_name, current_key, model_key, available_models) in enumerate(ai_services):
+ row = i + 1
+            service_label = Label(label=service_name, h_align="start")
+ key_entry = Entry(text=current_key, tooltip_text=f"Enter your {service_name} API key")
+ key_entry.set_visibility(False) # Hide the API key for security
+
+ # Create model selection combo box
+ model_combo = Gtk.ComboBoxText()
+ for model in available_models:
+ model_combo.append_text(model)
+
+ # Set current model
+ current_model = model_values[model_key]
+ model_combo.set_active(available_models.index(current_model) if current_model in available_models else 0)
+
+            api_grid.attach(service_label, 0, row, 1, 1)
+ api_grid.attach(key_entry, 1, row, 1, 1)
+ api_grid.attach(model_combo, 2, row, 1, 1)
+
+ self.ai_entries.append((key_name, key_entry))
+ self.ai_model_combos[model_key] = model_combo
+
+ main_vbox.add(api_grid)
+
+ # Add some spacing
+ main_vbox.add(Box(v_expand=True))
+
+ return scrolled_window
+
def create_about_tab(self):
vbox = Box(orientation="v", spacing=18, style="margin: 30px;")
vbox.add(
@@ -1043,6 +1172,14 @@ def on_accept(self, widget):
and isinstance(child.get_children()[0], Entry)
]
+        # Save AI API keys
+        for key_name, key_entry in self.ai_entries:
+            current_bind_vars_snapshot[key_name] = key_entry.get_text()
+
+        # Save AI model selections
+        for model_key, model_combo in self.ai_model_combos.items():
+            selected_model = model_combo.get_active_text()
+            if selected_model:
+                current_bind_vars_snapshot[model_key] = selected_model
+
# Parse notification app lists
def parse_app_list(text):
"""Parse comma-separated app names with quotes"""
diff --git a/config/settings_utils.py b/config/settings_utils.py
index e95db8c..5a43eb2 100644
--- a/config/settings_utils.py
+++ b/config/settings_utils.py
@@ -271,6 +271,7 @@ def generate_hyprconf() -> str:
bind = {bind_vars.get("prefix_caffeine", "SUPER SHIFT")}, {bind_vars.get("suffix_caffeine", "M")}, exec, $fabricSend 'notch.dashboard.widgets.buttons.caffeine_button.toggle_inhibit(external=True)' # Toggle Caffeine
bind = {bind_vars.get("prefix_css", "SUPER SHIFT")}, {bind_vars.get("suffix_css", "B")}, exec, $fabricSend 'app.set_css()' # Reload CSS
bind = {bind_vars.get("prefix_restart_inspector", "SUPER CTRL ALT")}, {bind_vars.get("suffix_restart_inspector", "B")}, exec, killall {APP_NAME}; uwsm-app $(GTK_DEBUG=interactive python {home}/.config/{APP_NAME_CAP}/main.py) # Restart with inspector
+bind = {bind_vars.get("prefix_ai", "SUPER SHIFT")}, {bind_vars.get("suffix_ai", "A")}, exec, $fabricSend 'notch.open_notch("ai")' # AI Panel
# Wallpapers directory: {bind_vars.get("wallpapers_dir", "~/.config/Ax-Shell/assets/wallpapers_example")}
diff --git a/main.css b/main.css
index 1b961d6..71a06e3 100644
--- a/main.css
+++ b/main.css
@@ -22,6 +22,7 @@
@import url("./styles/wallpapers.css");
@import url("./styles/systemprofiles.css");
@import url("./styles/workspaces.css");
+@import url("./styles/ai.css");
* {
all: unset;
@@ -114,7 +115,7 @@
}
#later-button:active {
- background-color: var(--surface-bright);
+ background-color: var(--primary);
}
#toggle-updater-button {
@@ -129,3 +130,4 @@
background-color: var(--outline);
border-radius: 8px;
}
diff --git a/modules/ai.py b/modules/ai.py
new file mode 100644
index 0000000..358acae
--- /dev/null
+++ b/modules/ai.py
@@ -0,0 +1,557 @@
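+"""Slide-out AI chat panel for the shell.
+
+Provides a Wayland layer window with a chat transcript, a multi-line input,
+and a gear popover for picking the backend model; requests are dispatched
+through ai_manager from modules.ai_services.
+"""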
+from fabric.widgets.box import Box
+from fabric.widgets.label import Label
+from fabric.widgets.button import Button
+from fabric.widgets.scrolledwindow import ScrolledWindow
+from fabric.widgets.revealer import Revealer
+from fabric.widgets.entry import Entry
+from widgets.wayland import WaylandWindow as Window
+import gi
+import asyncio
+gi.require_version("Gtk", "3.0")
+from gi.repository import Gtk, Gdk, GLib, Pango
+
+# Import AI services
+from .ai_services import ai_manager
+
+# Utility to break long unbreakable words for GTK Label wrapping
+import re
+
+
+def break_long_words(text, n=20):
+    """Insert zero-width spaces into runs longer than n characters so labels can wrap them."""
+    return re.sub(
+        r"(\S{" + str(n) + r",})",
+        lambda m: "\u200b".join(m.group(0)[i:i + n] for i in range(0, len(m.group(0)), n)),
+        text,
+    )
+
+class AI(Window):
+ def __init__(self, **kwargs):
+ super().__init__(
+ name="ai-window",
+ title="AI Panel",
+ size=(400, 600),
+ layer="top",
+ anchor="top left bottom",
+            keyboard_mode="on-demand",  # let the text entry take keyboard focus while the panel is open
+ exclusivity="none",
+ visible=False,
+ all_visible=False,
+ **kwargs,
+ )
+ self.set_size_request(400, 500)
+ # self.steal_input() # Removed to allow normal input
+
+ # Create revealer for slide animation (recommended for WaylandWindow)
+ self.revealer = Revealer(
+ name="ai-revealer",
+ transition_type="slide_right",
+ transition_duration=250,
+ )
+
+ # Main container
+ self.main_box = Box(
+ orientation="v",
+ spacing=16,
+ style="border: 4px solid #000; border-radius: 16px; margin: 0px 16px 16px 0px; padding: 24px; min-width: 320px; min-height: 480px; background: #000000;",
+ )
+ self.main_box.set_hexpand(True)
+ self.main_box.set_halign(Gtk.Align.FILL)
+
+ # Title label (handwritten style, large)
+ self.title_label = Label(
+ label="panel",
+ h_align="start",
+ style="font-family: 'Comic Sans MS', 'Comic Sans', cursive; font-size: 2em; font-weight: bold; margin-bottom: 12px;"
+ )
+ self.main_box.add(self.title_label)
+
+ # # Divider (horizontal line)
+ # self.divider = Box(
+ # style="min-height: 2px; background-color: #fff; margin: 16px 0 24px 0;",
+ # h_expand=True
+ # )
+ # self.main_box.add(self.divider)
+
+ # Chat area (scrollable)
+ self.chat_scroll = ScrolledWindow(
+ name="ai-chat-scroll",
+ vexpand=True,
+ min_content_height=200,
+ )
+ self.chat_scroll.set_size_request(384, -1)
+
+ # Chat container for messages
+ self.chat_container = Box(
+ name="ai-chat-container",
+ orientation="v",
+ spacing=8,
+ margin_start=0, # Set to 0 for flush left
+ margin_end=8,
+ margin_top=8,
+ margin_bottom=8,
+ )
+ self.chat_container.set_hexpand(True)
+ self.chat_container.set_halign(Gtk.Align.FILL)
+ # Wrap in Gtk.Alignment to constrain width
+ self.chat_alignment = Gtk.Alignment.new(0.0, 0, 0, 0)
+ self.chat_alignment.set_hexpand(True)
+ self.chat_alignment.set_halign(Gtk.Align.START)
+ #self.chat_alignment.set_size_request(384, -1)
+ self.chat_alignment.add(self.chat_container)
+ self.chat_scroll.add(self.chat_alignment)
+ self.main_box.add(self.chat_scroll)
+
+ # Spacer to push dropdown to bottom
+ self.spacer = Box(v_expand=True)
+ self.main_box.add(self.spacer)
+
+ # Text field and gear button container (horizontal)
+ self.input_container = Box(
+ orientation="h",
+ spacing=8,
+ h_align="fill",
+ v_align="fill",
+ hexpand=True,
+            style="margin: 8px 0 8px -18px;"  # negative left margin keeps the input row flush with the panel edge
+ )
+ self.input_container.set_hexpand(True)
+ self.input_container.set_halign(Gtk.Align.FILL)
+
+ # AI Model selection (gear button only) - now to the left of text field
+ self.model_button = Button(
+ name="ai-model-button",
+ child=Label(name="ai-model-icon", markup="⚙"), # Gear icon
+ tooltip_text="AI Model Settings",
+ halign=Gtk.Align.START,
+ hexpand=False
+ )
+ self.model_button.set_size_request(-1, 40)
+ self.model_button.connect("clicked", self._on_model_button_clicked)
+
+ # Create a popover for the model options
+ self.model_popover = Gtk.Popover()
+ self.model_popover.set_relative_to(self.model_button)
+ self.model_popover.set_position(Gtk.PositionType.BOTTOM)
+
+ # Create a vertical box for model options
+ self.model_options_box = Box(
+ orientation="v",
+ spacing=4,
+ margin_start=8,
+ margin_end=8,
+ margin_top=8,
+ margin_bottom=8,
+ name="ai-model-options-box"
+ )
+
+ # Create buttons for each model
+ ai_models = ["Chat GPT", "Gemini", "Claude", "Grok", "Deepseek"]
+ self.model_buttons = {} # Store references to buttons
+
+ for model in ai_models:
+ # Default styling for unselected models
+ model_button = Button(
+ label=model,
+ halign="fill",
+ name="ai-model-option-button"
+ )
+ model_button.get_style_context().add_class("ai-model-button-unselected")
+ model_button.connect("clicked", self._on_model_option_clicked, model)
+ self.model_options_box.add(model_button)
+ self.model_buttons[model] = model_button
+
+ # Set initial selected model styling
+ self.selected_model = "Chat GPT"
+ self._update_model_button_styles()
+
+ self.model_popover.add(self.model_options_box)
+ # self.model_button.set_popover(self.model_popover) # Not available in GTK3
+
+ self.input_container.add(self.model_button)
+
+ # Text field (input area) - multiline, scrollable, wrapped
+ self.text_entry = Gtk.TextView()
+ self.text_entry.set_name("ai-text-entry")
+ self.text_entry.set_hexpand(True)
+ self.text_entry.set_halign(Gtk.Align.FILL)
+ self.text_entry.set_vexpand(True)
+ self.text_entry.set_margin_top(0)
+ self.text_entry.set_margin_bottom(0)
+ self.text_entry.set_sensitive(True)
+ self.text_entry.set_editable(True)
+ self.text_entry.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)
+
+ # Make text entry scrollable
+ self.text_scroll = Gtk.ScrolledWindow()
+ self.text_scroll.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
+ self.text_scroll.set_hexpand(True)
+ self.text_scroll.set_vexpand(True)
+ self.text_scroll.set_min_content_height(40)
+ self.text_scroll.set_min_content_width(100)
+ self.text_scroll.get_style_context().add_class('ai-text-scroll')
+ self.text_scroll.set_vexpand(False)
+ self.text_scroll.add(self.text_entry)
+
+
+ # Add scrolled window to input_container for full width
+ self.input_container.add(self.text_scroll)
+ # Remove Entry 'activate' signal (not valid for TextView)
+ # To send on Enter, handle key-press-event on TextView if needed
+
+ self.main_box.add(self.input_container)
+
+ # Add the main box to the revealer, and revealer to the window
+ self.revealer.add(self.main_box)
+ self.add(self.revealer)
+
+ def show_at_position(self, x, y):
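+        """Move the window into place, show it, and reveal the panel with the slide animation."""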
+ self.move(x, y)
+ self.set_visible(True)
+ self.present() # Bring window to front
+ self.grab_focus() # Grab window focus
+ self.show_all()
+
+ # Reveal the content with smooth slide animation
+ self.revealer.set_reveal_child(True)
+
+        # Focus the text entry after the window is mapped
+        GLib.idle_add(self.text_entry.grab_focus)
+
+        # Connect key handlers only once; show_at_position can be called repeatedly
+        if not getattr(self, "_keys_connected", False):
+            self.add_events(Gdk.EventMask.KEY_PRESS_MASK)
+            self.text_entry.connect("key-press-event", self._on_text_entry_key_press)
+            self.text_entry.connect("key-release-event", self._on_text_entry_key_release)
+            self._keys_connected = True
+
+ def hide_ai_panel(self):
+ print("hide_ai_panel() called")
+ # Hide the content with smooth slide animation
+ self.revealer.set_reveal_child(False)
+
+ # Wait for animation to complete before hiding the window
+ GLib.timeout_add(250, self._hide_after_animation) # 250ms to match animation duration
+
+ def _hide_after_animation(self):
+ """Hide the window after the revealer animation completes"""
+ self.set_visible(False)
+ self.hide()
+ return False
+
+ def _on_model_button_clicked(self, button):
+ """Handle gear button click - manually show popover"""
+ self.model_popover.show_all()
+ self.model_popover.popup()
+
+ def _on_model_option_clicked(self, button, model_name):
+ """Handle model selection from popover"""
+ self.selected_model = model_name
+ print(f"Selected AI model: {model_name}")
+ self._update_model_button_styles() # Update button styles after selection
+ # Close the popover after selection
+ self.model_popover.hide()
+
+ def on_model_changed(self, combo):
+ """Handle model selection change"""
+ active_iter = combo.get_active_iter()
+ if active_iter is not None:
+ selected_model = self.model_store.get_value(active_iter, 0)
+ self.selected_model = selected_model
+ print(f"Selected AI model: {selected_model}")
+ # Hide the dropdown after selection
+ self.model_dropdown.set_visible(False)
+
+ def show_ai_panel(self):
+ """Show the AI panel with revealer animation"""
+ self.show_at_position(0, 0)
+
+ def _on_entry_activate(self, entry):
+ print("[DEBUG] Entry activate signal fired")
+ """Send the current message to the AI when Enter is pressed in Entry"""
+ self.current_message = entry.get_text()
+ print(f"Message saved: {self.current_message}")
+ if self.current_message.strip():
+ self.add_user_message(self.current_message)
+ entry.set_text("")
+ print(f"Sending message to {self.selected_model}: {self.current_message}")
+
+ def _send_current_message(self):
+ buffer = self.text_entry.get_buffer()
+ start_iter = buffer.get_start_iter()
+ end_iter = buffer.get_end_iter()
+ message = buffer.get_text(start_iter, end_iter, True).strip()
+ if message:
+ self.add_user_message(message)
+ buffer.set_text("") # Clear the text field
+
+ def _reset_sending_flag(self):
+ """Reset the sending message flag"""
+ self._sending_message = False
+ return False
+
+ def add_user_message(self, message):
+ """Add a user message to the chat area (right side)"""
+ message = break_long_words(message)
+ # Create message container
+ message_container = Box(
+ orientation="h",
+ h_align="end",
+ margin_top=8,
+ margin_bottom=8,
+ margin_start=8,
+ margin_end=8,
+ )
+ message_container.set_hexpand(True)
+ message_container.set_halign(Gtk.Align.END) # <--- THIS IS CRITICAL
+
+ # Create message bubble
+ message_bubble = Box(
+ name="user-message-bubble",
+ orientation="h",
+ vexpand=False,
+ margin_top=2,
+ margin_bottom=2,
+ margin_start=0,
+ margin_end=0
+ )
+ message_bubble.set_hexpand(True)
+ message_bubble.set_halign(Gtk.Align.FILL)
+ # Create message label for text
+ message_label = Label(
+ label=message,
+ wrap=True,
+ xalign=0.0,
+ selectable=True,
+ style="color: #fff; font-size: 1em; padding: 2px;"
+ )
+ message_label.set_xalign(0.0)
+ message_label.set_hexpand(True)
+ message_label.set_halign(Gtk.Align.END)
+ message_label.set_line_wrap(True)
+ message_label.set_max_width_chars(40)
+ message_label.set_ellipsize(Pango.EllipsizeMode.NONE)
+ message_bubble.add(message_label)
+ message_container.add(message_bubble)
+
+ # Add to chat container
+ self.chat_container.add(message_container)
+
+ # Scroll to bottom
+ self.chat_scroll.get_vadjustment().set_value(
+ self.chat_scroll.get_vadjustment().get_upper()
+ )
+
+ # Show the new message
+ self.chat_container.show_all()
+
+ # Get AI response
+ self.get_ai_response(message)
+
+
+ def add_ai_message(self, message):
+ """Add an AI message to the chat area (left side)"""
+ message = break_long_words(message)
+ # Create message container
+ message_container = Box(
+ orientation="h",
+ h_align="start",
+ margin_top=8,
+ margin_bottom=8,
+ margin_start=8, # Same margin as user
+ margin_end=8, # Same margin as user
+ )
+ # Create message bubble
+ message_bubble = Box(
+ name="ai-message-bubble",
+ orientation="h",
+ vexpand=False,
+ margin_top=2,
+ margin_bottom=2,
+ margin_start=0, # No extra margin on bubble
+ margin_end=0,
+ style="background: #444; border-radius: 16px; padding: 10px;"
+ )
+ message_bubble.set_hexpand(False)
+ message_bubble.set_halign(Gtk.Align.START)
+ # Create message label for text
+ message_label = Label(
+ label=message,
+ wrap=True,
+ xalign=0.0,
+ selectable=True,
+ style="color: #fff; font-size: 1em; padding: 2px;"
+ )
+ message_label.set_hexpand(True)
+ message_label.set_halign(Gtk.Align.END)
+ message_label.set_line_wrap(True)
+ message_label.set_max_width_chars(40)
+ message_label.set_ellipsize(Pango.EllipsizeMode.NONE)
+ message_bubble.add(message_label)
+ message_container.add(message_bubble)
+ self.chat_container.add(message_container)
+ self.chat_container.show_all()
+
+
+ def get_ai_response(self, user_message):
+ """Get response from the selected AI model"""
+ # Show typing indicator
+ self.show_typing_indicator()
+
+ def _run_async_response():
+ """Run the async response in a new event loop"""
+ async def _get_response():
+ try:
+ response = await ai_manager.get_response(self.selected_model, user_message)
+ # Hide typing indicator and show response
+ GLib.idle_add(self.hide_typing_indicator)
+ GLib.idle_add(self.add_ai_message, response)
+ except Exception as e:
+ error_msg = f"Error getting AI response: {str(e)}"
+ GLib.idle_add(self.hide_typing_indicator)
+ GLib.idle_add(self.add_ai_message, error_msg)
+
+ # Create new event loop for this thread
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ loop.run_until_complete(_get_response())
+ finally:
+ loop.close()
+
+ # Run the async function in a new thread
+ import threading
+ thread = threading.Thread(target=_run_async_response)
+ thread.daemon = True
+ thread.start()
+
+ def show_typing_indicator(self):
+ """Show typing indicator with animated dots"""
+ print("Showing typing indicator")
+
+ # Create typing indicator container
+ typing_container = Box(
+ orientation="h",
+ h_align="start",
+ margin_top=4,
+ margin_bottom=4,
+ )
+
+ # Create typing bubble
+ typing_bubble = Box(
+ name="typing-bubble",
+ v_expand=False,
+ h_expand=False,
+ )
+ typing_bubble.get_style_context().add_class("typing-bubble")
+ typing_bubble.set_size_request(80, 40) # Small size for typing indicator
+
+ # Create dots container
+ dots_container = Box(
+ orientation="h",
+ spacing=4,
+ margin_start=12,
+ margin_end=12,
+ margin_top=8,
+ margin_bottom=8,
+ )
+
+ # Create three animated dots
+ self.typing_dots = []
+ for i in range(3):
+ dot = Label(
+ name=f"typing-dot-{i}",
+ text="●",
+ )
+ dot.get_style_context().add_class("typing-dot")
+ dot.get_style_context().add_class("typing-dot-inactive")
+ dots_container.add(dot)
+ self.typing_dots.append(dot)
+ print(f"Created dot {i}")
+
+ typing_bubble.add(dots_container)
+ typing_container.add(typing_bubble)
+
+ # Add to chat container
+ self.chat_container.add(typing_container)
+
+ # Scroll to bottom
+ self.chat_scroll.get_vadjustment().set_value(
+ self.chat_scroll.get_vadjustment().get_upper()
+ )
+
+ # Show the typing indicator
+ self.chat_container.show_all()
+
+ # Store reference to typing container for removal
+ self.current_typing_container = typing_container
+
+ # Initialize animation state
+ self._dot_index = 0
+
+ print(f"Starting animation with {len(self.typing_dots)} dots")
+ # Start animation
+ GLib.timeout_add(500, self._animate_typing_dots)
+
+ def hide_typing_indicator(self):
+ """Hide the typing indicator"""
+ if hasattr(self, 'current_typing_container') and self.current_typing_container:
+ self.chat_container.remove(self.current_typing_container)
+ self.current_typing_container = None
+ self.chat_container.show_all()
+
+ def _animate_typing_dots(self):
+ """Animate the typing dots"""
+ if not hasattr(self, 'typing_dots') or not self.typing_dots:
+ print("No typing dots found")
+ return False
+
+ print(f"Animating dots, index: {self._dot_index}")
+
+ # Simple animation: cycle through dots
+ for i, dot in enumerate(self.typing_dots):
+ if i == (self._dot_index % 3):
+ dot.get_style_context().add_class("typing-dot-active")
+ dot.get_style_context().remove_class("typing-dot-inactive")
+ print(f"Dot {i} is now active")
+ else:
+ dot.get_style_context().add_class("typing-dot-inactive")
+ dot.get_style_context().remove_class("typing-dot-active")
+
+ # Update dot index
+ self._dot_index += 1
+
+ # Continue animation if typing indicator is still visible
+ if hasattr(self, 'current_typing_container') and self.current_typing_container:
+ GLib.timeout_add(500, self._animate_typing_dots)
+
+ return False # Important: return False to stop the timeout
+
+ def _on_key_press(self, widget, event):
+ # Close window when Escape key is pressed
+ if event.keyval == Gdk.KEY_Escape:
+ print("Escape key pressed - closing AI panel")
+ self.hide_ai_panel()
+ return True
+ return False
+
+ def do_key_press_event(self, event):
+ """Override key press event for the window"""
+ if event.keyval == Gdk.KEY_Escape:
+ print("Escape key pressed (do_key_press_event) - closing AI panel")
+ self.hide_ai_panel()
+ return True
+ return Gtk.Window.do_key_press_event(self, event)
+
+ def _on_text_entry_key_press(self, widget, event):
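+        # Enter sends the message; Shift+Enter falls through so the TextView inserts a newline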
+ if event.keyval == Gdk.KEY_Return and not (event.state & Gdk.ModifierType.SHIFT_MASK):
+ self._send_current_message()
+ return True
+ return False
+
+ def _on_text_entry_key_release(self, widget, event):
+ """Handle key release events on the text entry."""
+ print(f"Text entry key release: {Gdk.keyval_name(event.keyval)} ({event.keyval})")
+ return False # Return False to allow other handlers to process the event
+
+ def _update_model_button_styles(self):
+ """Update the styles of all model buttons to reflect the selected model."""
+ for model_name, button in self.model_buttons.items():
+ if model_name == self.selected_model:
+ button.get_style_context().add_class("ai-model-button-selected")
+ button.get_style_context().remove_class("ai-model-button-unselected")
+ else:
+ button.get_style_context().add_class("ai-model-button-unselected")
+ button.get_style_context().remove_class("ai-model-button-selected")
\ No newline at end of file
diff --git a/modules/ai_services.py b/modules/ai_services.py
new file mode 100644
index 0000000..dfc98f2
--- /dev/null
+++ b/modules/ai_services.py
@@ -0,0 +1,242 @@
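+"""Backend adapters for the AI panel.
+
+Each *Service class wraps one provider behind AIService.generate_response(),
+and the module-level ai_manager maps the display names used by the panel
+("Chat GPT", "Gemini", "Claude", "Grok", "Deepseek") to those services.
+"""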
+import asyncio
+import json
+import os
+import sys
+from typing import Optional
+
+# Import the data module to get API keys and models
+sys.path.append(os.path.join(os.path.dirname(__file__), ".."))  # project root, so config.data resolves when run standalone
+from config.data import (
+    AI_OPENAI_API_KEY, AI_GEMINI_API_KEY, AI_CLAUDE_API_KEY,
+    AI_GROK_API_KEY, AI_DEEPSEEK_API_KEY,
+    AI_OPENAI_MODEL_NAME, AI_GEMINI_MODEL_NAME, AI_CLAUDE_MODEL_NAME,
+    AI_GROK_MODEL_NAME, AI_DEEPSEEK_MODEL_NAME,
+)
+
+class AIService:
+ """Base class for AI services"""
+
+ def __init__(self, api_key: str, model_name: str):
+ self.api_key = api_key
+ self.model_name = model_name
+
+ async def generate_response(self, message: str) -> str:
+ """Generate a response from the AI model"""
+ if not self.api_key:
+ return f"API key not found for {self.model_name}. Please make sure you pasted it in the Ax-Shell settings."
+
+ try:
+ # Run the API call in a thread to avoid blocking
+            loop = asyncio.get_running_loop()
+ return await loop.run_in_executor(None, self._make_api_call_sync, message)
+ except Exception as e:
+ return f"Error communicating with {self.model_name}: {str(e)}"
+
+    def _make_api_call_sync(self, message: str) -> str:
+        """Drive the (async) API call to completion in the worker thread."""
+        return asyncio.run(self._make_api_call(message))
+
+    async def _make_api_call(self, message: str) -> str:
+        """Make the actual API call - to be implemented by subclasses"""
+        raise NotImplementedError
+
+class OpenAIService(AIService):
+ """OpenAI/ChatGPT service"""
+
+ def __init__(self):
+ super().__init__(AI_OPENAI_API_KEY, "Chat GPT")
+        self.model = AI_OPENAI_MODEL_NAME
+
+    async def _make_api_call(self, message: str) -> str:
+        try:
+            # Uses the v1 client API (openai >= 1.0); openai.ChatCompletion was removed there
+            from openai import OpenAI
+            client = OpenAI(api_key=self.api_key)
+
+            response = await asyncio.to_thread(
+                client.chat.completions.create,
+                model=self.model,
+                messages=[
+                    {"role": "user", "content": message}
+                ],
+                max_tokens=1000,
+                temperature=0.7
+            )
+
+            return response.choices[0].message.content
+        except ImportError:
+            return "OpenAI library not installed. Please install it with: pip install openai"
+        except Exception as e:
+            return f"OpenAI API error: {str(e)}"
+
+class GeminiService(AIService):
+ """Google Gemini service"""
+
+ def __init__(self):
+ super().__init__(AI_GEMINI_API_KEY, "Gemini")
+        self.model = AI_GEMINI_MODEL_NAME
+
+ def _make_api_call_sync(self, message: str) -> str:
+ """Synchronous API call for Gemini"""
+ try:
+ import google.generativeai as genai
+
+ # Validate API key
+ if not self.api_key or self.api_key.strip() == "":
+ return "Gemini API key is empty. Please add your API key in the Ax-Shell settings."
+
+ # Configure the API
+ genai.configure(api_key=self.api_key)
+
+ # Test the API key with a simple call
+ try:
+ model = genai.GenerativeModel(self.model)
+
+ # Make the API call with error handling
+ response = model.generate_content(message)
+
+ # Check if response is valid and has content
+ if response and hasattr(response, 'text') and response.text:
+ return response.text
+ elif response and hasattr(response, 'parts') and response.parts:
+ # Handle response with parts
+ return response.parts[0].text
+                elif response and hasattr(response, 'candidates') and response.candidates:
+                    # Try accessing through candidates
+                    candidate = response.candidates[0]
+                    if hasattr(candidate, 'content') and candidate.content:
+                        if hasattr(candidate.content, 'parts') and candidate.content.parts:
+                            return candidate.content.parts[0].text
+                    return "Gemini API returned an empty response. Please try again."
+                else:
+                    return "Gemini API returned an empty response. Please try again."
+
+ except Exception as api_error:
+ # Handle specific API errors
+ error_str = str(api_error)
+ if "API_KEY_INVALID" in error_str or "INVALID_ARGUMENT" in error_str:
+ return "Invalid Gemini API key. Please check your API key in the Ax-Shell settings."
+ elif "QUOTA_EXCEEDED" in error_str:
+ return "Gemini API quota exceeded. Please check your usage limits."
+ elif "PERMISSION_DENIED" in error_str:
+ return "Permission denied. Please check your Gemini API key permissions."
+ elif "NoneType" in error_str and "from_call" in error_str:
+ return """Gemini API is currently experiencing issues.
+
+This appears to be a known issue with the Google Generative AI library.
+
+Please try:
+1. Using a different AI model (ChatGPT, Claude, or Deepseek)
+2. Updating the library: pip install --upgrade google-generativeai
+3. Checking your internet connection
+
+For now, I recommend using ChatGPT or Claude instead."""
+ else:
+ return f"Gemini API error: {error_str}"
+
+ except ImportError:
+ return "Google Generative AI library not installed. Please install it with: pip install google-generativeai"
+ except Exception as e:
+ return f"Gemini configuration error: {str(e)}"
+
+ async def _make_api_call(self, message: str) -> str:
+ """Async wrapper for the synchronous API call"""
+ return self._make_api_call_sync(message)
+
+class ClaudeService(AIService):
+ """Anthropic Claude service"""
+
+ def __init__(self):
+ super().__init__(AI_CLAUDE_API_KEY, "Claude")
+        self.model = AI_CLAUDE_MODEL_NAME
+
+ async def _make_api_call(self, message: str) -> str:
+ try:
+ import anthropic
+ client = anthropic.Anthropic(api_key=self.api_key)
+
+ response = await asyncio.to_thread(
+ client.messages.create,
+ model=self.model,
+ max_tokens=1000,
+ messages=[
+ {"role": "user", "content": message}
+ ]
+ )
+
+ return response.content[0].text
+ except ImportError:
+ return "Anthropic library not installed. Please install it with: pip install anthropic"
+ except Exception as e:
+ return f"Claude API error: {str(e)}"
+
+class GrokService(AIService):
+ """xAI Grok service"""
+
+ def __init__(self):
+ super().__init__(AI_GROK_API_KEY, "Grok")
+        self.model = AI_GROK_MODEL_NAME
+
+ async def _make_api_call(self, message: str) -> str:
+ # Grok API is not publicly available yet, so we'll return a placeholder
+ return f"Grok API is not publicly available yet. Selected model: {self.model}. Please check back later for updates."
+
+class DeepseekService(AIService):
+ """Deepseek service"""
+
+ def __init__(self):
+ super().__init__(AI_DEEPSEEK_API_KEY, "Deepseek")
+        self.model = AI_DEEPSEEK_MODEL_NAME
+
+    async def _make_api_call(self, message: str) -> str:
+        try:
+            # Deepseek exposes an OpenAI-compatible endpoint; use the v1 client with a custom base_url
+            from openai import OpenAI
+            client = OpenAI(api_key=self.api_key, base_url="https://api.deepseek.com/v1")
+
+            response = await asyncio.to_thread(
+                client.chat.completions.create,
+                model=self.model,
+                messages=[
+                    {"role": "user", "content": message}
+                ],
+                max_tokens=1000,
+                temperature=0.7
+            )
+
+            return response.choices[0].message.content
+        except ImportError:
+            return "OpenAI library not installed. Please install it with: pip install openai"
+        except Exception as e:
+            return f"Deepseek API error: {str(e)}"
+
+class AIManager:
+ """Manager class for all AI services"""
+
+ def __init__(self):
+ self.services = {
+ "Chat GPT": OpenAIService(),
+ "Gemini": GeminiService(),
+ "Claude": ClaudeService(),
+ "Grok": GrokService(),
+ "Deepseek": DeepseekService(),
+ }
+
+ async def get_response(self, model_name: str, message: str) -> str:
+ """Get a response from the specified AI model"""
+ if model_name not in self.services:
+ return f"Unknown AI model: {model_name}"
+
+ service = self.services[model_name]
+ return await service.generate_response(message)
+
+ def get_available_models(self) -> list:
+ """Get list of available models"""
+ return list(self.services.keys())
+
+ def has_api_key(self, model_name: str) -> bool:
+ """Check if a model has an API key configured"""
+ if model_name not in self.services:
+ return False
+ return bool(self.services[model_name].api_key)
+
+# Global AI manager instance
+ai_manager = AIManager()
\ No newline at end of file
diff --git a/modules/notch.py b/modules/notch.py
index dd5a3cd..9e8f67e 100644
--- a/modules/notch.py
+++ b/modules/notch.py
@@ -22,6 +22,7 @@
from utils.icon_resolver import IconResolver
from utils.occlusion import check_occlusion
from widgets.wayland import WaylandWindow as Window
+from modules.ai import AI
class Notch(Window):
@@ -138,6 +139,7 @@ def __init__(self, **kwargs):
self._prevent_occlusion = False
self._occlusion_timer_id = None
self.icon_resolver = IconResolver()
self._all_apps = get_desktop_applications()
self.app_identifiers = self._build_app_identifiers_map()
@@ -152,12 +154,14 @@ def __init__(self, **kwargs):
self.btdevices.set_visible(False)
self.nwconnections.set_visible(False)
self.launcher = AppLauncher(notch=self)
self.overview = Overview()
self.emoji = EmojiPicker(notch=self)
self.power = PowerMenu(notch=self)
self.tmux = TmuxManager(notch=self)
self.cliphist = ClipHistory(notch=self)
+ self.ai = AI()
self.window_label = Label(
name="notch-window-label",
@@ -178,6 +182,7 @@ def __init__(self, **kwargs):
),
)
self.active_window_box = CenterBox(
name="active-window-box",
h_expand=True,
@@ -185,8 +190,13 @@ def __init__(self, **kwargs):
start_children=self.window_icon,
center_children=self.active_window,
end_children=None,
)
self.active_window_box.connect(
"button-press-event",
lambda widget, event: (self.open_notch("dashboard"), False)[1],
@@ -201,6 +211,9 @@ def __init__(self, **kwargs):
self.active_window.get_children()[0].set_halign(Gtk.Align.FILL)
self.active_window.get_children()[0].set_ellipsize(Pango.EllipsizeMode.END)
self.active_window.connect(
"notify::label", lambda *_: self.restore_label_properties()
)
@@ -209,7 +222,17 @@ def __init__(self, **kwargs):
self.user_label = Label(
name="compact-user", label=f"{data.USERNAME}@{data.HOSTNAME}"
)
self.player_small.mpris_manager.connect(
"player-appeared",
lambda *_: self.compact_stack.set_visible_child(self.player_small),
@@ -245,6 +268,10 @@ def __init__(self, **kwargs):
"button-press-event",
lambda widget, event: (self.open_notch("dashboard"), False)[1],
)
self.compact.connect("enter-notify-event", self.on_button_enter)
self.compact.connect("leave-notify-event", self.on_button_leave)
@@ -257,6 +284,10 @@ def __init__(self, **kwargs):
if (not data.VERTICAL and data.BAR_THEME in ["Dense", "Edge"])
and data.BAR_POSITION not in ["Bottom"]
else [],
transition_type="crossfade",
transition_duration=250,
children=[
@@ -329,6 +360,7 @@ def __init__(self, **kwargs):
self.notch_box.add_style_class(data.PANEL_THEME.lower())
self.notch_revealer = Revealer(
name="notch-revealer",
transition_type=revealer_transition_type,
@@ -346,6 +378,12 @@ def __init__(self, **kwargs):
self.notch_hover_area_event_box.connect(
"leave-notify-event", self.on_notch_hover_area_leave
)
self.notch_hover_area_event_box.set_size_request(-1, 1)
self.notch_complete = Box(
@@ -579,6 +617,14 @@ def open_notch(self, widget_name: str):
"tools": {"instance": self.tools},
}
+ if widget_name == "ai":
+ # Handle AI window separately since it's a standalone window
+ if self.ai.get_visible():
+ self.ai.hide_ai_panel()
+ else:
+ self.ai.show_ai_panel()
+ return
+
if widget_name in widget_configs:
config = widget_configs[widget_name]
target_widget_on_stack = config["instance"]
@@ -944,6 +990,12 @@ def _ensure_no_text_selection(self):
def on_key_press(self, widget, event):
"""Handle key presses at the notch level"""
+ print(f"Notch on_key_press received key: {Gdk.keyval_name(event.keyval)} ({event.keyval})")
+
+ # If the AI panel is visible, allow key events to propagate to it
+ if self.ai.get_visible():
+ print("AI panel is visible, allowing key event to propagate.")
+ return False # Allow propagation
if self._launcher_transitioning:
keyval = event.keyval
@@ -969,6 +1021,7 @@ def on_key_press(self, widget, event):
and self.dashboard.stack.get_visible_child() == self.dashboard.widgets
):
if self.stack.get_visible_child() == self.launcher:
+ print("Launcher is visible, not opening again.")
return False
keyval = event.keyval
@@ -983,9 +1036,9 @@ def on_key_press(self, widget, event):
)
if is_valid_char and keychar:
- print(f"Notch received keypress: {keychar}")
-
+ print(f"Notch received keypress: {keychar} and is opening launcher.")
self.open_launcher_with_text(keychar)
return True
+ print(f"Notch on_key_press returning False. Current visible child: {self.stack.get_visible_child().get_name()}")
return False
diff --git a/styles/ai.css b/styles/ai.css
new file mode 100644
index 0000000..20bbebb
--- /dev/null
+++ b/styles/ai.css
@@ -0,0 +1,237 @@
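+/* Styles for the AI chat panel (modules/ai.py): chat bubbles, the input row,
+   the model-picker popover, and the typing indicator. */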
+/* AI Panel Main Styling */
+#ai-panel {
+ background-color: var(--surface-bright);
+ border-radius: 0 16px 16px 0;
+ padding: 16px;
+ margin: 0px 0px 0px 0px;
+ min-width: 400px;
+ min-height: 800px;
+}
+
+/* AI Dropdown Styling */
+.ai-dropdown {
+ background-color: var(--foreground);
+}
+
+.ai-dropdown popup * {
+ color: var(--foreground);
+ background-color: var(--background);
+}
+
+.ai-dropdown popup menuitem {
+ color: var(--foreground);
+ background-color: var(--background);
+}
+
+.ai-dropdown popup menuitem:hover {
+ background-color: var(--red);
+}
+
+/* AI Chat Styling */
+#ai-chat-scroll {
+ background-color: transparent;
+ border-radius: 8px;
+}
+
+#ai-chat-container {
+ background-color: transparent;
+}
+
+#user-message-bubble {
+ background-color: var(--cyan);
+ border-radius: 12px;
+ padding: 8px 12px;
+ margin-left: 40px;
+}
+
+/* AI Text Entry Styling */
+#ai-text-entry {
+ background-color: var(--surface-bright);
+ color: var(--foreground);
+ font-size: 1em;
+ min-width: 20px;
+ min-height: 40px;
+ border-radius: 8px;
+ padding: 8px;
+}
+
+.ai-text-scroll {
+ min-height: 40px;
+ padding: 0;
+}
+
+#ai-text-entry text {
+ background-color: transparent;
+ color: var(--foreground);
+ font-size: 1em;
+ border: none;
+ outline: none;
+}
+
+/* AI Gear Button Styling */
+#ai-model-button {
+ background-color: var(--surface-bright);
+ color: var(--foreground);
+ min-width: 20px;
+ min-height: 40px;
+ border-radius: 8px;
+ padding: 8px;
+}
+
+#ai-model-icon {
+ color: var(--foreground);
+ font-size: 2em;
+ font-weight: bold;
+}
+
+/* User Message Styling */
+.user-message-bubble {
+  background-color: var(--cyan);
+  border-radius: 12px;
+  padding: 8px 12px;
+  margin-left: 40px;
+  margin-right: 40px;
+}
+
+.user-message-text {
+ color: var(--background);
+ font-size: 1.1em;
+ background-color: transparent;
+ border: none;
+ outline: none;
+}
+
+.user-message-text text {
+ background-color: transparent;
+ border: none;
+ outline: none;
+ color: var(--background);
+ font-size: 1.1em;
+ font-weight: bold;
+}
+
+/* AI Message Styling */
+.ai-message-bubble {
+ background-color: var(--surface-bright);
+ border-radius: 12px;
+ padding: 8px 12px 8px 12px;
+ margin-left: 40px;
+ margin-right: 40px;
+}
+
+.ai-message-text {
+ color: var(--foreground);
+ font-size: 1.1em;
+ background-color: transparent;
+ border: none;
+ outline: none;
+}
+
+.ai-message-text text {
+ background-color: transparent;
+ color: var(--foreground);
+ font-size: 1.1em;
+ border: none;
+ outline: none;
+}
+
+/* Typing Indicator Styling */
+.typing-bubble {
+ background-color: var(--surface-bright);
+ border-radius: 12px;
+ padding: 8px 12px;
+ margin-right: 40px;
+}
+
+/* AI Model Options Box Styling */
+#ai-model-options-box {
+ background-color: var(--surface);
+ border: 1px solid var(--outline);
+ border-radius: 8px;
+ padding: 8px;
+}
+
+/* AI Model Option Button Styling */
+#ai-model-option-button {
+ background-color: var(--surface-bright);
+ font-size: 0.9em;
+ color: var(--cyan);
+ border: none;
+ outline: none;
+ box-shadow: none;
+}
+
+.ai-model-button-selected {
+ border: none;
+ outline: none;
+ border-radius: 4px;
+ padding: 6px 12px;
+ background-color: var(--cyan);
+ font-size: 1.1em;
+ font-weight: bold;
+ color: #000000;
+}
+
+#ai-model-option-button.ai-model-button-selected {
+ border: none;
+ outline: none;
+ border-radius: 4px;
+ padding: 6px 12px;
+ background-color: var(--cyan);
+ font-size: 0.9em;
+ color: #000000;
+}
+
+.ai-model-button-selected label {
+ color: #000000;
+}
+
+#ai-model-option-button.ai-model-button-selected label {
+ color: #000000;
+}
+
+.ai-model-button-unselected {
+ border: none;
+ border-radius: 4px;
+ padding: 6px 12px;
+ background-color: var(--surface);
+ font-size: 0.9em;
+ color: var(--foreground);
+}
+
+/* AI Entry Container Styling */
+#ai-entry-container {
+ background-color: var(--surface-bright);
+ border-radius: 8px;
+ padding: 12px 16px;
+}
+
+.typing-dot {
+ color: var(--outline);
+ font-size: 1.2em;
+ background-color: transparent;
+ border: none;
+ outline: none;
+ transition: color 0.2s ease-in-out;
+}
+
+.typing-dot-active {
+ color: var(--foreground);
+ font-size: 1.2em;
+ font-weight: bold;
+}
+
+.typing-dot-inactive {
+ color: var(--outline);
+ font-size: 1.2em;
+ opacity: 0.5;
+}
\ No newline at end of file