diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml
index c530d91..3b1fd11 100644
--- a/src-tauri/Cargo.toml
+++ b/src-tauri/Cargo.toml
@@ -30,3 +30,5 @@ tauri-plugin-fs = "2.0.0-rc"
 tauri-plugin-log = "2.0.0-rc"
 tauri-plugin-shell = "2"
 dirs = "5.0"
+chrono = { version = "0.4", features = ["serde"] }
+serde = { version = "1.0", features = ["derive"] }
\ No newline at end of file
diff --git a/src-tauri/src/commands/ai.rs b/src-tauri/src/commands/ai.rs
index 95c0c00..4bd0504 100644
--- a/src-tauri/src/commands/ai.rs
+++ b/src-tauri/src/commands/ai.rs
@@ -1,5 +1,94 @@
+// src-tauri/src/commands/ai.rs
+use serde::{Deserialize, Serialize};
+use std::sync::{Arc, Mutex};
+use tauri::{AppHandle, Manager, State};
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ConversationMessage {
+    pub role: String, // "user", "assistant", or "system"
+    pub content: String,
+    pub timestamp: u64,
+}
+
+#[derive(Debug, Default)]
+pub struct ConversationHistory {
+    messages: Arc<Mutex<Vec<ConversationMessage>>>,
+    max_messages: usize,
+}
+
+impl ConversationHistory {
+    pub fn new(max_messages: usize) -> Self {
+        Self {
+            messages: Arc::new(Mutex::new(Vec::new())),
+            max_messages,
+        }
+    }
+
+    pub fn add_message(&self, role: String, content: String) {
+        let mut messages = self.messages.lock().unwrap();
+        let timestamp = std::time::SystemTime::now()
+            .duration_since(std::time::UNIX_EPOCH)
+            .unwrap()
+            .as_secs();
+
+        messages.push(ConversationMessage {
+            role,
+            content,
+            timestamp,
+        });
+
+        // Keep only the last N messages to avoid token limit issues
+        if messages.len() > self.max_messages {
+            messages.drain(0..messages.len() - self.max_messages);
+        }
+    }
+
+    pub fn get_messages(&self) -> Vec<ConversationMessage> {
+        self.messages.lock().unwrap().clone()
+    }
+
+    pub fn clear(&self) {
+        self.messages.lock().unwrap().clear();
+    }
+
+    pub fn get_context_summary(&self) -> String {
+        let messages = self.messages.lock().unwrap();
+        if messages.is_empty() {
+            return String::new();
+        }
+
+        let mut context = String::new();
+        context.push_str("Previous conversation context:\n");
+
+        // Include last few exchanges for context
+        let recent_messages: Vec<&ConversationMessage> = messages
+            .iter()
+            .rev()
+            .take(6) // Last 3 user-assistant pairs
+            .collect::<Vec<_>>()
+            .into_iter()
+            .rev()
+            .collect();
+
+        for msg in recent_messages {
+            match msg.role.as_str() {
+                "user" => context.push_str(&format!("User: {}\n", msg.content)),
+                "assistant" => context.push_str(&format!("Assistant: {}\n", msg.content)),
+                _ => {}
+            }
+        }
+
+        context.push_str("---\n");
+        context
+    }
+}
+
 #[tauri::command]
-pub async fn ask_llm(prompt: String, app_handle: tauri::AppHandle) -> Result<String, String> {
+pub async fn ask_llm_with_history(
+    prompt: String,
+    app_handle: AppHandle,
+    conversation_state: State<'_, ConversationHistory>
+) -> Result<String, String> {
     // First try to get API key from storage
     let key = match crate::commands::api_key::get_api_key(app_handle).await {
         Ok(stored_key) if !stored_key.is_empty() => stored_key,
@@ -18,6 +107,135 @@ pub async fn ask_llm(prompt: String, app_handle: tauri::AppHandle) -> Result<String, String> {
…
+    for msg in … .collect::<Vec<_>>().into_iter().rev() {
+        if msg.role == "user" || msg.role == "assistant" {
+            messages.push(serde_json::json!({
+                "role": msg.role,
+                "content": msg.content
+            }));
+        }
+    }
+
+    // Add current user message
+    messages.push(serde_json::json!({
+        "role": "user",
+        "content": prompt
+    }));
+
+    let response = client
+        .post("https://api.openai.com/v1/chat/completions")
+        .bearer_auth(key)
+        .json(&serde_json::json!({
+            "model": "gpt-4o-mini",
+            "messages": messages,
+            "max_tokens": 1000,
"temperature": 0.7 + })) + .send() + .await + .map_err(|e| format!("Request failed: {}", e))?; + + let json: serde_json::Value = response.json().await.map_err(|e| e.to_string())?; + + let assistant_response = json["choices"][0]["message"]["content"] + .as_str() + .unwrap_or("No response") + .to_string(); + + // Add both user prompt and assistant response to conversation history + conversation_state.add_message("user".to_string(), prompt); + conversation_state.add_message("assistant".to_string(), assistant_response.clone()); + + Ok(assistant_response) +} + +#[tauri::command] +pub async fn clear_conversation_history( + conversation_state: State<'_, ConversationHistory> +) -> Result<(), String> { + conversation_state.clear(); + Ok(()) +} + +#[tauri::command] +pub async fn get_conversation_summary( + conversation_state: State<'_, ConversationHistory> +) -> Result { + let messages = conversation_state.get_messages(); + if messages.is_empty() { + return Ok("No conversation history available.".to_string()); + } + + let mut summary = String::new(); + summary.push_str(&format!("Conversation History ({} messages):\n\n", messages.len())); + + for (i, msg) in messages.iter().enumerate() { + let timestamp = chrono::DateTime::from_timestamp(msg.timestamp as i64, 0) + .map(|dt| dt.format("%H:%M:%S").to_string()) + .unwrap_or_else(|| "Unknown".to_string()); + + match msg.role.as_str() { + "user" => summary.push_str(&format!("[{}] User: {}\n", timestamp, msg.content)), + "assistant" => summary.push_str(&format!("[{}] AI: {}\n\n", timestamp, msg.content)), + _ => {} + } + } + + Ok(summary) +} + +// Keep the original function for backwards compatibility +#[tauri::command] +pub async fn ask_llm(prompt: String, app_handle: tauri::AppHandle) -> Result { + // This is a simplified version without history - you might want to phase this out + let key = match crate::commands::api_key::get_api_key(app_handle).await { + Ok(stored_key) if !stored_key.is_empty() => stored_key, + _ => return Err("API key not configured. Please set your OpenAI API key with the 'setapikey YOUR_API_KEY' command.".to_string()) + }; + + let client = reqwest::Client::new(); + + let os_name = if cfg!(target_os = "windows") { + "Windows" + } else if cfg!(target_os = "macos") { + "macOS" + } else if cfg!(target_os = "linux") { + "Linux" + } else { + "Unknown OS" + }; + let system_prompt = format!( "You are AI running in terminal called Term, a lightweight terminal assistant. You are running on {os_name}. \ Your job is to help users with their terminal commands and queries. 
@@ -49,4 +267,4 @@ pub async fn ask_llm(prompt: String, app_handle: tauri::AppHandle) -> Result<String, String>
…
+            let cache = app.state::<SudoCache>();
+            let cache_clone = cache.inner().clone();
+
+            // Sudo cache cleanup thread
+            std::thread::spawn(move || {
+                loop {
+                    std::thread::sleep(std::time::Duration::from_secs(300));
+                    cache_clone.clear_expired(15);
+                }
+            });
+
+            Ok(())
+        })
         .invoke_handler(tauri::generate_handler![
             commands::shell::run_shell,
-            commands::shell::run_sudo_command,
             commands::shell::get_current_dir,
             commands::shell::list_directory_contents,
             commands::shell::change_directory,
             commands::ai::ask_llm,
+            commands::ai::ask_llm_with_history,
+            commands::ai::clear_conversation_history,
+            commands::ai::get_conversation_summary,
             commands::api_key::save_api_key,
             commands::api_key::get_api_key,
             commands::api_key::validate_api_key,
-            commands::api_key::delete_api_key
+            commands::api_key::delete_api_key,
+            fast_sudo,
+            clear_sudo_cache,
+            direct_privilege_escalation,
+            check_sudo_privileges
         ])
         .run(tauri::generate_context!())
         .expect("error while running tauri application");
-}
+}
\ No newline at end of file
diff --git a/src/components/Terminal/CommandProcessor.tsx b/src/components/Terminal/CommandProcessor.tsx
index 2d3551e..72c51ce 100644
--- a/src/components/Terminal/CommandProcessor.tsx
+++ b/src/components/Terminal/CommandProcessor.tsx
@@ -28,6 +28,43 @@ const useCommandProcessor = ({
       return;
     }
 
+    // Handle conversation history commands
+    if (input.trim().toLowerCase() === 'clearhistory' || input.trim().toLowerCase() === 'clear-history') {
+      try {
+        await invoke('clear_conversation_history');
+        appendHistory({
+          type: 'output',
+          content: 'Conversation history cleared successfully.'
+        });
+      } catch (error) {
+        appendHistory({
+          type: 'error',
+          content: `Failed to clear conversation history: ${error}`
+        });
+      }
+      setInput('');
+      setIsProcessing(false);
+      return;
+    }
+
+    if (input.trim().toLowerCase() === 'showhistory' || input.trim().toLowerCase() === 'show-history') {
+      try {
+        const summary = await invoke<string>('get_conversation_summary');
+        appendHistory({
+          type: 'output',
+          content: summary
+        });
+      } catch (error) {
+        appendHistory({
+          type: 'error',
+          content: `Failed to get conversation history: ${error}`
+        });
+      }
+      setInput('');
+      setIsProcessing(false);
+      return;
+    }
+
     // Handle exit command
     if (input.trim().toLowerCase() === 'exit') {
       try {
@@ -97,7 +134,6 @@ const useCommandProcessor = ({
         });
       }
     } catch (error) {
-      // The error message now contains detailed information from Rust
       appendHistory({
         content: `API Error: ${error}`,
         type: 'error'
@@ -163,7 +199,8 @@ const useCommandProcessor = ({
       if (commandNotFoundRegex.test(result) || notRecognizedRegex.test(result)) {
         appendHistory({ type: 'error', content: result.trim() });
-        const fallback = await invoke<string>("ask_llm", { prompt: input });
+        // Use the new AI function with history context
+        const fallback = await invoke<string>("ask_llm_with_history", { prompt: input });
         appendHistory({ type: 'llm', content: fallback.trim() });
       } else {
         appendHistory({ type: 'output', content: result.trim() });
@@ -174,13 +211,15 @@ const useCommandProcessor = ({
         appendHistory({ type: 'output', content: result.trim() });
       } else if (parsed.type === "file_summary") {
         const fileContent = await invoke<string>("read_file", { path: parsed.filename });
-        const summary = await invoke<string>("ask_llm", {
+        // Use the new AI function with history context
+        const summary = await invoke<string>("ask_llm_with_history", {
           prompt: `Summarize the following file:\n\n${fileContent}`,
         });
         appendHistory({ type: 'llm', content: summary.trim() });
} else if (parsed.type === "llm_query") { try { - const result = await invoke("ask_llm", { prompt: parsed.prompt }); + // Use the new AI function with history context + const result = await invoke("ask_llm_with_history", { prompt: parsed.prompt }); appendHistory({ type: 'llm', content: result.trim() }); } catch (error: any) { // Handle API key not configured errors @@ -199,7 +238,8 @@ const useCommandProcessor = ({ } } else { try { - const fallback = await invoke("ask_llm", { prompt: input }); + // Use the new AI function with history context + const fallback = await invoke("ask_llm_with_history", { prompt: input }); appendHistory({ type: 'llm', content: fallback.trim() }); } catch (error: any) { // Handle API key not configured errors @@ -273,4 +313,4 @@ const useCommandProcessor = ({ }; }; -export default useCommandProcessor; +export default useCommandProcessor; \ No newline at end of file diff --git a/src/components/Terminal/WelcomeMessage.tsx b/src/components/Terminal/WelcomeMessage.tsx index f7fe16e..a968dea 100644 --- a/src/components/Terminal/WelcomeMessage.tsx +++ b/src/components/Terminal/WelcomeMessage.tsx @@ -4,11 +4,18 @@ const WelcomeMessage: React.FC = () => { return (
       <div>Term</div>
-      <div>AI-powered Terminal Assistant</div>
-      <div>Type a command or ask a question to get started.</div>
-      <div>Use Up/Down arrows to navigate command history.</div>
+      <div>AI-powered Terminal Assistant with Memory</div>
+      <div>Type a command or ask a question to get started.</div>
+
+      <div>
+        <div>💡 New: AI remembers conversation context within this session</div>
+        <div>📚 Use showhistory to view conversation history</div>
+        <div>🧹 Use clearhistory to clear AI memory</div>
+        <div>⬆️⬇️ Use Up/Down arrows to navigate command history</div>
+        <div>🔑 Use setapikey YOUR_KEY to configure OpenAI API</div>
+      </div>
     </div>
   );
 };
 
-export default WelcomeMessage;
+export default WelcomeMessage;
\ No newline at end of file
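
For the new `State<'_, ConversationHistory>` parameters to resolve when the frontend calls `ask_llm_with_history`, `clear_conversation_history`, or `get_conversation_summary`, the history struct has to be registered as Tauri managed state on the builder. A minimal wiring sketch, assuming the registration lives in src-tauri/src/main.rs and an assumed cap of 50 messages; the plugins, sudo commands, and setup hook from the rest of the diff are omitted here:

// Sketch only: register the conversation history as managed state so the
// State<'_, ConversationHistory> extractors in commands/ai.rs can resolve.
// The 50-message cap and the exact builder layout are assumptions.
mod commands;

fn main() {
    tauri::Builder::default()
        .manage(commands::ai::ConversationHistory::new(50))
        .invoke_handler(tauri::generate_handler![
            commands::ai::ask_llm,
            commands::ai::ask_llm_with_history,
            commands::ai::clear_conversation_history,
            commands::ai::get_conversation_summary
        ])
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}

The frontend never passes this state explicitly: `invoke('ask_llm_with_history', { prompt })` only supplies `prompt`, and Tauri injects the managed `ConversationHistory` into the command.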