-
-
-
-
-
-
-
-
-
- {{ t('chat.messages.thought_for_seconds', Math.ceil(message.reasoningTime / 1000), { named: { second: Math.ceil(message.reasoningTime / 1000) } }) }}
-
-
-
+
+
+
+
+
+
+
+
+
+ {{ t('chat.messages.thought_for_seconds', Math.ceil(message.reasoningTime / 1000), { named: { second: Math.ceil(message.reasoningTime / 1000) } }) }}
+
+
+
+
+
+
+
+ {{ t('chat.messages.thinking') }}
+
+
+
+
-
-
-
-
-
-
-
+
+
+
+
+ {{ t('chat.messages.thinking') }}
+
+
-
+
-
-
-
-
- {{ t('chat.messages.thinking') }}
-
-
+
+
+
+
emit('retry', modelId, endpointType)"
+ />
+
+ {{ t('chat.messages.used_model', { model: message.model }) }}
+
+
diff --git a/entrypoints/sidepanel/components/Chat/Messages/MessageRetrySelector.vue b/entrypoints/sidepanel/components/Chat/Messages/MessageRetrySelector.vue
new file mode 100644
index 00000000..ce6ac314
--- /dev/null
+++ b/entrypoints/sidepanel/components/Chat/Messages/MessageRetrySelector.vue
@@ -0,0 +1,132 @@
+
+ e.stopPropagation()"
+ >
+
+
+
+
+
+
+
+
+
+
+ {{ option.label }}
+
+
+
+
+ {{ option.label }}
+
+
+
+
+
+
+
+
diff --git a/entrypoints/sidepanel/components/Chat/Messages/User.vue b/entrypoints/sidepanel/components/Chat/Messages/User.vue
new file mode 100644
index 00000000..75d9b9dd
--- /dev/null
+++ b/entrypoints/sidepanel/components/Chat/Messages/User.vue
@@ -0,0 +1,190 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/entrypoints/sidepanel/components/Chat/ThinkingEffortSelector.vue b/entrypoints/sidepanel/components/Chat/ThinkingEffortSelector.vue
new file mode 100644
index 00000000..d38203ec
--- /dev/null
+++ b/entrypoints/sidepanel/components/Chat/ThinkingEffortSelector.vue
@@ -0,0 +1,84 @@
+
+
+
+
+
+
+ {{ option?.label || t('chat.thinking_mode.label') }}
+
+
+
+
+
+
+
+
diff --git a/entrypoints/sidepanel/components/Chat/ThinkingModeSwitch.vue b/entrypoints/sidepanel/components/Chat/ThinkingModeSwitch.vue
index 4c0a923f..f43a5ffa 100644
--- a/entrypoints/sidepanel/components/Chat/ThinkingModeSwitch.vue
+++ b/entrypoints/sidepanel/components/Chat/ThinkingModeSwitch.vue
@@ -29,6 +29,7 @@
import { computed, onBeforeUnmount, onMounted, toRefs, watch } from 'vue'
import IconThinking from '@/assets/icons/thinking-capability.svg?component'
+import { mergeReasoningPreference, normalizeReasoningPreference } from '@/types/reasoning'
import { useI18n } from '@/utils/i18n'
import { isToggleableThinkingModel } from '@/utils/llm/thinking-models'
import { useLLMBackendStatusStore } from '@/utils/pinia-store/store'
@@ -64,7 +65,8 @@ const isThinkingEnabled = computed({
get() {
// If chat has a specific setting, use it; otherwise use global setting
const chatSetting = chat.historyManager.chatHistory.value.reasoningEnabled
- return chatSetting !== undefined ? chatSetting : userConfig.llm.reasoning.get()
+ const source = chatSetting !== undefined ? chatSetting : userConfig.llm.reasoning.get()
+ return normalizeReasoningPreference(source).enabled
},
set(value: boolean) {
setThinkingEnabled(value)
@@ -101,10 +103,12 @@ const toggleThinking = () => {
const setThinkingEnabled = (value: boolean, skipStoreUpdate = false) => {
// Update global setting
- userConfig.llm.reasoning.set(value)
+ const updatedGlobal = mergeReasoningPreference(userConfig.llm.reasoning.get(), { enabled: value })
+ userConfig.llm.reasoning.set(updatedGlobal)
if (skipStoreUpdate) return
// Store chat-specific setting in chat history
- chat.historyManager.chatHistory.value.reasoningEnabled = value
+ const chatPreference = chat.historyManager.chatHistory.value.reasoningEnabled ?? updatedGlobal
+ chat.historyManager.chatHistory.value.reasoningEnabled = mergeReasoningPreference(chatPreference, { enabled: value })
}
// Handle thinking state based on model capabilities
diff --git a/entrypoints/sidepanel/components/Chat/index.vue b/entrypoints/sidepanel/components/Chat/index.vue
index 47cadcdf..397e53ab 100644
--- a/entrypoints/sidepanel/components/Chat/index.vue
+++ b/entrypoints/sidepanel/components/Chat/index.vue
@@ -19,19 +19,21 @@
:class="[item.role === 'user' ? 'self-end' : 'self-start', { 'w-full': ['agent-task-group', 'assistant', 'agent'].includes(item.role) ,'mt-2': ['agent-task-group', 'assistant', 'agent'].includes(item.role) }]"
class="max-w-full relative flex"
>
-
+ :message="item"
+ :isEditing="editingMessageId === item.id"
+ :disabled="chat.isAnswering() || editingInFlight"
+ @startEdit="onStartEdit(item.id)"
+ @cancelEdit="() => onCancelEdit(item.id)"
+ @submitEdit="(value) => onSubmitEdit(item.id, value)"
+ />
onRetryAssistantMessage(item.id, modelId, endpointType)"
/>
@@ -97,28 +99,35 @@
-
+
+
-
-
-
@@ -140,10 +149,12 @@ import ScrollContainer from '@/components/ScrollContainer.vue'
import Button from '@/components/ui/Button.vue'
import { FileGetter } from '@/utils/file'
import { useI18n } from '@/utils/i18n'
+import { isGptOssModel } from '@/utils/llm/reasoning'
+import logger from '@/utils/logger'
import { setSidepanelStatus } from '@/utils/sidepanel-status'
+import { getUserConfig } from '@/utils/user-config'
import { classNames } from '@/utils/vue/utils'
-import MarkdownViewer from '../../../../components/MarkdownViewer.vue'
import { showSettings } from '../../../../utils/settings'
import {
ActionEvent,
@@ -151,11 +162,14 @@ import {
initChatSideEffects,
} from '../../utils/chat/index'
import AttachmentSelector from '../AttachmentSelector.vue'
+import CameraButton from './CameraButton.vue'
import MessageAction from './Messages/Action.vue'
import MessageTaskGroup from './Messages/AgentTaskGroup.vue'
import MessageAssistant from './Messages/Assistant.vue'
import MessageTask from './Messages/Task.vue'
+import MessageUser from './Messages/User.vue'
import OnlineSearchSwitch from './OnlineSearchSwitch.vue'
+import ThinkingEffortSelector from './ThinkingEffortSelector.vue'
import ThinkingModeSwitch from './ThinkingModeSwitch.vue'
const inputContainerRef = ref()
@@ -167,16 +181,51 @@ const userInput = ref('')
const isComposing = ref(false)
const attachmentSelectorRef = ref>()
const scrollContainerRef = ref>()
+const editingMessageId = ref(null)
+const editingInFlight = ref(false)
+const log = logger.child('chat-sidepanel')
defineExpose({
attachmentSelectorRef,
})
+const userConfig = await getUserConfig()
+const currentModel = userConfig.llm.model.toRef()
+const showReasoningEffortSelector = computed(() => isGptOssModel(currentModel.value))
+
const chat = await Chat.getInstance()
const contextAttachmentStorage = chat.contextAttachmentStorage
initChatSideEffects()
+// Track the final assistant/agent message of each reply block (between user turns) for triggering the retry action
+const assistantActionMessageIds = computed(() => {
+ const ids = new Set()
+ const history = chat.historyManager.history.value
+ let lastAssistantId: string | null = null
+
+ for (const item of history) {
+ if (item.role === 'assistant' || item.role === 'agent') {
+ // Ignore scoped messages (welcome message, quick actions); only regular assistant/agent replies count
+ if (!item.id.includes('welcomeMessage') && !item.id.includes('quickActions')) {
+ lastAssistantId = item.id
+ }
+ }
+ else if (item.role === 'user') {
+ if (lastAssistantId) {
+ ids.add(lastAssistantId)
+ lastAssistantId = null
+ }
+ }
+ }
+
+ if (lastAssistantId) {
+ ids.add(lastAssistantId)
+ }
+
+ return ids
+})
+
const actionEventHandler = Chat.createActionEventHandler((actionEvent) => {
if (actionEvent.action === 'customInput') {
chat.ask((actionEvent as ActionEvent<'customInput'>).data.prompt)
@@ -194,6 +243,86 @@ const allowAsk = computed(() => {
return !chat.isAnswering() && userInput.value.trim().length > 0
})
+const onStartEdit = (messageId: string) => {
+ if (chat.isAnswering() || editingInFlight.value) return
+ editingMessageId.value = messageId
+}
+
+const onCancelEdit = (messageId: string) => {
+ if (editingMessageId.value === messageId) {
+ editingMessageId.value = null
+ }
+}
+
+const onSubmitEdit = async (messageId: string, value: string) => {
+ if (editingInFlight.value) return
+ editingMessageId.value = null
+ editingInFlight.value = true
+ try {
+ await chat.editUserMessage(messageId, value)
+ }
+ catch (error) {
+ log.error('Failed to re-send edited message', error)
+ }
+ finally {
+ editingInFlight.value = false
+ }
+}
+
+const onRetryAssistantMessage = async (assistantMessageId: string, modelId: string, endpointType: string) => {
+ if (chat.isAnswering() || editingInFlight.value) return
+
+ const messageIndex = chat.historyManager.history.value.findIndex((item) => item.id === assistantMessageId)
+ if (messageIndex === -1) {
+ log.error('Assistant message not found', assistantMessageId)
+ return
+ }
+
+ // Find the last user message before this assistant message
+ let userMessageIndex = -1
+ for (let i = messageIndex - 1; i >= 0; i--) {
+ if (chat.historyManager.history.value[i].role === 'user') {
+ userMessageIndex = i
+ break
+ }
+ }
+
+ if (userMessageIndex === -1) {
+ log.error('No user message found before assistant message')
+ return
+ }
+
+ const userMessage = chat.historyManager.history.value[userMessageIndex]
+ if (userMessage.role !== 'user') return
+
+ // Stop any ongoing chat
+ chat.stop()
+ editingInFlight.value = true
+
+ try {
+ // Remove all messages after and including the assistant message
+ chat.historyManager.history.value.splice(messageIndex)
+
+ // Set temporary model override
+ chat.historyManager.temporaryModelOverride = { model: modelId, endpointType }
+
+ try {
+ // Re-generate the response with the new model
+ await chat.editUserMessage(userMessage.id, userMessage.displayContent ?? userMessage.content)
+ }
+ finally {
+ // Clear temporary model override
+ chat.historyManager.temporaryModelOverride = null
+ }
+ }
+ catch (error) {
+ log.error('Failed to retry assistant message', error)
+ }
+ finally {
+ editingInFlight.value = false
+ }
+}
+
const cleanUp = chat.historyManager.onMessageAdded(() => {
scrollContainerRef.value?.snapToBottom()
})
diff --git a/entrypoints/sidepanel/components/Onboarding/index.vue b/entrypoints/sidepanel/components/Onboarding/index.vue
index a32f3478..32cf3016 100644
--- a/entrypoints/sidepanel/components/Onboarding/index.vue
+++ b/entrypoints/sidepanel/components/Onboarding/index.vue
@@ -96,13 +96,13 @@ const onBackendInstalled = async (backend: 'ollama' | 'lm-studio') => {
panel.value = 'model-downloader'
}
else {
- close()
+ await close()
}
}
const onOpenSettings = async () => {
endpointType.value = 'ollama'
- close()
+ await close()
showSettings()
}
@@ -110,17 +110,17 @@ const onModelDownloaderFinished = async () => {
endpointType.value = 'ollama'
await llmBackendStatusStore.updateOllamaConnectionStatus()
await llmBackendStatusStore.updateOllamaModelList()
- close()
+ await close()
}
-const onWebLLMInstalled = () => {
+const onWebLLMInstalled = async () => {
endpointType.value = 'web-llm'
- close()
+ await close()
}
-const setWelcomeChatMessage = () => {
+const setWelcomeChatMessage = async () => {
// FYI: this message will also be modified by side-effects.ts for locale changes
- const msg = chat.historyManager.appendAssistantMessage(welcomeMessage(t))
+ const msg = await chat.historyManager.appendAssistantMessage(welcomeMessage(t))
msg.style = {
backgroundColor: 'transparent',
}
@@ -131,8 +131,8 @@ const setWelcomeChatMessage = () => {
chat.historyManager.insertMessageAt(msg, 0)
}
-const close = () => {
- setWelcomeChatMessage()
+const close = async () => {
+ await setWelcomeChatMessage()
onboardingVersion.value = TARGET_ONBOARDING_VERSION
}
diff --git a/entrypoints/sidepanel/utils/agent/index.ts b/entrypoints/sidepanel/utils/agent/index.ts
index 9376d657..13481326 100644
--- a/entrypoints/sidepanel/utils/agent/index.ts
+++ b/entrypoints/sidepanel/utils/agent/index.ts
@@ -77,6 +77,7 @@ interface AgentOptions {
agentStorage: AgentStorage
tools: AgentToolCall
maxIterations?: number
+ temporaryModelOverride?: { model: string, endpointType: string } | null
}
type AgentStatus = 'idle' | 'running' | 'error'
@@ -87,6 +88,7 @@ export class Agent {
tools: AgentOptions['tools']
agentStorage: AgentStorage
maxIterations: number
+ temporaryModelOverride: { model: string, endpointType: string } | null
status: Ref = ref('idle')
log = logger.child('Agent')
constructor(public options: AgentOptions) {
@@ -94,6 +96,7 @@ export class Agent {
this.tools = options.tools
this.agentStorage = options.agentStorage
this.maxIterations = options.maxIterations || 6
+ this.temporaryModelOverride = options.temporaryModelOverride || null
}
createAbortController() {
@@ -191,8 +194,8 @@ export class Agent {
const getAgentMessage = () => {
return agentMessage
}
- const getOrAddAgentMessage = () => {
- if (!agentMessage) agentMessage = this.historyManager.appendAgentMessage()
+ const getOrAddAgentMessage = async () => {
+ if (!agentMessage) agentMessage = await this.historyManager.appendAgentMessage()
return agentMessage
}
const deleteAgentMessageIfEmpty = (includeReasoning = true) => {
@@ -204,8 +207,8 @@ export class Agent {
}
}
// make this message available to the next user task
- const convertToAssistantMessage = (): AssistantMessageV1 => {
- const agentMessage = getOrAddAgentMessage()
+ const convertToAssistantMessage = async (): Promise => {
+ const agentMessage = await getOrAddAgentMessage()
agentMessage.done = true
;(agentMessage as unknown as AssistantMessageV1).role = 'assistant'
return agentMessage as unknown as AssistantMessageV1
@@ -302,10 +305,11 @@ export class Agent {
if (shouldForceAnswer) thisLoopMessages.push({ role: 'user', content: AGENT_FORCE_FINAL_ANSWER })
let taskMessageModifier = this.makeTaskMessageGroupProxy(abortController.signal)
const agentMessageManager = this.makeTempAgentMessageManager()
- const agentMessage = agentMessageManager.getOrAddAgentMessage()
+ const agentMessage = await agentMessageManager.getOrAddAgentMessage()
const response = streamTextInBackground({
abortSignal: abortController.signal,
messages: this.injectImagesToLastMessage(thisLoopMessages, loopImages),
+ temporaryModelOverride: this.temporaryModelOverride,
})
let hasError = false
let text = ''
@@ -384,7 +388,7 @@ export class Agent {
this.log.debug('Agent iteration end', iteration, { currentLoopToolCalls, text, normalizedText, hasError })
if ((currentLoopToolCalls.length === 0 && normalizedText && !hasError) || shouldForceAnswer) {
this.log.debug('No tool call, ending iteration')
- const lastMsg = agentMessageManager.convertToAssistantMessage()
+ const lastMsg = await agentMessageManager.convertToAssistantMessage()
eventBus.emit('onAgentFinished')
return lastMsg
}
@@ -412,7 +416,7 @@ export class Agent {
}
else if (error instanceof ModelNotFoundError) {
const { t } = await useGlobalI18n()
- const errorMsg = agentMessageManager.convertToAssistantMessage()
+ const errorMsg = await agentMessageManager.convertToAssistantMessage()
errorMsg.isError = true
errorMsg.content = t('errors.model_not_found', { endpointType: error.endpointType === 'ollama' ? 'Ollama' : 'LM Studio' })
// unresolvable error, break the loop
@@ -420,14 +424,14 @@ export class Agent {
}
else if (error instanceof ModelRequestError) {
const { t } = await useGlobalI18n()
- const errorMsg = agentMessageManager.convertToAssistantMessage()
+ const errorMsg = await agentMessageManager.convertToAssistantMessage()
errorMsg.isError = true
errorMsg.content = t('errors.model_request_error', { endpointType: error.endpointType === 'ollama' ? 'Ollama' : 'LM Studio' })
return false
}
else if (error instanceof LMStudioLoadModelError) {
const { t } = await useGlobalI18n()
- const errorMsg = agentMessageManager.convertToAssistantMessage()
+ const errorMsg = await agentMessageManager.convertToAssistantMessage()
errorMsg.isError = true
const msg = error.message.split('\n')[0]
const trimmedMsg = msg.length > 300 ? msg.slice(0, 300) + '...' : msg
@@ -436,7 +440,7 @@ export class Agent {
}
else if (error instanceof AppError) {
const { t } = await useGlobalI18n()
- const errorMsg = agentMessageManager.convertToAssistantMessage()
+ const errorMsg = await agentMessageManager.convertToAssistantMessage()
errorMsg.isError = true
errorMsg.content = t('errors.unknown_error', { message: error.message })
return false
diff --git a/entrypoints/sidepanel/utils/agent/strorage.ts b/entrypoints/sidepanel/utils/agent/strorage.ts
index 6f916187..85218e3c 100644
--- a/entrypoints/sidepanel/utils/agent/strorage.ts
+++ b/entrypoints/sidepanel/utils/agent/strorage.ts
@@ -32,7 +32,7 @@ export class AgentStorage {
if (this.attachmentStorage.currentTab?.type === 'image') {
imageAttachments.push(this.attachmentStorage.currentTab)
}
- imageAttachments.push(...this.attachmentStorage.attachments.filter((attachment) => attachment.type === 'image'))
+ imageAttachments.push(...this.attachmentStorage.attachments.filter((attachment) => attachment.type === 'image' || attachment.type === 'captured-page'))
return imageAttachments
}
diff --git a/entrypoints/sidepanel/utils/chat/chat.ts b/entrypoints/sidepanel/utils/chat/chat.ts
index a52bdf65..599eea30 100644
--- a/entrypoints/sidepanel/utils/chat/chat.ts
+++ b/entrypoints/sidepanel/utils/chat/chat.ts
@@ -4,6 +4,7 @@ import { type Ref, ref, toRaw, toRef, watch } from 'vue'
import type { ActionMessageV1, ActionTypeV1, ActionV1, AgentMessageV1, AgentTaskGroupMessageV1, AgentTaskMessageV1, AssistantMessageV1, ChatHistoryV1, ChatList, HistoryItemV1, TaskMessageV1, UserMessageV1 } from '@/types/chat'
import { ContextAttachmentStorage } from '@/types/chat'
+import { normalizeReasoningPreference, StoredReasoningPreference } from '@/types/reasoning'
import { nonNullable } from '@/utils/array'
import { debounce } from '@/utils/debounce'
import { useGlobalI18n } from '@/utils/i18n'
@@ -29,6 +30,8 @@ const log = logger.child('chat')
export type MessageIdScope = 'quickActions' | 'welcomeMessage'
export class ReactiveHistoryManager extends EventEmitter {
+ public temporaryModelOverride: { model: string, endpointType: string } | null = null
+
constructor(public chatHistory: Ref) {
super()
this.cleanUp()
@@ -43,6 +46,10 @@ export class ReactiveHistoryManager extends EventEmitter {
if (item.role === 'task' && item.subTasks) {
this.cleanUp(item.subTasks)
}
+ if (item.role === 'agent-task-group' && item.tasks) {
+ // drop tasks that never finished from the group
+ item.tasks = item.tasks.filter((task) => task.done)
+ }
return item
})
history.length = 0
@@ -151,26 +158,38 @@ export class ReactiveHistoryManager extends EventEmitter {
return newMsg as UserMessageV1
}
- appendAssistantMessage(content: string = '') {
+ async appendAssistantMessage(content: string = '') {
+ const userConfig = await getUserConfig()
+ const model = this.temporaryModelOverride?.model ?? userConfig.llm.model.get()
+ const endpointType = this.temporaryModelOverride?.endpointType ?? userConfig.llm.endpointType.get()
+
this.history.value.push({
id: this.generateId(),
role: 'assistant',
content,
done: false,
timestamp: Date.now(),
+ model,
+ endpointType,
})
const newMsg = this.history.value[this.history.value.length - 1]
this.emit('messageAdded', newMsg)
return newMsg as AssistantMessageV1
}
- appendAgentMessage(content: string = '') {
+ async appendAgentMessage(content: string = '') {
+ const userConfig = await getUserConfig()
+ const model = this.temporaryModelOverride?.model ?? userConfig.llm.model.get()
+ const endpointType = this.temporaryModelOverride?.endpointType ?? userConfig.llm.endpointType.get()
+
this.history.value.push({
id: this.generateId(),
role: 'agent',
content,
done: false,
timestamp: Date.now(),
+ model,
+ endpointType,
})
const newMsg = this.history.value[this.history.value.length - 1]
this.emit('messageAdded', newMsg)
@@ -279,6 +298,17 @@ export class ReactiveHistoryManager extends EventEmitter {
cleanupLoadingMessages() {
this.cleanUp(this.chatHistory.value.history)
}
+
+ cleanupLoadingAttachments(contextAttachmentStorage: Ref) {
+ // Remove loading attachments from the attachments array
+ contextAttachmentStorage.value.attachments = contextAttachmentStorage.value.attachments.filter(
+ (attachment) => attachment.type !== 'loading',
+ )
+ // Remove loading attachment from currentTab if it exists
+ if (contextAttachmentStorage.value.currentTab?.type === 'loading') {
+ contextAttachmentStorage.value.currentTab = undefined
+ }
+ }
}
type ChatStatus = 'idle' | 'pending' | 'streaming'
@@ -316,7 +346,18 @@ export class Chat {
onlineSearchEnabled: true, // Default to true for new chats
})
- userConfig.llm.reasoning.set(chatHistory.value.reasoningEnabled ?? true)
+ const applyReasoningPreference = (preference?: StoredReasoningPreference) => {
+ if (preference === undefined) {
+ const normalized = normalizeReasoningPreference(userConfig.llm.reasoning.get())
+ userConfig.llm.reasoning.set(normalized)
+ return
+ }
+ const normalized = normalizeReasoningPreference(preference)
+ chatHistory.value.reasoningEnabled = normalized
+ userConfig.llm.reasoning.set(normalized)
+ }
+
+ applyReasoningPreference(chatHistory.value.reasoningEnabled)
userConfig.chat.onlineSearch.enable.set(chatHistory.value.onlineSearchEnabled ?? true)
const contextAttachments = ref(await s2bRpc.getContextAttachments(chatHistoryId.value) ?? { attachments: [], id: chatHistoryId.value, lastInteractedAt: Date.now() })
const chatList = ref([])
@@ -384,11 +425,13 @@ export class Chat {
Object.assign(chatHistory.value, newChatHistory)
Object.assign(contextAttachments.value, newContextAttachments)
- userConfig.llm.reasoning.set(newChatHistory.reasoningEnabled ?? true)
+ applyReasoningPreference(newChatHistory.reasoningEnabled)
userConfig.chat.onlineSearch.enable.set(newChatHistory.onlineSearchEnabled ?? true)
// Clean up any loading messages
instance.historyManager.cleanupLoadingMessages()
+ // Clean up any loading attachments
+ instance.historyManager.cleanupLoadingAttachments(contextAttachments)
// Update the chat list to reflect any changes
updateChatList()
@@ -536,6 +579,48 @@ export class Chat {
await this.runWithAgent(baseMessages)
}
+ async editUserMessage(messageId: string, question: string) {
+ const trimmedQuestion = question.trim()
+ if (!trimmedQuestion) throw new Error('Question cannot be empty.')
+
+ const messageIndex = this.historyManager.history.value.findIndex((item) => item.id === messageId)
+ if (messageIndex === -1) throw new Error(`Message with id ${messageId} not found.`)
+ const message = this.historyManager.history.value[messageIndex]
+ if (message.role !== 'user') throw new Error(`Message with id ${messageId} is not a user message.`)
+
+ this.stop()
+ using _s = this.statusScope('pending')
+ const abortController = new AbortController()
+ this.abortControllers.push(abortController)
+
+ this.historyManager.chatHistory.value.lastInteractedAt = Date.now()
+
+ if (messageIndex < this.historyManager.history.value.length - 1) {
+ this.historyManager.history.value.splice(messageIndex + 1)
+ }
+
+ const contextInfo = this.historyManager.chatHistory.value.contextUpdateInfo
+ if (contextInfo?.lastFullUpdateMessageId) {
+ const exists = this.historyManager.history.value.some((item) => item.id === contextInfo.lastFullUpdateMessageId)
+ if (!exists) {
+ contextInfo.lastFullUpdateMessageId = undefined
+ }
+ }
+
+ const environmentDetails = await this.generateEnvironmentDetails(message.id)
+ const prompt = await chatWithEnvironment(trimmedQuestion, environmentDetails)
+
+ message.displayContent = trimmedQuestion
+ message.content = prompt.user.extractText()
+ message.timestamp = Date.now()
+ message.done = true
+
+ const baseMessages = this.historyManager.getLLMMessages({ system: prompt.system, lastUser: prompt.user })
+ await this.prepareModel()
+ if (this.contextPDFs.length > 1) log.warn('Multiple PDFs are attached, only the first one will be used for the chat context.')
+ await this.runWithAgent(baseMessages)
+ }
+
private async runWithAgent(baseMessages: CoreMessage[]) {
const userConfig = await getUserConfig()
const maxIterations = userConfig.chat.agent.maxIterations.get()
@@ -544,6 +629,7 @@ export class Chat {
historyManager: this.historyManager,
agentStorage: new AgentStorage(this.contextAttachmentStorage.value),
maxIterations,
+ temporaryModelOverride: this.historyManager.temporaryModelOverride,
tools: {
search_online: { execute: executeSearchOnline },
fetch_page: { execute: executeFetchPage },
@@ -591,6 +677,8 @@ export class Chat {
abortController.abort()
})
this.abortControllers.length = 0
+ // Clean up any loading attachments when stopping
+ this.historyManager.cleanupLoadingAttachments(this.contextAttachmentStorage)
}
/**
diff --git a/entrypoints/sidepanel/utils/chat/tool-calls/index.ts b/entrypoints/sidepanel/utils/chat/tool-calls/index.ts
index 1d4ffc3c..a1fb422a 100644
--- a/entrypoints/sidepanel/utils/chat/tool-calls/index.ts
+++ b/entrypoints/sidepanel/utils/chat/tool-calls/index.ts
@@ -266,7 +266,8 @@ export const executeViewPdf: AgentToolCallExecute<'view_pdf'> = async ({ params,
export const executeViewImage: AgentToolCallExecute<'view_image'> = async ({ params, taskMessageModifier, agentStorage, loopImages }) => {
const { image_id: imageId } = params
const { t } = await useGlobalI18n()
- const image = agentStorage.getById('image', imageId)
+ const capturedPage = agentStorage.getById('captured-page', imageId)
+ const image = agentStorage.getById('image', imageId) ?? capturedPage
const taskMsg = taskMessageModifier.addTaskMessage({ summary: t('chat.tool_calls.view_image.analyzing', { title: imageId }) })
taskMsg.icon = 'taskReadFile'
if (!image) {
diff --git a/entrypoints/sidepanel/utils/llm.ts b/entrypoints/sidepanel/utils/llm.ts
index ed649ba1..a7e12338 100644
--- a/entrypoints/sidepanel/utils/llm.ts
+++ b/entrypoints/sidepanel/utils/llm.ts
@@ -7,7 +7,9 @@ import { browser } from 'wxt/browser'
import { readPortMessageIntoIterator, toAsyncIter } from '@/utils/async'
import { AbortError, fromError, ModelRequestTimeoutError } from '@/utils/error'
import { BackgroundAliveKeeper } from '@/utils/keepalive'
+import type { LLMEndpointType } from '@/utils/llm/models'
import { SchemaName } from '@/utils/llm/output-schema'
+import { getReasoningOptionForModel } from '@/utils/llm/reasoning'
import { WebLLMSupportedModel } from '@/utils/llm/web-llm'
import logger from '@/utils/logger'
import { s2bRpc } from '@/utils/rpc'
@@ -21,9 +23,22 @@ interface ExtraOptions {
timeout?: number
}
-export async function* streamTextInBackground(options: Parameters[0] & ExtraOptions) {
- const { abortSignal, timeout = DEFAULT_PENDING_TIMEOUT, ...restOptions } = options
- const { portName } = await s2bRpc.streamText(restOptions)
+export async function* streamTextInBackground(options: Parameters[0] & ExtraOptions & { temporaryModelOverride?: { model: string, endpointType: string } | null }) {
+ const { abortSignal, timeout = DEFAULT_PENDING_TIMEOUT, temporaryModelOverride, ...restOptions } = options
+ const userConfig = await getUserConfig()
+ const modelId = temporaryModelOverride?.model ?? userConfig.llm.model.get()
+ const endpointType = (temporaryModelOverride?.endpointType as LLMEndpointType | undefined) ?? userConfig.llm.endpointType.get()
+ const reasoningPreference = userConfig.llm.reasoning.get()
+ const computedReasoning = restOptions.autoThinking
+ ? restOptions.reasoning
+ : (restOptions.reasoning ?? getReasoningOptionForModel(reasoningPreference, modelId))
+ const requestOptions = {
+ ...restOptions,
+ ...(computedReasoning !== undefined ? { reasoning: computedReasoning } : {}),
+ modelId,
+ endpointType,
+ }
+ const { portName } = await s2bRpc.streamText(requestOptions)
const aliveKeeper = new BackgroundAliveKeeper()
const port = browser.runtime.connect({ name: portName })
abortSignal?.addEventListener('abort', () => {
@@ -36,7 +51,17 @@ export async function* streamTextInBackground(options: Parameters[0] & ExtraOptions) {
const { abortSignal, timeout = DEFAULT_PENDING_TIMEOUT, ...restOptions } = options
- const { portName } = await s2bRpc.streamObjectFromSchema(restOptions)
+ const userConfig = await getUserConfig()
+ const modelId = userConfig.llm.model.get()
+ const reasoningPreference = userConfig.llm.reasoning.get()
+ const computedReasoning = restOptions.autoThinking
+ ? restOptions.reasoning
+ : (restOptions.reasoning ?? getReasoningOptionForModel(reasoningPreference, modelId))
+ const requestOptions = {
+ ...restOptions,
+ ...(computedReasoning !== undefined ? { reasoning: computedReasoning } : {}),
+ }
+ const { portName } = await s2bRpc.streamObjectFromSchema(requestOptions)
const aliveKeeper = new BackgroundAliveKeeper()
const port = browser.runtime.connect({ name: portName })
port.onDisconnect.addListener(() => aliveKeeper.dispose())
@@ -51,6 +76,16 @@ export async function* streamObjectInBackground(options: Parameters(options: Parameters>[0] & ExtraOptions) {
const { promise: abortPromise, reject } = Promise.withResolvers>>>()
const { abortSignal, timeout = DEFAULT_PENDING_TIMEOUT, ...restOptions } = options
+ const userConfig = await getUserConfig()
+ const modelId = userConfig.llm.model.get()
+ const reasoningPreference = userConfig.llm.reasoning.get()
+ const computedReasoning = restOptions.autoThinking
+ ? restOptions.reasoning
+ : (restOptions.reasoning ?? getReasoningOptionForModel(reasoningPreference, modelId))
+ const requestOptions = {
+ ...restOptions,
+ ...(computedReasoning !== undefined ? { reasoning: computedReasoning } : {}),
+ }
const aliveKeeper = new BackgroundAliveKeeper()
abortSignal?.addEventListener('abort', () => {
log.debug('generate object request aborted')
@@ -58,12 +93,12 @@ export async function generateObjectInBackground(options:
reject(new AbortError('Aborted'))
})
const timeoutTimer = setTimeout(() => {
- log.warn('generate object request timeout', restOptions)
+ log.warn('generate object request timeout', requestOptions)
reject(new ModelRequestTimeoutError())
}, timeout)
const promise = s2bRpc
.generateObjectFromSchema({
- ...restOptions,
+ ...requestOptions,
})
.then((result) => {
clearTimeout(timeoutTimer)
diff --git a/locales/de.json b/locales/de.json
index 9d761d29..b69c340d 100644
--- a/locales/de.json
+++ b/locales/de.json
@@ -4,13 +4,17 @@
"thinking": "Denken...",
"reading": "Lektüre",
"search_locally": "Lokal im Browser suchen…",
- "thought_for_seconds": "Gedanke für {second} Sekunde | Gedanke für {second} Sekunden"
+ "thought_for_seconds": "Gedanke für {second} Sekunde | Gedanke für {second} Sekunden",
+ "used_model": "Verwendet {model}"
},
"quick_actions": {
"title": "Schnelle Aktionen"
},
"thinking_mode": {
- "label": "Denken"
+ "label": "Denken",
+ "low": "Niedrig",
+ "medium": "Mittel",
+ "high": "Hoch"
},
"input": {
"placeholder": {
@@ -28,7 +32,9 @@
"pdf_text_extract_error": "Textextraktion fehlgeschlagen - diese PDF könnte gescannt oder bildbasiert sein.",
"too_many_pdfs": "Maximal {max} PDF-Datei erlaubt",
"pdf_page_count_exceeded": "PDF muss weniger als {max} Seiten haben",
- "only_load_partial_pages": "Aufgrund der Dateigröße werden nur die ersten {max} Seiten geladen"
+ "only_load_partial_pages": "Aufgrund der Dateigröße werden nur die ersten {max} Seiten geladen",
+ "selected_text_prefix": "Ausgewählter Text:",
+ "captured_page_prefix": "Erfasste Seite:"
}
},
"prompt": {
@@ -438,7 +444,8 @@
"years": "{year} Jahr | {year} Jahre",
"months": "{month} Monat | {month} Monate"
},
- "confirm": "Bestätigen"
+ "confirm": "Bestätigen",
+ "send": "Senden"
},
"ollama": {
"sites": {
@@ -453,7 +460,10 @@
"close": "Schließen",
"chat_history": "Geschichte",
"new_chat": "Neuer Chat",
- "back": "Zurück"
+ "back": "Zurück",
+ "copy_message": "Nachricht kopieren",
+ "edit_message": "Nachricht bearbeiten",
+ "retry_message": "Wiederholen"
},
"gmail_tools": {
"buttons": {
diff --git a/locales/en.json b/locales/en.json
index 5b52d4c8..945562a9 100644
--- a/locales/en.json
+++ b/locales/en.json
@@ -244,13 +244,17 @@
"thought_for_seconds": "Thought for {second} second | Thought for {second} seconds",
"thinking": "Thinking...",
"search_locally": "Searching locally in browser…",
- "reading": "Reading"
+ "reading": "Reading",
+ "used_model": "Used {model}"
},
"quick_actions": {
"title": "Quick Actions"
},
"thinking_mode": {
- "label": "Thinking"
+ "label": "Thinking",
+ "low": "Low",
+ "medium": "Medium",
+ "high": "High"
},
"input": {
"placeholder": {
@@ -268,7 +272,9 @@
"pdf_oversize": "PDF file size must be less than {size}",
"too_many_pdfs": "Maximum {max} PDF file allowed",
"pdf_text_extract_error": "Text extraction failed - this PDF may be scanned or image-based.",
- "only_load_partial_pages": "Only the first {max} pages are loaded due to file size"
+ "only_load_partial_pages": "Only the first {max} pages are loaded due to file size",
+ "selected_text_prefix": "Selected Text:",
+ "captured_page_prefix": "Page Captured:"
}
},
"prompt": {
@@ -417,7 +423,8 @@
"years": "{year} year | {year} years",
"months": "{month} month | {month} months"
},
- "confirm": "Confirm"
+ "confirm": "Confirm",
+ "send": "Send"
},
"ollama": {
"sites": {
@@ -453,7 +460,10 @@
"close": "Close",
"chat_history": "History",
"new_chat": "New Chat",
- "back": "Back"
+ "back": "Back",
+ "copy_message": "Copy",
+ "edit_message": "Edit",
+ "retry_message": "Retry"
},
"gmail_tools": {
"buttons": {
diff --git a/locales/es.json b/locales/es.json
index f0db0018..5b532e14 100644
--- a/locales/es.json
+++ b/locales/es.json
@@ -16,20 +16,26 @@
"pdf_text_extract_error": "La extracción de texto falló - este PDF puede estar escaneado o basado en imágenes.",
"too_many_pdfs": "Máximo {max} archivo PDF permitido",
"pdf_page_count_exceeded": "El PDF debe tener menos de {max} páginas",
- "only_load_partial_pages": "Solo se cargan las primeras {max} páginas debido al tamaño del archivo"
+ "only_load_partial_pages": "Solo se cargan las primeras {max} páginas debido al tamaño del archivo",
+ "selected_text_prefix": "Texto seleccionado:",
+ "captured_page_prefix": "Página capturada:"
}
},
"messages": {
"reading": "Leyendo",
"search_locally": "Buscando localmente en el navegador...",
"thinking": "Pensando...",
- "thought_for_seconds": "Pensando durante {second} segundo | Pensando durante {second} segundos"
+ "thought_for_seconds": "Pensando durante {second} segundo | Pensando durante {second} segundos",
+ "used_model": "Usó {model}"
},
"quick_actions": {
"title": "Acciones Rápidas"
},
"thinking_mode": {
- "label": "Pensando"
+ "label": "Pensando",
+ "low": "Bajo",
+ "medium": "Medio",
+ "high": "Alto"
},
"prompt": {
"highlight_key_insights": {
@@ -417,7 +423,8 @@
"years": "{year} año | {year} años",
"months": "{month} mes | {month} meses"
},
- "confirm": "Confirmar"
+ "confirm": "Confirmar",
+ "send": "Enviar"
},
"ollama": {
"sites": {
@@ -453,7 +460,10 @@
"close": "Cerrar",
"chat_history": "Historia",
"new_chat": "Nuevo chat",
- "back": "Atrás"
+ "back": "Atrás",
+ "copy_message": "Copiar",
+ "edit_message": "Editar",
+ "retry_message": "Reintentar"
},
"gmail_tools": {
"buttons": {
diff --git a/locales/fr.json b/locales/fr.json
index 57dc4615..438958df 100644
--- a/locales/fr.json
+++ b/locales/fr.json
@@ -4,13 +4,17 @@
"thought_for_seconds": "Réflexion pendant {second} seconde | Réflexion pendant {second} secondes",
"thinking": "Réflexion en cours...",
"reading": "Lecture en cours",
- "search_locally": "Recherche locale dans le navigateur..."
+ "search_locally": "Recherche locale dans le navigateur...",
+ "used_model": "Utilisé {model}"
},
"quick_actions": {
"title": "Actions rapides"
},
"thinking_mode": {
- "label": "Réflexion"
+ "label": "Réflexion",
+ "low": "Faible",
+ "medium": "Moyen",
+ "high": "Élevé"
},
"input": {
"placeholder": {
@@ -28,7 +32,9 @@
"pdf_text_extract_error": "L'extraction de texte a échoué - ce PDF peut être numérisé ou basé sur des images.",
"too_many_pdfs": "Maximum {max} fichier PDF autorisé",
"pdf_page_count_exceeded": "Le PDF doit contenir moins de {max} pages",
- "only_load_partial_pages": "Seules les {max} premières pages sont chargées en raison de la taille du fichier"
+ "only_load_partial_pages": "Seules les {max} premières pages sont chargées en raison de la taille du fichier",
+ "selected_text_prefix": "Texte sélectionné :",
+ "captured_page_prefix": "Page capturée :"
}
},
"prompt": {
@@ -417,7 +423,8 @@
"years": "{year} an | {year} ans",
"months": "{month} mois | {month} mois"
},
- "confirm": "Confirmer"
+ "confirm": "Confirmer",
+ "send": "Envoyer"
},
"ollama": {
"sites": {
@@ -453,7 +460,10 @@
"close": "Fermer",
"chat_history": "Histoire",
"new_chat": "Nouveau chat",
- "back": "Retour"
+ "back": "Retour",
+ "copy_message": "Copier le message",
+ "edit_message": "Modifier le message",
+ "retry_message": "Réessayer"
},
"gmail_tools": {
"buttons": {
diff --git a/locales/id.json b/locales/id.json
index c74f293f..0c772cb7 100644
--- a/locales/id.json
+++ b/locales/id.json
@@ -4,13 +4,17 @@
"thought_for_seconds": "Berpikir selama {second} detik | Berpikir selama {second} detik",
"thinking": "Sedang berpikir...",
"reading": "Sedang membaca...",
- "search_locally": "Mencari secara lokal di browser..."
+ "search_locally": "Mencari secara lokal di browser...",
+ "used_model": "Menggunakan {model}"
},
"quick_actions": {
"title": "Tindakan cepat"
},
"thinking_mode": {
- "label": "Berpikir"
+ "label": "Berpikir",
+ "low": "Rendah",
+ "medium": "Sedang",
+ "high": "Tinggi"
},
"input": {
"placeholder": {
@@ -28,7 +32,9 @@
"pdf_text_extract_error": "Ekstraksi teks gagal - PDF ini mungkin hasil scan atau berbasis gambar.",
"too_many_pdfs": "Maksimal {max} file PDF diizinkan",
"pdf_page_count_exceeded": "PDF harus kurang dari {max} halaman",
- "only_load_partial_pages": "Hanya {max} halaman pertama yang dimuat karena ukuran file"
+ "only_load_partial_pages": "Hanya {max} halaman pertama yang dimuat karena ukuran file",
+ "selected_text_prefix": "Teks terpilih:",
+ "captured_page_prefix": "Halaman tertangkap:"
}
},
"prompt": {
@@ -417,7 +423,8 @@
"years": "{year} tahun | {year} tahun",
"months": "{month} bulan | {month} bulan"
},
- "confirm": "Konfirmasi"
+ "confirm": "Konfirmasi",
+ "send": "Kirim"
},
"ollama": {
"sites": {
@@ -453,7 +460,10 @@
"close": "Tutup",
"chat_history": "Sejarah",
"new_chat": "Obrolan baru",
- "back": "Kembali"
+ "back": "Kembali",
+ "copy_message": "Salin",
+ "edit_message": "Edit",
+ "retry_message": "Coba lagi"
},
"gmail_tools": {
"buttons": {
diff --git a/locales/ja.json b/locales/ja.json
index c20c5c26..5a2f39c0 100644
--- a/locales/ja.json
+++ b/locales/ja.json
@@ -4,13 +4,17 @@
"thought_for_seconds": "{second}秒間考えています | {second}秒間考えています",
"thinking": "考え中...",
"reading": "読み込み中",
- "search_locally": "ブラウザ内で検索中..."
+ "search_locally": "ブラウザ内で検索中...",
+ "used_model": "{model}を使用"
},
"quick_actions": {
"title": "クイックアクション"
},
"thinking_mode": {
- "label": "思考"
+ "label": "思考",
+ "low": "低",
+ "medium": "中",
+ "high": "高"
},
"input": {
"placeholder": {
@@ -28,7 +32,9 @@
"pdf_text_extract_error": "テキスト抽出に失敗しました - このPDFはスキャンされたものか画像ベースの可能性があります。",
"too_many_pdfs": "最大{max}個のPDFファイルまで許可されています",
"pdf_page_count_exceeded": "PDFは{max}ページ以下である必要があります",
- "only_load_partial_pages": "ファイルサイズのため、最初の{max}ページのみが読み込まれました"
+ "only_load_partial_pages": "ファイルサイズのため、最初の{max}ページのみが読み込まれました",
+ "selected_text_prefix": "選択テキスト:",
+ "captured_page_prefix": "キャプチャページ:"
}
},
"prompt": {
@@ -417,7 +423,8 @@
"years": "{year}年 | {year}年",
"months": "{month}ヶ月 | {month}ヶ月"
},
- "confirm": "確認"
+ "confirm": "確認",
+ "send": "送信"
},
"ollama": {
"sites": {
@@ -453,7 +460,10 @@
"close": "閉じる",
"chat_history": "歴史",
"new_chat": "新しいチャット",
- "back": "戻る"
+ "back": "戻る",
+ "copy_message": "メッセージをコピー",
+ "edit_message": "メッセージを編集",
+ "retry_message": "再試行"
},
"gmail_tools": {
"buttons": {
diff --git a/locales/ko.json b/locales/ko.json
index fa2887de..b759c74f 100644
--- a/locales/ko.json
+++ b/locales/ko.json
@@ -4,13 +4,17 @@
"thought_for_seconds": "{second}초 동안 생각 중 | {second}초 동안 생각 중",
"thinking": "생각하는 중...",
"reading": "읽는 중",
- "search_locally": "브라우저에서 검색 중..."
+ "search_locally": "브라우저에서 검색 중...",
+ "used_model": "{model} 사용"
},
"quick_actions": {
"title": "빠른 액션"
},
"thinking_mode": {
- "label": "사고"
+ "label": "사고",
+ "low": "낮음",
+ "medium": "중간",
+ "high": "높음"
},
"input": {
"placeholder": {
@@ -28,7 +32,9 @@
"pdf_text_extract_error": "PDF에서 텍스트 추출에 실패했습니다 - 이 PDF는 스캔되었거나 이미지 기반일 수 있습니다.",
"too_many_pdfs": "최대 {max}개의 PDF 파일이 허용됩니다",
"pdf_page_count_exceeded": "PDF는 {max}페이지 미만이어야 합니다",
- "only_load_partial_pages": "파일 크기로 인해 처음 {max}페이지만 로드됩니다"
+ "only_load_partial_pages": "파일 크기로 인해 처음 {max}페이지만 로드됩니다",
+ "selected_text_prefix": "선택된 텍스트:",
+ "captured_page_prefix": "캡처된 페이지:"
}
},
"prompt": {
@@ -417,7 +423,8 @@
"years": "{year}년 | {year}년",
"months": "{month}개월 | {month}개월"
},
- "confirm": "확인"
+ "confirm": "확인",
+ "send": "보내기"
},
"ollama": {
"sites": {
@@ -453,7 +460,10 @@
"close": "닫기",
"chat_history": "역사",
"new_chat": "새로운 채팅",
- "back": "뒤로"
+ "back": "뒤로",
+ "copy_message": "메시지 복사",
+ "edit_message": "메시지 수정",
+ "retry_message": "재시도"
},
"gmail_tools": {
"buttons": {
diff --git a/locales/pt.json b/locales/pt.json
index 58e50a1d..e589aeae 100644
--- a/locales/pt.json
+++ b/locales/pt.json
@@ -4,13 +4,17 @@
"thought_for_seconds": "Pensando por {second} segundo | Pensando por {second} segundos",
"thinking": "Pensando...",
"reading": "Lendo",
- "search_locally": "Pesquisando localmente no navegador..."
+ "search_locally": "Pesquisando localmente no navegador...",
+ "used_model": "Usado {model}"
},
"quick_actions": {
"title": "Ações rápidas"
},
"thinking_mode": {
- "label": "Pensando"
+ "label": "Pensando",
+ "low": "Baixo",
+ "medium": "Médio",
+ "high": "Alto"
},
"input": {
"placeholder": {
@@ -28,7 +32,9 @@
"pdf_text_extract_error": "A extração de texto falhou - este PDF pode estar digitalizado ou ser baseado em imagem.",
"too_many_pdfs": "Máximo de {max} arquivo PDF permitido",
"pdf_page_count_exceeded": "O PDF deve ter menos de {max} páginas",
- "only_load_partial_pages": "Apenas as primeiras {max} páginas são carregadas devido ao tamanho do arquivo"
+ "only_load_partial_pages": "Apenas as primeiras {max} páginas são carregadas devido ao tamanho do arquivo",
+ "selected_text_prefix": "Texto selecionado:",
+ "captured_page_prefix": "Página capturada:"
}
},
"prompt": {
@@ -417,7 +423,8 @@
"years": "{year} ano | {year} anos",
"months": "{month} mês | {month} meses"
},
- "confirm": "Confirmar"
+ "confirm": "Confirmar",
+ "send": "Enviar"
},
"ollama": {
"sites": {
@@ -453,7 +460,10 @@
"close": "Fechar",
"chat_history": "História",
"new_chat": "Novo bate -papo",
- "back": "Voltar"
+ "back": "Voltar",
+ "copy_message": "Copiar mensagem",
+ "edit_message": "Editar mensagem",
+ "retry_message": "Tentar novamente"
},
"gmail_tools": {
"buttons": {
diff --git a/locales/ru.json b/locales/ru.json
index 93beda36..8892a624 100644
--- a/locales/ru.json
+++ b/locales/ru.json
@@ -4,13 +4,17 @@
"thought_for_seconds": "Думаю {second} секунду | Думаю {second} секунд",
"thinking": "Думаю...",
"reading": "Читаю",
- "search_locally": "Ищу локально в браузере..."
+ "search_locally": "Ищу локально в браузере...",
+ "used_model": "Используется {model}"
},
"quick_actions": {
"title": "Быстрые действия"
},
"thinking_mode": {
- "label": "Мышление"
+ "label": "Мышление",
+ "low": "Низкий",
+ "medium": "Средний",
+ "high": "Высокий"
},
"input": {
"placeholder": {
@@ -28,7 +32,9 @@
"pdf_text_extract_error": "Извлечение текста не удалось - этот PDF может быть отсканированным или основанным на изображениях.",
"too_many_pdfs": "Максимально разрешено {max} PDF файлов",
"pdf_page_count_exceeded": "PDF должен содержать менее {max} страниц",
- "only_load_partial_pages": "Загружены только первые {max} страниц из-за размера файла"
+ "only_load_partial_pages": "Загружены только первые {max} страниц из-за размера файла",
+ "selected_text_prefix": "Выделенный текст:",
+ "captured_page_prefix": "Захваченная страница:"
}
},
"prompt": {
@@ -417,7 +423,8 @@
"years": "{year} год | {year} лет",
"months": "{month} месяц | {month} месяца | {month} месяцев"
},
- "confirm": "Подтвердить"
+ "confirm": "Подтвердить",
+ "send": "Отправить"
},
"ollama": {
"sites": {
@@ -453,7 +460,10 @@
"close": "Закрыть",
"chat_history": "История",
"new_chat": "Новый чат",
- "back": "Назад"
+ "back": "Назад",
+ "copy_message": "Скопировать сообщение",
+ "edit_message": "Редактировать сообщение",
+ "retry_message": "Повторить"
},
"gmail_tools": {
"buttons": {
diff --git a/locales/th.json b/locales/th.json
index 7587af5d..bd033417 100644
--- a/locales/th.json
+++ b/locales/th.json
@@ -4,13 +4,17 @@
"thought_for_seconds": "กำลังคิด {second} วินาที | กำลังคิด {second} วินาที",
"thinking": "กำลังคิด...",
"reading": "กำลังอ่าน",
- "search_locally": "กำลังค้นหาในเบราว์เซอร์..."
+ "search_locally": "กำลังค้นหาในเบราว์เซอร์...",
+ "used_model": "ใช้ {model}"
},
"quick_actions": {
"title": "การกระทำที่รวดเร็ว"
},
"thinking_mode": {
- "label": "กำลังคิด"
+ "label": "กำลังคิด",
+ "low": "ต่ำ",
+ "medium": "ปานกลาง",
+ "high": "สูง"
},
"input": {
"placeholder": {
@@ -28,7 +32,9 @@
"pdf_text_extract_error": "การดึงข้อความล้มเหลว - PDF นี้อาจเป็นไฟล์สแกนหรือเป็นรูปภาพ",
"too_many_pdfs": "อนุญาตไฟล์ PDF สูงสุด {max} ไฟล์",
"pdf_page_count_exceeded": "PDF ต้องมีจำนวนหน้าไม่เกิน {max} หน้า",
- "only_load_partial_pages": "โหลดเฉพาะ {max} หน้าแรกเท่านั้นเนื่องจากขนาดไฟล์"
+ "only_load_partial_pages": "โหลดเฉพาะ {max} หน้าแรกเท่านั้นเนื่องจากขนาดไฟล์",
+ "selected_text_prefix": "ข้อความที่เลือก:",
+ "captured_page_prefix": "หน้าที่จับภาพ:"
}
},
"prompt": {
@@ -417,7 +423,8 @@
"years": "{year} ปี | {year} ปี",
"months": "{month} เดือน | {month} เดือน"
},
- "confirm": "ยืนยัน"
+ "confirm": "ยืนยัน",
+ "send": "ส่ง"
},
"ollama": {
"sites": {
@@ -453,7 +460,10 @@
"close": "ปิด",
"chat_history": "ประวัติศาสตร์",
"new_chat": "แชทใหม่",
- "back": "กลับ"
+ "back": "กลับ",
+ "copy_message": "คัดลอกข้อความ",
+ "edit_message": "แก้ไขข้อความ",
+ "retry_message": "ลองอีกครั้ง"
},
"gmail_tools": {
"buttons": {
diff --git a/locales/vi.json b/locales/vi.json
index e8e4537f..d2e13503 100644
--- a/locales/vi.json
+++ b/locales/vi.json
@@ -4,13 +4,17 @@
"thought_for_seconds": "Suy nghĩ trong {second} giây | Suy nghĩ trong {second} giây",
"thinking": "Đang suy nghĩ...",
"reading": "Đang đọc...",
- "search_locally": "Đang tìm kiếm trong trình duyệt..."
+ "search_locally": "Đang tìm kiếm trong trình duyệt...",
+ "used_model": "Đã sử dụng {model}"
},
"quick_actions": {
"title": "Thao tác nhanh"
},
"thinking_mode": {
- "label": "Suy nghĩ"
+ "label": "Suy nghĩ",
+ "low": "Thấp",
+ "medium": "Trung bình",
+ "high": "Cao"
},
"input": {
"placeholder": {
@@ -28,7 +32,9 @@
"pdf_text_extract_error": "Trích xuất văn bản thất bại - tệp PDF này có thể được quét hoặc dựa trên hình ảnh.",
"too_many_pdfs": "Tối đa {max} tệp PDF được phép",
"pdf_page_count_exceeded": "PDF phải có ít hơn {max} trang",
- "only_load_partial_pages": "Chỉ {max} trang đầu tiên được tải do kích thước tệp"
+ "only_load_partial_pages": "Chỉ {max} trang đầu tiên được tải do kích thước tệp",
+ "selected_text_prefix": "Văn bản đã chọn:",
+ "captured_page_prefix": "Trang đã chụp:"
}
},
"prompt": {
@@ -417,7 +423,8 @@
"years": "{year} năm | {year} năm",
"months": "{month} tháng | {month} tháng"
},
- "confirm": "Xác nhận"
+ "confirm": "Xác nhận",
+ "send": "Gửi"
},
"ollama": {
"sites": {
@@ -453,7 +460,10 @@
"close": "Đóng",
"chat_history": "Lịch sử",
"new_chat": "Trò chuyện mới",
- "back": "Quay lại"
+ "back": "Quay lại",
+ "copy_message": "Sao chép tin nhắn",
+ "edit_message": "Chỉnh sửa tin nhắn",
+ "retry_message": "Thử lại"
},
"gmail_tools": {
"buttons": {
diff --git a/locales/zh-CN.json b/locales/zh-CN.json
index bee4d513..59b7cfff 100644
--- a/locales/zh-CN.json
+++ b/locales/zh-CN.json
@@ -244,13 +244,17 @@
"thought_for_seconds": "思考了{second}秒 | 思考了{second}秒",
"thinking": "正在思考...",
"reading": "正在阅读",
- "search_locally": "正在浏览器中本地搜索..."
+ "search_locally": "正在浏览器中本地搜索...",
+ "used_model": "使用了 {model}"
},
"quick_actions": {
"title": "快捷操作"
},
"thinking_mode": {
- "label": "思考"
+ "label": "思考",
+ "low": "低",
+ "medium": "中",
+ "high": "高"
},
"input": {
"placeholder": {
@@ -268,7 +272,9 @@
"pdf_text_extract_error": "文本提取失败 - 此PDF可能是扫描版或基于图像的。",
"too_many_pdfs": "最多允许 {max} 个 PDF 文件",
"pdf_page_count_exceeded": "PDF 页数不能超过 {max} 页",
- "only_load_partial_pages": "由于文件大小限制,仅加载前 {max} 页"
+ "only_load_partial_pages": "由于文件大小限制,仅加载前 {max} 页",
+ "selected_text_prefix": "已选择文本:",
+ "captured_page_prefix": "已捕获页面:"
}
},
"prompt": {
@@ -417,7 +423,8 @@
"years": "{year} 年 | {year} 年",
"months": "{month} 个月 | {month} 个月"
},
- "confirm": "确认"
+ "confirm": "确认",
+ "send": "发送"
},
"ollama": {
"sites": {
@@ -453,7 +460,10 @@
"close": "关闭",
"chat_history": "历史",
"new_chat": "新聊天",
- "back": "返回"
+ "back": "返回",
+ "copy_message": "复制",
+ "edit_message": "编辑",
+ "retry_message": "重试"
},
"gmail_tools": {
"buttons": {
diff --git a/locales/zh-TW.json b/locales/zh-TW.json
index 47752408..30c2fdb5 100644
--- a/locales/zh-TW.json
+++ b/locales/zh-TW.json
@@ -4,13 +4,17 @@
"thought_for_seconds": "思考了{second}秒 | 思考了{second}秒",
"thinking": "正在思考...",
"reading": "讀取中",
- "search_locally": "在瀏覽器中本地搜尋…"
+ "search_locally": "在瀏覽器中本地搜尋…",
+ "used_model": "使用了 {model}"
},
"quick_actions": {
"title": "快捷操作"
},
"thinking_mode": {
- "label": "推理"
+ "label": "推理",
+ "low": "低",
+ "medium": "中",
+ "high": "高"
},
"input": {
"placeholder": {
@@ -28,7 +32,9 @@
"pdf_text_extract_error": "文字擷取失敗 - 此 PDF 可能是掃描檔或圖像格式。",
"too_many_pdfs": "最多允許 {max} 個 PDF 檔案",
"pdf_page_count_exceeded": "PDF 必須少於 {max} 頁",
- "only_load_partial_pages": "由於檔案大小限制,僅載入前 {max} 頁"
+ "only_load_partial_pages": "由於檔案大小限制,僅載入前 {max} 頁",
+ "selected_text_prefix": "已選擇文字:",
+ "captured_page_prefix": "已擷取頁面:"
}
},
"prompt": {
@@ -417,7 +423,8 @@
"years": "{year} 年 | {year} 年",
"months": "{month} 個月 | {month} 個月"
},
- "confirm": "確認"
+ "confirm": "確認",
+ "send": "傳送"
},
"ollama": {
"sites": {
@@ -453,7 +460,10 @@
"close": "關閉",
"chat_history": "歷史",
"new_chat": "新聊天",
- "back": "返回"
+ "back": "返回",
+ "copy_message": "複製訊息",
+ "edit_message": "編輯訊息",
+ "retry_message": "重試"
},
"gmail_tools": {
"buttons": {
diff --git a/package.json b/package.json
index 6292b070..6977cc60 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "nativemind-extension",
- "version": "1.10.1",
+ "version": "1.11.0",
"private": false,
"author": "NativeMind",
"keywords": [
diff --git a/types/chat.ts b/types/chat.ts
index 60835840..5a116c81 100644
--- a/types/chat.ts
+++ b/types/chat.ts
@@ -4,6 +4,7 @@ import { IconName } from '@/utils/icon'
import { PromiseOr } from './common'
import { Base64ImageData } from './image'
+import { StoredReasoningPreference } from './reasoning'
import { SettingsScrollTarget } from './scroll-targets'
import { TabInfo } from './tab'
@@ -42,6 +43,23 @@ export type TabAttachment = {
value: TabInfo & { id: string }
}
+export type SelectedTextAttachment = {
+ type: 'selected-text'
+ value: {
+ id: string
+ text: string
+ }
+}
+
+export type CapturedPageAttachment = {
+ type: 'captured-page'
+ value: Base64ImageData & {
+ id: string
+ name: string
+ size?: number
+ }
+}
+
// this is a placeholder for attachment that is still loading
export type LoadingAttachment = {
type: 'loading'
@@ -52,7 +70,7 @@ export type LoadingAttachment = {
}
}
-export type ContextAttachment = ImageAttachment | PDFAttachment | TabAttachment | LoadingAttachment
+export type ContextAttachment = ImageAttachment | PDFAttachment | TabAttachment | SelectedTextAttachment | LoadingAttachment | CapturedPageAttachment
export type ContextAttachmentStorage = {
id: string
lastInteractedAt?: number // last time user interacted with this context(attach/detach)
@@ -94,6 +112,8 @@ export interface AssistantMessageV1 extends BaseMessage {
reasoningTime?: number
isError?: boolean
timestamp?: number
+ model?: string
+ endpointType?: string
style?: {
backgroundColor?: CSS.Property.BackgroundColor
}
@@ -106,6 +126,8 @@ export interface AgentMessageV1 extends BaseMessage {
reasoningTime?: number
isError?: boolean
timestamp?: number
+ model?: string
+ endpointType?: string
style?: {
backgroundColor?: CSS.Property.BackgroundColor
}
@@ -180,7 +202,7 @@ export type ChatHistoryV1 = {
lastFullUpdateMessageId?: string // last message id that was fully updated with context info
lastAttachmentIds: string[]
}
- reasoningEnabled?: boolean // reasoning setting for this chat
+ reasoningEnabled?: StoredReasoningPreference // reasoning setting for this chat
onlineSearchEnabled: boolean // online search setting for this chat, default is true
history: HistoryItemV1[]
}
diff --git a/types/reasoning.ts b/types/reasoning.ts
new file mode 100644
index 00000000..ae191b4f
--- /dev/null
+++ b/types/reasoning.ts
@@ -0,0 +1,53 @@
+export const REASONING_EFFORTS = ['low', 'medium', 'high'] as const
+
+export type ReasoningEffort = typeof REASONING_EFFORTS[number]
+
+export type ReasoningPreference = {
+ enabled: boolean
+ effort: ReasoningEffort
+}
+
+export type StoredReasoningPreference = boolean | ReasoningPreference | undefined
+export type ReasoningOption = boolean | ReasoningEffort
+
+export const DEFAULT_REASONING_PREFERENCE: ReasoningPreference = {
+ enabled: true,
+ effort: 'medium',
+}
+
+export const isReasoningEffort = (value: unknown): value is ReasoningEffort => {
+ return typeof value === 'string' && (REASONING_EFFORTS as readonly string[]).includes(value)
+}
+
+export const normalizeReasoningEffort = (value: unknown): ReasoningEffort => {
+ // accept any casing, but always return the canonical lowercase effort
+ const lowered = typeof value === 'string' ? value.toLowerCase() : value
+ if (isReasoningEffort(lowered)) return lowered
+ return DEFAULT_REASONING_PREFERENCE.effort
+}
+
+export const normalizeReasoningPreference = (value: StoredReasoningPreference): ReasoningPreference => {
+ // a legacy boolean value (including false) must reach the boolean branch below
+ if (value === undefined) return { ...DEFAULT_REASONING_PREFERENCE }
+ if (typeof value === 'boolean') {
+ return {
+ enabled: value,
+ effort: DEFAULT_REASONING_PREFERENCE.effort,
+ }
+ }
+ const enabled = typeof value.enabled === 'boolean' ? value.enabled : DEFAULT_REASONING_PREFERENCE.enabled
+ const effort = normalizeReasoningEffort(value.effort)
+ return {
+ enabled,
+ effort,
+ }
+}
+
+export const mergeReasoningPreference = (
+ base: StoredReasoningPreference,
+ updates: Partial,
+): ReasoningPreference => {
+ const normalized = normalizeReasoningPreference(base)
+ return {
+ ...normalized,
+ ...updates,
+ effort: updates.effort ? normalizeReasoningEffort(updates.effort) : normalized.effort,
+ }
+}
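
A minimal usage sketch (illustrative only, not part of the diff) of how the three stored shapes of `reasoningEnabled` normalize, and how toggling only the flag preserves the stored effort:

  import { mergeReasoningPreference, normalizeReasoningPreference } from '@/types/reasoning'

  normalizeReasoningPreference(undefined)                         // { enabled: true, effort: 'medium' } -- defaults
  normalizeReasoningPreference(false)                             // { enabled: false, effort: 'medium' } -- legacy boolean
  normalizeReasoningPreference({ enabled: true, effort: 'high' }) // { enabled: true, effort: 'high' }

  // Flipping only `enabled` keeps the previously chosen effort.
  mergeReasoningPreference({ enabled: true, effort: 'high' }, { enabled: false })
  // -> { enabled: false, effort: 'high' }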
diff --git a/utils/llm/models.ts b/utils/llm/models.ts
index f8f0d9cb..1ea21bd7 100644
--- a/utils/llm/models.ts
+++ b/utils/llm/models.ts
@@ -1,27 +1,33 @@
import { LanguageModelV1, wrapLanguageModel } from 'ai'
+import type { ReasoningOption } from '@/types/reasoning'
import { getUserConfig } from '@/utils/user-config'
import { ModelNotFoundError } from '../error'
import { makeCustomFetch } from '../fetch'
+import logger from '../logger'
import { loadModel as loadLMStudioModel } from './lm-studio'
import { middlewares } from './middlewares'
import { checkModelSupportThinking } from './ollama'
import { LMStudioChatLanguageModel } from './providers/lm-studio/chat-language-model'
import { createOllama } from './providers/ollama'
import { WebLLMChatLanguageModel } from './providers/web-llm/openai-compatible-chat-language-model'
+import { getReasoningOptionForModel, isGptOssModel } from './reasoning'
import { isToggleableThinkingModel } from './thinking-models'
import { getWebLLMEngine, WebLLMSupportedModel } from './web-llm'
-export async function getModelUserConfig() {
+export async function getModelUserConfig(overrides?: { model?: string, endpointType?: LLMEndpointType }) {
+ if (overrides) logger.debug('Using model/endpoint override', { overrides })
const userConfig = await getUserConfig()
- const model = userConfig.llm.model.get()
- const endpointType = userConfig.llm.endpointType.get()
+ const endpointType = overrides?.endpointType ?? userConfig.llm.endpointType.get()
+ const model = overrides?.model ?? userConfig.llm.model.get()
+
const baseUrl = userConfig.llm.backends[endpointType === 'lm-studio' ? 'lmStudio' : 'ollama'].baseUrl.get()
const apiKey = userConfig.llm.apiKey.get()
const numCtx = userConfig.llm.backends[endpointType === 'lm-studio' ? 'lmStudio' : 'ollama'].numCtx.get()
const enableNumCtx = userConfig.llm.backends[endpointType === 'lm-studio' ? 'lmStudio' : 'ollama'].enableNumCtx.get()
- const reasoning = userConfig.llm.reasoning.get()
+ const reasoningPreference = userConfig.llm.reasoning.get()
+ const reasoning = getReasoningOptionForModel(reasoningPreference, model)
if (!model) {
throw new ModelNotFoundError(undefined, endpointType)
}
@@ -44,7 +50,7 @@ export async function getModel(options: {
apiKey: string
numCtx: number
enableNumCtx: boolean
- reasoning: boolean
+ reasoning: ReasoningOption
autoThinking?: boolean
endpointType: LLMEndpointType
onLoadingModel?: (prg: ModelLoadingProgressEvent) => void
@@ -58,6 +64,17 @@ export async function getModel(options: {
const currentModel = options.model
const supportsThinking = await checkModelSupportThinking(currentModel)
const supportsToggleThinking = isToggleableThinkingModel(endpointType, currentModel)
+ const isCurrentGptOss = isGptOssModel(currentModel)
+ const reasoningValue = options.reasoning
+ let thinkValue: ReasoningOption | undefined
+ if (supportsThinking && reasoningValue !== undefined) {
+ if (isCurrentGptOss) {
+ thinkValue = reasoningValue
+ }
+ else if (supportsToggleThinking) {
+ thinkValue = typeof reasoningValue === 'boolean' ? reasoningValue : true
+ }
+ }
const customFetch = makeCustomFetch({
bodyTransformer: (body) => {
// let Ollama process the thinking capability itself (used by the translation feature)
@@ -67,7 +84,7 @@ export async function getModel(options: {
const parsedBody = JSON.parse(body)
return JSON.stringify({
...parsedBody,
- think: supportsThinking && supportsToggleThinking ? options.reasoning : undefined,
+ think: thinkValue,
})
},
})
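
The `think` value forwarded to the backend is derived from three facts about the current model. The sketch below is a condensed restatement of that decision for illustration, not the exported code.

```ts
// Illustrative restatement of the thinkValue decision above, not the shipped implementation.
import type { ReasoningOption } from '@/types/reasoning'

function resolveThink(
  reasoning: ReasoningOption | undefined,
  model: { supportsThinking: boolean, supportsToggleThinking: boolean, isGptOss: boolean },
): ReasoningOption | undefined {
  if (!model.supportsThinking || reasoning === undefined) return undefined // let the backend decide on its own
  if (model.isGptOss) return reasoning // gpt-oss takes the effort level directly (or false to disable)
  if (model.supportsToggleThinking) return typeof reasoning === 'boolean' ? reasoning : true
  return undefined // thinking model without a toggle: leave `think` unset
}
```

So a gpt-oss model with reasoning enabled receives the effort string, a toggleable non-gpt-oss model receives a plain boolean, and everything else leaves `think` untouched.
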
diff --git a/utils/llm/providers/lm-studio/chat-language-model.ts b/utils/llm/providers/lm-studio/chat-language-model.ts
index d437ff9c..51061013 100644
--- a/utils/llm/providers/lm-studio/chat-language-model.ts
+++ b/utils/llm/providers/lm-studio/chat-language-model.ts
@@ -60,6 +60,7 @@ export class LMStudioChatLanguageModel implements LanguageModelV1 {
stopSequences,
seed,
abortSignal,
+ providerMetadata,
  }: Parameters<LanguageModelV1['doGenerate']>[0]) {
const warnings: LanguageModelV1CallWarning[] = []
@@ -90,6 +91,8 @@ export class LMStudioChatLanguageModel implements LanguageModelV1 {
stop: stopSequences,
seed,
+ // reasoning_effort support for gpt-oss models
+ reasoningEffort: providerMetadata?.['lm-studio']?.reasoningEffort ?? providerMetadata?.['openai-compatible']?.reasoningEffort,
// messages:
messages: await convertToLMStudioMessages(this.client, prompt),
}
@@ -107,6 +110,9 @@ export class LMStudioChatLanguageModel implements LanguageModelV1 {
log.debug('doGenerate called', { args, options })
+ // Note: the LM Studio SDK's respond() options don't include reasoningEffort yet, so the value
+ // resolved in getArgs is computed but not forwarded to the runtime for now (see the TODO below for gpt-oss models)
+
const responseBody = await this.model.respond(args.messages, {
signal: body.signal,
maxTokens: body.maxTokens,
@@ -116,6 +122,7 @@ export class LMStudioChatLanguageModel implements LanguageModelV1 {
stopStrings: body.stop,
contextOverflowPolicy: 'truncateMiddle',
structured: body.structured,
+ // reasoningEffort: body.reasoningEffort, // TODO: enable once the LM Studio SDK accepts this option
})
const { messages: rawPrompt, ...rawSettings } = args
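
For reference, the metadata lookup added to `getArgs` resolves the effort from either provider key. A standalone sketch of that fallback, with the metadata shape assumed:

```ts
// Sketch of the providerMetadata fallback used in getArgs; the metadata shape is an assumption.
type ReasoningMetadata = { reasoningEffort?: 'low' | 'medium' | 'high' }

const pickReasoningEffort = (providerMetadata?: Record<string, ReasoningMetadata | undefined>) =>
  providerMetadata?.['lm-studio']?.reasoningEffort
    ?? providerMetadata?.['openai-compatible']?.reasoningEffort

// pickReasoningEffort({ 'openai-compatible': { reasoningEffort: 'low' } }) === 'low'
// pickReasoningEffort(undefined) === undefined
```
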
diff --git a/utils/llm/providers/web-llm/openai-compatible-chat-language-model.ts b/utils/llm/providers/web-llm/openai-compatible-chat-language-model.ts
index 102420e2..02d16342 100644
--- a/utils/llm/providers/web-llm/openai-compatible-chat-language-model.ts
+++ b/utils/llm/providers/web-llm/openai-compatible-chat-language-model.ts
@@ -144,7 +144,8 @@ export class WebLLMChatLanguageModel implements LanguageModelV1 {
seed,
...providerMetadata?.[this.providerOptionsName],
- reasoning_effort: providerMetadata?.[this.providerOptionsName]?.reasoningEffort ?? providerMetadata?.['openai-compatible']?.reasoningEffort,
+ // Note: WebLLM does not support reasoning_effort parameter
+ // This parameter is ignored for WebLLM models
// messages:
messages: convertToOpenAICompatibleChatMessages(prompt),
diff --git a/utils/llm/reasoning.ts b/utils/llm/reasoning.ts
new file mode 100644
index 00000000..47d2a2cb
--- /dev/null
+++ b/utils/llm/reasoning.ts
@@ -0,0 +1,36 @@
+import { DEFAULT_REASONING_PREFERENCE, mergeReasoningPreference, normalizeReasoningPreference, ReasoningEffort, ReasoningPreference, StoredReasoningPreference } from '@/types/reasoning'
+
+export const isGptOssModel = (model?: string | null): boolean => {
+ if (!model) return false
+ return model.toLowerCase().includes('gpt-oss')
+}
+
+export const ensureReasoningPreference = (value: StoredReasoningPreference): ReasoningPreference => {
+ return normalizeReasoningPreference(value)
+}
+
+export const withReasoningUpdates = (
+ base: StoredReasoningPreference,
+ updates: Partial,
+): ReasoningPreference => {
+ return mergeReasoningPreference(base, updates)
+}
+
+export const getReasoningOptionForModel = (
+ preference: StoredReasoningPreference,
+ model?: string | null,
+): ReasoningEffort | boolean => {
+ const normalized = normalizeReasoningPreference(preference)
+ if (!normalized.enabled) return false
+ return isGptOssModel(model) ? normalized.effort : true
+}
+
+export const getReasoningPreferenceOrDefault = (value: StoredReasoningPreference): ReasoningPreference => {
+ const normalized = normalizeReasoningPreference(value)
+ return {
+ ...DEFAULT_REASONING_PREFERENCE,
+ ...normalized,
+ effort: normalized.effort,
+ enabled: normalized.enabled,
+ }
+}
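
A few expected outcomes of the new helpers. The model ids and the `'high'` literal are illustrative assumptions; the behavior follows from the code above.

```ts
import { getReasoningOptionForModel, isGptOssModel } from '@/utils/llm/reasoning'

isGptOssModel('gpt-oss:20b') // true (case-insensitive substring match)
isGptOssModel('qwen3:8b') // false

// Disabled reasoning collapses to false for every model.
getReasoningOptionForModel({ enabled: false, effort: 'high' }, 'gpt-oss:20b') // false

// gpt-oss models receive the effort level; other models only get a boolean toggle.
getReasoningOptionForModel({ enabled: true, effort: 'high' }, 'gpt-oss:20b') // 'high'
getReasoningOptionForModel({ enabled: true, effort: 'high' }, 'qwen3:8b') // true

// Legacy boolean preferences are normalized first, so they still work.
getReasoningOptionForModel(true, 'qwen3:8b') // true
```
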
diff --git a/utils/prompts/chat.ts b/utils/prompts/chat.ts
index fb53b250..3d77b09c 100644
--- a/utils/prompts/chat.ts
+++ b/utils/prompts/chat.ts
@@ -1,4 +1,4 @@
-import { ContextAttachment, ContextAttachmentStorage, ImageAttachment, PDFAttachment, TabAttachment } from '@/types/chat'
+import { CapturedPageAttachment, ContextAttachment, ContextAttachmentStorage, ImageAttachment, PDFAttachment, SelectedTextAttachment, TabAttachment } from '@/types/chat'
import { Base64ImageData } from '@/types/image'
import dayjs from '@/utils/time'
@@ -31,6 +31,12 @@ export class EnvironmentDetailsBuilder {
generateUpdates(usedIds: string[] = []) {
    const ensureUnused = <T extends ContextAttachment>(attachment?: T) => this.ensureUnused(usedIds, attachment)
+ // Check for selected text (always include it, regardless of usedIds)
+ const selectedTextAttachment = this.contextAttachmentStorage.attachments.find((a): a is SelectedTextAttachment => a.type === 'selected-text')
+
+ // Check for Captured page (always include it, regardless of usedIds)
+ const capturedPageAttachment = this.contextAttachmentStorage.attachments.find((a): a is CapturedPageAttachment => a.type === 'captured-page')
+
const envBuilder = new TagBuilder('environment_updates')
const currentTab = ensureUnused(this.contextAttachmentStorage.currentTab?.type === 'tab' ? this.contextAttachmentStorage.currentTab : undefined)
const attachments = this.contextAttachmentStorage.attachments.filter((a) => !!ensureUnused(a))
@@ -58,8 +64,14 @@ export class EnvironmentDetailsBuilder {
}
const currentTabImage = ensureUnused(this.contextAttachmentStorage.currentTab?.type === 'image' ? this.contextAttachmentStorage.currentTab : undefined)
- const imagesMeta = attachments.filter((a): a is ImageAttachment => a.type === 'image')
- const allImages = [currentTabImage, ...imagesMeta].filter(Boolean) as ImageAttachment[]
+ const currentTabCapturedPage = ensureUnused(this.contextAttachmentStorage.currentTab?.type === 'captured-page' ? this.contextAttachmentStorage.currentTab : undefined)
+ const imagesMeta = attachments.filter((a): a is ImageAttachment | CapturedPageAttachment => a.type === 'image' || a.type === 'captured-page')
+ const allImages = [currentTabImage, currentTabCapturedPage, ...imagesMeta].filter(Boolean) as (ImageAttachment | CapturedPageAttachment)[]
+
+ if (capturedPageAttachment) {
+ allImages.unshift(capturedPageAttachment)
+ }
+
if (allImages.length) {
envBuilder.insertContent(`# Updated Images`)
for (const img of allImages) {
@@ -67,10 +79,21 @@ export class EnvironmentDetailsBuilder {
}
}
- return envBuilder.hasContent() ? envBuilder.build() : undefined
+ const envUpdates = envBuilder.hasContent() ? envBuilder.build() : undefined
+
+ // Include selected text if present
+ if (selectedTextAttachment) {
+ const selectedTextTag = new TagBuilder('user_selection').insertContent(selectedTextAttachment.value.text.trim())
+ return envUpdates ? `${selectedTextTag.build()}\n\n${envUpdates}` : selectedTextTag.build()
+ }
+
+ return envUpdates
}
generateFull() {
+ // Check for selected text
+ const selectedTextAttachment = this.contextAttachmentStorage.attachments.find((a): a is SelectedTextAttachment => a.type === 'selected-text')
+
const tabContextBuilder = new TextBuilder('# Available Tabs')
const currentTab = this.contextAttachmentStorage.currentTab?.type === 'tab' ? this.contextAttachmentStorage.currentTab : undefined
const tabs = this.contextAttachmentStorage.attachments.filter((a): a is TabAttachment => a.type === 'tab' && a.value.tabId !== currentTab?.value.tabId)
@@ -99,8 +122,9 @@ export class EnvironmentDetailsBuilder {
const imageContextBuilder = new TextBuilder('# Available Images')
const currentTabImage = this.contextAttachmentStorage.currentTab?.type === 'image' ? this.contextAttachmentStorage.currentTab : undefined
- const attachmentImages = this.contextAttachmentStorage.attachments.filter((a): a is ImageAttachment => a.type === 'image')
- const allImages = [currentTabImage, ...attachmentImages].filter(Boolean) as ImageAttachment[]
+ const currentTabCapturedPage = this.contextAttachmentStorage.currentTab?.type === 'captured-page' ? this.contextAttachmentStorage.currentTab : undefined
+ const attachmentImages = this.contextAttachmentStorage.attachments.filter((a): a is ImageAttachment | CapturedPageAttachment => a.type === 'image' || a.type === 'captured-page')
+ const allImages = [currentTabImage, currentTabCapturedPage, ...attachmentImages].filter(Boolean) as (ImageAttachment | CapturedPageAttachment)[]
if (allImages.length === 0) {
imageContextBuilder.insertContent('(No available images)')
}
@@ -117,6 +141,12 @@ ${pdfContextBuilder}
${imageContextBuilder}
`.trim())
+ // Build the result with optional selected text before environment details
+ if (selectedTextAttachment) {
+ const selectedTextTag = new TagBuilder('user_selection').insertContent(selectedTextAttachment.value.text.trim())
+ return `${selectedTextTag.build()}\n\n${environmentTagBuilder.build()}`
+ }
+
return environmentTagBuilder.build()
}
}
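
With a selected-text attachment present, the generated context now leads with the selection block. The sketch below shows the assumed output shape only; the exact markup depends on `TagBuilder`, which is assumed to emit an XML-style wrapper matching its tag name.

```ts
// Assumed shape of the fragment returned by generateUpdates() when a selection is attached.
const example = [
  '<user_selection>',
  'the text the user highlighted on the page',
  '</user_selection>',
  '',
  '<environment_updates>',
  '# Updated Images',
  '(image metadata rendered here)',
  '</environment_updates>',
].join('\n')

console.debug(example)
```
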
diff --git a/utils/rpc/background-fns.ts b/utils/rpc/background-fns.ts
index 31621f0c..521d4d94 100644
--- a/utils/rpc/background-fns.ts
+++ b/utils/rpc/background-fns.ts
@@ -6,6 +6,7 @@ import { z } from 'zod'
import { convertJsonSchemaToZod, JSONSchema } from 'zod-from-json-schema'
import { ChatHistoryV1, ContextAttachmentStorage } from '@/types/chat'
+import type { ReasoningOption } from '@/types/reasoning'
import { TabInfo } from '@/types/tab'
import logger from '@/utils/logger'
@@ -34,7 +35,7 @@ import { preparePortConnection, shouldGenerateChatTitle } from './utils'
type StreamTextOptions = Omit<Parameters<typeof originalStreamText>[0], 'tools'>
type GenerateTextOptions = Omit<Parameters<typeof originalGenerateText>[0], 'tools'>
type GenerateObjectOptions = Omit<Parameters<typeof originalGenerateObject>[0], 'tools'>
-type ExtraGenerateOptions = { modelId?: string, reasoning?: boolean, autoThinking?: boolean }
+type ExtraGenerateOptions = { modelId?: string, endpointType?: LLMEndpointType, reasoning?: ReasoningOption, autoThinking?: boolean }
type ExtraGenerateOptionsWithTools = ExtraGenerateOptions
type SchemaOptions<S extends z.ZodTypeAny> = { schema: S } | { jsonSchema: JSONSchema }
@@ -79,7 +80,8 @@ const parseSchema = (options: SchemaOptions) => {
const generateExtraModelOptions = (options: ExtraGenerateOptions) => {
return {
...(options.modelId !== undefined ? { model: options.modelId } : {}),
- ...(options.reasoning !== undefined ? { reasoningEffort: options.reasoning } : {}),
+ ...(options.endpointType !== undefined ? { endpointType: options.endpointType } : {}),
+ ...(options.reasoning !== undefined ? { reasoning: options.reasoning } : {}),
...(options.autoThinking !== undefined ? { autoThinking: options.autoThinking } : {}),
}
}
@@ -124,7 +126,7 @@ const streamText = async (options: Pick & ExtraGenerateOptionsWithTools) => {
try {
const response = originalGenerateText({
- model: await getModel({ ...(await getModelUserConfig()), ...generateExtraModelOptions(options) }),
+ model: await getModel({ ...(await getModelUserConfig({ model: options.modelId, endpointType: options.endpointType })), ...generateExtraModelOptions(options) }),
messages: options.messages,
prompt: options.prompt,
system: options.system,
@@ -194,7 +196,7 @@ const generateText = async (options: Pick(options: Pick pattern.test(model.modelId))) {
const schema = parseSchema(options)
const s = zodSchema(schema)
@@ -305,7 +307,7 @@ export const generateObjectFromSchema = async (options: Pi
const isEnum = s instanceof z.ZodEnum
let ret
try {
- const modelInfo = { ...(await getModelUserConfig()), ...generateExtraModelOptions(options) }
+ const modelInfo = { ...(await getModelUserConfig({ model: options.modelId, endpointType: options.endpointType })), ...generateExtraModelOptions(options) }
if (MODELS_NOT_SUPPORTED_FOR_STRUCTURED_OUTPUT.some((pattern) => pattern.test(modelInfo.model))) {
const jsonSchema = zodSchema(s).jsonSchema
const injectSchemaToSystemPrompt = (prompt?: string) => {
@@ -759,10 +761,16 @@ function getTabCaptureMediaStreamId(tabId: number, consumerTabId?: number) {
})
}
-function captureVisibleTab(windowId?: number, options?: Browser.tabs.CaptureVisibleTabOptions) {
- const wid = windowId ?? browser.windows.WINDOW_ID_CURRENT
- const screenCaptureBase64Url = browser.tabs.captureVisibleTab(wid, options ?? {})
- return screenCaptureBase64Url
+function captureVisibleTab(options?: Browser.tabs.CaptureVisibleTabOptions) {
+ const cachedWindowId = BackgroundWindowManager.getCurrentWindowId()
+ browser.permissions.request({ origins: ['<all_urls>'] })
+ if (cachedWindowId) {
+ const screenCaptureBase64Url = browser.tabs.captureVisibleTab(cachedWindowId, options ?? {})
+ return screenCaptureBase64Url
+ }
+ else {
+ throw new Error('No cached window ID available for capturing visible tab')
+ }
}
function getTabInfoByTabId(tabId: number) {
@@ -1075,6 +1083,17 @@ async function forwardGmailAction(action: 'summary' | 'reply' | 'compose', data:
}
}
+async function forwardSelectionText(tabId: number, selectedText: string) {
+ try {
+ b2sRpc.emit('selectionChanged', { tabId, selectedText })
+ return { success: true }
+ }
+ catch (error) {
+ logger.error('Failed to forward selection text to sidepanel:', error)
+ return { success: false, error: String(error) }
+ }
+}
+
export const backgroundFunctions = {
  emit: <E extends EventKey>(ev: E, ...args: Parameters<Events[E]>) => {
eventEmitter.emit(ev, ...args)
@@ -1146,5 +1165,7 @@ export const backgroundFunctions = {
showSettings: showSettingsForBackground,
updateSidepanelModelList,
forwardGmailAction,
+ // Selected Text
+ forwardSelectionText,
}
; (self as unknown as { backgroundFunctions: unknown }).backgroundFunctions = backgroundFunctions
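
End to end, the selection flow is: the content script detects a selection change, calls `forwardSelectionText` over RPC, and the background re-emits `selectionChanged` to the sidepanel. A hypothetical content-script hookup is sketched below; the `c2bRpc` call shape, the `selectionchange` listener, and the placeholder tab id are assumptions (the background would normally derive the tab id from the RPC sender).

```ts
// Hypothetical content-script wiring; c2bRpc is assumed to expose backgroundFunctions 1:1.
import { c2bRpc } from '@/utils/rpc'
import { getSelectedText } from '@/utils/rpc/content-fns'

let lastForwarded = ''

document.addEventListener('selectionchange', () => {
  const selectedText = getSelectedText({})
  if (selectedText === lastForwarded) return
  lastForwarded = selectedText
  // The real tab id would come from the RPC sender in the background; 0 is a placeholder.
  void c2bRpc.forwardSelectionText(0, selectedText)
})
```
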
diff --git a/utils/rpc/content-fns.ts b/utils/rpc/content-fns.ts
index ebef8b6b..5d1d0701 100644
--- a/utils/rpc/content-fns.ts
+++ b/utils/rpc/content-fns.ts
@@ -15,6 +15,7 @@ export type Events = {
tabUpdated(opts: { tabId: number, url?: string, faviconUrl?: string, title?: string }): void
tabRemoved(opts: { tabId: number } & Browser.tabs.TabRemoveInfo): void
contextMenuClicked(opts: { _toTab?: number } & Browser.contextMenus.OnClickData & { menuItemId: ContextMenuId }): void
+ selectionChanged(opts: { tabId: number, selectedText: string }): void
}
export type EventKey = keyof Events
@@ -53,6 +54,12 @@ export function ping(_: { _toTab?: number }) {
return 'pong'
}
+export function getSelectedText(_: { _toTab?: number }) {
+ const selection = window.getSelection()
+ const selectedText = selection?.toString().trim() || ''
+ return selectedText
+}
+
export const contentFunctions = {
  emit: <E extends EventKey>(ev: E, ...args: Parameters<Events[E]>) => {
eventEmitter.emit(ev, ...args)
@@ -63,6 +70,7 @@ export const contentFunctions = {
getPagePDFContent,
getPageContentType,
getDocumentContent,
+ getSelectedText,
ping,
} as const
diff --git a/utils/rpc/sidepanel-fns.ts b/utils/rpc/sidepanel-fns.ts
index 14e64e23..36cf74b3 100644
--- a/utils/rpc/sidepanel-fns.ts
+++ b/utils/rpc/sidepanel-fns.ts
@@ -14,6 +14,7 @@ export type Events = {
gmailAction(options: { action: 'summary' | 'reply' | 'compose', data: unknown, tabInfo: TabInfo }): void
updateModelList(): void
updateChatList(): void
+ selectionChanged(options: { tabId: number, selectedText: string }): void
}
export type EventKey = keyof Events
diff --git a/utils/user-config/defaults.ts b/utils/user-config/defaults.ts
index e9cf9a90..aa2da0b4 100644
--- a/utils/user-config/defaults.ts
+++ b/utils/user-config/defaults.ts
@@ -209,18 +209,22 @@ ${tools.map((tool) => renderPrompt`${new PromptBasedToolBuilder(tool)}`).join('\
# WORKFLOW
-Simple two-step process for ALL queries:
+Simple step-by-step process for ALL queries:
-### Step 1: Always Check Selected Tab First
-- Start with brief explanation: "Let me first check the selected tab to see if it contains relevant information"
+### Step 1: Check for Selected Text First
+- If user_message contains <user_selection> tags, prioritize understanding and responding to the selected content
+- The selected text indicates the user's specific focus area and should guide all subsequent tool usage
+
+### Step 2: Always Check Selected Tab First
+- Start with brief explanation: "Let me first check the selected tab to see if it contains relevant information about [selected text topic]"
- Use view_tab for the SELECTED tab (marked as SELECTED in available tabs)
- This is mandatory regardless of query type
-### Step 2: Click on Relevant Links Found
+### Step 3: Click on Relevant Links Found
- If the selected tab shows relevant interactive elements, use click to explore them
- Prioritize click over other tools when relevant links are available
-### Step 3: Other Tools as Needed
+### Step 4: Other Tools as Needed
- Use other available resources: view_pdf, view_image, other tabs
${hasOnlineSearch ? '- Use search_online only if existing resources don\'t provide sufficient information' : '- Note: Web search is currently disabled. Focus on available local resources.'}
- Use fetch_page for specific URLs mentioned by user
diff --git a/utils/user-config/index.ts b/utils/user-config/index.ts
index cc5fcb2f..90d37d54 100644
--- a/utils/user-config/index.ts
+++ b/utils/user-config/index.ts
@@ -1,6 +1,7 @@
import { computed } from 'vue'
import { browser } from 'wxt/browser'
+import { DEFAULT_REASONING_PREFERENCE, normalizeReasoningPreference } from '@/types/reasoning'
import { ThemeModeType } from '@/types/theme'
import { c2bRpc } from '@/utils/rpc'
@@ -92,6 +93,10 @@ export async function _getUserConfig() {
}
})
+ const reasoning = await new Config('llm.reasoning').default(DEFAULT_REASONING_PREFERENCE).build()
+ const normalizedReasoning = normalizeReasoningPreference(reasoning.get())
+ reasoning.set(normalizedReasoning)
+
return {
locale: {
current: await new Config('locale.current').build(),
@@ -101,7 +106,7 @@ export async function _getUserConfig() {
endpointType: await new Config('llm.endpointType').default('ollama' as LLMEndpointType).build(),
model: await new Config('llm.model').build(),
apiKey: await new Config('llm.apiKey').default('ollama').build(),
- reasoning: await new Config('llm.reasoning').default(true).build(),
+ reasoning,
titleGenerationSystemPrompt: await new Config('llm.titleGenerationSystemPrompt').default(DEFAULT_CHAT_TITLE_GENERATION_SYSTEM_PROMPT).build(),
backends: {
ollama: {
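
Because the stored value is normalized and written back when the config is first built, downstream readers always see the object form. A sketch of the one-time upgrade, with the default effort value assumed:

```ts
// Sketch of the migration performed in _getUserConfig (ESM module with top-level await).
// Before (written by older versions):    llm.reasoning === true
// After the first read with this patch:  llm.reasoning === { enabled: true, effort: <default effort> }
import { getUserConfig } from '@/utils/user-config'

const userConfig = await getUserConfig()
const reasoning = userConfig.llm.reasoning.get() // always a ReasoningPreference object from here on
console.debug(reasoning.enabled, reasoning.effort)
```
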
diff --git a/wxt.config.ts b/wxt.config.ts
index 5b182e46..8c190528 100644
--- a/wxt.config.ts
+++ b/wxt.config.ts
@@ -110,5 +110,6 @@ export default defineConfig({
},
],
host_permissions: ['*://*/*', 'ws://*/*', 'wss://*/*'],
+ optional_host_permissions: ['<all_urls>'],
},
})
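
Declaring `<all_urls>` as an optional host permission keeps the install-time prompt unchanged while letting the background request broader access just before a capture. A hedged sketch of such a guard follows; the helper name and the `png` format are illustrative, and `permissions.request` must run in response to a user gesture.

```ts
// Hypothetical guard around captureVisibleTab: ask for the optional permission
// first and fail loudly if it is declined.
import { browser } from 'wxt/browser'

async function captureVisibleTabWithPermission(windowId: number): Promise<string> {
  const granted = await browser.permissions.request({ origins: ['<all_urls>'] })
  if (!granted) throw new Error('Screenshot permission was not granted')
  return browser.tabs.captureVisibleTab(windowId, { format: 'png' })
}
```
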