Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion .husky/pre-commit
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,7 @@
#
# Auto-fix any lint errors in staged files
#
npx lint-staged
export NVM_DIR="${NVM_DIR:-$HOME/.nvm}"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"

npx lint-staged
18 changes: 12 additions & 6 deletions agentic/src/cache.ts
Original file line number Diff line number Diff line change
Expand Up @@ -23,9 +23,12 @@ export interface FileBasedCacheOptions {
* @template C - The coordinates of the cache - paths to cache files.
* @template O - Additional options for the cache.
*/
export class FileBasedResponseCache<K, V>
implements InputOutputCache<K, V, CacheFilePaths, FileBasedCacheOptions>
{
export class FileBasedResponseCache<K, V> implements InputOutputCache<
K,
V,
CacheFilePaths,
FileBasedCacheOptions
> {
enabled: boolean;

constructor(
Expand Down Expand Up @@ -131,9 +134,12 @@ export const ALL_REVISIONS = -1;
* @template C - undefined (coordinates not available for in-memory cache).
* @template O - Any additional options.
*/
export class InMemoryCacheWithRevisions<K, V>
implements InputOutputCache<K, V, void, InMemoryCacheWithRevisionsOptions>
{
export class InMemoryCacheWithRevisions<K, V> implements InputOutputCache<
K,
V,
void,
InMemoryCacheWithRevisionsOptions
> {
private readonly cache: Map<K, V[]>;
enabled: boolean;

Expand Down
8 changes: 7 additions & 1 deletion agentic/src/nodes/analysisIssueFix.ts
Original file line number Diff line number Diff line change
Expand Up @@ -325,7 +325,10 @@ If you have any additional details or steps that need to be performed, put it he
);

if (!response) {
this.logger.silly("AnalysisIssueFix returned undefined response");
this.logger.warn(
`AnalysisIssueFix: LLM returned no response for file "${fileName}". ` +
`This may indicate a model provider configuration issue.`,
);
return {
outputAdditionalInfo: undefined,
outputUpdatedFile: undefined,
Expand Down Expand Up @@ -460,6 +463,9 @@ ${state.inputAllReasoning}`,
);

if (!response) {
this.logger.warn(
"SummarizeHistory: LLM returned no response. This may indicate a model provider configuration issue.",
);
return {
summarizedHistory: "",
iterationCount: state.iterationCount,
Expand Down
104 changes: 70 additions & 34 deletions agentic/src/nodes/base.ts
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,8 @@ import {
import { KaiWorkflowEventEmitter } from "../eventEmitter";

export abstract class BaseNode extends KaiWorkflowEventEmitter {
private static readonly DEFAULT_LLM_TIMEOUT_MS = 300_000; // 5 minutes

constructor(
private readonly name: string,
protected readonly modelProvider: KaiModelProvider,
Expand Down Expand Up @@ -55,6 +57,9 @@ export abstract class BaseNode extends KaiWorkflowEventEmitter {
* Falls back to invoke() when native tools are supported but not in streaming.
* If native tools are not supported, parses response on-the-fly and assembles
* into tool_call_chunks making it transparent to callers.
*
* Includes a configurable timeout (default 5 min) to prevent hanging when
* the model provider is misconfigured or unreachable.
*/
protected async streamOrInvoke(
input: BaseLanguageModelInput,
Expand All @@ -64,6 +69,8 @@ export abstract class BaseNode extends KaiWorkflowEventEmitter {
emitResponseChunks?: boolean;
// toolsSelector matches tool names to enable
toolsSelectors?: string[];
// timeout in ms for the entire LLM request (default: 5 minutes)
timeoutMs?: number;
},
options?: KaiModelProviderInvokeCallOptions | undefined,
): Promise<AIMessage | AIMessageChunk | undefined> {
Expand All @@ -72,50 +79,75 @@ export abstract class BaseNode extends KaiWorkflowEventEmitter {
enableTools = true,
emitResponseChunks = true,
toolsSelectors = [],
timeoutMs = BaseNode.DEFAULT_LLM_TIMEOUT_MS,
} = streamOptions || {};

let timeoutId: ReturnType<typeof setTimeout> | undefined;

try {
// if we don't have tools enabled or registered, we should be able to stream without any issues
if (!enableTools || !this.tools.length) {
const executeRequest = async (): Promise<AIMessage | AIMessageChunk | undefined> => {
// if we don't have tools enabled or registered, we should be able to stream without any issues
if (!enableTools || !this.tools.length) {
return this.process_stream(
messageId,
enableTools,
emitResponseChunks,
await this.modelProvider.stream(input, options),
);
}

let runnable: KaiModelProvider = this.modelProvider;
let processedInput: BaseLanguageModelInput = input;

if (this.modelProvider.toolCallsSupported()) {
runnable = this.modelProvider.bindTools(this.tools);
} else {
// use custom tool parsing if model does not support tool calls
processedInput = this.getInputWithTools(input, toolsSelectors);
}

// use invoke if the model does not support streaming tool calls but supports normal tool calls
if (
this.modelProvider.toolCallsSupported() &&
!this.modelProvider.toolCallsSupportedInStreaming()
) {
const fullResponse = await runnable.invoke(processedInput, options);
if (emitResponseChunks) {
this.emitWorkflowMessage({
id: messageId,
type: KaiWorkflowMessageType.LLMResponse,
data: fullResponse,
});
}
return fullResponse;
}

return this.process_stream(
messageId,
enableTools,
emitResponseChunks,
await this.modelProvider.stream(input, options),
await runnable.stream(processedInput, options),
);
}

let runnable: KaiModelProvider = this.modelProvider;
let processedInput: BaseLanguageModelInput = input;
};

if (this.modelProvider.toolCallsSupported()) {
runnable = this.modelProvider.bindTools(this.tools);
} else {
// use custom tool parsing if model does not support tool calls
processedInput = this.getInputWithTools(input, toolsSelectors);
}
const requestPromise = executeRequest();
const timeoutPromise = new Promise<never>((_, reject) => {
timeoutId = setTimeout(() => {
reject(
new Error(
`LLM request timed out after ${timeoutMs / 1000}s. ` +
`Verify your provider-settings.yaml configuration is correct.`,
),
);
}, timeoutMs);
});

// use invoke if the model does not support streaming tool calls but supports normal tool calls
if (
this.modelProvider.toolCallsSupported() &&
!this.modelProvider.toolCallsSupportedInStreaming()
) {
const fullResponse = await runnable.invoke(processedInput, options);
if (emitResponseChunks) {
this.emitWorkflowMessage({
id: messageId,
type: KaiWorkflowMessageType.LLMResponse,
data: fullResponse,
});
}
return fullResponse;
try {
return await Promise.race([requestPromise, timeoutPromise]);
} finally {
// Suppress unhandled rejection from the request if it settles after the timeout
requestPromise.catch(() => {});
}

return this.process_stream(
messageId,
enableTools,
emitResponseChunks,
await runnable.stream(processedInput, options),
);
} catch (err) {
this.logger.error(
`Error calling stream(): ${err instanceof Error ? err.message : String(err)}`,
Expand All @@ -131,6 +163,10 @@ export abstract class BaseNode extends KaiWorkflowEventEmitter {
data: `Failed to get llm response - ${String(err)}`,
});
}
} finally {
if (timeoutId !== undefined) {
clearTimeout(timeoutId);
}
}
}

Expand Down
12 changes: 9 additions & 3 deletions agentic/src/nodes/diagnosticsIssueFix.ts
Original file line number Diff line number Diff line change
Expand Up @@ -343,7 +343,9 @@ Instructions for Agent B to solve Issue 3, Issue 4, etc. (mention specific issue
);

if (!response) {
this.logger.silly("PlanFixes returned undefined response");
this.logger.warn(
"PlanFixes: LLM returned no response. This may indicate a model provider configuration issue.",
);
return {
plannerOutputNominatedAgents: [],
iterationCount: state.iterationCount,
Expand Down Expand Up @@ -401,7 +403,9 @@ ${
);

if (!response) {
this.logger.silly("FixGeneralIssues returned undefined response");
this.logger.warn(
"FixGeneralIssues: LLM returned no response. This may indicate a model provider configuration issue.",
);
return {
messages: [new AIMessage(`DONE`)],
outputModifiedFilesFromGeneralFix: [],
Expand Down Expand Up @@ -471,7 +475,9 @@ ${state.inputInstructionsForGeneralFix}
);

if (!response) {
this.logger.silly("FixJavaDependencyIssues returned undefined response");
this.logger.warn(
"FixJavaDependencyIssues: LLM returned no response. This may indicate a model provider configuration issue.",
);
return {
messages: [new AIMessage(`DONE`)],
outputModifiedFilesFromGeneralFix: [],
Expand Down
8 changes: 6 additions & 2 deletions agentic/src/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,8 @@ export enum KaiWorkflowMessageType {
export interface KaiModifiedFile {
path: string;
content: string;
/** Pre-modification content for diffing and revert (used by the Goose flow where files are written to disk before we process them). */
originalContent?: string;
userInteraction?: KaiUserInteraction;
}

Expand All @@ -37,6 +39,7 @@ export interface KaiToolCall {
name?: string;
args?: string;
status: "generating" | "running" | "succeeded" | "failed";
result?: string;
}

export interface KaiUserInteraction {
Expand Down Expand Up @@ -101,8 +104,9 @@ export interface PendingUserInteraction {
reject(reason: any): void;
}

export interface KaiWorkflow<TWorkflowInput extends KaiWorkflowInput = KaiWorkflowInput>
extends KaiWorkflowEvents {
export interface KaiWorkflow<
TWorkflowInput extends KaiWorkflowInput = KaiWorkflowInput,
> extends KaiWorkflowEvents {
init(options: KaiWorkflowInitOptions): Promise<void>;
run(input: TWorkflowInput): Promise<KaiWorkflowResponse>;
resolveUserInteraction(response: KaiUserInteractionMessage): Promise<void>;
Expand Down
10 changes: 10 additions & 0 deletions changes/unreleased/1351-pluggable-agent-backend.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
kind: feature

description: >
Add pluggable agent backend infrastructure supporting Goose, OpenCode, Claude,
and Codex as interchangeable AI backends. Includes MCP server with analysis
tools, structured chat panel with streaming and permission review UI, and
granular tool permission policy.

extensions:
- core
3 changes: 3 additions & 0 deletions changes/unreleased/fix-duplicate-loading-indicators.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
kind: bugfix
description: >
Consolidated duplicate running indicators in chat view and fixed getSolution not showing loading state in non-experimental mode.
Loading
Loading