8 changes: 7 additions & 1 deletion agentic/src/nodes/analysisIssueFix.ts
@@ -325,7 +325,10 @@ If you have any additional details or steps that need to be performed, put it here
);

if (!response) {
this.logger.silly("AnalysisIssueFix returned undefined response");
this.logger.warn(
`AnalysisIssueFix: LLM returned no response for file "${fileName}". ` +
`This may indicate a model provider configuration issue.`,
);
return {
outputAdditionalInfo: undefined,
outputUpdatedFile: undefined,
@@ -460,6 +463,9 @@ ${state.inputAllReasoning}`,
);

if (!response) {
this.logger.warn(
"SummarizeHistory: LLM returned no response. This may indicate a model provider configuration issue.",
);
return {
summarizedHistory: "",
iterationCount: state.iterationCount,
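Both hunks in this file apply the same guard: when the LLM call yields no response, log at `warn` level (naming the node and, where available, the file) instead of the old `silly` level, then return empty outputs. The TypeScript below is a minimal sketch of that pattern; the logger interface, helper name, and return shape are placeholders rather than the extension's actual types.

```typescript
// Minimal sketch of the guard used in both hunks above. The logger interface,
// helper name, and return shape are placeholders, not the extension's types.
interface NodeLogger {
  warn(message: string): void;
}

function handleMissingLlmResponse(
  logger: NodeLogger,
  nodeName: string,
  fileName?: string,
): { outputAdditionalInfo: undefined; outputUpdatedFile: undefined } {
  // Warn (instead of the previous silly-level log) so the failure is visible
  // in default log output, and point at provider configuration as a likely cause.
  const target = fileName ? ` for file "${fileName}"` : "";
  logger.warn(
    `${nodeName}: LLM returned no response${target}. ` +
      `This may indicate a model provider configuration issue.`,
  );
  return { outputAdditionalInfo: undefined, outputUpdatedFile: undefined };
}
```

A caller would check `if (!response)` and return the helper's result, which is what the diff does inline.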
12 changes: 9 additions & 3 deletions agentic/src/nodes/diagnosticsIssueFix.ts
@@ -343,7 +343,9 @@ Instructions for Agent B to solve Issue 3, Issue 4, etc. (mention specific issue
);

if (!response) {
this.logger.silly("PlanFixes returned undefined response");
this.logger.warn(
"PlanFixes: LLM returned no response. This may indicate a model provider configuration issue.",
);
return {
plannerOutputNominatedAgents: [],
iterationCount: state.iterationCount,
@@ -401,7 +403,9 @@ ${
);

if (!response) {
this.logger.silly("FixGeneralIssues returned undefined response");
this.logger.warn(
"FixGeneralIssues: LLM returned no response. This may indicate a model provider configuration issue.",
);
return {
messages: [new AIMessage(`DONE`)],
outputModifiedFilesFromGeneralFix: [],
@@ -471,7 +475,9 @@ ${state.inputInstructionsForGeneralFix}
);

if (!response) {
this.logger.silly("FixJavaDependencyIssues returned undefined response");
this.logger.warn(
"FixJavaDependencyIssues: LLM returned no response. This may indicate a model provider configuration issue.",
);
return {
messages: [new AIMessage(`DONE`)],
outputModifiedFilesFromGeneralFix: [],
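The same silly-to-warn upgrade repeats across the PlanFixes, FixGeneralIssues, and FixJavaDependencyIssues guards in this file, with only the node name changing. Purely as an illustration (not part of this change set), the warning text could live in one small helper:

```typescript
// Hypothetical helper, not part of this change set: keeps the warning text in
// one place so each node only supplies its own name.
type Logger = { warn(message: string): void };

function warnNoLlmResponse(logger: Logger, nodeName: string): void {
  logger.warn(
    `${nodeName}: LLM returned no response. This may indicate a model provider configuration issue.`,
  );
}

// Example call site mirroring the PlanFixes guard above:
// if (!response) {
//   warnNoLlmResponse(this.logger, "PlanFixes");
//   return { plannerOutputNominatedAgents: [], iterationCount: state.iterationCount };
// }
```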
226 changes: 109 additions & 117 deletions vscode/core/resources/sample-provider-settings.yaml
@@ -1,122 +1,114 @@
# This is a sample settings file generated by the extension with a few common providers
# and models defined. To return the file back to default, delete it and restart vscode.
# Sample provider settings — configure via the settings gear in the chat panel,
# or edit this file directly. To reset, delete it and restart VS Code.
#
# The extension reads the `active` block to configure the LLM provider.
# Provider examples are listed below for reference.
---
environment:
ALWAYS_APPLIED_KEY: "envvar to be set regardless of which model is active"
# CA_BUNDLE: "" # optional
# ALLOW_INSECURE: "false" # optional

# This is a collection of model configurations that may be used. The `&active`
# anchor is used to reference the model to the active node below. The extension will
# use the active node for configuring the kai-rpc-server.
models:
OpenAI: &active
environment:
OPENAI_API_KEY: "" # Required
provider: ChatOpenAI
args:
model: gpt-4o # Required

## Following keys are optional & only exist for documentation purposes
# user: <string>
# temperature: <number>
# maxTokens: <number>
# timeout: <number>
# maxRetries: <number>
# configuration:
# baseURL: <string>
# project: <string>
# organization: <string>

AzureChatOpenAI:
environment:
AZURE_OPENAI_API_KEY: "" # Required
provider: AzureChatOpenAI
args:
azureOpenAIApiDeploymentName: "" # Required
azureOpenAIApiVersion: "" # Required

## Following keys are optional & only exist for documentation purposes
# azureOpenAIApiInstanceName: <string>
# azureOpenAIBasePath: <string>
# azureOpenAIEndpoint: <string>
# azureOpenAIDeploymentName: <string>
# openAIApiVersion: <string>
# openAIBasePath: <string>
# deploymentName: <string>
# temperature: <number>
# maxTokens: <number>
# timeout: <number>
# maxRetries: <number>
# modelKwargs: <object>

AmazonBedrock:
environment:
AWS_ACCESS_KEY_ID: "" # Required if a global ~/.aws/credentials file is not present
AWS_SECRET_ACCESS_KEY: "" # Required if a global ~/.aws/credentials file is not present
AWS_DEFAULT_REGION: "" # Required
provider: ChatBedrock
args:
model: meta.llama3-70b-instruct-v1:0 # Required

## Following keys are optional & only exist for documentation purposes
# temperature: <number>
# additionalModelRequestFields: <object>
# configFilepath: <string>

DeepSeek:
environment:
DEEPSEEK_API_KEY: "" # Required
provider: ChatDeepSeek
args:
model: deepseek-chat # Required

## Following keys are optional & only exist for documentation purposes
# temperature: <number>
# maxTokens: <number>
# timeout: <number>
# maxRetries: <number>
# modelKwargs: <object>

GoogleGenAI:
environment:
GOOGLE_API_KEY: "" # Required
provider: ChatGoogleGenerativeAI
args:
model: gemini-2.5-pro # Required

## Following keys are optional & only exist for documentation purposes
# apiVersion: <string>
# baseUrl: <string>
# temperature: <number>

# If running locally https://ollama.com/, get the model name via `ollama list`
ChatOllama:
provider: ChatOllama
args:
model: your-model-here # Required

## Following keys are optional & only exist for documentation purposes
# temperature: <number>
# numPredict: <number>

JustAnExample:
environment:
ANY_KEY_1: "any environment variable needed for this model provider"
ANY_KEY_2: "any environment variable needed for this model provider"

provider: "provider-string"
args:
anyArgumentName1: "argument one"
anyArgumentName2: "argument two"
any-argument-name-3: "argument three"

template: "template string" # optional
llamaHeader: "header string" # optional
llmRetries: 5 # optional number, defaults to 5
llmRetryDelay: 10.0 # optional float, default is 10.0

# This is the node used for configuring the server. A simple anchor/reference
# pair is an easy way to select a configuration. To change configs, move the
# `&active` anchor to the desired block and restart the server.
active: *active
# The active provider configuration used by the extension.
# Update this block directly or use the chat panel settings UI.
active:
environment:
OPENAI_API_KEY: "" # Required
provider: ChatOpenAI
args:
model: gpt-4o # Required


## Following keys are optional
# temperature: <number>
# maxTokens: <number>
# timeout: <number>
# maxRetries: <number>
# configuration:
# baseURL: <string>
# project: <string>
# organization: <string>

# ──────────────────────────────────────────────────────────────────────
# Provider reference — copy a block above to `active:` and fill in
# the required fields.
# ──────────────────────────────────────────────────────────────────────

# --- OpenAI ---
# active:
# environment:
# OPENAI_API_KEY: ""
# provider: ChatOpenAI
# args:
# model: gpt-4o

# --- Azure OpenAI ---
# active:
# environment:
# AZURE_OPENAI_API_KEY: ""
# provider: AzureChatOpenAI
# args:
# azureOpenAIApiDeploymentName: ""
# azureOpenAIApiVersion: ""
# # azureOpenAIEndpoint: <string>
# # temperature: <number>
# # maxTokens: <number>

# --- Amazon Bedrock ---
# active:
# environment:
# AWS_ACCESS_KEY_ID: "" # if no ~/.aws/credentials
# AWS_SECRET_ACCESS_KEY: "" # if no ~/.aws/credentials
# AWS_DEFAULT_REGION: ""
# provider: ChatBedrock
# args:
# model: meta.llama3-70b-instruct-v1:0
# # temperature: <number>

# --- DeepSeek ---
# active:
# environment:
# DEEPSEEK_API_KEY: ""
# provider: ChatDeepSeek
# args:
# model: deepseek-chat
# # temperature: <number>
# # maxTokens: <number>

# --- Google Gemini ---
# active:
# environment:
# GOOGLE_API_KEY: ""
# provider: ChatGoogleGenerativeAI
# args:
# model: gemini-2.5-pro
# # apiVersion: <string>
# # baseUrl: <string>
# # temperature: <number>

# --- Ollama (local) — get model name via `ollama list` ---
# active:
# provider: ChatOllama
# args:
# model: your-model-here
# # temperature: <number>
# # numPredict: <number>

# --- Anthropic ---
# active:
# environment:
# ANTHROPIC_API_KEY: ""
# provider: ChatAnthropic
# args:
# model: claude-sonnet-4-20250514

# --- Custom provider ---
# active:
# environment:
# ANY_KEY: "environment variable needed for this provider"
# provider: "provider-string"
# args:
# model: "model-name"
# template: "template string" # optional
# llamaHeader: "header string" # optional
# llmRetries: 5 # optional, defaults to 5
# llmRetryDelay: 10.0 # optional, defaults to 10.0
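The rewritten header states that the extension reads the `active` block to configure the LLM provider; the old file reached the same block through a YAML anchor/alias pair (`&active` / `*active`), while the new file inlines it and keeps the other providers as commented reference blocks. As a rough illustration of consuming that block, here is a hedged TypeScript sketch using js-yaml; the file path, interface shape, and error handling are assumptions, not the extension's actual loader.

```typescript
// Hedged sketch of reading the `active` block from the settings file.
// Field names mirror the YAML above; everything else is illustrative only.
import * as fs from "fs";
import * as yaml from "js-yaml";

interface ActiveProviderConfig {
  environment?: Record<string, string>;
  provider: string;
  args: Record<string, unknown>;
}

function loadActiveProvider(settingsPath: string): ActiveProviderConfig {
  const doc = yaml.load(fs.readFileSync(settingsPath, "utf8")) as {
    environment?: Record<string, string>;
    active?: ActiveProviderConfig;
  };
  if (!doc?.active?.provider) {
    throw new Error(`No usable "active" block found in ${settingsPath}`);
  }
  return doc.active;
}

// Example: const { provider, args } = loadActiveProvider("provider-settings.yaml");
```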