From 7347ee6c875f703c6fe9071482d4b8f5bf4a4b57 Mon Sep 17 00:00:00 2001 From: Victor Vazquez Date: Sat, 21 Jun 2025 04:45:38 +0000 Subject: [PATCH 001/116] WIP --- cli/azd/.vscode/cspell-azd-dictionary.txt | 2 ++ cli/azd/cmd/container.go | 2 ++ cli/azd/cmd/version.go | 26 +++++++++----- cli/azd/pkg/llm/manager.go | 43 +++++++++++++++++++++++ go.mod | 4 ++- go.sum | 9 +++++ 6 files changed, 76 insertions(+), 10 deletions(-) create mode 100644 cli/azd/pkg/llm/manager.go diff --git a/cli/azd/.vscode/cspell-azd-dictionary.txt b/cli/azd/.vscode/cspell-azd-dictionary.txt index 8851e9da440..3ff5e701535 100644 --- a/cli/azd/.vscode/cspell-azd-dictionary.txt +++ b/cli/azd/.vscode/cspell-azd-dictionary.txt @@ -136,10 +136,12 @@ jmes jquery keychain kubelogin +langchaingo LASTEXITCODE ldflags lechnerc77 libc +llms memfs mergo mgmt diff --git a/cli/azd/cmd/container.go b/cli/azd/cmd/container.go index 8eeaf46cc89..87c690b394b 100644 --- a/cli/azd/cmd/container.go +++ b/cli/azd/cmd/container.go @@ -53,6 +53,7 @@ import ( "github.com/azure/azure-dev/cli/azd/pkg/kubelogin" "github.com/azure/azure-dev/cli/azd/pkg/kustomize" "github.com/azure/azure-dev/cli/azd/pkg/lazy" + "github.com/azure/azure-dev/cli/azd/pkg/llm" "github.com/azure/azure-dev/cli/azd/pkg/output" "github.com/azure/azure-dev/cli/azd/pkg/pipeline" "github.com/azure/azure-dev/cli/azd/pkg/platform" @@ -544,6 +545,7 @@ func registerCommonDependencies(container *ioc.NestedContainer) { return serviceManager, err }) }) + container.MustRegisterSingleton(llm.NewManager) container.MustRegisterSingleton(repository.NewInitializer) container.MustRegisterSingleton(alpha.NewFeaturesManager) container.MustRegisterSingleton(config.NewUserConfigManager) diff --git a/cli/azd/cmd/version.go b/cli/azd/cmd/version.go index fb464321fc2..626207b216c 100644 --- a/cli/azd/cmd/version.go +++ b/cli/azd/cmd/version.go @@ -12,6 +12,7 @@ import ( "github.com/azure/azure-dev/cli/azd/internal" "github.com/azure/azure-dev/cli/azd/pkg/contracts" "github.com/azure/azure-dev/cli/azd/pkg/input" + "github.com/azure/azure-dev/cli/azd/pkg/llm" "github.com/azure/azure-dev/cli/azd/pkg/output" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -33,10 +34,11 @@ func newVersionFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) } type versionAction struct { - flags *versionFlags - formatter output.Formatter - writer io.Writer - console input.Console + flags *versionFlags + formatter output.Formatter + writer io.Writer + console input.Console + llmManager llm.Manager } func newVersionAction( @@ -44,19 +46,25 @@ func newVersionAction( formatter output.Formatter, writer io.Writer, console input.Console, + llmManager llm.Manager, ) actions.Action { return &versionAction{ - flags: flags, - formatter: formatter, - writer: writer, - console: console, + flags: flags, + formatter: formatter, + writer: writer, + console: console, + llmManager: llmManager, } } func (v *versionAction) Run(ctx context.Context) (*actions.ActionResult, error) { switch v.formatter.Kind() { case output.NoneFormat: - fmt.Fprintf(v.console.Handles().Stdout, "azd version %s\n", internal.Version) + llmInfo, err := v.llmManager.Info() + if err != nil { + return nil, fmt.Errorf("failed to get LLM info: %w", err) + } + fmt.Fprintf(v.console.Handles().Stdout, "azd version %s\n%s\n", internal.Version, llmInfo) case output.JsonFormat: var result contracts.VersionResult versionSpec := internal.VersionInfo() diff --git a/cli/azd/pkg/llm/manager.go b/cli/azd/pkg/llm/manager.go new file mode 100644 index 
00000000000..a2d1fbee589 --- /dev/null +++ b/cli/azd/pkg/llm/manager.go @@ -0,0 +1,43 @@ +package llm + +import ( + "context" + "fmt" + + "github.com/tmc/langchaingo/llms" + "github.com/tmc/langchaingo/llms/openai" +) + +func NewManager() Manager { + return Manager{} +} + +type Manager struct { +} + +func (m Manager) Info() (string, error) { + llm, err := openai.New( + openai.WithModel("o1-mini"), + openai.WithAPIType(openai.APITypeAzure), + openai.WithAPIVersion("2024-12-01-preview"), + openai.WithBaseURL("https://vivazqu-2260-resource.cognitiveservices.azure.com/"), + ) + if err != nil { + return "", fmt.Errorf("failed to create LLM: %w", err) + } + + ctx := context.Background() + content := []llms.MessageContent{ + llms.TextParts(llms.ChatMessageTypeHuman, ` +Tell what model you are using and what is your version. Make it sound like a friendly human.`), + } + fmt.Println("Generating content...") + output, err := llm.GenerateContent(ctx, content, + llms.WithMaxTokens(4000), + llms.WithTemperature(1), + ) + if err != nil { + return "", fmt.Errorf("failed to generate content: %w", err) + } + return output.Choices[0].Content, nil +} diff --git a/go.mod b/go.mod index c87d835aa4e..d3000d4f235 100644 --- a/go.mod +++ b/go.mod @@ -67,6 +67,7 @@ require ( github.com/stretchr/testify v1.10.0 github.com/theckman/yacspin v0.13.12 github.com/tidwall/gjson v1.18.0 + github.com/tmc/langchaingo v0.1.13 go.lsp.dev/jsonrpc2 v0.10.0 go.opentelemetry.io/otel v1.35.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 @@ -88,6 +89,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dlclark/regexp2 v1.10.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect @@ -99,7 +101,7 @@ require ( github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/otiai10/mint v1.6.3 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/pkg/errors v0.9.1 // indirect + github.com/pkoukk/tiktoken-go v0.1.6 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/segmentio/asm v1.2.0 // indirect diff --git a/go.sum b/go.sum index 96dbd762d9e..a2fd2103e19 100644 --- a/go.sum +++ b/go.sum @@ -105,6 +105,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0= +github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/drone/envsubst v1.0.3 h1:PCIBwNDYjs50AsLZPYdfhSATKaRg/FJmDc2D6+C2x8g= github.com/drone/envsubst v1.0.3/go.mod h1:N2jZmlMufstn1KEqvbHjw40h1KyTmnVzHcSc9bFiJ2g= github.com/eiannone/keyboard v0.0.0-20220611211555-0d226195f203 h1:XBBHcIb256gUJtLmY22n99HaZTz+r2Z51xUPi01m3wg= @@ -198,6 +200,8 @@ github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmd github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod 
h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkoukk/tiktoken-go v0.1.6 h1:JF0TlJzhTbrI30wCvFuiw6FzP2+/bR+FIxUdgEAcUsw= +github.com/pkoukk/tiktoken-go v0.1.6/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/psanford/memfs v0.0.0-20241019191636-4ef911798f9b h1:xzjEJAHum+mV5Dd5KyohRlCyP03o4yq6vNpEUtAJQzI= @@ -246,6 +250,8 @@ github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JT github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tmc/langchaingo v0.1.13 h1:rcpMWBIi2y3B90XxfE4Ao8dhCQPVDMaNPnN5cGB1CaA= +github.com/tmc/langchaingo v0.1.13/go.mod h1:vpQ5NOIhpzxDfTZK9B6tf2GM/MoaHewPWM5KXXGh7hg= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.lsp.dev/jsonrpc2 v0.10.0 h1:Pr/YcXJoEOTMc/b6OTmcR1DPJ3mSWl/SWiU1Cct6VmI= go.lsp.dev/jsonrpc2 v0.10.0/go.mod h1:fmEzIdXPi/rf6d4uFcayi8HpFP1nBF99ERP1htC72Ac= @@ -340,7 +346,10 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= From 0ffff37ff47cd4dd610e0f8c0bc34677c600085e Mon Sep 17 00:00:00 2001 From: Victor Vazquez Date: Tue, 24 Jun 2025 20:20:10 +0000 Subject: [PATCH 002/116] wip --- cli/azd/.vscode/cspell-azd-dictionary.txt | 1 + cli/azd/cmd/version.go | 7 +++-- cli/azd/pkg/llm/manager.go | 34 +++++++++++++++++++++-- 3 files changed, 37 insertions(+), 5 deletions(-) diff --git a/cli/azd/.vscode/cspell-azd-dictionary.txt b/cli/azd/.vscode/cspell-azd-dictionary.txt index 3ff5e701535..d9286fb7306 100644 --- a/cli/azd/.vscode/cspell-azd-dictionary.txt +++ b/cli/azd/.vscode/cspell-azd-dictionary.txt @@ -161,6 +161,7 @@ nodeapp nolint nologo notrail +ollama omitempty oneauth oneline diff --git a/cli/azd/cmd/version.go b/cli/azd/cmd/version.go index 626207b216c..c71ff455705 100644 --- a/cli/azd/cmd/version.go +++ b/cli/azd/cmd/version.go @@ -7,6 +7,7 @@ import ( "context" "fmt" "io" + "time" "github.com/azure/azure-dev/cli/azd/cmd/actions" "github.com/azure/azure-dev/cli/azd/internal" @@ -60,11 +61,13 @@ func newVersionAction( func (v *versionAction) Run(ctx context.Context) (*actions.ActionResult, error) { switch v.formatter.Kind() { case output.NoneFormat: - llmInfo, err := 
v.llmManager.Info() + fmt.Fprintf(v.console.Handles().Stdout, "azd version %s\n", internal.Version) + time.Sleep(500 * time.Millisecond) + _, err := v.llmManager.Info(v.console.Handles().Stdout) if err != nil { return nil, fmt.Errorf("failed to get LLM info: %w", err) } - fmt.Fprintf(v.console.Handles().Stdout, "azd version %s\n%s\n", internal.Version, llmInfo) + fmt.Fprintf(v.console.Handles().Stdout, "\n") case output.JsonFormat: var result contracts.VersionResult versionSpec := internal.VersionInfo() diff --git a/cli/azd/pkg/llm/manager.go b/cli/azd/pkg/llm/manager.go index a2d1fbee589..fc53fd59a2f 100644 --- a/cli/azd/pkg/llm/manager.go +++ b/cli/azd/pkg/llm/manager.go @@ -3,6 +3,7 @@ package llm import ( "context" "fmt" + "io" "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/llms/openai" @@ -15,7 +16,7 @@ func NewManager() Manager { type Manager struct { } -func (m Manager) Info() (string, error) { +func (m Manager) Info(stdout io.Writer) (string, error) { llm, err := openai.New( openai.WithModel("o1-mini"), openai.WithAPIType(openai.APITypeAzure), @@ -29,15 +30,42 @@ func (m Manager) Info() (string, error) { ctx := context.Background() content := []llms.MessageContent{ llms.TextParts(llms.ChatMessageTypeHuman, ` -Tell what model you are using and what is your version. Make it sound like a friendly human.`), +Respond with the version of the LLM you are using. +Use the format "LLM: ".`), } - fmt.Println("Generating content...") output, err := llm.GenerateContent(ctx, content, llms.WithMaxTokens(4000), llms.WithTemperature(1), + llms.WithStreamingFunc(func(ctx context.Context, chunk []byte) error { + fmt.Fprintf(stdout, "%s", string(chunk)) + return nil + }), ) if err != nil { return "", fmt.Errorf("failed to generate content: %w", err) } return output.Choices[0].Content, nil } + +// func (m Manager) Info(stdout io.Writer) (string, error) { +// llm, err := ollama.New(ollama.WithModel("llama3")) +// if err != nil { +// return "", err +// } +// ctx := context.Background() +// output, err := llms.GenerateFromSinglePrompt( +// ctx, +// llm, +// "Human: Describe the version of the LLM you are using. Use the format 'LLM: '.", +// llms.WithTemperature(0.8), +// llms.WithStreamingFunc(func(ctx context.Context, chunk []byte) error { +// fmt.Fprintf(stdout, "%s", string(chunk)) +// return nil +// }), +// ) +// _ = output // We don't use the output here, as we are streaming directly to stdout. 
+// if err != nil { +// return "", err +// } +// return "", nil +// } From 8900565878a23222022f124bc76ff8de31449008 Mon Sep 17 00:00:00 2001 From: Victor Vazquez Date: Thu, 26 Jun 2025 17:37:03 +0000 Subject: [PATCH 003/116] Adding llm package wrapping langchain lib --- cli/azd/pkg/llm/azure_openai.go | 75 +++++++++ cli/azd/pkg/llm/client.go | 10 ++ cli/azd/pkg/llm/manager.go | 232 ++++++++++++++++++++------ cli/azd/pkg/llm/manager_test.go | 129 ++++++++++++++ cli/azd/pkg/llm/ollama.go | 33 ++++ cli/azd/resources/alpha_features.yaml | 3 + 6 files changed, 431 insertions(+), 51 deletions(-) create mode 100644 cli/azd/pkg/llm/azure_openai.go create mode 100644 cli/azd/pkg/llm/client.go create mode 100644 cli/azd/pkg/llm/manager_test.go create mode 100644 cli/azd/pkg/llm/ollama.go diff --git a/cli/azd/pkg/llm/azure_openai.go b/cli/azd/pkg/llm/azure_openai.go new file mode 100644 index 00000000000..b4b64bd62c4 --- /dev/null +++ b/cli/azd/pkg/llm/azure_openai.go @@ -0,0 +1,75 @@ +package llm + +import ( + "fmt" + "maps" + "os" + + "github.com/azure/azure-dev/cli/azd/pkg/output/ux" + "github.com/tmc/langchaingo/llms/openai" +) + +const ( + modelEnvVar = "AZD_AZURE_OPENAI_MODEL" + versionEnvVar = "AZD_AZURE_OPENAI_VERSION" + urlEnvVar = "AZD_AZURE_OPENAI_URL" + keyEnvVar = "OPENAI_API_KEY" +) + +type requiredEnvVar struct { + name string + value string + isDefined bool +} + +var requiredEnvVars = map[string]requiredEnvVar{ + modelEnvVar: {name: modelEnvVar}, + versionEnvVar: {name: versionEnvVar}, + urlEnvVar: {name: urlEnvVar}, + keyEnvVar: {name: keyEnvVar}, +} + +func loadAzureOpenAi() (InfoResponse, error) { + + envVars := maps.Clone(requiredEnvVars) + hasMissing := false + for name, envVar := range envVars { + if value, isDefined := os.LookupEnv(envVar.name); isDefined { + envVar.value = value + envVar.isDefined = true + } else { + hasMissing = true + } + envVars[name] = envVar + } + if hasMissing { + missingEnvVars := []string{} + for _, ev := range envVars { + if !ev.isDefined { + missingEnvVars = append(missingEnvVars, ev.name) + } + } + return InfoResponse{}, fmt.Errorf( + "missing required environment variable(s): %s", ux.ListAsText(missingEnvVars)) + } + + _, err := openai.New( + openai.WithModel(envVars[modelEnvVar].value), + openai.WithAPIType(openai.APITypeAzure), + openai.WithAPIVersion(envVars[versionEnvVar].value), + openai.WithBaseURL(envVars[urlEnvVar].value), + ) + if err != nil { + return InfoResponse{}, fmt.Errorf("failed to create LLM: %w", err) + } + + return InfoResponse{ + Type: LlmTypeOpenAIAzure, + IsLocal: false, + Model: LlmModel{ + Name: envVars[modelEnvVar].value, + Version: envVars[versionEnvVar].value, + }, + Url: envVars[urlEnvVar].value, + }, nil +} diff --git a/cli/azd/pkg/llm/client.go b/cli/azd/pkg/llm/client.go new file mode 100644 index 00000000000..7690f0c2e38 --- /dev/null +++ b/cli/azd/pkg/llm/client.go @@ -0,0 +1,10 @@ +package llm + +import ( + "github.com/tmc/langchaingo/llms" +) + +// Client is the AZD representation of a Language Model (LLM) client.
+type Client struct { + llms.Model +} diff --git a/cli/azd/pkg/llm/manager.go b/cli/azd/pkg/llm/manager.go index fc53fd59a2f..53204b8eb80 100644 --- a/cli/azd/pkg/llm/manager.go +++ b/cli/azd/pkg/llm/manager.go @@ -1,71 +1,201 @@ package llm import ( - "context" "fmt" "io" + "log" + "os" + "strings" - "github.com/tmc/langchaingo/llms" + "github.com/azure/azure-dev/cli/azd/pkg/alpha" + "github.com/tmc/langchaingo/llms/ollama" "github.com/tmc/langchaingo/llms/openai" ) -func NewManager() Manager { - return Manager{} +var featureLlm = alpha.MustFeatureKey("llm") + +func NewManager( + alphaManager *alpha.FeatureManager, +) Manager { + return Manager{ + alphaManager: alphaManager, + } } +// Manager provides functionality to manage Language Model (LLM) features and capabilities. +// It encapsulates the alpha feature manager to control access to experimental LLM features. type Manager struct { + alphaManager *alpha.FeatureManager +} + +type LlmType string + +func (l LlmType) String() string { + switch l { + case LlmTypeOllama: + return "Ollama" + case LlmTypeOpenAIAzure: + return "OpenAI Azure" + default: + return string(l) + } +} + +const ( + LlmTypeOpenAIAzure LlmType = "azure" + LlmTypeOllama LlmType = "ollama" +) + +// LlmModel represents a language model with its name and version information. +// Name specifies the identifier of the language model. +// Version indicates the specific version or release of the model. +type LlmModel struct { + Name string + Version string +} + +// InfoResponse represents the configuration information of a Language Learning Model (LLM). +// It contains details about the model type, deployment location, model specification, +// and endpoint URL for remote models. +type InfoResponse struct { + Type LlmType + IsLocal bool + Model LlmModel + Url string // For remote models, this is the API endpoint URL +} + +// NotEnabledError represents an error that occurs when LLM functionality is not enabled. +// This error is typically raised when attempting to use LLM features that have not been +// activated or configured in the system. +type NotEnabledError struct { +} + +func (e NotEnabledError) Error() string { + return fmt.Sprintf("LLM feature is not enabled. Run '%s' to enable", + alpha.GetEnableCommand(featureLlm)) } -func (m Manager) Info(stdout io.Writer) (string, error) { - llm, err := openai.New( - openai.WithModel("o1-mini"), - openai.WithAPIType(openai.APITypeAzure), - openai.WithAPIVersion("2024-12-01-preview"), - openai.WithBaseURL("https://vivazqu-2260-resource.cognitiveservices.azure.com/"), - ) - if err != nil { - return "", fmt.Errorf("failed to create LLM: %w", err) +// InvalidLlmConfiguration represents an error that occurs when the LLM (Large Language Model) +// configuration is invalid or improperly formatted. This error type is used to indicate +// configuration-related issues in the LLM system. +type InvalidLlmConfiguration struct { +} + +func (e InvalidLlmConfiguration) Error() string { + return "Unable to determine LLM configuration. Please check your environment variables or configuration." +} + +// Info obtains configuration information about the LLM (Large Language Model) feature. +// If the LLM feature is not enabled through the alpha manager, it returns a NotEnabledError. +// The function writes output to the provided stdout writer. +// Returns an InfoResponse containing the LLM configuration and any error that occurred. 
+func (m Manager) Info(stdout io.Writer) (InfoResponse, error) { + if !m.alphaManager.IsEnabled(featureLlm) { + return InfoResponse{}, NotEnabledError{} + } + return LlmConfig() +} + +var availableLlmTypes = []LlmType{ + LlmTypeOpenAIAzure, + LlmTypeOllama, +} + +// LlmConfig attempts to load and validate LLM (Language Learning Model) configuration. +// It first determines the default LLM type, which can be overridden by the AZD_LLM_TYPE +// environment variable. It then tries to load configurations for available LLM types +// in order, starting with the default type. +// +// The function supports two LLM types: +// - LlmTypeOpenAIAzure (default) +// - LlmTypeOllama +// +// Returns: +// - InfoResponse: Contains the successfully loaded LLM configuration +// - error: Returns an error if no valid LLM configuration could be loaded or if +// an unknown LLM type is specified in AZD_LLM_TYPE +func LlmConfig() (InfoResponse, error) { + defaultLLm := LlmTypeOpenAIAzure + // Default LLM can be overridden by environment variable AZD_LLM_TYPE + if value, isDefined := os.LookupEnv("AZD_LLM_TYPE"); isDefined { + switch strings.ToLower(value) { + case string(LlmTypeOllama): + defaultLLm = LlmTypeOllama + case string(LlmTypeOpenAIAzure): + defaultLLm = LlmTypeOpenAIAzure + default: + return InfoResponse{}, fmt.Errorf("unknown LLM type: %s", value) + } + } + + // keep default on the top and add the rest in the order they are defined + configOrder := []LlmType{defaultLLm} + for _, llmType := range availableLlmTypes { + if llmType != defaultLLm { + configOrder = append(configOrder, llmType) + } } - ctx := context.Background() - content := []llms.MessageContent{ - llms.TextParts(llms.ChatMessageTypeHuman, ` -Respond with the version of the LLM you are using. -Use the format "LLM: ".`), + for _, llmType := range configOrder { + log.Println("Checking LLM configuration for: ", llmType) + info, err := loadLlmConfig(llmType) + if err != nil { + log.Printf("Failed to load LLM configuration for %s: %v\n", llmType, err) + continue // Try the next LLM type + } + return info, nil } - output, err := llm.GenerateContent(ctx, content, - llms.WithMaxTokens(4000), - llms.WithTemperature(1), - llms.WithStreamingFunc(func(ctx context.Context, chunk []byte) error { - fmt.Fprintf(stdout, "%s", string(chunk)) - return nil - }), - ) - if err != nil { - return "", fmt.Errorf("failed to generate content: %w", err) + + return InfoResponse{}, InvalidLlmConfiguration{} +} + +// loadLlmConfig loads the configuration for the specified LLM type. +// It returns an InfoResponse containing the LLM configuration details and any error encountered. +// +// Parameters: +// - llmType: The type of LLM to load configuration for (LlmTypeOllama or LlmTypeOpenAIAzure) +// +// Returns: +// - InfoResponse: Configuration details for the specified LLM +// - error: InvalidLlmConfiguration error if an unsupported LLM type is provided +func loadLlmConfig(llmType LlmType) (InfoResponse, error) { + switch llmType { + case LlmTypeOllama: + return loadOllama() + case LlmTypeOpenAIAzure: + return loadAzureOpenAi() + default: + return InfoResponse{}, InvalidLlmConfiguration{} } - return output.Choices[0].Content, nil } -// func (m Manager) Info(stdout io.Writer) (string, error) { -// llm, err := ollama.New(ollama.WithModel("llama3")) -// if err != nil { -// return "", err -// } -// ctx := context.Background() -// output, err := llms.GenerateFromSinglePrompt( -// ctx, -// llm, -// "Human: Describe the version of the LLM you are using. 
Use the format 'LLM: '.", -// llms.WithTemperature(0.8), -// llms.WithStreamingFunc(func(ctx context.Context, chunk []byte) error { -// fmt.Fprintf(stdout, "%s", string(chunk)) -// return nil -// }), -// ) -// _ = output // We don't use the output here, as we are streaming directly to stdout. -// if err != nil { -// return "", err -// } -// return "", nil -// } +// LlmClient creates and returns a new LLM (Language Learning Model) client based on the provided InfoResponse. +// It supports different types of LLM services including Ollama and Azure OpenAI. +// +// Parameters: +// - info: InfoResponse containing the configuration details for the LLM service +// +// Returns: +// - Client: A configured LLM client wrapper +// - error: An error if the client creation fails or if the LLM type is unsupported +func LlmClient(info InfoResponse) (Client, error) { + switch info.Type { + case LlmTypeOllama: + c, err := ollama.New(ollama.WithModel(info.Model.Name)) + return Client{ + Model: c, + }, err + case LlmTypeOpenAIAzure: + c, err := openai.New( + openai.WithModel(info.Model.Name), + openai.WithAPIType(openai.APITypeAzure), + openai.WithAPIVersion(info.Model.Version), + openai.WithBaseURL(info.Url), + ) + return Client{ + Model: c, + }, err + default: + return Client{}, fmt.Errorf("unsupported LLM type: %s", info.Type) + } +} diff --git a/cli/azd/pkg/llm/manager_test.go b/cli/azd/pkg/llm/manager_test.go new file mode 100644 index 00000000000..36f54852762 --- /dev/null +++ b/cli/azd/pkg/llm/manager_test.go @@ -0,0 +1,129 @@ +package llm + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLlmConfig(t *testing.T) { + tests := []struct { + name string + envVars map[string]string + expectedType LlmType + expectErr bool + }{ + { + name: "Default to local Ollama", + envVars: map[string]string{}, + expectedType: LlmTypeOllama, + expectErr: false, + }, + { + name: "Use Ollama when AZD_LLM_TYPE=ollama", + envVars: map[string]string{ + "AZD_LLM_TYPE": "ollama", + }, + expectedType: LlmTypeOllama, + expectErr: false, + }, + { + name: "Use Azure OpenAI when AZD_LLM_TYPE=azure", + envVars: map[string]string{ + "AZD_LLM_TYPE": "azure", + keyEnvVar: "test-key", + urlEnvVar: "https://test.openai.azure.com/", + versionEnvVar: "2023-05-15", + modelEnvVar: "gpt-35-turbo", + }, + expectedType: LlmTypeOpenAIAzure, + expectErr: false, + }, + { + name: "Error on invalid LLM type", + envVars: map[string]string{ + "AZD_LLM_TYPE": "invalid", + }, + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(innerTest *testing.T) { + + for key, value := range tt.envVars { + t.Setenv(key, value) + } + + info, err := LlmConfig() + if tt.expectErr { + require.Error(innerTest, err) + return + } + + require.NoError(innerTest, err) + require.Equal(innerTest, tt.expectedType, info.Type, "Expected LLM type does not match") + }) + } +} + +func TestLlmClient(t *testing.T) { + tests := []struct { + name string + info InfoResponse + expectErr bool + env map[string]string + }{ + { + name: "Create Ollama client", + info: InfoResponse{ + Type: LlmTypeOllama, + Model: LlmModel{ + Name: "llama2", + }, + }, + expectErr: false, + }, + { + name: "Create Azure OpenAI client", + info: InfoResponse{ + Type: LlmTypeOpenAIAzure, + Model: LlmModel{ + Name: "gpt-35-turbo", + Version: "2023-05-15", + }, + Url: "https://test.openai.azure.com/", + }, + expectErr: false, + env: map[string]string{ + keyEnvVar: "test-key", + }, + }, + { + name: "Error on invalid LLM type", + info: InfoResponse{ + Type: 
"invalid", + }, + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + for key, value := range tt.env { + t.Setenv(key, value) + } + + client, err := LlmClient(tt.info) + if tt.expectErr { + require.Error(t, err) + require.Equal(t, Client{}, client, "Expected empty client on error") + require.Nil(t, client.Model, "Expected nil Model on error") + return + } + require.NoError(t, err) + require.NotNil(t, client) + }) + } +} diff --git a/cli/azd/pkg/llm/ollama.go b/cli/azd/pkg/llm/ollama.go new file mode 100644 index 00000000000..f96432a126a --- /dev/null +++ b/cli/azd/pkg/llm/ollama.go @@ -0,0 +1,33 @@ +package llm + +import ( + "log" + "os" + + "github.com/tmc/langchaingo/llms/ollama" +) + +func loadOllama() (InfoResponse, error) { + defaultLlamaVersion := "llama3" + + if value, isDefined := os.LookupEnv("AZD_OLLAMA_MODEL"); isDefined { + log.Printf("Found AZD_OLLAMA_MODEL with %s. Using this model", value) + defaultLlamaVersion = value + } + + _, err := ollama.New( + ollama.WithModel(defaultLlamaVersion), + ) + if err != nil { + return InfoResponse{}, err + } + + return InfoResponse{ + Type: LlmTypeOllama, + IsLocal: true, + Model: LlmModel{ + Name: defaultLlamaVersion, + Version: "latest", + }, + }, nil +} diff --git a/cli/azd/resources/alpha_features.yaml b/cli/azd/resources/alpha_features.yaml index cddd1d12bde..04004a9cd0f 100644 --- a/cli/azd/resources/alpha_features.yaml +++ b/cli/azd/resources/alpha_features.yaml @@ -12,3 +12,6 @@ description: "Enables Azure deployment stacks for ARM/Bicep based deployments." - id: extensions description: "Enables the use of `azd` extension packages." +- id: llm + description: "Enables the use of LLMs in the CLI." + \ No newline at end of file From fe556447b1d94409cbaa65233bb2e053c061e3e1 Mon Sep 17 00:00:00 2001 From: Victor Vazquez Date: Fri, 27 Jun 2025 22:50:36 +0000 Subject: [PATCH 004/116] revert --- cli/azd/cmd/version.go | 27 ++++++++------------------- 1 file changed, 8 insertions(+), 19 deletions(-) diff --git a/cli/azd/cmd/version.go b/cli/azd/cmd/version.go index c71ff455705..fb464321fc2 100644 --- a/cli/azd/cmd/version.go +++ b/cli/azd/cmd/version.go @@ -7,13 +7,11 @@ import ( "context" "fmt" "io" - "time" "github.com/azure/azure-dev/cli/azd/cmd/actions" "github.com/azure/azure-dev/cli/azd/internal" "github.com/azure/azure-dev/cli/azd/pkg/contracts" "github.com/azure/azure-dev/cli/azd/pkg/input" - "github.com/azure/azure-dev/cli/azd/pkg/llm" "github.com/azure/azure-dev/cli/azd/pkg/output" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -35,11 +33,10 @@ func newVersionFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) } type versionAction struct { - flags *versionFlags - formatter output.Formatter - writer io.Writer - console input.Console - llmManager llm.Manager + flags *versionFlags + formatter output.Formatter + writer io.Writer + console input.Console } func newVersionAction( @@ -47,14 +44,12 @@ func newVersionAction( formatter output.Formatter, writer io.Writer, console input.Console, - llmManager llm.Manager, ) actions.Action { return &versionAction{ - flags: flags, - formatter: formatter, - writer: writer, - console: console, - llmManager: llmManager, + flags: flags, + formatter: formatter, + writer: writer, + console: console, } } @@ -62,12 +57,6 @@ func (v *versionAction) Run(ctx context.Context) (*actions.ActionResult, error) switch v.formatter.Kind() { case output.NoneFormat: fmt.Fprintf(v.console.Handles().Stdout, "azd version %s\n", internal.Version) - 
time.Sleep(500 * time.Millisecond) - _, err := v.llmManager.Info(v.console.Handles().Stdout) - if err != nil { - return nil, fmt.Errorf("failed to get LLM info: %w", err) - } - fmt.Fprintf(v.console.Handles().Stdout, "\n") case output.JsonFormat: var result contracts.VersionResult versionSpec := internal.VersionInfo() From 5e48f4a21f6ae44658998f43d5244d3bbb692cb5 Mon Sep 17 00:00:00 2001 From: Victor Vazquez Date: Mon, 30 Jun 2025 16:32:37 +0000 Subject: [PATCH 005/116] copyright --- cli/azd/pkg/llm/azure_openai.go | 3 +++ cli/azd/pkg/llm/client.go | 3 +++ cli/azd/pkg/llm/manager.go | 3 +++ cli/azd/pkg/llm/manager_test.go | 3 +++ cli/azd/pkg/llm/ollama.go | 3 +++ 5 files changed, 15 insertions(+) diff --git a/cli/azd/pkg/llm/azure_openai.go b/cli/azd/pkg/llm/azure_openai.go index b4b64bd62c4..095692c888c 100644 --- a/cli/azd/pkg/llm/azure_openai.go +++ b/cli/azd/pkg/llm/azure_openai.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package llm import ( diff --git a/cli/azd/pkg/llm/client.go b/cli/azd/pkg/llm/client.go index 7690f0c2e38..b11f392f1c7 100644 --- a/cli/azd/pkg/llm/client.go +++ b/cli/azd/pkg/llm/client.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package llm import ( diff --git a/cli/azd/pkg/llm/manager.go b/cli/azd/pkg/llm/manager.go index 53204b8eb80..c288aedc138 100644 --- a/cli/azd/pkg/llm/manager.go +++ b/cli/azd/pkg/llm/manager.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package llm import ( diff --git a/cli/azd/pkg/llm/manager_test.go b/cli/azd/pkg/llm/manager_test.go index 36f54852762..46d0d6e3874 100644 --- a/cli/azd/pkg/llm/manager_test.go +++ b/cli/azd/pkg/llm/manager_test.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package llm import ( diff --git a/cli/azd/pkg/llm/ollama.go b/cli/azd/pkg/llm/ollama.go index f96432a126a..46f7187bd15 100644 --- a/cli/azd/pkg/llm/ollama.go +++ b/cli/azd/pkg/llm/ollama.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package llm import ( From f6e1dbca048ef293f11e1b4d6631c80d58ae99bb Mon Sep 17 00:00:00 2001 From: Victor Vazquez Date: Tue, 1 Jul 2025 20:40:52 +0000 Subject: [PATCH 006/116] WIP --- cli/azd/cmd/hooks.go | 6 ++ cli/azd/cmd/hooks_new.go | 110 ++++++++++++++++++++ go.mod | 20 ++++ go.sum | 211 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 347 insertions(+) create mode 100644 cli/azd/cmd/hooks_new.go diff --git a/cli/azd/cmd/hooks.go b/cli/azd/cmd/hooks.go index 3b0bcd93451..8b4863d9505 100644 --- a/cli/azd/cmd/hooks.go +++ b/cli/azd/cmd/hooks.go @@ -39,6 +39,12 @@ func hooksActions(root *actions.ActionDescriptor) *actions.ActionDescriptor { ActionResolver: newHooksRunAction, }) + group.Add("new", &actions.ActionDescriptorOptions{ + Command: newHooksNewCmd(), + FlagsResolver: newHooksNewFlags, + ActionResolver: newHooksNewAction, + }) + return group } diff --git a/cli/azd/cmd/hooks_new.go b/cli/azd/cmd/hooks_new.go new file mode 100644 index 00000000000..df19e551125 --- /dev/null +++ b/cli/azd/cmd/hooks_new.go @@ -0,0 +1,110 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package cmd + +import ( + "context" + "fmt" + + "github.com/azure/azure-dev/cli/azd/cmd/actions" + "github.com/azure/azure-dev/cli/azd/internal" + "github.com/azure/azure-dev/cli/azd/pkg/exec" + "github.com/azure/azure-dev/cli/azd/pkg/input" + "github.com/azure/azure-dev/cli/azd/pkg/ioc" + "github.com/azure/azure-dev/cli/azd/pkg/llm" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "github.com/tmc/langchaingo/agents" + "github.com/tmc/langchaingo/chains" + "github.com/tmc/langchaingo/tools" +) + +func newHooksNewCmd() *cobra.Command { + return &cobra.Command{ + Use: "new", + Short: "Create a new hook for the project.", + } +} + +func newHooksNewFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *hooksNewFlags { + flags := &hooksNewFlags{} + flags.Bind(cmd.Flags(), global) + + return flags +} + +type hooksNewFlags struct { + internal.EnvFlag + global *internal.GlobalCommandOptions + platform string + service string +} + +func (f *hooksNewFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + f.EnvFlag.Bind(local, global) + f.global = global + + local.StringVar(&f.platform, "platform", "", "Forces hooks to run for the specified platform.") + local.StringVar(&f.service, "service", "", "Only runs hooks for the specified service.") +} + +type hooksNewAction struct { + commandRunner exec.CommandRunner + console input.Console + flags *hooksNewFlags + args []string + serviceLocator ioc.ServiceLocator + llmManager llm.Manager +} + +func newHooksNewAction( + commandRunner exec.CommandRunner, + console input.Console, + flags *hooksNewFlags, + args []string, + serviceLocator ioc.ServiceLocator, + llmManager llm.Manager, +) actions.Action { + return &hooksNewAction{ + commandRunner: commandRunner, + console: console, + flags: flags, + args: args, + serviceLocator: serviceLocator, + llmManager: llmManager, + } +} + +func (hna *hooksNewAction) Run(ctx context.Context) (*actions.ActionResult, error) { + llmInfo, err := hna.llmManager.Info(hna.console.GetWriter()) + if err != nil { + return nil, fmt.Errorf("failed to load LLM info: %w", err) + } + llClient, err := llm.LlmClient(llmInfo) + if err != nil { + return nil, fmt.Errorf("failed to create LLM client: %w", err) + } + + agent := agents.NewOneShotAgent(llClient, []tools.Tool{ + tools.Calculator{}, + }) + executor := agents.NewExecutor(agent) + answer, err := chains.Run(ctx, executor, "If I have 4 apples and I give 2 to my friend, how many apples do I have left?", + chains.WithTemperature(0.0), + chains.WithStreamingFunc(func(ctx context.Context, chunk []byte) error { + fmt.Fprintf(hna.console.GetWriter(), "%s", chunk) + return nil + }), + ) + if err != nil { + return nil, fmt.Errorf("failed to exe: %w", err) + } + fmt.Println(answer) + + return &actions.ActionResult{ + Message: &actions.ResultMessage{ + Header: "Done", + }, + }, nil +} diff --git a/go.mod b/go.mod index d3000d4f235..4ee5237fa94 100644 --- a/go.mod +++ b/go.mod @@ -87,33 +87,53 @@ require ( github.com/Azure/azure-pipeline-go v0.2.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dlclark/regexp2 v1.10.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/go-logr/logr v1.4.2 // indirect 
github.com/go-logr/stdr v1.2.2 // indirect + github.com/goph/emperror v0.17.2 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect + github.com/huandu/xstrings v1.3.3 // indirect + github.com/imdario/mergo v0.3.13 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/mattn/go-ieproxy v0.0.12 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect + github.com/mitchellh/copystructure v1.0.0 // indirect + github.com/mitchellh/reflectwalk v1.0.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/nikolalohinski/gonja v1.5.3 // indirect github.com/otiai10/mint v1.6.3 // indirect + github.com/pelletier/go-toml/v2 v2.0.9 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pkoukk/tiktoken-go v0.1.6 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/segmentio/asm v1.2.0 // indirect github.com/segmentio/encoding v0.4.1 // indirect + github.com/shopspring/decimal v1.2.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spf13/cast v1.3.1 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect + github.com/yargevad/filepathx v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect go.opentelemetry.io/otel/metric v1.35.0 // indirect go.opentelemetry.io/proto/otlp v1.5.0 // indirect + go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 // indirect golang.org/x/crypto v0.37.0 // indirect + golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect golang.org/x/net v0.39.0 // indirect golang.org/x/sync v0.13.0 // indirect golang.org/x/term v0.31.0 // indirect diff --git a/go.sum b/go.sum index a2fd2103e19..852a7d22a7e 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,29 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.114.0 h1:OIPFAdfrFDFO2ve2U7r/H5SwSbBzEdrBdE7xkgwc+kY= +cloud.google.com/go v0.114.0/go.mod h1:ZV9La5YYxctro1HTPug5lXH/GefROyW8PPD4T8n9J8E= +cloud.google.com/go/ai v0.7.0 h1:P6+b5p4gXlza5E+u7uvcgYlzZ7103ACg70YdZeC6oGE= +cloud.google.com/go/ai v0.7.0/go.mod h1:7ozuEcraovh4ABsPbrec3o4LmFl9HigNI3D5haxYeQo= +cloud.google.com/go/aiplatform v1.68.0 h1:EPPqgHDJpBZKRvv+OsB3cr0jYz3EL2pZ+802rBPcG8U= +cloud.google.com/go/aiplatform v1.68.0/go.mod h1:105MFA3svHjC3Oazl7yjXAmIR89LKhRAeNdnDKJczME= +cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw= +cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s= +cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/iam v1.1.8 
h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= +cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= +cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU= +cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng= +cloud.google.com/go/vertexai v0.12.0 h1:zTadEo/CtsoyRXNx3uGCncoWAP1H2HakGqwznt+iMo8= +cloud.google.com/go/vertexai v0.12.0/go.mod h1:8u+d0TsvBfAAd2x5R6GMgbYhsLgo3J7lmP4bR8g2ig8= code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ= github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo= +github.com/AssemblyAI/assemblyai-go-sdk v1.3.0 h1:AtOVgGxUycvK4P4ypP+1ZupecvFgnfH+Jsum0o5ILoU= +github.com/AssemblyAI/assemblyai-go-sdk v1.3.0/go.mod h1:H0naZbvpIW49cDA5ZZ/gggeXqi7ojSGB1mqshRk6kNE= github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZOMdj5HYo= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ= @@ -71,32 +92,57 @@ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/MakeNowJust/heredoc/v2 v2.0.1 h1:rlCHh70XXXv7toz95ajQWOWQnN4WNLt0TdpZYIR/J6A= github.com/MakeNowJust/heredoc/v2 v2.0.1/go.mod h1:6/2Abh5s+hc3g9nbWLe9ObDIOhaRrqsyY9MWy+4JdRM= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= +github.com/PuerkitoBio/goquery v1.8.1 h1:uQxhNlArOIdbrH1tr0UXwdVFgDcZDrZVdcpygAcwmWM= +github.com/PuerkitoBio/goquery v1.8.1/go.mod h1:Q8ICL1kNUJ2sXGoAhPGUdYDJvgQgHzJsnnd3H7Ho5jQ= github.com/adam-lavrik/go-imath v0.0.0-20210910152346-265a42a96f0b h1:g9SuFmxM/WucQFKTMSP+irxyf5m0RiUJreBDhGI6jSA= github.com/adam-lavrik/go-imath v0.0.0-20210910152346-265a42a96f0b/go.mod h1:XjvqMUpGd3Xn9Jtzk/4GEBCSoBX0eB2RyriXgne0IdM= +github.com/airbrake/gobrake v3.6.1+incompatible/go.mod 
h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o= +github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss= +github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= +github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= +github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmatcuk/doublestar/v4 v4.8.1 h1:54Bopc5c2cAvhLRAzqOGCYHYyhcDHsFF4wWIR5wKP38= github.com/bmatcuk/doublestar/v4 v4.8.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bradleyjkemp/cupaloy/v2 v2.8.0 h1:any4BmKE+jGIaMpnU8YgH/I2LPiLBufr6oMMlVBbn9M= github.com/bradleyjkemp/cupaloy/v2 v2.8.0/go.mod h1:bm7JXdkRd4BHJk9HpwqAI8BoAY1lps46Enkdqw6aRX0= github.com/braydonk/yaml v0.9.0 h1:ewGMrVmEVpsm3VwXQDR388sLg5+aQ8Yihp6/hc4m+h4= github.com/braydonk/yaml v0.9.0/go.mod h1:hcm3h581tudlirk8XEUPDBAimBPbmnL0Y45hCRl47N4= github.com/buger/goterm v1.0.4 h1:Z9YvGmOih81P0FbVtEYTFF6YsSgxSUKEhf/f9bTMXbY= github.com/buger/goterm v1.0.4/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE= +github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cli/browser v1.3.0 h1:LejqCrpWr+1pRqmEPDGnTZOjsMe7sehifLynZJuqJpo= github.com/cli/browser v1.3.0/go.mod h1:HH8s+fOAxjhQoBUAsKuPCbqUuxZDhQ2/aD+SzsEfBTk= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= @@ -109,35 +155,80 @@ github.com/dlclark/regexp2 v1.10.0 
h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/drone/envsubst v1.0.3 h1:PCIBwNDYjs50AsLZPYdfhSATKaRg/FJmDc2D6+C2x8g= github.com/drone/envsubst v1.0.3/go.mod h1:N2jZmlMufstn1KEqvbHjw40h1KyTmnVzHcSc9bFiJ2g= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eiannone/keyboard v0.0.0-20220611211555-0d226195f203 h1:XBBHcIb256gUJtLmY22n99HaZTz+r2Z51xUPi01m3wg= github.com/eiannone/keyboard v0.0.0-20220611211555-0d226195f203/go.mod h1:E1jcSv8FaEny+OP/5k9UxZVw9YFWGj7eI4KR/iOBqCg= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/getzep/zep-go v1.0.4 h1:09o26bPP2RAPKFjWuVWwUWLbtFDF/S8bfbilxzeZAAg= +github.com/getzep/zep-go v1.0.4/go.mod h1:HC1Gz7oiyrzOTvzeKC4dQKUiUy87zpIJl0ZFXXdHuss= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock 
v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golobby/container/v3 v3.3.2 h1:7u+RgNnsdVlhGoS8gY4EXAG601vpMMzLZlYqSp77Quw= github.com/golobby/container/v3 v3.3.2/go.mod h1:RDdKpnKpV1Of11PFBe7Dxc2C1k2KaLE4FD47FflAmj0= +github.com/google/generative-ai-go v0.15.1 h1:n8aQUpvhPOlGVuM2DRkJ2jvx04zpp42B778AROJa+pQ= +github.com/google/generative-ai-go v0.15.1/go.mod h1:AAucpWZjXsDKhQYWvCYuP6d0yB1kX998pJlOW1rAesw= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= +github.com/goph/emperror v0.17.2 h1:yLapQcmEsO0ipe9p5TaN22djm3OFV/TfM/fcYP0/J18= +github.com/goph/emperror v0.17.2/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic= +github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= +github.com/gorilla/css 
v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= @@ -145,14 +236,25 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1ns github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= +github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= +github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -163,6 +265,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80 h1:6Yzfa6GP0rIo/kULo2bwGEkFvCePZ3qHDDTC3/J9Swo= +github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= github.com/magefile/mage v1.15.0/go.mod 
h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -179,16 +283,29 @@ github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58= +github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs= github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY= github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= github.com/microsoft/azure-devops-go-api/azuredevops/v7 v7.1.0 h1:mmJCWLe63QvybxhW1iBmQWEaCKdc4SKgALfTNZ+OphU= github.com/microsoft/azure-devops-go-api/azuredevops/v7 v7.1.0/go.mod h1:mDunUZ1IUJdJIRHvFb+LPBUtxe3AYB5MI6BMXNg8194= github.com/microsoft/go-deviceid v1.0.0 h1:i5AQ654Xk9kfvwJeKQm3w2+eT1+ImBDVEpAR0AjpP40= github.com/microsoft/go-deviceid v1.0.0/go.mod h1:KY13FeVdHkzD8gy+6T8+kVmD/7RMpTaWW75K+T4uZWg= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/nathan-fiscaletti/consolesize-go v0.0.0-20220204101620-317176b6684d h1:NqRhLdNVlozULwM1B3VaHhcXYSgrOAv8V5BE65om+1Q= github.com/nathan-fiscaletti/consolesize-go v0.0.0-20220204101620-317176b6684d/go.mod h1:cxIIfNMTwff8f/ZvRouvWYF6wOoO7nj99neWSx2q/Es= +github.com/nikolalohinski/gonja v1.5.3 h1:GsA+EEaZDZPGJ8JtpeGN78jidhOlxeJROpqMT9fTj9c= +github.com/nikolalohinski/gonja v1.5.3/go.mod h1:RmjwxNiXAEqcq1HeK5SSMmqFJvKOfTfXhkJv6YBtPa4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -196,14 +313,18 @@ github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= 
+github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0= +github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkoukk/tiktoken-go v0.1.6 h1:JF0TlJzhTbrI30wCvFuiw6FzP2+/bR+FIxUdgEAcUsw= github.com/pkoukk/tiktoken-go v0.1.6/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/psanford/memfs v0.0.0-20241019191636-4ef911798f9b h1:xzjEJAHum+mV5Dd5KyohRlCyP03o4yq6vNpEUtAJQzI= github.com/psanford/memfs v0.0.0-20241019191636-4ef911798f9b/go.mod h1:tcaRap0jS3eifrEEllL6ZMd9dg8IlDpi2S1oARrQ+NI= github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= @@ -213,6 +334,7 @@ github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= @@ -222,6 +344,13 @@ github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE= github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= @@ -232,8 +361,12 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS github.com/stretchr/objx v0.5.0/go.mod 
h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= @@ -252,11 +385,31 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tmc/langchaingo v0.1.13 h1:rcpMWBIi2y3B90XxfE4Ao8dhCQPVDMaNPnN5cGB1CaA= github.com/tmc/langchaingo v0.1.13/go.mod h1:vpQ5NOIhpzxDfTZK9B6tf2GM/MoaHewPWM5KXXGh7hg= +github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= +github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc= +github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181 h1:K+bMSIx9A7mLES1rtG+qKduLIXq40DAzYHtb0XuCukA= +gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181/go.mod h1:dzYhVIwWCtzPAa4QP98wfB9+mzt33MSmM8wsKiMi2ow= +gitlab.com/golang-commonmark/linkify v0.0.0-20191026162114-a0c2df6c8f82 h1:oYrL81N608MLZhma3ruL8qTM4xcpYECGut8KSxRY59g= +gitlab.com/golang-commonmark/linkify v0.0.0-20191026162114-a0c2df6c8f82/go.mod h1:Gn+LZmCrhPECMD3SOKlE+BOHwhOYD9j7WT9NUtkCrC8= +gitlab.com/golang-commonmark/markdown v0.0.0-20211110145824-bf3e522c626a h1:O85GKETcmnCNAfv4Aym9tepU8OE0NmcZNqPlXcsBKBs= +gitlab.com/golang-commonmark/markdown v0.0.0-20211110145824-bf3e522c626a/go.mod h1:LaSIs30YPGs1H5jwGgPhLzc8vkNc/k0rDX/fEZqiU/M= +gitlab.com/golang-commonmark/mdurl v0.0.0-20191124015652-932350d1cb84 h1:qqjvoVXdWIcZCLPMlzgA7P9FZWdPGPvP/l3ef8GzV6o= +gitlab.com/golang-commonmark/mdurl v0.0.0-20191124015652-932350d1cb84/go.mod h1:IJZ+fdMvbW2qW6htJx7sLJ04FEs4Ldl/MDsJtMKywfw= +gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f h1:Wku8eEdeJqIOFHtrfkYUByc4bCaTeA6fL0UJgfEiFMI= +gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f/go.mod h1:Tiuhl+njh/JIg0uS/sOJVYi0x2HEa5rc1OAaVsb5tAs= go.lsp.dev/jsonrpc2 v0.10.0 h1:Pr/YcXJoEOTMc/b6OTmcR1DPJ3mSWl/SWiU1Cct6VmI= go.lsp.dev/jsonrpc2 v0.10.0/go.mod h1:fmEzIdXPi/rf6d4uFcayi8HpFP1nBF99ERP1htC72Ac= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod 
h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 h1:A3SayB3rNyt+1S6qpI9mHPkeHTZbD7XILEqWnYZb2l0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0/go.mod h1:27iA5uvhuRNmalO+iEUdVn5ZMj2qy10Mm+XRIpRmyuU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw= @@ -275,29 +428,50 @@ go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 h1:Ss6D3hLXTM0KobyBYEAygXzFfGcjnmfEJOBgSbemCtg= +go.starlark.net v0.0.0-20230302034142-4b1e35fe2254/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= +golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -306,14 +480,18 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -322,17 +500,44 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE= +google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20240528184218-531527333157 h1:u7WMYrIrVvs0TF5yaKwKNbcJyySYf+HAIFXxWltJOXE= +google.golang.org/genproto v0.0.0-20240528184218-531527333157/go.mod h1:ubQlAQnzejB8uZzszhrTCU2Fyp6Vi7ZE5nn0c3W8+qQ= google.golang.org/genproto/googleapis/api v0.0.0-20250407143221-ac9807e6c755 h1:AMLTAunltONNuzWgVPZXrjLWtXpsG6A3yLLPEoJ/IjU= google.golang.org/genproto/googleapis/api v0.0.0-20250407143221-ac9807e6c755/go.mod h1:2R6XrVC8Oc08GlNh8ujEpc7HkLiEZ16QeY7FxIs20ac= google.golang.org/genproto/googleapis/rpc v0.0.0-20250407143221-ac9807e6c755 h1:TwXJCGVREgQ/cl18iY0Z4wJCTL/GmW+Um2oSwZiZPnc= google.golang.org/genproto/googleapis/rpc v0.0.0-20250407143221-ac9807e6c755/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI= google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -346,10 +551,16 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= From f23545b7dd62c8582b7e278924d852b250a6876d Mon Sep 17 00:00:00 2001 From: Victor Vazquez Date: Wed, 2 Jul 2025 04:32:34 +0000 Subject: [PATCH 007/116] WIP --- cli/azd/cmd/hooks_new.go | 56 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 51 insertions(+), 5 deletions(-) diff --git a/cli/azd/cmd/hooks_new.go b/cli/azd/cmd/hooks_new.go index df19e551125..54255ad2bd0 100644 --- a/cli/azd/cmd/hooks_new.go +++ b/cli/azd/cmd/hooks_new.go @@ -6,6 +6,8 @@ package cmd import ( "context" "fmt" + "runtime" + "strings" 
"github.com/azure/azure-dev/cli/azd/cmd/actions" "github.com/azure/azure-dev/cli/azd/internal" @@ -88,14 +90,21 @@ func (hna *hooksNewAction) Run(ctx context.Context) (*actions.ActionResult, erro agent := agents.NewOneShotAgent(llClient, []tools.Tool{ tools.Calculator{}, + hookResolverTool{}, + osResolverTool{}, }) executor := agents.NewExecutor(agent) - answer, err := chains.Run(ctx, executor, "If I have 4 apples and I give 2 to my friend, how many apples do I have left?", + answer, err := chains.Run(ctx, executor, ` +You are an expert in creating hooks for the Azure Dev CLI. +Your task is to create a new hook for linux bash or windows powershell, depending on the user's platform. +Use the os resolver tool to determine the user's platform. You will write a powershell script if the user is on windows, +or a bash script if the user is on linux. +Start by resolving the type of the hook based on the input. +The hook should start with a comment on the top that describes the hook type. +Then use the next prompt to create the hook code. +This is a script that ask user for their age and prints how many days they have lived. +`, chains.WithTemperature(0.0), - chains.WithStreamingFunc(func(ctx context.Context, chunk []byte) error { - fmt.Fprintf(hna.console.GetWriter(), "%s", chunk) - return nil - }), ) if err != nil { return nil, fmt.Errorf("failed to exe: %w", err) @@ -108,3 +117,40 @@ func (hna *hooksNewAction) Run(ctx context.Context) (*actions.ActionResult, erro }, }, nil } + +type hookResolverTool struct { +} + +func (h hookResolverTool) Name() string { + return "Hook Resolver" +} + +func (h hookResolverTool) Description() string { + return `Useful for resolving the type of the hook based on the input. + The input to this tool should be a string that contains the prompt that creates the hook.` +} + +func (h hookResolverTool) Call(ctx context.Context, input string) (string, error) { + validHookTypes := []string{"preprovision", "postprovision", "predeploy", "postdeploy"} + for _, hookType := range validHookTypes { + if strings.Contains(input, hookType) { + return hookType, nil + } + } + return "preprovision", nil +} + +type osResolverTool struct { +} + +func (h osResolverTool) Name() string { + return "Os Resolver" +} + +func (h osResolverTool) Description() string { + return "Useful for resolving what is the user's operating system." +} + +func (h osResolverTool) Call(ctx context.Context, input string) (string, error) { + return runtime.GOOS, nil +} From 036f6902f5ba46f7f803bc071eff4cd6f31b2ace Mon Sep 17 00:00:00 2001 From: Victor Vazquez Date: Wed, 2 Jul 2025 05:44:13 +0000 Subject: [PATCH 008/116] wip --- cli/azd/cmd/hooks_new.go | 44 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 2 deletions(-) diff --git a/cli/azd/cmd/hooks_new.go b/cli/azd/cmd/hooks_new.go index 54255ad2bd0..72c8ed21aca 100644 --- a/cli/azd/cmd/hooks_new.go +++ b/cli/azd/cmd/hooks_new.go @@ -5,7 +5,9 @@ package cmd import ( "context" + "encoding/json" "fmt" + "os" "runtime" "strings" @@ -92,6 +94,7 @@ func (hna *hooksNewAction) Run(ctx context.Context) (*actions.ActionResult, erro tools.Calculator{}, hookResolverTool{}, osResolverTool{}, + saveHookTool{}, }) executor := agents.NewExecutor(agent) answer, err := chains.Run(ctx, executor, ` @@ -101,8 +104,10 @@ Use the os resolver tool to determine the user's platform. You will write a powe or a bash script if the user is on linux. Start by resolving the type of the hook based on the input. 
The hook should start with a comment on the top that describes the hook type. -Then use the next prompt to create the hook code. -This is a script that ask user for their age and prints how many days they have lived. +Then use the next prompt to create the hook code: +Ask user for their age and prints how many days they have lived. + +Use the save hook tool to save the generated hook to a file. `, chains.WithTemperature(0.0), ) @@ -154,3 +159,38 @@ func (h osResolverTool) Description() string { func (h osResolverTool) Call(ctx context.Context, input string) (string, error) { return runtime.GOOS, nil } + +type saveHookTool struct { +} + +func (h saveHookTool) Name() string { + return "Save Hook" +} + +func (h saveHookTool) Description() string { + return `Useful for saving the generated hook to a file. + The input to this tool should be a JSON string with the following format: + { + "hookType": "", + "hookCode": "" + }. + The input must be just the JSON string, without any additional text.` +} + +func (h saveHookTool) Call(ctx context.Context, input string) (string, error) { + // Parse the input JSON string + var hookData struct { + HookType string `json:"hookType"` + HookCode string `json:"hookCode"` + } + if err := json.Unmarshal([]byte(input), &hookData); err != nil { + return "", fmt.Errorf("failed to parse input JSON: %w", err) + } + + // Save the hook code to a file + if err := os.WriteFile(fmt.Sprintf("%s_hook.sh", hookData.HookType), []byte(hookData.HookCode), 0755); err != nil { + return "", fmt.Errorf("failed to save hook file: %w", err) + } + + return "Hook saved successfully", nil +} From 3ffce68dda9756fd5f92e29bd20e1f0293162ea7 Mon Sep 17 00:00:00 2001 From: Victor Vazquez Date: Wed, 2 Jul 2025 23:42:26 +0000 Subject: [PATCH 009/116] wip --- cli/azd/cmd/hooks_new.go | 108 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 106 insertions(+), 2 deletions(-) diff --git a/cli/azd/cmd/hooks_new.go b/cli/azd/cmd/hooks_new.go index 72c8ed21aca..bf7140bc0ef 100644 --- a/cli/azd/cmd/hooks_new.go +++ b/cli/azd/cmd/hooks_new.go @@ -21,6 +21,8 @@ import ( "github.com/spf13/pflag" "github.com/tmc/langchaingo/agents" "github.com/tmc/langchaingo/chains" + "github.com/tmc/langchaingo/llms" + "github.com/tmc/langchaingo/schema" "github.com/tmc/langchaingo/tools" ) @@ -90,13 +92,20 @@ func (hna *hooksNewAction) Run(ctx context.Context) (*actions.ActionResult, erro return nil, fmt.Errorf("failed to create LLM client: %w", err) } + // Create a callback handler to log agent steps + callbackHandler := &agentLogHandler{console: hna.console} + agent := agents.NewOneShotAgent(llClient, []tools.Tool{ tools.Calculator{}, hookResolverTool{}, osResolverTool{}, saveHookTool{}, - }) + }, agents.WithCallbacksHandler(callbackHandler)) + executor := agents.NewExecutor(agent) + + fmt.Println("🤖 Starting AI agent execution...") + answer, err := chains.Run(ctx, executor, ` You are an expert in creating hooks for the Azure Dev CLI. Your task is to create a new hook for linux bash or windows powershell, depending on the user's platform. @@ -105,7 +114,7 @@ or a bash script if the user is on linux. Start by resolving the type of the hook based on the input. The hook should start with a comment on the top that describes the hook type. Then use the next prompt to create the hook code: -Ask user for their age and prints how many days they have lived. +Print the env variables that are available to the hook and then print a short description of the hook. Use the save hook tool to save the generated hook to a file. 
`, @@ -114,6 +123,7 @@ Use the save hook tool to save the generated hook to a file. if err != nil { return nil, fmt.Errorf("failed to exe: %w", err) } + fmt.Println("✅ AI agent execution completed") fmt.Println(answer) return &actions.ActionResult{ @@ -194,3 +204,97 @@ func (h saveHookTool) Call(ctx context.Context, input string) (string, error) { return "Hook saved successfully", nil } + +// agentLogHandler implements callbacks.Handler to log agent execution steps +type agentLogHandler struct { + console input.Console + step int +} + +// HandleLLMGenerateContentStart implements callbacks.Handler. +func (h *agentLogHandler) HandleLLMGenerateContentStart(ctx context.Context, ms []llms.MessageContent) { +} + +// HandleRetrieverEnd implements callbacks.Handler. +func (h *agentLogHandler) HandleRetrieverEnd(ctx context.Context, query string, documents []schema.Document) { +} + +// HandleRetrieverStart implements callbacks.Handler. +func (h *agentLogHandler) HandleRetrieverStart(ctx context.Context, query string) { +} + +// HandleStreamingFunc implements callbacks.Handler. +func (h *agentLogHandler) HandleStreamingFunc(ctx context.Context, chunk []byte) { + // use console to stream output + if len(chunk) > 0 { + // Print the chunk to the console + fmt.Print(string(chunk)) + } +} + +func (h *agentLogHandler) HandleLLMStart(ctx context.Context, prompts []string) { + h.step++ + fmt.Printf("🧠 Step %d: LLM processing...\n", h.step) + if len(prompts) > 0 && len(prompts[0]) < 200 { + fmt.Printf(" Prompt: %s\n", prompts[0]) + } +} + +func (h *agentLogHandler) HandleLLMError(ctx context.Context, err error) { + fmt.Printf("❌ Step %d: LLM error: %v\n", h.step, err) +} + +func (h *agentLogHandler) HandleChainStart(ctx context.Context, inputs map[string]any) { + fmt.Println("🚀 Agent chain started") +} + +func (h *agentLogHandler) HandleChainEnd(ctx context.Context, outputs map[string]any) { + fmt.Println("🏁 Agent chain completed") +} + +func (h *agentLogHandler) HandleChainError(ctx context.Context, err error) { + fmt.Printf("💥 Agent chain error: %v\n", err) +} + +func (h *agentLogHandler) HandleToolStart(ctx context.Context, input string) { + fmt.Printf("🔧 Using tool: %s\n") + if input != "" && len(input) < 100 { + fmt.Printf(" Input: %s\n", input) + } +} + +func (h *agentLogHandler) HandleToolEnd(ctx context.Context, output string) { + if output != "" && len(output) < 150 { + fmt.Printf(" Output: %s\n", output) + } else { + fmt.Println(" Tool completed") + } +} + +func (h *agentLogHandler) HandleToolError(ctx context.Context, err error) { + fmt.Printf(" ❌ Tool error: %v\n", err) +} + +func (h *agentLogHandler) HandleText(ctx context.Context, text string) { + if text != "" && len(text) < 200 { + fmt.Printf("💭 Agent thinking: %s\n", text) + } +} + +func (h *agentLogHandler) HandleAgentAction(ctx context.Context, action schema.AgentAction) { + fmt.Printf("🎯 Agent action: %s\n", action.Tool) + if action.ToolInput != "" && len(action.ToolInput) < 100 { + fmt.Printf(" Tool input: %s\n", action.ToolInput) + } +} + +func (h *agentLogHandler) HandleAgentFinish(ctx context.Context, finish schema.AgentFinish) { + fmt.Println("🏆 Agent finished successfully") + if finish.ReturnValues != nil { + fmt.Printf(" Final output: %v\n", finish.ReturnValues) + } +} + +func (h *agentLogHandler) HandleLLMGenerateContentEnd(ctx context.Context, response *llms.ContentResponse) { + fmt.Println("✨ LLM content generation completed") +} From 593af36b2afa5ef398591fe28841148598e3880d Mon Sep 17 00:00:00 2001 From: Victor Vazquez Date: 
Thu, 3 Jul 2025 18:18:49 +0000 Subject: [PATCH 010/116] wip --- cli/azd/cmd/hooks_new.go | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/cli/azd/cmd/hooks_new.go b/cli/azd/cmd/hooks_new.go index bf7140bc0ef..748fff2bf19 100644 --- a/cli/azd/cmd/hooks_new.go +++ b/cli/azd/cmd/hooks_new.go @@ -7,6 +7,7 @@ import ( "context" "encoding/json" "fmt" + "log" "os" "runtime" "strings" @@ -228,16 +229,11 @@ func (h *agentLogHandler) HandleStreamingFunc(ctx context.Context, chunk []byte) // use console to stream output if len(chunk) > 0 { // Print the chunk to the console - fmt.Print(string(chunk)) + log.Print(string(chunk)) } } func (h *agentLogHandler) HandleLLMStart(ctx context.Context, prompts []string) { - h.step++ - fmt.Printf("🧠 Step %d: LLM processing...\n", h.step) - if len(prompts) > 0 && len(prompts[0]) < 200 { - fmt.Printf(" Prompt: %s\n", prompts[0]) - } } func (h *agentLogHandler) HandleLLMError(ctx context.Context, err error) { @@ -245,30 +241,25 @@ func (h *agentLogHandler) HandleLLMError(ctx context.Context, err error) { } func (h *agentLogHandler) HandleChainStart(ctx context.Context, inputs map[string]any) { - fmt.Println("🚀 Agent chain started") + log.Println("🚀 Agent chain started") } func (h *agentLogHandler) HandleChainEnd(ctx context.Context, outputs map[string]any) { - fmt.Println("🏁 Agent chain completed") + log.Println("🏁 Agent chain completed") } func (h *agentLogHandler) HandleChainError(ctx context.Context, err error) { - fmt.Printf("💥 Agent chain error: %v\n", err) + log.Printf("💥 Agent chain error: %v\n", err) } func (h *agentLogHandler) HandleToolStart(ctx context.Context, input string) { - fmt.Printf("🔧 Using tool: %s\n") + log.Printf("🔧 Using tool: %s\n", input) if input != "" && len(input) < 100 { - fmt.Printf(" Input: %s\n", input) + log.Printf(" Input: %s\n", input) } } func (h *agentLogHandler) HandleToolEnd(ctx context.Context, output string) { - if output != "" && len(output) < 150 { - fmt.Printf(" Output: %s\n", output) - } else { - fmt.Println(" Tool completed") - } } func (h *agentLogHandler) HandleToolError(ctx context.Context, err error) { From 514d4e21d99fdc529654a3f682c65d345331329e Mon Sep 17 00:00:00 2001 From: Victor Vazquez Date: Fri, 11 Jul 2025 17:10:33 +0000 Subject: [PATCH 011/116] wip --- cli/azd/cmd/hooks_new.go | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/cli/azd/cmd/hooks_new.go b/cli/azd/cmd/hooks_new.go index 748fff2bf19..bf7140bc0ef 100644 --- a/cli/azd/cmd/hooks_new.go +++ b/cli/azd/cmd/hooks_new.go @@ -7,7 +7,6 @@ import ( "context" "encoding/json" "fmt" - "log" "os" "runtime" "strings" @@ -229,11 +228,16 @@ func (h *agentLogHandler) HandleStreamingFunc(ctx context.Context, chunk []byte) // use console to stream output if len(chunk) > 0 { // Print the chunk to the console - log.Print(string(chunk)) + fmt.Print(string(chunk)) } } func (h *agentLogHandler) HandleLLMStart(ctx context.Context, prompts []string) { + h.step++ + fmt.Printf("🧠 Step %d: LLM processing...\n", h.step) + if len(prompts) > 0 && len(prompts[0]) < 200 { + fmt.Printf(" Prompt: %s\n", prompts[0]) + } } func (h *agentLogHandler) HandleLLMError(ctx context.Context, err error) { @@ -241,25 +245,30 @@ func (h *agentLogHandler) HandleLLMError(ctx context.Context, err error) { } func (h *agentLogHandler) HandleChainStart(ctx context.Context, inputs map[string]any) { - log.Println("🚀 Agent chain started") + fmt.Println("🚀 Agent chain started") } func (h *agentLogHandler) HandleChainEnd(ctx 
context.Context, outputs map[string]any) { - log.Println("🏁 Agent chain completed") + fmt.Println("🏁 Agent chain completed") } func (h *agentLogHandler) HandleChainError(ctx context.Context, err error) { - log.Printf("💥 Agent chain error: %v\n", err) + fmt.Printf("💥 Agent chain error: %v\n", err) } func (h *agentLogHandler) HandleToolStart(ctx context.Context, input string) { - log.Printf("🔧 Using tool: %s\n", input) + fmt.Printf("🔧 Using tool: %s\n") if input != "" && len(input) < 100 { - log.Printf(" Input: %s\n", input) + fmt.Printf(" Input: %s\n", input) } } func (h *agentLogHandler) HandleToolEnd(ctx context.Context, output string) { + if output != "" && len(output) < 150 { + fmt.Printf(" Output: %s\n", output) + } else { + fmt.Println(" Tool completed") + } } func (h *agentLogHandler) HandleToolError(ctx context.Context, err error) { From 62246e7fb93d1c1edbf15bdfa8d0febe77c04d2d Mon Sep 17 00:00:00 2001 From: Victor Vazquez Date: Tue, 15 Jul 2025 22:48:44 +0000 Subject: [PATCH 012/116] mcp sampling --- cli/azd/cmd/hooks_new.go | 164 ++++++++++++++++++++++++++++++++++----- cli/azd/tools/mcp/mcp.go | 98 +++++++++++++++++++++++ go.mod | 5 +- go.sum | 11 ++- 4 files changed, 258 insertions(+), 20 deletions(-) create mode 100644 cli/azd/tools/mcp/mcp.go diff --git a/cli/azd/cmd/hooks_new.go b/cli/azd/cmd/hooks_new.go index bf7140bc0ef..73c4cdef38a 100644 --- a/cli/azd/cmd/hooks_new.go +++ b/cli/azd/cmd/hooks_new.go @@ -7,6 +7,7 @@ import ( "context" "encoding/json" "fmt" + "log" "os" "runtime" "strings" @@ -17,13 +18,16 @@ import ( "github.com/azure/azure-dev/cli/azd/pkg/input" "github.com/azure/azure-dev/cli/azd/pkg/ioc" "github.com/azure/azure-dev/cli/azd/pkg/llm" + langchaingo_mcp_adapter "github.com/i2y/langchaingo-mcp-adapter" + "github.com/mark3labs/mcp-go/client" + "github.com/mark3labs/mcp-go/client/transport" + "github.com/mark3labs/mcp-go/mcp" "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/tmc/langchaingo/agents" "github.com/tmc/langchaingo/chains" "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/schema" - "github.com/tmc/langchaingo/tools" ) func newHooksNewCmd() *cobra.Command { @@ -64,6 +68,107 @@ type hooksNewAction struct { llmManager llm.Manager } +type samplingHandler struct { + llmClient llms.Model + console input.Console +} + +func (s *samplingHandler) CreateMessage( + ctx context.Context, request mcp.CreateMessageRequest) (*mcp.CreateMessageResult, error) { + // Enhanced logging for debugging + fmt.Printf("🔬 MCP Sampling Request received!\n") + fmt.Printf(" Request ID: %v\n", ctx.Value("requestId")) + fmt.Printf(" Max tokens: %d\n", request.MaxTokens) + fmt.Printf(" Temperature: %f\n", request.Temperature) + fmt.Printf(" Model preferences: %v\n", request.ModelPreferences) + fmt.Printf(" Number of messages: %d\n", len(request.Messages)) + + // Debug: Print message details + for i, msg := range request.Messages { + fmt.Printf(" Message %d: Role=%s, Content=%v\n", i, msg.Role, msg.Content) + } + + // Convert MCP messages to LLM format + var llmMessages []llms.MessageContent + for _, msg := range request.Messages { + var content []llms.ContentPart + + // Handle the Content field which can be different types + switch contentType := msg.Content.(type) { + case mcp.TextContent: + fmt.Printf(" Processing TextContent: %s\n", contentType.Text) + content = append(content, llms.TextPart(contentType.Text)) + case string: + fmt.Printf(" Processing string content: %s\n", contentType) + content = append(content, llms.TextPart(contentType)) + default: + // 
Try to convert to string as fallback + contentStr := fmt.Sprintf("%v", msg.Content) + fmt.Printf(" Processing unknown content type: %s\n", contentStr) + content = append(content, llms.TextPart(contentStr)) + } + + // Map MCP roles to LLM roles + var role llms.ChatMessageType + switch msg.Role { + case mcp.RoleUser: + role = llms.ChatMessageTypeHuman + case mcp.RoleAssistant: + role = llms.ChatMessageTypeAI + default: + role = llms.ChatMessageTypeSystem + } + + llmMessages = append(llmMessages, llms.MessageContent{ + Role: role, + Parts: content, + }) + } + + // Generate response using the LLM + fmt.Printf("🧠 Generating response with LLM (messages: %d)...\n", len(llmMessages)) + response, err := s.llmClient.GenerateContent(ctx, llmMessages) + if err != nil { + fmt.Printf("❌ LLM generation error: %v\n", err) + return nil, fmt.Errorf("failed to generate LLM response: %w", err) + } + + // Extract text from the response + var responseText string + if len(response.Choices) > 0 && len(response.Choices[0].Content) > 0 { + // Convert the response content to string + responseText = string(response.Choices[0].Content) + fmt.Printf("📝 Raw LLM response: %s\n", responseText) + } + + if responseText == "" { + responseText = "No response generated" + fmt.Printf("⚠️ Using fallback response\n") + } + + fmt.Printf("✅ LLM response generated (length: %d): %s\n", len(responseText), responseText[:min(100, len(responseText))]) + + // Return the MCP result using the same format as the MCP server + result := &mcp.CreateMessageResult{ + SamplingMessage: mcp.SamplingMessage{ + Role: mcp.RoleAssistant, + Content: responseText, + }, + Model: "llm-delegated", + } + + fmt.Printf("🎯 Returning sampling result with model: %s\n", result.Model) + return result, nil +} + +// Helper function for min +func min(a, b int) int { + if a < b { + return a + } + return b +} + func newHooksNewAction( commandRunner exec.CommandRunner, console input.Console, @@ -95,28 +200,51 @@ func (hna *hooksNewAction) Run(ctx context.Context) (*actions.ActionResult, erro // Create a callback handler to log agent steps callbackHandler := &agentLogHandler{console: hna.console} - agent := agents.NewOneShotAgent(llClient, []tools.Tool{ - tools.Calculator{}, - hookResolverTool{}, - osResolverTool{}, - saveHookTool{}, - }, agents.WithCallbacksHandler(callbackHandler)) + // // Connect to MCP server via stdio + // mcpClient, err := client.NewStdioMCPClient("/home/vivazqu/workspace/azure-dev/cli/azd/tools/mcp/mcp", nil) + // if err != nil { + // log.Fatalf("Failed to create MCP client: %v", err) + // } + + // defer mcpClient.Close() + t := transport.NewStdioWithOptions("/home/vivazqu/workspace/azure-dev/cli/azd/tools/mcp/mcp", nil, nil) + // Create sampling handler with LLM client + samplingHandler := &samplingHandler{ + llmClient: llClient, + console: hna.console, + } + + mcpClient := client.NewClient(t, client.WithSamplingHandler(samplingHandler)) + if err := mcpClient.Start(ctx); err != nil { + return nil, fmt.Errorf("failed to start MCP client: %w", err) + } + defer mcpClient.Close() + + fmt.Println("🔌 MCP client created with sampling handler") + + // Create adapter + adapter, err := langchaingo_mcp_adapter.New(mcpClient) + + if err != nil { + log.Fatalf("Failed to create adapter: %v", err) + } + + // Load tools from MCP server + tools, err := adapter.Tools() + if err != nil { + log.Fatalf("Failed to get tools: %v", err) + } + + agent := agents.NewOneShotAgent(llClient, tools, agents.WithCallbacksHandler(callbackHandler)) executor := 
agents.NewExecutor(agent) fmt.Println("🤖 Starting AI agent execution...") + fmt.Printf(" Agent has %d tools available from MCP server\n", len(tools)) + fmt.Println(" Sampling handler is configured for MCP tool requests") answer, err := chains.Run(ctx, executor, ` -You are an expert in creating hooks for the Azure Dev CLI. -Your task is to create a new hook for linux bash or windows powershell, depending on the user's platform. -Use the os resolver tool to determine the user's platform. You will write a powershell script if the user is on windows, -or a bash script if the user is on linux. -Start by resolving the type of the hook based on the input. -The hook should start with a comment on the top that describes the hook type. -Then use the next prompt to create the hook code: -Print the env variables that are available to the hook and then print a short description of the hook. - -Use the save hook tool to save the generated hook to a file. +Say hello to Raul using the exact result from a tool to say hello to someone. `, chains.WithTemperature(0.0), ) @@ -257,7 +385,7 @@ func (h *agentLogHandler) HandleChainError(ctx context.Context, err error) { } func (h *agentLogHandler) HandleToolStart(ctx context.Context, input string) { - fmt.Printf("🔧 Using tool: %s\n") + fmt.Printf("🔧 Using tool with input: %s\n", input) if input != "" && len(input) < 100 { fmt.Printf(" Input: %s\n", input) } diff --git a/cli/azd/tools/mcp/mcp.go b/cli/azd/tools/mcp/mcp.go new file mode 100644 index 00000000000..51d2e65dc15 --- /dev/null +++ b/cli/azd/tools/mcp/mcp.go @@ -0,0 +1,98 @@ +package main + +import ( + "context" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +func main() { + // Create a new MCP server + s := server.NewMCPServer( + "Hello Server 🚀", + "1.0.0", + server.WithToolCapabilities(false), + ) + s.EnableSampling() + + // Define the tool + tool := mcp.NewTool( + "hello_world", + mcp.WithDescription("Say hello to someone"), + mcp.WithString("name", + mcp.Required(), + mcp.Description("Name of the person to greet"), + ), + ) + + // Register the tool handler + s.AddTool(tool, helloHandler) + + // Start the server using stdio transport + if err := server.ServeStdio(s); err != nil { + fmt.Printf("Server error: %v\n", err) + } +} + +// Tool handler function +func helloHandler(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + name, err := request.RequireString("name") + if err != nil { + return mcp.NewToolResultError(err.Error()), nil + } + + // Get the client session from context + session := server.ClientSessionFromContext(ctx) + if session == nil { + // If no session, fall back to simple greeting + return mcp.NewToolResultText(fmt.Sprintf("This is the MCP tool - Helloooo, %s!", name)), nil + } + + // Check if the session supports sampling + if samplingSession, ok := session.(server.SessionWithSampling); ok { + // Create a sampling request to get a creative greeting + samplingRequest := mcp.CreateMessageRequest{ + CreateMessageParams: mcp.CreateMessageParams{ + Messages: []mcp.SamplingMessage{ + { + Role: mcp.RoleUser, + Content: mcp.TextContent{ + Type: "text", + Text: fmt.Sprintf("Please provide a creative and enthusiastic greeting for %s. 
Make it feel that it is from someone mysterious and a little scary!", name), + }, + }, + }, + MaxTokens: 100, + Temperature: 0.8, + }, + } + + // Send the sampling request to get a response from the host's LLM + samplingResponse, err := samplingSession.RequestSampling(ctx, samplingRequest) + if err != nil { + // If sampling fails, fall back to a simple greeting + return mcp.NewToolResultText(fmt.Sprintf("This is the MCP tool - Helloooo, %s! (sampling failed: %v)", name, err)), nil + } + + // Extract the generated greeting from the sampling response + var generatedGreeting string + if samplingResponse != nil { + // The response Content field contains the message content + if textContent, ok := samplingResponse.Content.(mcp.TextContent); ok { + generatedGreeting = textContent.Text + } else if contentStr, ok := samplingResponse.Content.(string); ok { + generatedGreeting = contentStr + } + } + + // If we got a response, use it + if generatedGreeting != "" { + return mcp.NewToolResultText(fmt.Sprintf("🤖 AI-Generated Greeting: %s", generatedGreeting)), nil + } + } + + // Fallback to simple greeting + return mcp.NewToolResultText(fmt.Sprintf("This is the MCP tool - Helloooo, %s!", name)), nil +} diff --git a/go.mod b/go.mod index b37b7d31d73..80fc1a5f017 100644 --- a/go.mod +++ b/go.mod @@ -50,8 +50,10 @@ require ( github.com/golobby/container/v3 v3.3.2 github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.3 + github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df github.com/joho/godotenv v1.5.1 github.com/magefile/mage v1.15.0 + github.com/mark3labs/mcp-go v0.33.0 github.com/mattn/go-colorable v0.1.14 github.com/mattn/go-isatty v0.0.20 github.com/microsoft/ApplicationInsights-Go v0.4.4 @@ -124,11 +126,12 @@ require ( github.com/segmentio/encoding v0.4.1 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/cast v1.3.1 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect github.com/yargevad/filepathx v1.0.0 // indirect + github.com/yosida95/uritemplate/v3 v3.0.2 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect go.opentelemetry.io/otel/metric v1.35.0 // indirect diff --git a/go.sum b/go.sum index 99461012910..3d6dc032df8 100644 --- a/go.sum +++ b/go.sum @@ -169,6 +169,8 @@ github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= @@ -242,6 +244,8 @@ github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4Dvx github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.3 
h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df h1:4lTJXCZw16BF0BCzrQ1LUzlMW4+2OwBkkYj1/bRybhY= +github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df/go.mod h1:oL2JAtsIp/1vnVy4UG4iDzL8SZwkOzqvRL3YR9PGPjs= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= @@ -273,6 +277,8 @@ github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80 h1:6Yzfa6GP0rIo/kUL github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/mark3labs/mcp-go v0.33.0 h1:naxhjnTIs/tyPZmWUZFuG0lDmdA6sUyYGGf3gsHvTCc= +github.com/mark3labs/mcp-go v0.33.0/go.mod h1:rXqOudj/djTORU/ThxYx8fqEVj/5pvTuuebQ2RC7uk4= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= @@ -353,8 +359,9 @@ github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFR github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= @@ -393,6 +400,8 @@ github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJ github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc= github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA= +github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= +github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181 h1:K+bMSIx9A7mLES1rtG+qKduLIXq40DAzYHtb0XuCukA= gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181/go.mod h1:dzYhVIwWCtzPAa4QP98wfB9+mzt33MSmM8wsKiMi2ow= From e592c651149c3125e512a8f94d4940b99ab7c1ce Mon Sep 17 00:00:00 2001 From: hemarina Date: Wed, 23 Jul 2025 15:32:24 -0700 Subject: [PATCH 013/116] separate from victor's pr --- cli/azd/cmd/hooks_new.go | 428 
--------------------------------------- 1 file changed, 428 deletions(-) delete mode 100644 cli/azd/cmd/hooks_new.go diff --git a/cli/azd/cmd/hooks_new.go b/cli/azd/cmd/hooks_new.go deleted file mode 100644 index 73c4cdef38a..00000000000 --- a/cli/azd/cmd/hooks_new.go +++ /dev/null @@ -1,428 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package cmd - -import ( - "context" - "encoding/json" - "fmt" - "log" - "os" - "runtime" - "strings" - - "github.com/azure/azure-dev/cli/azd/cmd/actions" - "github.com/azure/azure-dev/cli/azd/internal" - "github.com/azure/azure-dev/cli/azd/pkg/exec" - "github.com/azure/azure-dev/cli/azd/pkg/input" - "github.com/azure/azure-dev/cli/azd/pkg/ioc" - "github.com/azure/azure-dev/cli/azd/pkg/llm" - langchaingo_mcp_adapter "github.com/i2y/langchaingo-mcp-adapter" - "github.com/mark3labs/mcp-go/client" - "github.com/mark3labs/mcp-go/client/transport" - "github.com/mark3labs/mcp-go/mcp" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "github.com/tmc/langchaingo/agents" - "github.com/tmc/langchaingo/chains" - "github.com/tmc/langchaingo/llms" - "github.com/tmc/langchaingo/schema" -) - -func newHooksNewCmd() *cobra.Command { - return &cobra.Command{ - Use: "new", - Short: "Create a new hook for the project.", - } -} - -func newHooksNewFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *hooksNewFlags { - flags := &hooksNewFlags{} - flags.Bind(cmd.Flags(), global) - - return flags -} - -type hooksNewFlags struct { - internal.EnvFlag - global *internal.GlobalCommandOptions - platform string - service string -} - -func (f *hooksNewFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { - f.EnvFlag.Bind(local, global) - f.global = global - - local.StringVar(&f.platform, "platform", "", "Forces hooks to run for the specified platform.") - local.StringVar(&f.service, "service", "", "Only runs hooks for the specified service.") -} - -type hooksNewAction struct { - commandRunner exec.CommandRunner - console input.Console - flags *hooksNewFlags - args []string - serviceLocator ioc.ServiceLocator - llmManager llm.Manager -} - -type samplingHandler struct { - llmClient llms.Model - console input.Console -} - -func (s *samplingHandler) CreateMessage( - ctx context.Context, request mcp.CreateMessageRequest) (*mcp.CreateMessageResult, error) { - // Enhanced logging for debugging - fmt.Printf("🔬 MCP Sampling Request received!\n") - fmt.Printf(" Request ID: %v\n", ctx.Value("requestId")) - fmt.Printf(" Max tokens: %d\n", request.MaxTokens) - fmt.Printf(" Temperature: %f\n", request.Temperature) - fmt.Printf(" Model preferences: %v\n", request.ModelPreferences) - fmt.Printf(" Number of messages: %d\n", len(request.Messages)) - - // Debug: Print message details - for i, msg := range request.Messages { - fmt.Printf(" Message %d: Role=%s, Content=%v\n", i, msg.Role, msg.Content) - } - - // Convert MCP messages to LLM format - var llmMessages []llms.MessageContent - for _, msg := range request.Messages { - var content []llms.ContentPart - - // Handle the Content field which can be different types - switch contentType := msg.Content.(type) { - case mcp.TextContent: - fmt.Printf(" Processing TextContent: %s\n", contentType.Text) - content = append(content, llms.TextPart(contentType.Text)) - case string: - fmt.Printf(" Processing string content: %s\n", contentType) - content = append(content, llms.TextPart(contentType)) - default: - // Try to convert to string as fallback - contentStr := 
fmt.Sprintf("%v", msg.Content) - fmt.Printf(" Processing unknown content type: %s\n", contentStr) - content = append(content, llms.TextPart(contentStr)) - } - - // Map MCP roles to LLM roles - var role llms.ChatMessageType - switch msg.Role { - case mcp.RoleUser: - role = llms.ChatMessageTypeHuman - case mcp.RoleAssistant: - role = llms.ChatMessageTypeAI - default: - role = llms.ChatMessageTypeSystem - } - - llmMessages = append(llmMessages, llms.MessageContent{ - Role: role, - Parts: content, - }) - } - - // Generate response using the LLM - fmt.Printf("🧠 Generating response with LLM (messages: %d)...\n", len(llmMessages)) - response, err := s.llmClient.GenerateContent(ctx, llmMessages) - if err != nil { - fmt.Printf("❌ LLM generation error: %v\n", err) - return nil, fmt.Errorf("failed to generate LLM response: %w", err) - } - - // Extract text from the response - var responseText string - if len(response.Choices) > 0 && len(response.Choices[0].Content) > 0 { - // Convert the response content to string - responseText = string(response.Choices[0].Content) - fmt.Printf("📝 Raw LLM response: %s\n", responseText) - } - - if responseText == "" { - responseText = "No response generated" - fmt.Printf("⚠️ Using fallback response\n") - } - - fmt.Printf("✅ LLM response generated (length: %d): %s\n", len(responseText), responseText[:min(100, len(responseText))]) - - // Return the MCP result using the same format as the MCP server - result := &mcp.CreateMessageResult{ - SamplingMessage: mcp.SamplingMessage{ - Role: mcp.RoleAssistant, - Content: responseText, - }, - Model: "llm-delegated", - } - - fmt.Printf("🎯 Returning sampling result with model: %s\n", result.Model) - return result, nil -} - -// Helper function for min -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func newHooksNewAction( - commandRunner exec.CommandRunner, - console input.Console, - flags *hooksNewFlags, - args []string, - serviceLocator ioc.ServiceLocator, - llmManager llm.Manager, -) actions.Action { - return &hooksNewAction{ - commandRunner: commandRunner, - console: console, - flags: flags, - args: args, - serviceLocator: serviceLocator, - llmManager: llmManager, - } -} - -func (hna *hooksNewAction) Run(ctx context.Context) (*actions.ActionResult, error) { - llmInfo, err := hna.llmManager.Info(hna.console.GetWriter()) - if err != nil { - return nil, fmt.Errorf("failed to load LLM info: %w", err) - } - llClient, err := llm.LlmClient(llmInfo) - if err != nil { - return nil, fmt.Errorf("failed to create LLM client: %w", err) - } - - // Create a callback handler to log agent steps - callbackHandler := &agentLogHandler{console: hna.console} - - // // Connect to MCP server via stdio - // mcpClient, err := client.NewStdioMCPClient("/home/vivazqu/workspace/azure-dev/cli/azd/tools/mcp/mcp", nil) - // if err != nil { - // log.Fatalf("Failed to create MCP client: %v", err) - // } - - // defer mcpClient.Close() - t := transport.NewStdioWithOptions("/home/vivazqu/workspace/azure-dev/cli/azd/tools/mcp/mcp", nil, nil) - // Create sampling handler with LLM client - samplingHandler := &samplingHandler{ - llmClient: llClient, - console: hna.console, - } - - mcpClient := client.NewClient(t, client.WithSamplingHandler(samplingHandler)) - if err := mcpClient.Start(ctx); err != nil { - return nil, fmt.Errorf("failed to start MCP client: %w", err) - } - defer mcpClient.Close() - - fmt.Println("🔌 MCP client created with sampling handler") - - // Create adapter - adapter, err := langchaingo_mcp_adapter.New(mcpClient) - - if err 
!= nil { - log.Fatalf("Failed to create adapter: %v", err) - } - - // Load tools from MCP server - tools, err := adapter.Tools() - if err != nil { - log.Fatalf("Failed to get tools: %v", err) - } - - agent := agents.NewOneShotAgent(llClient, tools, agents.WithCallbacksHandler(callbackHandler)) - - executor := agents.NewExecutor(agent) - - fmt.Println("🤖 Starting AI agent execution...") - fmt.Printf(" Agent has %d tools available from MCP server\n", len(tools)) - fmt.Println(" Sampling handler is configured for MCP tool requests") - - answer, err := chains.Run(ctx, executor, ` -Say hello to Raul using the exact result from a tool to say hello to someone. -`, - chains.WithTemperature(0.0), - ) - if err != nil { - return nil, fmt.Errorf("failed to exe: %w", err) - } - fmt.Println("✅ AI agent execution completed") - fmt.Println(answer) - - return &actions.ActionResult{ - Message: &actions.ResultMessage{ - Header: "Done", - }, - }, nil -} - -type hookResolverTool struct { -} - -func (h hookResolverTool) Name() string { - return "Hook Resolver" -} - -func (h hookResolverTool) Description() string { - return `Useful for resolving the type of the hook based on the input. - The input to this tool should be a string that contains the prompt that creates the hook.` -} - -func (h hookResolverTool) Call(ctx context.Context, input string) (string, error) { - validHookTypes := []string{"preprovision", "postprovision", "predeploy", "postdeploy"} - for _, hookType := range validHookTypes { - if strings.Contains(input, hookType) { - return hookType, nil - } - } - return "preprovision", nil -} - -type osResolverTool struct { -} - -func (h osResolverTool) Name() string { - return "Os Resolver" -} - -func (h osResolverTool) Description() string { - return "Useful for resolving what is the user's operating system." -} - -func (h osResolverTool) Call(ctx context.Context, input string) (string, error) { - return runtime.GOOS, nil -} - -type saveHookTool struct { -} - -func (h saveHookTool) Name() string { - return "Save Hook" -} - -func (h saveHookTool) Description() string { - return `Useful for saving the generated hook to a file. - The input to this tool should be a JSON string with the following format: - { - "hookType": "", - "hookCode": "" - }. - The input must be just the JSON string, without any additional text.` -} - -func (h saveHookTool) Call(ctx context.Context, input string) (string, error) { - // Parse the input JSON string - var hookData struct { - HookType string `json:"hookType"` - HookCode string `json:"hookCode"` - } - if err := json.Unmarshal([]byte(input), &hookData); err != nil { - return "", fmt.Errorf("failed to parse input JSON: %w", err) - } - - // Save the hook code to a file - if err := os.WriteFile(fmt.Sprintf("%s_hook.sh", hookData.HookType), []byte(hookData.HookCode), 0755); err != nil { - return "", fmt.Errorf("failed to save hook file: %w", err) - } - - return "Hook saved successfully", nil -} - -// agentLogHandler implements callbacks.Handler to log agent execution steps -type agentLogHandler struct { - console input.Console - step int -} - -// HandleLLMGenerateContentStart implements callbacks.Handler. -func (h *agentLogHandler) HandleLLMGenerateContentStart(ctx context.Context, ms []llms.MessageContent) { -} - -// HandleRetrieverEnd implements callbacks.Handler. -func (h *agentLogHandler) HandleRetrieverEnd(ctx context.Context, query string, documents []schema.Document) { -} - -// HandleRetrieverStart implements callbacks.Handler. 
-func (h *agentLogHandler) HandleRetrieverStart(ctx context.Context, query string) { -} - -// HandleStreamingFunc implements callbacks.Handler. -func (h *agentLogHandler) HandleStreamingFunc(ctx context.Context, chunk []byte) { - // use console to stream output - if len(chunk) > 0 { - // Print the chunk to the console - fmt.Print(string(chunk)) - } -} - -func (h *agentLogHandler) HandleLLMStart(ctx context.Context, prompts []string) { - h.step++ - fmt.Printf("🧠 Step %d: LLM processing...\n", h.step) - if len(prompts) > 0 && len(prompts[0]) < 200 { - fmt.Printf(" Prompt: %s\n", prompts[0]) - } -} - -func (h *agentLogHandler) HandleLLMError(ctx context.Context, err error) { - fmt.Printf("❌ Step %d: LLM error: %v\n", h.step, err) -} - -func (h *agentLogHandler) HandleChainStart(ctx context.Context, inputs map[string]any) { - fmt.Println("🚀 Agent chain started") -} - -func (h *agentLogHandler) HandleChainEnd(ctx context.Context, outputs map[string]any) { - fmt.Println("🏁 Agent chain completed") -} - -func (h *agentLogHandler) HandleChainError(ctx context.Context, err error) { - fmt.Printf("💥 Agent chain error: %v\n", err) -} - -func (h *agentLogHandler) HandleToolStart(ctx context.Context, input string) { - fmt.Printf("🔧 Using tool with input: %s\n", input) - if input != "" && len(input) < 100 { - fmt.Printf(" Input: %s\n", input) - } -} - -func (h *agentLogHandler) HandleToolEnd(ctx context.Context, output string) { - if output != "" && len(output) < 150 { - fmt.Printf(" Output: %s\n", output) - } else { - fmt.Println(" Tool completed") - } -} - -func (h *agentLogHandler) HandleToolError(ctx context.Context, err error) { - fmt.Printf(" ❌ Tool error: %v\n", err) -} - -func (h *agentLogHandler) HandleText(ctx context.Context, text string) { - if text != "" && len(text) < 200 { - fmt.Printf("💭 Agent thinking: %s\n", text) - } -} - -func (h *agentLogHandler) HandleAgentAction(ctx context.Context, action schema.AgentAction) { - fmt.Printf("🎯 Agent action: %s\n", action.Tool) - if action.ToolInput != "" && len(action.ToolInput) < 100 { - fmt.Printf(" Tool input: %s\n", action.ToolInput) - } -} - -func (h *agentLogHandler) HandleAgentFinish(ctx context.Context, finish schema.AgentFinish) { - fmt.Println("🏆 Agent finished successfully") - if finish.ReturnValues != nil { - fmt.Printf(" Final output: %v\n", finish.ReturnValues) - } -} - -func (h *agentLogHandler) HandleLLMGenerateContentEnd(ctx context.Context, response *llms.ContentResponse) { - fmt.Println("✨ LLM content generation completed") -} From 802a203b4b620a9c26e8aac4770e5c1d0574756e Mon Sep 17 00:00:00 2001 From: hemarina Date: Wed, 23 Jul 2025 15:36:22 -0700 Subject: [PATCH 014/116] move part of hooks new code here and tweak for agentRunner --- cli/azd/pkg/agentRunner/agentRunner.go | 285 +++++++++++++++++++++++++ 1 file changed, 285 insertions(+) create mode 100644 cli/azd/pkg/agentRunner/agentRunner.go diff --git a/cli/azd/pkg/agentRunner/agentRunner.go b/cli/azd/pkg/agentRunner/agentRunner.go new file mode 100644 index 00000000000..b0cfdc1f2e0 --- /dev/null +++ b/cli/azd/pkg/agentRunner/agentRunner.go @@ -0,0 +1,285 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package agentRunner + +import ( + "context" + "errors" + "fmt" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/azure/azure-dev/cli/azd/pkg/auth" + "github.com/azure/azure-dev/cli/azd/pkg/azapi" + "github.com/azure/azure-dev/cli/azd/pkg/input" + "github.com/azure/azure-dev/cli/azd/pkg/llm" + langchaingo_mcp_adapter "github.com/i2y/langchaingo-mcp-adapter" + "github.com/mark3labs/mcp-go/client" + "github.com/mark3labs/mcp-go/client/transport" + "github.com/mark3labs/mcp-go/mcp" + "github.com/tmc/langchaingo/agents" + "github.com/tmc/langchaingo/chains" + "github.com/tmc/langchaingo/llms" + "github.com/tmc/langchaingo/schema" +) + +type samplingHandler struct { + llmClient llms.Model + console input.Console +} + +func (s *samplingHandler) CreateMessage( + ctx context.Context, request mcp.CreateMessageRequest) (*mcp.CreateMessageResult, error) { + // Enhanced logging for debugging + log.Printf("🔬 MCP Sampling Request received!\n") + log.Printf(" Request ID: %v\n", ctx.Value("requestId")) + log.Printf(" Max tokens: %d\n", request.MaxTokens) + log.Printf(" Temperature: %f\n", request.Temperature) + log.Printf(" Model preferences: %v\n", request.ModelPreferences) + log.Printf(" Number of messages: %d\n", len(request.Messages)) + + // Debug: Print message details + for i, msg := range request.Messages { + log.Printf(" Message %d: Role=%s, Content=%v\n", i, msg.Role, msg.Content) + } + + // Convert MCP messages to LLM format + var llmMessages []llms.MessageContent + for _, msg := range request.Messages { + var content []llms.ContentPart + + // Handle the Content field which can be different types + switch contentType := msg.Content.(type) { + case mcp.TextContent: + log.Printf(" Processing TextContent: %s\n", contentType.Text) + content = append(content, llms.TextPart(contentType.Text)) + case string: + log.Printf(" Processing string content: %s\n", contentType) + content = append(content, llms.TextPart(contentType)) + default: + // Try to convert to string as fallback + contentStr := fmt.Sprintf("%v", msg.Content) + log.Printf(" Processing unknown content type: %s\n", contentStr) + content = append(content, llms.TextPart(contentStr)) + } + + // Map MCP roles to LLM roles + var role llms.ChatMessageType + switch msg.Role { + case mcp.RoleUser: + role = llms.ChatMessageTypeHuman + case mcp.RoleAssistant: + role = llms.ChatMessageTypeAI + default: + role = llms.ChatMessageTypeSystem + } + + llmMessages = append(llmMessages, llms.MessageContent{ + Role: role, + Parts: content, + }) + } + + // Generate response using the LLM + log.Printf("🧠 Generating response with LLM (messages: %d)...\n", len(llmMessages)) + response, err := s.llmClient.GenerateContent(ctx, llmMessages) + if err != nil { + log.Printf("❌ LLM generation error: %v\n", err) + return nil, fmt.Errorf("failed to generate LLM response: %w", err) + } + + // Extract text from the response + var responseText string + if len(response.Choices) > 0 && len(response.Choices[0].Content) > 0 { + // Convert the response content to string + responseText = string(response.Choices[0].Content) + log.Printf("📝 Raw LLM response: %s\n", responseText) + } + + if responseText == "" { + responseText = "No response generated" + log.Printf("⚠️ Using fallback response\n") + } + + log.Printf("✅ LLM response generated (length: %d): %s\n", len(responseText), responseText[:min(100, len(responseText))]) + + // Return the MCP result using the same format as the MCP server + result := &mcp.CreateMessageResult{ + SamplingMessage: 
mcp.SamplingMessage{ + Role: mcp.RoleAssistant, + Content: responseText, + }, + Model: "llm-delegated", + } + + log.Printf("🎯 Returning sampling result with model: %s\n", result.Model) + return result, nil +} + +// Helper function for min +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func Run(ctx context.Context, console input.Console, llmManager llm.Manager, errNeedSuggestion error) (string, error) { + llmInfo, err := llmManager.Info(console.GetWriter()) + if err != nil { + return "", fmt.Errorf("failed to load LLM info: %w", err) + } + llClient, err := llm.LlmClient(llmInfo) + if err != nil { + return "", fmt.Errorf("failed to create LLM client: %w", err) + } + + // Create a callback handler to log agent steps + callbackHandler := &agentLogHandler{console: console} + + // defer mcpClient.Close() + t := transport.NewStdioWithOptions("C:\\Users\\hemarina\\Downloads\\vhvb1989\\azure-dev\\cli\\azd\\tools\\mcp\\mcp.exe", nil, nil) + // Create sampling handler with LLM client + samplingHandler := &samplingHandler{ + llmClient: llClient, + console: console, + } + + mcpClient := client.NewClient(t, client.WithSamplingHandler(samplingHandler)) + if err := mcpClient.Start(ctx); err != nil { + return "", fmt.Errorf("failed to start MCP client: %w", err) + } + defer mcpClient.Close() + + log.Println("🔌 MCP client created with sampling handler") + + // Create adapter + adapter, err := langchaingo_mcp_adapter.New(mcpClient) + + if err != nil { + log.Fatalf("Failed to create adapter: %v", err) + } + + // Load tools from MCP server + tools, err := adapter.Tools() + if err != nil { + log.Fatalf("Failed to get tools: %v", err) + } + + agent := agents.NewOneShotAgent(llClient, tools, agents.WithCallbacksHandler(callbackHandler)) + + executor := agents.NewExecutor(agent) + + log.Println("🤖 Starting AI agent execution...") + log.Printf(" Agent has %d tools available from MCP server\n", len(tools)) + log.Println(" Sampling handler is configured for MCP tool requests") + + // ask the agent to describe + // instructions to the error + input := promptingWithDifferentErrors(errNeedSuggestion) + + answer, err := chains.Run(ctx, executor, input, + chains.WithTemperature(0.0), + ) + if err != nil { + return "", fmt.Errorf("failed to exe: %w", err) + } + log.Println("✅ AI agent execution completed") + + return answer, nil +} + +// agentLogHandler implements callbacks.Handler to log agent execution steps +type agentLogHandler struct { + console input.Console + step int +} + +// HandleLLMGenerateContentStart implements callbacks.Handler. +func (h *agentLogHandler) HandleLLMGenerateContentStart(ctx context.Context, ms []llms.MessageContent) { +} + +// HandleRetrieverEnd implements callbacks.Handler. +func (h *agentLogHandler) HandleRetrieverEnd(ctx context.Context, query string, documents []schema.Document) { +} + +// HandleRetrieverStart implements callbacks.Handler. +func (h *agentLogHandler) HandleRetrieverStart(ctx context.Context, query string) { +} + +// HandleStreamingFunc implements callbacks.Handler. 
+func (h *agentLogHandler) HandleStreamingFunc(ctx context.Context, chunk []byte) { + // use console to stream output + if len(chunk) > 0 { + // Print the chunk to the console + log.Print(string(chunk)) + } +} + +func (h *agentLogHandler) HandleLLMStart(ctx context.Context, prompts []string) { + h.step++ + log.Printf("🧠 Step %d: LLM processing...\n", h.step) + if len(prompts) > 0 && len(prompts[0]) < 200 { + log.Printf(" Prompt: %s\n", prompts[0]) + } +} + +func (h *agentLogHandler) HandleLLMError(ctx context.Context, err error) { + log.Printf("❌ Step %d: LLM error: %v\n", h.step, err) +} + +func (h *agentLogHandler) HandleChainStart(ctx context.Context, inputs map[string]any) { + log.Println("🚀 Agent chain started") +} + +func (h *agentLogHandler) HandleChainEnd(ctx context.Context, outputs map[string]any) { + log.Println("🏁 Agent chain completed") +} + +func (h *agentLogHandler) HandleChainError(ctx context.Context, err error) { + log.Printf("💥 Agent chain error: %v\n", err) +} + +func (h *agentLogHandler) HandleToolStart(ctx context.Context, input string) { + log.Printf("🔧 Using tool with input: %s\n", input) + if input != "" && len(input) < 100 { + log.Printf(" Input: %s\n", input) + } +} + +func (h *agentLogHandler) HandleToolEnd(ctx context.Context, output string) { + if output != "" && len(output) < 150 { + log.Printf(" Output: %s\n", output) + } else { + log.Println(" Tool completed") + } +} + +func (h *agentLogHandler) HandleToolError(ctx context.Context, err error) { + log.Printf(" ❌ Tool error: %v\n", err) +} + +func (h *agentLogHandler) HandleText(ctx context.Context, text string) { + if text != "" && len(text) < 200 { + log.Printf("💭 Agent thinking: %s\n", text) + } +} + +func (h *agentLogHandler) HandleAgentAction(ctx context.Context, action schema.AgentAction) { + log.Printf("🎯 Agent action: %s\n", action.Tool) + if action.ToolInput != "" && len(action.ToolInput) < 100 { + log.Printf(" Tool input: %s\n", action.ToolInput) + } +} + +func (h *agentLogHandler) HandleAgentFinish(ctx context.Context, finish schema.AgentFinish) { + log.Println("🏆 Agent finished successfully") + if finish.ReturnValues != nil { + log.Printf(" Final output: %v\n", finish.ReturnValues) + } +} + +func (h *agentLogHandler) HandleLLMGenerateContentEnd(ctx context.Context, response *llms.ContentResponse) { + log.Println("✨ LLM content generation completed") +} From 38a56942ddbf339f24c10f3f1b3a191d138e81d9 Mon Sep 17 00:00:00 2001 From: hemarina Date: Wed, 23 Jul 2025 15:38:56 -0700 Subject: [PATCH 015/116] AI error suggestion --- cli/azd/internal/cmd/provision.go | 84 +++++++++++++++++++++----- cli/azd/pkg/agentRunner/agentRunner.go | 47 ++++++++++++++ cli/azd/tools/mcp/mcp.go | 55 +++++++++++------ 3 files changed, 152 insertions(+), 34 deletions(-) diff --git a/cli/azd/internal/cmd/provision.go b/cli/azd/internal/cmd/provision.go index cc7bc5224f4..89d95d2fb3e 100644 --- a/cli/azd/internal/cmd/provision.go +++ b/cli/azd/internal/cmd/provision.go @@ -16,12 +16,14 @@ import ( "github.com/azure/azure-dev/cli/azd/cmd/actions" "github.com/azure/azure-dev/cli/azd/internal" "github.com/azure/azure-dev/cli/azd/pkg/account" + "github.com/azure/azure-dev/cli/azd/pkg/agentRunner" "github.com/azure/azure-dev/cli/azd/pkg/alpha" "github.com/azure/azure-dev/cli/azd/pkg/azapi" "github.com/azure/azure-dev/cli/azd/pkg/cloud" "github.com/azure/azure-dev/cli/azd/pkg/environment" "github.com/azure/azure-dev/cli/azd/pkg/infra/provisioning" "github.com/azure/azure-dev/cli/azd/pkg/input" + 
"github.com/azure/azure-dev/cli/azd/pkg/llm" "github.com/azure/azure-dev/cli/azd/pkg/output" "github.com/azure/azure-dev/cli/azd/pkg/output/ux" "github.com/azure/azure-dev/cli/azd/pkg/project" @@ -113,6 +115,7 @@ type ProvisionAction struct { importManager *project.ImportManager alphaFeatureManager *alpha.FeatureManager portalUrlBase string + llmManager llm.Manager } func NewProvisionAction( @@ -130,6 +133,7 @@ func NewProvisionAction( subManager *account.SubscriptionsManager, alphaFeatureManager *alpha.FeatureManager, cloud *cloud.Cloud, + llmManager llm.Manager, ) actions.Action { return &ProvisionAction{ flags: flags, @@ -146,6 +150,7 @@ func NewProvisionAction( importManager: importManager, alphaFeatureManager: alphaFeatureManager, portalUrlBase: cloud.PortalUrlBase, + llmManager: llmManager, } } @@ -158,6 +163,54 @@ func (p *ProvisionAction) SetFlags(flags *ProvisionFlags) { p.flags = flags } +func (p *ProvisionAction) errorWithSuggestion(ctx context.Context, originalError error) error { + // Show preview of the error + previewWriter := p.console.ShowPreviewer(ctx, + &input.ShowPreviewerOptions{ + Prefix: " ", + MaxLineCount: 20, + Title: "Error Preview", + }) + fmt.Fprintf(previewWriter, "%s", originalError.Error()) + + // Ask user if they want to get error suggestions from AI + selection, err := p.console.Select(ctx, input.ConsoleOptions{ + Message: "Do you want to get error suggestions from AI?", + Options: []string{ + "Yes", + "No", + }, + }) + + p.console.StopPreviewer(ctx, false) + + if err != nil { + return fmt.Errorf("prompting failed to get error suggestions: %w", err) + } + + switch selection { + case 0: // get error suggestion + // it takes around 30-60s + p.console.MessageUxItem(ctx, &ux.MessageTitle{ + Title: "Getting AI error suggestions", + TitleNote: "Getting AI error suggestions can take some time", + }) + + result, errSampling := agentRunner.Run(ctx, p.console, p.llmManager, originalError) + // If llm/sampling fails, we still want to return the original error + if errSampling != nil { + fmt.Printf("Not able to get AI error suggestions: %s\n", errSampling) + return originalError + } + + return &internal.ErrorWithSuggestion{Err: originalError, Suggestion: result} + case 1: // don't get error suggestion + return originalError + } + + return originalError +} + func (p *ProvisionAction) Run(ctx context.Context) (*actions.ActionResult, error) { if p.flags.noProgress { fmt.Fprintln( @@ -186,23 +239,23 @@ func (p *ProvisionAction) Run(ctx context.Context) (*actions.ActionResult, error startTime := time.Now() if err := p.projectManager.Initialize(ctx, p.projectConfig); err != nil { - return nil, err + return nil, p.errorWithSuggestion(ctx, err) } if err := p.projectManager.EnsureAllTools(ctx, p.projectConfig, nil); err != nil { - return nil, err + return nil, p.errorWithSuggestion(ctx, err) } infra, err := p.importManager.ProjectInfrastructure(ctx, p.projectConfig) if err != nil { - return nil, err + return nil, p.errorWithSuggestion(ctx, err) } defer func() { _ = infra.Cleanup() }() infraOptions := infra.Options infraOptions.IgnoreDeploymentState = p.flags.ignoreDeploymentState if err := p.provisionManager.Initialize(ctx, p.projectConfig.Path, infraOptions); err != nil { - return nil, fmt.Errorf("initializing provisioning manager: %w", err) + return nil, p.errorWithSuggestion(ctx, fmt.Errorf("initializing provisioning manager: %w", err)) } // Get Subscription to Display in Command Title Note @@ -264,18 +317,18 @@ func (p *ProvisionAction) Run(ctx context.Context) 
(*actions.ActionResult, error if p.formatter.Kind() == output.JsonFormat { stateResult, err := p.provisionManager.State(ctx, nil) if err != nil { - return nil, fmt.Errorf( + return nil, p.errorWithSuggestion(ctx, fmt.Errorf( "deployment failed and the deployment result is unavailable: %w", multierr.Combine(err, err), - ) + )) } if err := p.formatter.Format( provisioning.NewEnvRefreshResultFromState(stateResult.State), p.writer, nil); err != nil { - return nil, fmt.Errorf( + return nil, p.errorWithSuggestion(ctx, fmt.Errorf( "deployment failed and the deployment result could not be displayed: %w", multierr.Combine(err, err), - ) + )) } } @@ -313,7 +366,8 @@ func (p *ProvisionAction) Run(ctx context.Context) (*actions.ActionResult, error } } - return nil, fmt.Errorf("deployment failed: %w", err) + return nil, p.errorWithSuggestion(ctx, fmt.Errorf("deployment failed: %w", err)) + } if previewMode { @@ -346,7 +400,7 @@ func (p *ProvisionAction) Run(ctx context.Context) (*actions.ActionResult, error servicesStable, err := p.importManager.ServiceStable(ctx, p.projectConfig) if err != nil { - return nil, err + return nil, p.errorWithSuggestion(ctx, err) } for _, svc := range servicesStable { @@ -359,25 +413,25 @@ func (p *ProvisionAction) Run(ctx context.Context) (*actions.ActionResult, error } if err := svc.RaiseEvent(ctx, project.ServiceEventEnvUpdated, eventArgs); err != nil { - return nil, err + return nil, p.errorWithSuggestion(ctx, err) } } if p.formatter.Kind() == output.JsonFormat { stateResult, err := p.provisionManager.State(ctx, nil) if err != nil { - return nil, fmt.Errorf( + return nil, p.errorWithSuggestion(ctx, fmt.Errorf( "deployment succeeded but the deployment result is unavailable: %w", multierr.Combine(err, err), - ) + )) } if err := p.formatter.Format( provisioning.NewEnvRefreshResultFromState(stateResult.State), p.writer, nil); err != nil { - return nil, fmt.Errorf( + return nil, p.errorWithSuggestion(ctx, fmt.Errorf( "deployment succeeded but the deployment result could not be displayed: %w", multierr.Combine(err, err), - ) + )) } } diff --git a/cli/azd/pkg/agentRunner/agentRunner.go b/cli/azd/pkg/agentRunner/agentRunner.go index b0cfdc1f2e0..7c7487892e4 100644 --- a/cli/azd/pkg/agentRunner/agentRunner.go +++ b/cli/azd/pkg/agentRunner/agentRunner.go @@ -283,3 +283,50 @@ func (h *agentLogHandler) HandleAgentFinish(ctx context.Context, finish schema.A func (h *agentLogHandler) HandleLLMGenerateContentEnd(ctx context.Context, response *llms.ContentResponse) { log.Println("✨ LLM content generation completed") } + +func promptingWithDifferentErrors(err error) string { + var respErr *azcore.ResponseError + var armDeployErr *azapi.AzureDeploymentError + var authFailedErr *auth.AuthFailedError + if errors.As(err, &respErr) { + return fmt.Sprintf(`I'm using Azure Developer CLI (azd) and encountered an Azure HTTP response error: %s + +This appears to be an Azure REST API error with status code %d and error code '%s'. Please: + +1. Explain what this specific error means and why it occurred +2. Provide step-by-step troubleshooting instructions without az cli command and instructions with az cli command +3. Suggest specific fixes for Bicep files and Terraform files if this is infrastructure provisioning related +4. If this involves Azure resource permissions, quotas, or configuration issues, provide the exact azure portal instructions and az cli commands to verify the changes from bicep or terraform files works +5. 
Provide suggestions only if this requires changes to Azure subscription settings, resource group permissions, or service principal setup + +Focus on actionable solutions rather than general advice.`, + err.Error(), respErr.StatusCode, respErr.ErrorCode) + } else if errors.As(err, &armDeployErr) { + return fmt.Sprintf(`I'm using Azure Developer CLI (azd) and encountered an Azure deployment error: %s + +This is a deployment validation or provisioning failure. Please: + +1. Explain what this specific error means and why it occurred +2. Provide step-by-step troubleshooting instructions without az cli command and instructions with az cli command +3. Suggest specific fixes for Bicep files and Terraform files +4. Provide the exact azure portal instructions and az cli commands to verify the suggested changes from bicep or terraform files works + +Focus on actionable solutions rather than general advice.`, + err.Error()) + } else if errors.As(err, &authFailedErr) { + // We should move this part under azd auth command + return fmt.Sprintf(`I'm using Azure Developer CLI (azd) and encountered an authentication error: %s. Please: + +1. Explain what this specific Azure authentication error means and common causes. +2. Identify which auth method is failing (device code, service principal, managed identity, interactive) and what should I do to fix it. +3. Provide specific azd auth commands to re-authenticate: + - azd auth logout + - azd auth login +4. Ensure correct tenant and subscription are selected +5. Verify Azure-related environment variables are correct + +Focus on actionable solutions rather than general advice.`, err.Error()) + } + + return fmt.Sprintf("I'm using Azure Developer CLI (azd) and I encountered an error: %s. Explain the error and what should I do next to fix it. 
Focus on actionable solutions rather than general advice.", err.Error()) +} diff --git a/cli/azd/tools/mcp/mcp.go b/cli/azd/tools/mcp/mcp.go index 51d2e65dc15..0c374f96e62 100644 --- a/cli/azd/tools/mcp/mcp.go +++ b/cli/azd/tools/mcp/mcp.go @@ -3,6 +3,7 @@ package main import ( "context" "fmt" + "log" "github.com/mark3labs/mcp-go/mcp" "github.com/mark3labs/mcp-go/server" @@ -19,16 +20,16 @@ func main() { // Define the tool tool := mcp.NewTool( - "hello_world", - mcp.WithDescription("Say hello to someone"), - mcp.WithString("name", + "Error_Handler", + mcp.WithDescription("Explain the error and provide a suggestion to fix it for azd provision related errors"), + mcp.WithString("errorPromptProvision", mcp.Required(), - mcp.Description("Name of the person to greet"), + mcp.Description("Complete error message from 'azd provision' command failure and complete prompt request to the tool"), ), ) // Register the tool handler - s.AddTool(tool, helloHandler) + s.AddTool(tool, errorProvisionHandler) // Start the server using stdio transport if err := server.ServeStdio(s); err != nil { @@ -36,9 +37,9 @@ func main() { } } -// Tool handler function -func helloHandler(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - name, err := request.RequireString("name") +func errorProvisionHandler(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + log.Println("🤖 Starting errorProvisionHandler session...") + promptRequest, err := request.RequireString("errorPromptProvision") if err != nil { return mcp.NewToolResultError(err.Error()), nil } @@ -46,10 +47,23 @@ func helloHandler(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallTo // Get the client session from context session := server.ClientSessionFromContext(ctx) if session == nil { - // If no session, fall back to simple greeting - return mcp.NewToolResultText(fmt.Sprintf("This is the MCP tool - Helloooo, %s!", name)), nil + // If no session, return to basic error + return mcp.NewToolResultText(fmt.Sprintf("Failed to connect MCP tool, fallback to original prompt request: %s", promptRequest)), nil } + // For VSCode only agent tool, this change will included in a separate PR for MCP tool + // samplingText := fmt.Sprintf(` + // I'm using Azure Developer CLI (azd) and running command 'azd provision'. I encountered the following error: %s + + // Determine if this error is a azure related error or http response error or authentication error or other errors. Please: + + // 1. Explain what this specific error means and why it occurred + // 2. Provide step-by-step troubleshooting instructions + // 3. If it is a azure related error or http response error, check infra folder and suggest specific fixes for Bicep files or Terraform files based on files in infra folder. After that, if user has azure cli installed, provide the exact Azure CLI commands and azure portal instructions to verify the changes works + // 4. Include any relevant azure.yaml configuration changes that might be needed + + // Focus on actionable solutions rather than general advice. + // `, promptRequest) // Check if the session supports sampling if samplingSession, ok := session.(server.SessionWithSampling); ok { // Create a sampling request to get a creative greeting @@ -60,7 +74,7 @@ func helloHandler(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallTo Role: mcp.RoleUser, Content: mcp.TextContent{ Type: "text", - Text: fmt.Sprintf("Please provide a creative and enthusiastic greeting for %s. 
Make it feel that it is from someone mysterious and a little scary!", name), + Text: fmt.Sprintf("I'm running azd command 'azd provision'. %s", promptRequest), }, }, }, @@ -69,30 +83,33 @@ func helloHandler(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallTo }, } + log.Printf("🤖 Sampling Request: %+v\n", samplingRequest) + // Send the sampling request to get a response from the host's LLM samplingResponse, err := samplingSession.RequestSampling(ctx, samplingRequest) + log.Printf("🤖 Sampling Response: %+v\n", samplingResponse) if err != nil { // If sampling fails, fall back to a simple greeting - return mcp.NewToolResultText(fmt.Sprintf("This is the MCP tool - Helloooo, %s! (sampling failed: %v)", name, err)), nil + return mcp.NewToolResultText(fmt.Sprintf("Failed to send sampling request, fallback to original prompt request: %s", promptRequest)), nil } // Extract the generated greeting from the sampling response - var generatedGreeting string + var errorSuggestion string if samplingResponse != nil { // The response Content field contains the message content if textContent, ok := samplingResponse.Content.(mcp.TextContent); ok { - generatedGreeting = textContent.Text + errorSuggestion = textContent.Text } else if contentStr, ok := samplingResponse.Content.(string); ok { - generatedGreeting = contentStr + errorSuggestion = contentStr } } // If we got a response, use it - if generatedGreeting != "" { - return mcp.NewToolResultText(fmt.Sprintf("🤖 AI-Generated Greeting: %s", generatedGreeting)), nil + if errorSuggestion != "" { + return mcp.NewToolResultText(fmt.Sprintf("🤖 AI-Generated Error Suggestion: %s", errorSuggestion)), nil } } - // Fallback to simple greeting - return mcp.NewToolResultText(fmt.Sprintf("This is the MCP tool - Helloooo, %s!", name)), nil + // Fallback to raw error message + return mcp.NewToolResultText(fmt.Sprintf("Failed to generate error suggestions, fallback to original prompt request: %s", promptRequest)), nil } From 8866ab00fae144458c0d5158d174329cd848ed0e Mon Sep 17 00:00:00 2001 From: hemarina Date: Wed, 23 Jul 2025 15:54:16 -0700 Subject: [PATCH 016/116] clean from hooks new --- cli/azd/cmd/hooks.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/cli/azd/cmd/hooks.go b/cli/azd/cmd/hooks.go index 8b4863d9505..3b0bcd93451 100644 --- a/cli/azd/cmd/hooks.go +++ b/cli/azd/cmd/hooks.go @@ -39,12 +39,6 @@ func hooksActions(root *actions.ActionDescriptor) *actions.ActionDescriptor { ActionResolver: newHooksRunAction, }) - group.Add("new", &actions.ActionDescriptorOptions{ - Command: newHooksNewCmd(), - FlagsResolver: newHooksNewFlags, - ActionResolver: newHooksNewAction, - }) - return group } From 7a8d66ff12502ebd1ce181d680afff4a6231b900 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Fri, 25 Jul 2025 17:41:26 -0700 Subject: [PATCH 017/116] WIP: azd ai chat Get AI configuration from config Adds factory to create agent instances --- .../extensions/azd.ai.start/AZURE_AI_SETUP.md | 98 +++++ cli/azd/extensions/azd.ai.start/README.md | 33 ++ cli/azd/extensions/azd.ai.start/USAGE.md | 50 +++ cli/azd/extensions/azd.ai.start/build.ps1 | 71 ++++ cli/azd/extensions/azd.ai.start/build.sh | 66 ++++ cli/azd/extensions/azd.ai.start/changelog.md | 3 + .../extensions/azd.ai.start/extension.yaml | 9 + cli/azd/extensions/azd.ai.start/go.mod | 60 +++ cli/azd/extensions/azd.ai.start/go.sum | 352 +++++++++++++++++ .../azd.ai.start/internal/agent/agent.go | 362 ++++++++++++++++++ .../azd.ai.start/internal/agent/factory.go | 66 ++++ 
.../azd.ai.start/internal/agent/response.go | 25 ++ .../azd.ai.start/internal/agent/stats.go | 16 + .../internal/cmd/enhanced_integration.go | 100 +++++ .../azd.ai.start/internal/cmd/root.go | 97 +++++ .../azd.ai.start/internal/logging/logger.go | 134 +++++++ .../azd.ai.start/internal/session/action.go | 41 ++ .../azd.ai.start/internal/session/session.go | 48 +++ .../internal/tools/change_directory.go | 51 +++ .../azd.ai.start/internal/tools/copy_file.go | 71 ++++ .../internal/tools/create_directory.go | 41 ++ .../internal/tools/current_directory.go | 39 ++ .../internal/tools/delete_directory.go | 53 +++ .../internal/tools/delete_file.go | 43 +++ .../internal/tools/directory_list.go | 92 +++++ .../azd.ai.start/internal/tools/file_info.go | 67 ++++ .../internal/tools/http_fetcher.go | 43 +++ .../azd.ai.start/internal/tools/move_file.go | 62 +++ .../azd.ai.start/internal/tools/read_file.go | 37 ++ .../azd.ai.start/internal/tools/weather.go | 105 +++++ .../azd.ai.start/internal/tools/write_file.go | 101 +++++ .../azd.ai.start/internal/utils/helpers.go | 41 ++ .../internal/validation/parser.go | 93 +++++ .../azd.ai.start/internal/validation/types.go | 21 + .../internal/validation/validator.go | 68 ++++ cli/azd/extensions/azd.ai.start/main.go | 30 ++ 36 files changed, 2689 insertions(+) create mode 100644 cli/azd/extensions/azd.ai.start/AZURE_AI_SETUP.md create mode 100644 cli/azd/extensions/azd.ai.start/README.md create mode 100644 cli/azd/extensions/azd.ai.start/USAGE.md create mode 100644 cli/azd/extensions/azd.ai.start/build.ps1 create mode 100644 cli/azd/extensions/azd.ai.start/build.sh create mode 100644 cli/azd/extensions/azd.ai.start/changelog.md create mode 100644 cli/azd/extensions/azd.ai.start/extension.yaml create mode 100644 cli/azd/extensions/azd.ai.start/go.mod create mode 100644 cli/azd/extensions/azd.ai.start/go.sum create mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/agent.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/factory.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/response.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/stats.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/cmd/root.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/logging/logger.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/session/action.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/session/session.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/file_info.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/move_file.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/read_file.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/weather.go create 
mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/write_file.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/utils/helpers.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/validation/parser.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/validation/types.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/validation/validator.go create mode 100644 cli/azd/extensions/azd.ai.start/main.go diff --git a/cli/azd/extensions/azd.ai.start/AZURE_AI_SETUP.md b/cli/azd/extensions/azd.ai.start/AZURE_AI_SETUP.md new file mode 100644 index 00000000000..9d985f2f36e --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/AZURE_AI_SETUP.md @@ -0,0 +1,98 @@ +# Azure AI Integration Setup + +This AI agent can work with both OpenAI and Azure OpenAI Service. Here's how to configure each: + +## Option 1: Azure OpenAI Service (Recommended for Azure users) + +Azure OpenAI provides the same models as OpenAI but hosted on Azure infrastructure with enterprise security and compliance. + +### Prerequisites +1. Azure subscription +2. Azure OpenAI resource created in Azure portal +3. GPT model deployed (e.g., GPT-3.5-turbo or GPT-4) + +### Environment Variables +```bash +# Set these environment variables for Azure OpenAI +export AZURE_OPENAI_ENDPOINT="https://your-resource-name.openai.azure.com" +export AZURE_OPENAI_API_KEY="your-azure-openai-api-key" +export AZURE_OPENAI_DEPLOYMENT_NAME="your-gpt-deployment-name" +``` + +### PowerShell (Windows) +```powershell +$env:AZURE_OPENAI_ENDPOINT="https://your-resource-name.openai.azure.com" +$env:AZURE_OPENAI_API_KEY="your-azure-openai-api-key" +$env:AZURE_OPENAI_DEPLOYMENT_NAME="your-gpt-deployment-name" +``` + +## Option 2: OpenAI API (Direct) + +### Environment Variables +```bash +export OPENAI_API_KEY="your-openai-api-key" +``` + +### PowerShell (Windows) +```powershell +$env:OPENAI_API_KEY="your-openai-api-key" +``` + +## Usage Examples + +```bash +# Interactive mode +azd ai.chat + +# Direct query +azd ai.chat "How do I deploy a Node.js app to Azure Container Apps?" + +# Azure-specific queries +azd ai.chat "What's the best way to set up CI/CD with Azure DevOps for my web app?" +azd ai.chat "How do I configure Azure Key Vault for my application secrets?" +``` + +## Azure OpenAI Advantages + +- **Enterprise Security**: Your data stays within your Azure tenant +- **Compliance**: Meets enterprise compliance requirements +- **Integration**: Better integration with other Azure services +- **Cost Control**: Better cost management and billing integration +- **Regional Deployment**: Deploy closer to your users for lower latency + +## Setup Steps for Azure OpenAI + +1. **Create Azure OpenAI Resource**: + ```bash + az cognitiveservices account create \ + --name myopenai \ + --resource-group myresourcegroup \ + --location eastus \ + --kind OpenAI \ + --sku s0 + ``` + +2. **Deploy a Model**: + - Go to Azure OpenAI Studio + - Navigate to "Deployments" + - Create a new deployment with your chosen model (e.g., gpt-35-turbo) + - Note the deployment name for the environment variable + +3. **Get API Key**: + ```bash + az cognitiveservices account keys list \ + --name myopenai \ + --resource-group myresourcegroup + ``` + +4. **Set Environment Variables** as shown above + +## Model Compatibility + +The agent supports various GPT models available in Azure OpenAI: +- GPT-3.5-turbo +- GPT-4 +- GPT-4-turbo +- And newer models as they become available + +Just make sure your deployment name matches the model you want to use. 
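
Editor's note: as a rough illustration of how the configuration described in AZURE_AI_SETUP.md could be consumed, the sketch below builds a langchaingo client from the same environment variables, preferring Azure OpenAI when `AZURE_OPENAI_ENDPOINT` is set and falling back to the public OpenAI API otherwise. This is a minimal, hypothetical example — the helper name `newClientFromEnv` is not part of this patch series, and the extension's actual factory code (internal/agent/factory.go) may differ.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/llms/openai"
)

// newClientFromEnv builds a langchaingo model from the environment variables
// documented above. Azure OpenAI is used when AZURE_OPENAI_ENDPOINT is set.
func newClientFromEnv() (llms.Model, error) {
	if endpoint := os.Getenv("AZURE_OPENAI_ENDPOINT"); endpoint != "" {
		return openai.New(
			openai.WithAPIType(openai.APITypeAzure),
			openai.WithBaseURL(endpoint),
			openai.WithToken(os.Getenv("AZURE_OPENAI_API_KEY")),
			// For Azure OpenAI, the "model" is the deployment name.
			openai.WithModel(os.Getenv("AZURE_OPENAI_DEPLOYMENT_NAME")),
		)
	}

	// Plain OpenAI: the client also reads OPENAI_API_KEY implicitly, but
	// passing it explicitly keeps the precedence obvious.
	return openai.New(openai.WithToken(os.Getenv("OPENAI_API_KEY")))
}

func main() {
	llm, err := newClientFromEnv()
	if err != nil {
		log.Fatalf("creating LLM client: %v", err)
	}

	answer, err := llms.GenerateFromSinglePrompt(context.Background(), llm,
		"How do I deploy a Node.js app to Azure Container Apps?")
	if err != nil {
		log.Fatalf("generating completion: %v", err)
	}
	fmt.Println(answer)
}
```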
diff --git a/cli/azd/extensions/azd.ai.start/README.md b/cli/azd/extensions/azd.ai.start/README.md new file mode 100644 index 00000000000..9ff29633ea4 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/README.md @@ -0,0 +1,33 @@ +# Node.js Express App + +This is a simple Node.js application using Express with a basic routing setup. + +## Project Structure + +``` +. +├── app.js +├── package.json +├── README.md +└── routes + └── index.js +``` + +## Getting Started + +1. Install dependencies: + ```bash + npm install + ``` +2. Start the server: + ```bash + npm start + ``` +3. Visit [http://localhost:3000](http://localhost:3000) in your browser. + +## Features +- Express server setup +- Modular routing + +## License +ISC \ No newline at end of file diff --git a/cli/azd/extensions/azd.ai.start/USAGE.md b/cli/azd/extensions/azd.ai.start/USAGE.md new file mode 100644 index 00000000000..7218badc825 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/USAGE.md @@ -0,0 +1,50 @@ +# Azure AI Agent - Multi-turn Chat Demo + +Your Azure AI Agent now supports two modes: + +## 1. Single Query Mode +For one-time questions, pass the query as arguments: +```bash +azd.ai.start.exe "How do I deploy a Node.js app to Azure?" +``` + +## 2. Interactive Chat Mode +For multi-turn conversations, run without arguments: +```bash +azd.ai.start.exe +``` + +In interactive mode, you'll see: +- 🤖 Welcome message with instructions +- 💬 You: prompt for your input +- 🤖 AI Agent: responses with context awareness +- Type 'exit' or 'quit' to end the session +- Maintains conversation history for context + +### Features: +- ✅ **Context Aware**: Remembers previous messages in the conversation +- ✅ **Azure Focused**: Specialized for Azure development tasks +- ✅ **Easy Exit**: Type 'exit', 'quit', or Ctrl+C to quit +- ✅ **Memory Management**: Keeps last 10 exchanges to prevent context overflow +- ✅ **Error Handling**: Gracefully handles errors and continues the conversation + +### Example Interactive Session: +``` +🤖 Azure AI Agent - Interactive Chat Mode +Type 'exit', 'quit', or press Ctrl+C to exit +═══════════════════════════════════════════════ + +💬 You: What is Azure App Service? + +🤖 AI Agent: Azure App Service is a platform-as-a-service (PaaS)... + +💬 You: How do I deploy to it? + +🤖 AI Agent: Based on our previous discussion about App Service... + +💬 You: exit + +👋 Goodbye! Thanks for using Azure AI Agent! +``` + +The agent maintains conversation context, so follow-up questions work naturally! 
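
Editor's note: the interactive mode described in USAGE.md boils down to a read-eval loop that appends each exchange to a message history and trims that history so the context window stays bounded. Below is a minimal sketch of such a loop, assuming the langchaingo openai client and an `OPENAI_API_KEY` in the environment; the trimming policy (system prompt plus the last 10 exchanges) mirrors the description above but is illustrative, not the extension's actual implementation.

```go
package main

import (
	"bufio"
	"context"
	"fmt"
	"log"
	"os"
	"strings"

	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/llms/openai"
)

const maxTurns = 10 // keep only the most recent exchanges, per the doc above

func main() {
	llm, err := openai.New() // reads OPENAI_API_KEY from the environment
	if err != nil {
		log.Fatalf("creating LLM client: %v", err)
	}

	history := []llms.MessageContent{
		llms.TextParts(llms.ChatMessageTypeSystem,
			"You are an assistant focused on Azure development tasks."),
	}

	scanner := bufio.NewScanner(os.Stdin)
	ctx := context.Background()
	for {
		fmt.Print("💬 You: ")
		if !scanner.Scan() {
			break
		}
		input := strings.TrimSpace(scanner.Text())
		if input == "exit" || input == "quit" {
			fmt.Println("👋 Goodbye!")
			break
		}

		history = append(history, llms.TextParts(llms.ChatMessageTypeHuman, input))

		resp, err := llm.GenerateContent(ctx, history)
		if err != nil || len(resp.Choices) == 0 {
			fmt.Printf("error generating response: %v\n", err)
			continue
		}
		answer := resp.Choices[0].Content
		fmt.Printf("\n🤖 AI Agent: %s\n\n", answer)
		history = append(history, llms.TextParts(llms.ChatMessageTypeAI, answer))

		// Trim: keep the system message plus the last maxTurns*2 messages
		// (one user and one assistant message per exchange).
		if len(history) > 1+maxTurns*2 {
			trimmed := make([]llms.MessageContent, 0, 1+maxTurns*2)
			trimmed = append(trimmed, history[0])
			trimmed = append(trimmed, history[len(history)-maxTurns*2:]...)
			history = trimmed
		}
	}
}
```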
diff --git a/cli/azd/extensions/azd.ai.start/build.ps1 b/cli/azd/extensions/azd.ai.start/build.ps1 new file mode 100644 index 00000000000..8cdd4ae9281 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/build.ps1 @@ -0,0 +1,71 @@ +# Get the directory of the script +$EXTENSION_DIR = Split-Path -Parent $MyInvocation.MyCommand.Path + +# Change to the script directory +Set-Location -Path $EXTENSION_DIR + +# Create a safe version of EXTENSION_ID replacing dots with dashes +$EXTENSION_ID_SAFE = $env:EXTENSION_ID -replace '\.', '-' + +# Define output directory +$OUTPUT_DIR = if ($env:OUTPUT_DIR) { $env:OUTPUT_DIR } else { Join-Path $EXTENSION_DIR "bin" } + +# Create output directory if it doesn't exist +if (-not (Test-Path -Path $OUTPUT_DIR)) { + New-Item -ItemType Directory -Path $OUTPUT_DIR | Out-Null +} + +# Get Git commit hash and build date +$COMMIT = git rev-parse HEAD +$BUILD_DATE = (Get-Date -Format "yyyy-MM-ddTHH:mm:ssZ") + +# List of OS and architecture combinations +if ($env:EXTENSION_PLATFORM) { + $PLATFORMS = @($env:EXTENSION_PLATFORM) +} +else { + $PLATFORMS = @( + "windows/amd64", + "windows/arm64", + "darwin/amd64", + "darwin/arm64", + "linux/amd64", + "linux/arm64" + ) +} + +$APP_PATH = "$env:EXTENSION_ID/internal/cmd" + +# Loop through platforms and build +foreach ($PLATFORM in $PLATFORMS) { + $OS, $ARCH = $PLATFORM -split '/' + + $OUTPUT_NAME = Join-Path $OUTPUT_DIR "$EXTENSION_ID_SAFE-$OS-$ARCH" + + if ($OS -eq "windows") { + $OUTPUT_NAME += ".exe" + } + + Write-Host "Building for $OS/$ARCH..." + + # Delete the output file if it already exists + if (Test-Path -Path $OUTPUT_NAME) { + Remove-Item -Path $OUTPUT_NAME -Force + } + + # Set environment variables for Go build + $env:GOOS = $OS + $env:GOARCH = $ARCH + + go build ` + -ldflags="-X '$APP_PATH.Version=$env:EXTENSION_VERSION' -X '$APP_PATH.Commit=$COMMIT' -X '$APP_PATH.BuildDate=$BUILD_DATE'" ` + -o $OUTPUT_NAME + + if ($LASTEXITCODE -ne 0) { + Write-Host "An error occurred while building for $OS/$ARCH" + exit 1 + } +} + +Write-Host "Build completed successfully!" +Write-Host "Binaries are located in the $OUTPUT_DIR directory." diff --git a/cli/azd/extensions/azd.ai.start/build.sh b/cli/azd/extensions/azd.ai.start/build.sh new file mode 100644 index 00000000000..f1a995ec5e9 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/build.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +# Get the directory of the script +EXTENSION_DIR="$(cd "$(dirname "$0")" && pwd)" + +# Change to the script directory +cd "$EXTENSION_DIR" || exit + +# Create a safe version of EXTENSION_ID replacing dots with dashes +EXTENSION_ID_SAFE="${EXTENSION_ID//./-}" + +# Define output directory +OUTPUT_DIR="${OUTPUT_DIR:-$EXTENSION_DIR/bin}" + +# Create output and target directories if they don't exist +mkdir -p "$OUTPUT_DIR" + +# Get Git commit hash and build date +COMMIT=$(git rev-parse HEAD) +BUILD_DATE=$(date -u +%Y-%m-%dT%H:%M:%SZ) + +# List of OS and architecture combinations +if [ -n "$EXTENSION_PLATFORM" ]; then + PLATFORMS=("$EXTENSION_PLATFORM") +else + PLATFORMS=( + "windows/amd64" + "windows/arm64" + "darwin/amd64" + "darwin/arm64" + "linux/amd64" + "linux/arm64" + ) +fi + +APP_PATH="$EXTENSION_ID/internal/cmd" + +# Loop through platforms and build +for PLATFORM in "${PLATFORMS[@]}"; do + OS=$(echo "$PLATFORM" | cut -d'/' -f1) + ARCH=$(echo "$PLATFORM" | cut -d'/' -f2) + + OUTPUT_NAME="$OUTPUT_DIR/$EXTENSION_ID_SAFE-$OS-$ARCH" + + if [ "$OS" = "windows" ]; then + OUTPUT_NAME+='.exe' + fi + + echo "Building for $OS/$ARCH..." 
+ + # Delete the output file if it already exists + [ -f "$OUTPUT_NAME" ] && rm -f "$OUTPUT_NAME" + + # Set environment variables for Go build + GOOS=$OS GOARCH=$ARCH go build \ + -ldflags="-X '$APP_PATH.Version=$EXTENSION_VERSION' -X '$APP_PATH.Commit=$COMMIT' -X '$APP_PATH.BuildDate=$BUILD_DATE'" \ + -o "$OUTPUT_NAME" + + if [ $? -ne 0 ]; then + echo "An error occurred while building for $OS/$ARCH" + exit 1 + fi +done + +echo "Build completed successfully!" +echo "Binaries are located in the $OUTPUT_DIR directory." diff --git a/cli/azd/extensions/azd.ai.start/changelog.md b/cli/azd/extensions/azd.ai.start/changelog.md new file mode 100644 index 00000000000..b88d613cce0 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/changelog.md @@ -0,0 +1,3 @@ +# Release History + +## 0.0.1 - Initial Version \ No newline at end of file diff --git a/cli/azd/extensions/azd.ai.start/extension.yaml b/cli/azd/extensions/azd.ai.start/extension.yaml new file mode 100644 index 00000000000..2c645db27b3 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/extension.yaml @@ -0,0 +1,9 @@ +capabilities: + - custom-commands +description: Enables interactive AI agent through AZD +displayName: AZD AI Agent +id: azd.ai.start +language: go +namespace: ai.chat +usage: azd ai.chat [options] +version: 0.0.1 diff --git a/cli/azd/extensions/azd.ai.start/go.mod b/cli/azd/extensions/azd.ai.start/go.mod new file mode 100644 index 00000000000..c840c72262d --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/go.mod @@ -0,0 +1,60 @@ +module azd.ai.start + +go 1.24.1 + +require ( + github.com/fatih/color v1.18.0 + github.com/spf13/cobra v1.9.1 + github.com/tmc/langchaingo v0.1.13 +) + +require ( + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.3.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c // indirect + github.com/dlclark/regexp2 v1.10.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/goph/emperror v0.17.2 // indirect + github.com/huandu/xstrings v1.3.3 // indirect + github.com/imdario/mergo v0.3.13 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect + github.com/mitchellh/copystructure v1.0.0 // indirect + github.com/mitchellh/reflectwalk v1.0.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/nikolalohinski/gonja v1.5.3 // indirect + github.com/pelletier/go-toml/v2 v2.0.9 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pkoukk/tiktoken-go v0.1.6 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/shopspring/decimal v1.2.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spf13/cast v1.3.1 // indirect + github.com/spf13/pflag v1.0.6 // indirect + github.com/stretchr/testify v1.10.0 // indirect + github.com/yargevad/filepathx v1.0.0 // indirect + go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/otel/metric v1.35.0 // indirect + go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 // indirect + golang.org/x/crypto 
v0.37.0 // indirect + golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect + golang.org/x/net v0.39.0 // indirect + golang.org/x/oauth2 v0.25.0 // indirect + golang.org/x/sync v0.13.0 // indirect + golang.org/x/sys v0.32.0 // indirect + golang.org/x/text v0.24.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250407143221-ac9807e6c755 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250407143221-ac9807e6c755 // indirect + google.golang.org/grpc v1.71.1 // indirect + google.golang.org/protobuf v1.36.6 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/cli/azd/extensions/azd.ai.start/go.sum b/cli/azd/extensions/azd.ai.start/go.sum new file mode 100644 index 00000000000..bc863f91c5c --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/go.sum @@ -0,0 +1,352 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.114.0 h1:OIPFAdfrFDFO2ve2U7r/H5SwSbBzEdrBdE7xkgwc+kY= +cloud.google.com/go v0.114.0/go.mod h1:ZV9La5YYxctro1HTPug5lXH/GefROyW8PPD4T8n9J8E= +cloud.google.com/go/ai v0.7.0 h1:P6+b5p4gXlza5E+u7uvcgYlzZ7103ACg70YdZeC6oGE= +cloud.google.com/go/ai v0.7.0/go.mod h1:7ozuEcraovh4ABsPbrec3o4LmFl9HigNI3D5haxYeQo= +cloud.google.com/go/aiplatform v1.68.0 h1:EPPqgHDJpBZKRvv+OsB3cr0jYz3EL2pZ+802rBPcG8U= +cloud.google.com/go/aiplatform v1.68.0/go.mod h1:105MFA3svHjC3Oazl7yjXAmIR89LKhRAeNdnDKJczME= +cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw= +cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s= +cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= +cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= +cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU= +cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng= +cloud.google.com/go/vertexai v0.12.0 h1:zTadEo/CtsoyRXNx3uGCncoWAP1H2HakGqwznt+iMo8= +cloud.google.com/go/vertexai v0.12.0/go.mod h1:8u+d0TsvBfAAd2x5R6GMgbYhsLgo3J7lmP4bR8g2ig8= +github.com/AssemblyAI/assemblyai-go-sdk v1.3.0 h1:AtOVgGxUycvK4P4ypP+1ZupecvFgnfH+Jsum0o5ILoU= +github.com/AssemblyAI/assemblyai-go-sdk v1.3.0/go.mod h1:H0naZbvpIW49cDA5ZZ/gggeXqi7ojSGB1mqshRk6kNE= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= +github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/PuerkitoBio/goquery v1.8.1 
h1:uQxhNlArOIdbrH1tr0UXwdVFgDcZDrZVdcpygAcwmWM= +github.com/PuerkitoBio/goquery v1.8.1/go.mod h1:Q8ICL1kNUJ2sXGoAhPGUdYDJvgQgHzJsnnd3H7Ho5jQ= +github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o= +github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss= +github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= +github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= +github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= +github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c h1:pi62a7GwfbxvZDXhV4DfhxeePzpVCoyr9/rZaWH5eow= +github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c/go.mod h1:mSTaPODklWyhruY0DZgPw1DI97K5cHXfU3afMqGf0IM= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0= +github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/getsentry/raven-go v0.2.0/go.mod 
h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/getzep/zep-go v1.0.4 h1:09o26bPP2RAPKFjWuVWwUWLbtFDF/S8bfbilxzeZAAg= +github.com/getzep/zep-go v1.0.4/go.mod h1:HC1Gz7oiyrzOTvzeKC4dQKUiUy87zpIJl0ZFXXdHuss= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/generative-ai-go v0.15.1 h1:n8aQUpvhPOlGVuM2DRkJ2jvx04zpp42B778AROJa+pQ= +github.com/google/generative-ai-go v0.15.1/go.mod h1:AAucpWZjXsDKhQYWvCYuP6d0yB1kX998pJlOW1rAesw= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= 
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= +github.com/goph/emperror v0.17.2 h1:yLapQcmEsO0ipe9p5TaN22djm3OFV/TfM/fcYP0/J18= +github.com/goph/emperror v0.17.2/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic= +github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= +github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= +github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80 h1:6Yzfa6GP0rIo/kULo2bwGEkFvCePZ3qHDDTC3/J9Swo= +github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable 
v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= +github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58= +github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/nikolalohinski/gonja v1.5.3 h1:GsA+EEaZDZPGJ8JtpeGN78jidhOlxeJROpqMT9fTj9c= +github.com/nikolalohinski/gonja v1.5.3/go.mod h1:RmjwxNiXAEqcq1HeK5SSMmqFJvKOfTfXhkJv6YBtPa4= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0= +github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkoukk/tiktoken-go v0.1.6 h1:JF0TlJzhTbrI30wCvFuiw6FzP2+/bR+FIxUdgEAcUsw= +github.com/pkoukk/tiktoken-go v0.1.6/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.2.0/go.mod 
h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tmc/langchaingo v0.1.13 h1:rcpMWBIi2y3B90XxfE4Ao8dhCQPVDMaNPnN5cGB1CaA= +github.com/tmc/langchaingo v0.1.13/go.mod h1:vpQ5NOIhpzxDfTZK9B6tf2GM/MoaHewPWM5KXXGh7hg= +github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= +github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc= +github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181 h1:K+bMSIx9A7mLES1rtG+qKduLIXq40DAzYHtb0XuCukA= +gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181/go.mod h1:dzYhVIwWCtzPAa4QP98wfB9+mzt33MSmM8wsKiMi2ow= +gitlab.com/golang-commonmark/linkify v0.0.0-20191026162114-a0c2df6c8f82 h1:oYrL81N608MLZhma3ruL8qTM4xcpYECGut8KSxRY59g= +gitlab.com/golang-commonmark/linkify v0.0.0-20191026162114-a0c2df6c8f82/go.mod h1:Gn+LZmCrhPECMD3SOKlE+BOHwhOYD9j7WT9NUtkCrC8= +gitlab.com/golang-commonmark/markdown v0.0.0-20211110145824-bf3e522c626a h1:O85GKETcmnCNAfv4Aym9tepU8OE0NmcZNqPlXcsBKBs= +gitlab.com/golang-commonmark/markdown v0.0.0-20211110145824-bf3e522c626a/go.mod h1:LaSIs30YPGs1H5jwGgPhLzc8vkNc/k0rDX/fEZqiU/M= +gitlab.com/golang-commonmark/mdurl v0.0.0-20191124015652-932350d1cb84 h1:qqjvoVXdWIcZCLPMlzgA7P9FZWdPGPvP/l3ef8GzV6o= +gitlab.com/golang-commonmark/mdurl v0.0.0-20191124015652-932350d1cb84/go.mod h1:IJZ+fdMvbW2qW6htJx7sLJ04FEs4Ldl/MDsJtMKywfw= +gitlab.com/golang-commonmark/puny 
v0.0.0-20191124015043-9f83538fa04f h1:Wku8eEdeJqIOFHtrfkYUByc4bCaTeA6fL0UJgfEiFMI= +gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f/go.mod h1:Tiuhl+njh/JIg0uS/sOJVYi0x2HEa5rc1OAaVsb5tAs= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 h1:A3SayB3rNyt+1S6qpI9mHPkeHTZbD7XILEqWnYZb2l0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0/go.mod h1:27iA5uvhuRNmalO+iEUdVn5ZMj2qy10Mm+XRIpRmyuU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 h1:Ss6D3hLXTM0KobyBYEAygXzFfGcjnmfEJOBgSbemCtg= +go.starlark.net v0.0.0-20230302034142-4b1e35fe2254/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= +golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE= +google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20240528184218-531527333157 h1:u7WMYrIrVvs0TF5yaKwKNbcJyySYf+HAIFXxWltJOXE= +google.golang.org/genproto v0.0.0-20240528184218-531527333157/go.mod h1:ubQlAQnzejB8uZzszhrTCU2Fyp6Vi7ZE5nn0c3W8+qQ= +google.golang.org/genproto/googleapis/api v0.0.0-20250407143221-ac9807e6c755 h1:AMLTAunltONNuzWgVPZXrjLWtXpsG6A3yLLPEoJ/IjU= +google.golang.org/genproto/googleapis/api v0.0.0-20250407143221-ac9807e6c755/go.mod h1:2R6XrVC8Oc08GlNh8ujEpc7HkLiEZ16QeY7FxIs20ac= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250407143221-ac9807e6c755 h1:TwXJCGVREgQ/cl18iY0Z4wJCTL/GmW+Um2oSwZiZPnc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250407143221-ac9807e6c755/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI= +google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod 
h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go new file mode 100644 index 00000000000..ba8c284aa52 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go @@ -0,0 +1,362 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package agent + +import ( + "context" + "fmt" + "time" + + "github.com/tmc/langchaingo/agents" + "github.com/tmc/langchaingo/schema" + "github.com/tmc/langchaingo/tools" + + "azd.ai.start/internal/logging" + "azd.ai.start/internal/session" + "azd.ai.start/internal/utils" + "azd.ai.start/internal/validation" +) + +// AzureAIAgent represents an enhanced Azure AI agent with action tracking and intent validation +type AzureAIAgent struct { + agent *agents.ConversationalAgent + executor *agents.Executor + memory schema.Memory + tools []tools.Tool + intentValidator *validation.IntentValidator + actionLogger *logging.ActionLogger + currentSession *session.ActionSession +} + +// ProcessQuery processes a user query with full action tracking and validation +func (aai *AzureAIAgent) ProcessQuery(ctx context.Context, userInput string) (*AgentResponse, error) { + // Start new action session + sess := session.NewActionSession(userInput) + aai.currentSession = sess + + fmt.Printf("\n🎯 Intent: %s\n", userInput) + fmt.Printf("📋 Planning and executing actions...\n") + fmt.Println("═══════════════════════════════════════") + + // Clear previous actions + aai.actionLogger.Clear() + + // Enhanced user input with explicit completion requirements + enhancedInput := fmt.Sprintf(`%s + +IMPORTANT: You must complete this task successfully. Do not stop until: +1. All required actions have been executed +2. Any files that need to be created are actually saved +3. You verify the results of your actions +4. The task is fully accomplished + +If a tool fails, analyze why and try again with corrections. If you need to create files, use the write_file tool with the complete content.`, userInput) + + // Execute with enhanced input + result, err := aai.executor.Call(ctx, map[string]any{ + "input": enhancedInput, + }) + + if err != nil { + sess.End() + fmt.Printf("❌ Execution failed: %s\n", err.Error()) + return nil, err + } + + // Get executed actions from logger and intermediate steps + executedActions := aai.actionLogger.GetActions() + for _, action := range executedActions { + sess.AddExecutedAction(action) + } + + // If no actions in logger but we have intermediate steps, extract them + if len(sess.ExecutedActions) == 0 { + if steps, ok := result["intermediateSteps"].([]schema.AgentStep); ok && len(steps) > 0 { + for _, step := range steps { + actionLog := session.ActionLog{ + Timestamp: time.Now(), + Action: step.Action.Tool, + Tool: step.Action.Tool, + Input: step.Action.ToolInput, + Output: step.Observation, + Success: true, + Duration: time.Millisecond * 100, // Approximate + } + sess.AddExecutedAction(actionLog) + } + } + } + + // Check if any actions were taken - if not, this was likely conversational + if len(sess.ExecutedActions) == 0 { + fmt.Printf("💬 No tool actions needed - appears to be conversational\n") + + sess.End() + validationResult := &validation.ValidationResult{ + Status: validation.ValidationComplete, + Explanation: "Conversational response - no actions required", + Confidence: 1.0, + } + sess.SetValidationResult(validationResult) + + // Display simple summary for conversational responses + fmt.Println("\n📊 Session Summary") + fmt.Println("═══════════════════════════════════════") + duration := sess.EndTime.Sub(sess.StartTime) + fmt.Printf("⏱️ Duration: %v\n", duration.Round(time.Millisecond)) + fmt.Println("\n💬 Conversational response - no tool actions needed") + fmt.Printf("🎯 Intent Status: %s (%.1f%% confidence)\n", validationResult.Status, validationResult.Confidence*100) + 
fmt.Println("═══════════════════════════════════════") + + return NewAgentResponse(result["output"].(string), sess, validationResult), nil + } + + // Actions were taken, so validate and potentially retry + var lastResult = result + var lastValidation *validation.ValidationResult + maxAttempts := 3 // Maximum retry attempts for incomplete tasks + + for attempt := 1; attempt <= maxAttempts; attempt++ { + // Validate intent completion with enhanced validation + fmt.Printf("\n🔍 Validating completion...\n") + validationResult := aai.intentValidator.ValidateCompletion( + userInput, + sess.ExecutedActions, + ) + lastValidation = validationResult + sess.SetValidationResult(validationResult) + + // Check if task is complete + if validationResult.Status == validation.ValidationComplete { + fmt.Printf("✅ Task completed successfully!\n") + break + } + + // If task is incomplete and we have more attempts, retry + if attempt < maxAttempts { + if validationResult.Status == validation.ValidationIncomplete || validationResult.Status == validation.ValidationPartial { + fmt.Printf("⚠️ Task incomplete (attempt %d/%d): %s\n", attempt, maxAttempts, validationResult.Explanation) + fmt.Printf("🔄 Analyzing what's missing and taking corrective action...\n") + + // Clear previous actions for retry + aai.actionLogger.Clear() + + // Enhanced retry with feedback about what was incomplete + retryInput := fmt.Sprintf(`%s + +IMPORTANT: You must complete this task successfully. Do not stop until: +1. All required actions have been executed +2. Any files that need to be created are actually saved +3. You verify the results of your actions +4. The task is fully accomplished + +PREVIOUS ATTEMPT ANALYSIS: The previous attempt was marked as %s. +Reason: %s + +Please analyze what was missing or incomplete and take the necessary additional actions to fully complete the task.`, + userInput, validationResult.Status, validationResult.Explanation) + + // Execute retry + retryResult, err := aai.executor.Call(ctx, map[string]any{ + "input": retryInput, + }) + + if err != nil { + fmt.Printf("❌ Retry attempt %d failed: %s\n", attempt+1, err.Error()) + if attempt == maxAttempts-1 { + sess.End() + return nil, err + } + continue + } + + lastResult = retryResult + + // Get new actions from this retry + retryActions := aai.actionLogger.GetActions() + if len(retryActions) == 0 { + if steps, ok := retryResult["intermediateSteps"].([]schema.AgentStep); ok && len(steps) > 0 { + for _, step := range steps { + actionLog := session.ActionLog{ + Timestamp: time.Now(), + Action: step.Action.Tool, + Tool: step.Action.Tool, + Input: step.Action.ToolInput, + Output: step.Observation, + Success: true, + Duration: time.Millisecond * 100, + } + retryActions = append(retryActions, actionLog) + } + } + } + + // Accumulate actions from retry + for _, action := range retryActions { + sess.AddExecutedAction(action) + } + continue + } + } else { + // This was the last attempt and still incomplete + fmt.Printf("⚠️ Task still incomplete after %d attempts: %s\n", maxAttempts, validationResult.Explanation) + fmt.Printf("💡 Consider:\n") + fmt.Printf(" - Breaking the task into smaller, more specific steps\n") + fmt.Printf(" - Checking if all required files were actually created\n") + fmt.Printf(" - Verifying tool outputs were successful\n") + } + } + + sess.End() + + // Display comprehensive summary + aai.displayCompleteSummary(sess, lastResult) + + return NewAgentResponse(lastResult["output"].(string), sess, lastValidation), nil +} + +// ProcessQueryWithRetry 
processes a query with automatic retry on failure +func (aai *AzureAIAgent) ProcessQueryWithRetry(ctx context.Context, userInput string, maxRetries int) (*AgentResponse, error) { + var lastErr error + var lastResponse *AgentResponse + + for attempt := 1; attempt <= maxRetries; attempt++ { + fmt.Printf("\n🔄 Attempt %d/%d\n", attempt, maxRetries) + + response, err := aai.ProcessQuery(ctx, userInput) + if err != nil { + lastErr = err + fmt.Printf("❌ Attempt %d failed: %s\n", attempt, err.Error()) + continue + } + + lastResponse = response + + // Check if task completed successfully + if response.Validation.Status == validation.ValidationComplete { + fmt.Printf("✅ Task completed successfully on attempt %d\n", attempt) + return response, nil + } + + if response.Validation.Status == validation.ValidationPartial { + fmt.Printf("⚠️ Partial completion on attempt %d: %s\n", attempt, response.Validation.Explanation) + } else { + fmt.Printf("❌ Task incomplete on attempt %d: %s\n", attempt, response.Validation.Explanation) + } + + // Clear memory for fresh retry + aai.ClearMemory(ctx) + } + + if lastResponse != nil { + return lastResponse, nil + } + + return nil, fmt.Errorf("all %d attempts failed, last error: %w", maxRetries, lastErr) +} + +// GetSessionStats returns statistics about the current session +func (aai *AzureAIAgent) GetSessionStats() *SessionStats { + if aai.currentSession == nil { + return &SessionStats{} + } + + stats := &SessionStats{ + TotalActions: len(aai.currentSession.ExecutedActions), + SuccessfulActions: 0, + FailedActions: 0, + TotalDuration: aai.currentSession.EndTime.Sub(aai.currentSession.StartTime), + } + + for _, action := range aai.currentSession.ExecutedActions { + if action.Success { + stats.SuccessfulActions++ + } else { + stats.FailedActions++ + } + } + + return stats +} + +// GetMemoryContent returns the current memory content for debugging +func (aai *AzureAIAgent) GetMemoryContent(ctx context.Context) (map[string]any, error) { + return aai.memory.LoadMemoryVariables(ctx, map[string]any{}) +} + +// ClearMemory clears the conversation memory +func (aai *AzureAIAgent) ClearMemory(ctx context.Context) error { + return aai.memory.Clear(ctx) +} + +// EnableVerboseLogging enables detailed iteration logging +func (aai *AzureAIAgent) EnableVerboseLogging() { + // This would enable more detailed logging in the action logger + fmt.Println("🔍 Verbose logging enabled - you'll see detailed iteration steps") +} + +// displayCompleteSummary displays a comprehensive summary of the session +func (aai *AzureAIAgent) displayCompleteSummary(sess *session.ActionSession, result map[string]any) { + fmt.Println("\n📊 Session Summary") + fmt.Println("═══════════════════════════════════════") + + // Display timing + duration := sess.EndTime.Sub(sess.StartTime) + fmt.Printf("⏱️ Duration: %v\n", duration.Round(time.Millisecond)) + + // Display actions with attempt grouping + if len(sess.ExecutedActions) > 0 { + fmt.Println("\n🔧 Actions Executed:") + for i, action := range sess.ExecutedActions { + status := "✅" + if !action.Success { + status = "❌" + } + fmt.Printf(" %s %d. 
%s (%v)\n", + status, i+1, + utils.TruncateString(action.Input, 50), + action.Duration.Round(time.Millisecond)) + } + } else { + fmt.Println("\n🔧 No explicit tool actions required") + } + + // Display validation result with enhanced messaging + if validationResult, ok := sess.ValidationResult.(*validation.ValidationResult); ok { + fmt.Printf("\n🎯 Intent Status: %s", validationResult.Status) + if validationResult.Confidence > 0 { + fmt.Printf(" (%.1f%% confidence)", validationResult.Confidence*100) + } + fmt.Println() + + if validationResult.Explanation != "" { + fmt.Printf("💭 Assessment: %s\n", validationResult.Explanation) + } + + // Show completion status with actionable advice + switch validationResult.Status { + case validation.ValidationComplete: + fmt.Printf("🎉 Task completed successfully!\n") + case validation.ValidationPartial: + fmt.Printf("⚠️ Task partially completed. Some aspects may need attention.\n") + case validation.ValidationIncomplete: + fmt.Printf("❌ Task incomplete. Additional actions may be needed.\n") + case validation.ValidationError: + fmt.Printf("⚠️ Validation error. Please review the actions taken.\n") + } + } + + // Display intermediate steps if available + if steps, ok := result["intermediateSteps"].([]schema.AgentStep); ok && len(steps) > 0 { + fmt.Printf("\n🔍 Reasoning Steps: %d\n", len(steps)) + for i, step := range steps { + fmt.Printf("Step %d:\n", i+1) + fmt.Printf(" Tool: %s\n", step.Action.Tool) + fmt.Printf(" Input: %s\n", step.Action.ToolInput) + fmt.Printf(" Observation: %s\n", utils.TruncateString(step.Observation, 200)) + } + } + + fmt.Println("═══════════════════════════════════════") +} diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/factory.go b/cli/azd/extensions/azd.ai.start/internal/agent/factory.go new file mode 100644 index 00000000000..3b979181591 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/agent/factory.go @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package agent + +import ( + "github.com/tmc/langchaingo/agents" + "github.com/tmc/langchaingo/llms/openai" + "github.com/tmc/langchaingo/memory" + "github.com/tmc/langchaingo/tools" + + "azd.ai.start/internal/logging" + mytools "azd.ai.start/internal/tools" + "azd.ai.start/internal/validation" +) + +// CreateAzureAIAgent creates a new enhanced Azure AI agent +func CreateAzureAIAgent(llm *openai.LLM) *AzureAIAgent { + // 1. Smart Memory with conversation buffer + smartMemory := memory.NewConversationBuffer() + + // 2. Action Logger with comprehensive callbacks + actionLogger := logging.NewActionLogger() + + // 3. Enhanced Tools - just the essentials + tools := []tools.Tool{ + // Directory operations + mytools.DirectoryListTool{}, + mytools.CreateDirectoryTool{}, + mytools.DeleteDirectoryTool{}, + mytools.ChangeDirectoryTool{}, + mytools.CurrentDirectoryTool{}, + + // File operations + mytools.ReadFileTool{}, + mytools.WriteFileTool{}, + mytools.CopyFileTool{}, + mytools.MoveFileTool{}, + mytools.DeleteFileTool{}, + mytools.FileInfoTool{}, + + // Other tools + mytools.HTTPFetcherTool{}, + mytools.WeatherTool{}, + tools.Calculator{}, + } + + // 4. Create agent with default settings + agent := agents.NewConversationalAgent(llm, tools) + + // 5. 
Enhanced Executor with aggressive completion settings + executor := agents.NewExecutor(agent, + agents.WithMemory(smartMemory), + agents.WithMaxIterations(1000), // Much higher limit for complex multi-step processes + agents.WithReturnIntermediateSteps(), + ) + + return &AzureAIAgent{ + agent: agent, + executor: executor, + memory: smartMemory, + tools: tools, + intentValidator: validation.NewIntentValidator(llm), + actionLogger: actionLogger, + } +} diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/response.go b/cli/azd/extensions/azd.ai.start/internal/agent/response.go new file mode 100644 index 00000000000..5836fd13f50 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/agent/response.go @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package agent + +import ( + "azd.ai.start/internal/session" + "azd.ai.start/internal/validation" +) + +// AgentResponse represents the complete response from the agent +type AgentResponse struct { + Output string + Session *session.ActionSession + Validation *validation.ValidationResult +} + +// NewAgentResponse creates a new agent response +func NewAgentResponse(output string, sess *session.ActionSession, validationResult *validation.ValidationResult) *AgentResponse { + return &AgentResponse{ + Output: output, + Session: sess, + Validation: validationResult, + } +} diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/stats.go b/cli/azd/extensions/azd.ai.start/internal/agent/stats.go new file mode 100644 index 00000000000..d7649186ebc --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/agent/stats.go @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package agent + +import ( + "time" +) + +// SessionStats provides statistics about an agent session +type SessionStats struct { + TotalActions int + SuccessfulActions int + FailedActions int + TotalDuration time.Duration +} diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go new file mode 100644 index 00000000000..3ebc8c17eff --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go @@ -0,0 +1,100 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
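+// enhanced_integration.go: interactive loop for the enhanced agent. It reads user input from stdin, forwards each query to the agent, prints the final response, and handles the built-in 'clear', 'stats', 'exit', and 'quit' commands.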
+ +package cmd + +import ( + "bufio" + "context" + "fmt" + "os" + "strings" + + "github.com/tmc/langchaingo/llms/openai" + + "azd.ai.start/internal/agent" +) + +// RunEnhancedAzureAgent runs the enhanced Azure AI agent with full capabilities +func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) error { + // Create the enhanced agent + azureAgent := agent.CreateAzureAIAgent(llm) + + fmt.Println("🤖 Enhanced Azure AI Agent - Interactive Mode") + fmt.Println("Features: Action Tracking | Intent Validation | Smart Memory") + fmt.Println("═══════════════════════════════════════════════════════════") + + // Handle initial query if provided + var initialQuery string + if len(args) > 0 { + initialQuery = strings.Join(args, " ") + } + + scanner := bufio.NewScanner(os.Stdin) + + for { + var userInput string + + if initialQuery != "" { + userInput = initialQuery + initialQuery = "" // Clear after first use + fmt.Printf("💬 You: %s\n", userInput) + } else { + fmt.Print("\n💬 You: ") + if !scanner.Scan() { + break // EOF or error + } + userInput = strings.TrimSpace(scanner.Text()) + } + + // Check for exit commands + if userInput == "" { + continue + } + + if strings.ToLower(userInput) == "exit" || strings.ToLower(userInput) == "quit" { + fmt.Println("👋 Goodbye! Thanks for using the Enhanced Azure AI Agent!") + break + } + + // Special commands + if strings.ToLower(userInput) == "clear" { + err := azureAgent.ClearMemory(ctx) + if err != nil { + fmt.Printf("❌ Failed to clear memory: %s\n", err.Error()) + } else { + fmt.Println("🧹 Memory cleared!") + } + continue + } + + if strings.ToLower(userInput) == "stats" { + stats := azureAgent.GetSessionStats() + fmt.Printf("📊 Session Stats:\n") + fmt.Printf(" Total Actions: %d\n", stats.TotalActions) + fmt.Printf(" Successful: %d\n", stats.SuccessfulActions) + fmt.Printf(" Failed: %d\n", stats.FailedActions) + if stats.TotalDuration > 0 { + fmt.Printf(" Duration: %v\n", stats.TotalDuration) + } + continue + } + + // Process the query with the enhanced agent + fmt.Printf("\n🤖 Enhanced AI Agent:\n") + response, err := azureAgent.ProcessQuery(ctx, userInput) + if err != nil { + fmt.Printf("❌ Error: %v\n", err) + continue + } + + // Display the final response + fmt.Printf("\n💬 Final Response:\n%s\n", response.Output) + } + + if err := scanner.Err(); err != nil { + return fmt.Errorf("error reading input: %w", err) + } + + return nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go new file mode 100644 index 00000000000..a202faf1872 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go @@ -0,0 +1,97 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
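+// root.go: cobra entry point for the extension. It loads the 'ai.chat.model' section from azd user config, builds an Azure OpenAI client from that configuration, and hands control to RunEnhancedAzureAgent.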
+ +package cmd + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/azure/azure-dev/cli/azd/pkg/azdext" + "github.com/spf13/cobra" + "github.com/tmc/langchaingo/llms/openai" +) + +func NewRootCommand() *cobra.Command { + rootCmd := &cobra.Command{ + Use: "azd ai.chat [options]", + Short: "Enables an interactive AI agent through AZD", + SilenceUsage: true, + SilenceErrors: true, + CompletionOptions: cobra.CompletionOptions{ + DisableDefaultCmd: true, + }, + RunE: func(cmd *cobra.Command, args []string) error { + return runAIAgent(cmd.Context(), args) + }, + } + + return rootCmd +} + +type AiModelConfig struct { + Endpoint string `json:"endpoint"` + ApiKey string `json:"apiKey"` + DeploymentName string `json:"deploymentName"` +} + +// runAIAgent creates and runs the enhanced AI agent using LangChain Go +func runAIAgent(ctx context.Context, args []string) error { + // Create a new context that includes the AZD access token + ctx = azdext.WithAccessToken(ctx) + + // Create a new AZD client + azdClient, err := azdext.NewAzdClient() + if err != nil { + return fmt.Errorf("failed to create azd client: %w", err) + } + + defer azdClient.Close() + + getSectionResponse, err := azdClient. + UserConfig(). + GetSection(ctx, &azdext.GetUserConfigSectionRequest{ + Path: "ai.chat.model", + }) + if err != nil { + return fmt.Errorf("AI model configuration not found: %w", err) + } + + var aiConfig *AiModelConfig + if err := json.Unmarshal(getSectionResponse.Section, &aiConfig); err != nil { + return fmt.Errorf("failed to unmarshal AI model configuration: %w", err) + } + + // Azure OpenAI API version to use for chat completions + azureAPIVersion := "2024-02-15-preview" + + var llm *openai.LLM + + // Connect to the configured Azure OpenAI deployment + if aiConfig.Endpoint != "" && aiConfig.ApiKey != "" { + // Use Azure OpenAI with proper configuration + fmt.Printf("🔵 Trying Azure OpenAI with deployment: %s\n", aiConfig.DeploymentName) + + llm, err = openai.New( + openai.WithToken(aiConfig.ApiKey), + openai.WithBaseURL(aiConfig.Endpoint+"/"), + openai.WithAPIType(openai.APITypeAzure), + openai.WithAPIVersion(azureAPIVersion), + openai.WithModel(aiConfig.DeploymentName), + ) + + if err == nil { + fmt.Printf("✅ Successfully connected with deployment: %s\n", aiConfig.DeploymentName) + } else { + fmt.Printf("❌ Failed with deployment %s: %v\n", aiConfig.DeploymentName, err) + } + } + + if llm == nil { + return fmt.Errorf("failed to connect to the configured Azure OpenAI deployment") + } + + // Use the enhanced Azure AI agent with full capabilities + return RunEnhancedAzureAgent(ctx, llm, args) +} diff --git a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go new file mode 100644 index 00000000000..da3a48bd262 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go @@ -0,0 +1,134 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License.
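+// logger.go: callback-style action logger. It records each tool invocation as a session.ActionLog (input, output, success, duration) and echoes progress to stdout while the agent runs.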
+ +package logging + +import ( + "context" + "fmt" + "time" + + "github.com/tmc/langchaingo/schema" + + "azd.ai.start/internal/session" + "azd.ai.start/internal/utils" +) + +// ActionLogger tracks and logs all agent actions +type ActionLogger struct { + actions []session.ActionLog + current *session.ActionLog +} + +// NewActionLogger creates a new action logger +func NewActionLogger() *ActionLogger { + return &ActionLogger{ + actions: make([]session.ActionLog, 0), + } +} + +// HandleToolStart is called when a tool execution starts +func (al *ActionLogger) HandleToolStart(ctx context.Context, input string) { + al.current = &session.ActionLog{ + Timestamp: time.Now(), + Input: input, + } + fmt.Printf("🔧 Executing: %s\n", input) +} + +// HandleToolEnd is called when a tool execution ends +func (al *ActionLogger) HandleToolEnd(ctx context.Context, output string) { + if al.current != nil { + al.current.Output = output + al.current.Success = true + al.current.Duration = time.Since(al.current.Timestamp) + al.actions = append(al.actions, *al.current) + fmt.Printf("✅ Result: %s\n", utils.TruncateString(output, 100)) + } +} + +// HandleToolError is called when a tool execution fails +func (al *ActionLogger) HandleToolError(ctx context.Context, err error) { + if al.current != nil { + al.current.Output = err.Error() + al.current.Success = false + al.current.Duration = time.Since(al.current.Timestamp) + al.actions = append(al.actions, *al.current) + fmt.Printf("❌ Error: %s\n", err.Error()) + } +} + +// HandleAgentStart is called when agent planning starts +func (al *ActionLogger) HandleAgentStart(ctx context.Context, input map[string]any) { + if userInput, ok := input["input"].(string); ok { + fmt.Printf("🎯 Processing: %s\n", userInput) + } +} + +// HandleAgentEnd is called when agent planning ends +func (al *ActionLogger) HandleAgentEnd(ctx context.Context, output schema.AgentFinish) { + fmt.Printf("🏁 Agent completed planning\n") +} + +// HandleChainStart is called when chain execution starts +func (al *ActionLogger) HandleChainStart(ctx context.Context, input map[string]any) { + fmt.Printf("🔗 Starting chain execution\n") +} + +// HandleChainEnd is called when chain execution ends +func (al *ActionLogger) HandleChainEnd(ctx context.Context, output map[string]any) { + fmt.Printf("🔗 Chain execution completed\n") +} + +// HandleChainError is called when chain execution fails +func (al *ActionLogger) HandleChainError(ctx context.Context, err error) { + fmt.Printf("🔗 Chain execution failed: %s\n", err.Error()) +} + +// HandleLLMStart is called when LLM call starts +func (al *ActionLogger) HandleLLMStart(ctx context.Context, prompts []string) { + fmt.Printf("🤖 LLM thinking...\n") +} + +// HandleLLMEnd is called when LLM call ends +func (al *ActionLogger) HandleLLMEnd(ctx context.Context, result string) { + fmt.Printf("🤖 LLM response received\n") +} + +// HandleAgentAction is called when an agent action is planned +func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.AgentAction) { + al.current = &session.ActionLog{ + Timestamp: time.Now(), + Action: action.Tool, + Tool: action.Tool, + Input: action.ToolInput, + } + fmt.Printf("🎯 Agent planned action: %s with input: %s\n", action.Tool, action.ToolInput) +} + +// HandleAgentFinish is called when the agent finishes +func (al *ActionLogger) HandleAgentFinish(ctx context.Context, finish schema.AgentFinish) { + fmt.Printf("🏁 Agent finished with result\n") +} + +// HandleLLMError is called when LLM call fails +func (al *ActionLogger) 
HandleLLMError(ctx context.Context, err error) { + fmt.Printf("🤖 LLM error: %s\n", err.Error()) +} + +// HandleStreamingFunc handles streaming responses +func (al *ActionLogger) HandleStreamingFunc(ctx context.Context, chunk []byte) error { + // Optional: Handle streaming output + return nil +} + +// GetActions returns all logged actions +func (al *ActionLogger) GetActions() []session.ActionLog { + return al.actions +} + +// Clear clears all logged actions +func (al *ActionLogger) Clear() { + al.actions = al.actions[:0] + al.current = nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/session/action.go b/cli/azd/extensions/azd.ai.start/internal/session/action.go new file mode 100644 index 00000000000..1111c2c4fed --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/session/action.go @@ -0,0 +1,41 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package session + +import ( + "time" +) + +// ActionLog represents a single action taken by the agent +type ActionLog struct { + Timestamp time.Time + Action string + Tool string + Input string + Output string + Success bool + Duration time.Duration +} + +// NewActionLog creates a new action log +func NewActionLog(tool, input string) *ActionLog { + return &ActionLog{ + Timestamp: time.Now(), + Tool: tool, + Action: tool, + Input: input, + } +} + +// SetOutput sets the output and success status for the action +func (al *ActionLog) SetOutput(output string, success bool) { + al.Output = output + al.Success = success + al.Duration = time.Since(al.Timestamp) +} + +// SetDuration sets the duration for the action +func (al *ActionLog) SetDuration(duration time.Duration) { + al.Duration = duration +} diff --git a/cli/azd/extensions/azd.ai.start/internal/session/session.go b/cli/azd/extensions/azd.ai.start/internal/session/session.go new file mode 100644 index 00000000000..44f0156a912 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/session/session.go @@ -0,0 +1,48 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
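+// session.go: per-query session state, tracking the initial intent, the executed actions, the validation result, and start/end times.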
+ +package session + +import ( + "time" +) + +// ActionSession tracks the current conversation session and actions +type ActionSession struct { + InitialIntent string + PlannedActions []string + ExecutedActions []ActionLog + ValidationResult interface{} // Use interface{} to avoid circular dependency + StartTime time.Time + EndTime time.Time +} + +// NewActionSession creates a new action session +func NewActionSession(initialIntent string) *ActionSession { + return &ActionSession{ + InitialIntent: initialIntent, + PlannedActions: []string{}, + ExecutedActions: []ActionLog{}, + StartTime: time.Now(), + } +} + +// Start marks the session as started +func (as *ActionSession) Start() { + as.StartTime = time.Now() +} + +// End marks the session as ended +func (as *ActionSession) End() { + as.EndTime = time.Now() +} + +// AddExecutedAction adds an executed action to the session +func (as *ActionSession) AddExecutedAction(action ActionLog) { + as.ExecutedActions = append(as.ExecutedActions, action) +} + +// SetValidationResult sets the validation result for the session +func (as *ActionSession) SetValidationResult(result interface{}) { + as.ValidationResult = result +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go new file mode 100644 index 00000000000..ac64b00f30f --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go @@ -0,0 +1,51 @@ +package tools + +import ( + "context" + "fmt" + "os" + "path/filepath" +) + +// ChangeDirectoryTool implements the Tool interface for changing the current working directory +type ChangeDirectoryTool struct{} + +func (t ChangeDirectoryTool) Name() string { + return "change_directory" +} + +func (t ChangeDirectoryTool) Description() string { + return "Change the current working directory. Input: directory path (e.g., '../parent' or './subfolder' or absolute path)" +} + +func (t ChangeDirectoryTool) Call(ctx context.Context, input string) (string, error) { + if input == "" { + return "", fmt.Errorf("directory path is required") + } + + // Get current directory for reference + currentDir, _ := os.Getwd() + + // Convert to absolute path + absPath, err := filepath.Abs(input) + if err != nil { + return "", fmt.Errorf("failed to resolve path %s: %w", input, err) + } + + // Check if directory exists + info, err := os.Stat(absPath) + if err != nil { + return "", fmt.Errorf("directory %s does not exist: %w", absPath, err) + } + if !info.IsDir() { + return "", fmt.Errorf("%s is not a directory", absPath) + } + + // Change directory + err = os.Chdir(absPath) + if err != nil { + return "", fmt.Errorf("failed to change directory to %s: %w", absPath, err) + } + + return fmt.Sprintf("Changed directory from %s to %s", currentDir, absPath), nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go new file mode 100644 index 00000000000..310866dfe40 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go @@ -0,0 +1,71 @@ +package tools + +import ( + "context" + "fmt" + "io" + "os" + "strings" +) + +// CopyFileTool implements the Tool interface for copying files +type CopyFileTool struct{} + +func (t CopyFileTool) Name() string { + return "copy_file" +} + +func (t CopyFileTool) Description() string { + return "Copy a file to a new location. 
Input format: 'source|destination' (e.g., 'file.txt|backup.txt' or './docs/readme.md|./backup/readme.md')" +} + +func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { + if input == "" { + return "", fmt.Errorf("input is required in format 'source|destination'") + } + + // Split on first occurrence of '|' to separate source from destination + parts := strings.SplitN(input, "|", 2) + if len(parts) != 2 { + return "", fmt.Errorf("invalid input format. Use 'source|destination'") + } + + source := strings.TrimSpace(parts[0]) + destination := strings.TrimSpace(parts[1]) + + if source == "" || destination == "" { + return "", fmt.Errorf("both source and destination paths are required") + } + + // Check if source file exists + sourceInfo, err := os.Stat(source) + if err != nil { + return "", fmt.Errorf("source file %s does not exist: %w", source, err) + } + + if sourceInfo.IsDir() { + return "", fmt.Errorf("source %s is a directory. Use copy_directory for directories", source) + } + + // Open source file + sourceFile, err := os.Open(source) + if err != nil { + return "", fmt.Errorf("failed to open source file %s: %w", source, err) + } + defer sourceFile.Close() + + // Create destination file + destFile, err := os.Create(destination) + if err != nil { + return "", fmt.Errorf("failed to create destination file %s: %w", destination, err) + } + defer destFile.Close() + + // Copy contents + bytesWritten, err := io.Copy(destFile, sourceFile) + if err != nil { + return "", fmt.Errorf("failed to copy file: %w", err) + } + + return fmt.Sprintf("Successfully copied %s to %s (%d bytes)", source, destination, bytesWritten), nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go new file mode 100644 index 00000000000..992eb1842fb --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go @@ -0,0 +1,41 @@ +package tools + +import ( + "context" + "fmt" + "os" +) + +// CreateDirectoryTool implements the Tool interface for creating directories +type CreateDirectoryTool struct{} + +func (t CreateDirectoryTool) Name() string { + return "create_directory" +} + +func (t CreateDirectoryTool) Description() string { + return "Create a directory (and any necessary parent directories). 
Input: directory path (e.g., 'docs' or './src/components')" +} + +func (t CreateDirectoryTool) Call(ctx context.Context, input string) (string, error) { + if input == "" { + return "", fmt.Errorf("directory path is required") + } + + err := os.MkdirAll(input, 0755) + if err != nil { + return "", fmt.Errorf("failed to create directory %s: %w", input, err) + } + + // Check if directory already existed or was newly created + info, err := os.Stat(input) + if err != nil { + return "", fmt.Errorf("failed to verify directory creation: %w", err) + } + + if !info.IsDir() { + return "", fmt.Errorf("%s exists but is not a directory", input) + } + + return fmt.Sprintf("Successfully created directory: %s", input), nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go new file mode 100644 index 00000000000..d3cd1ff67b8 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go @@ -0,0 +1,39 @@ +package tools + +import ( + "context" + "fmt" + "os" + + "github.com/tmc/langchaingo/callbacks" +) + +// CurrentDirectoryTool implements the Tool interface for getting current directory +type CurrentDirectoryTool struct { + CallbacksHandler callbacks.Handler +} + +func (t CurrentDirectoryTool) Name() string { + return "cwd" +} + +func (t CurrentDirectoryTool) Description() string { + return "Get the current working directory to understand the project context. Input: use 'current' or '.' (any input works)" +} + +func (t CurrentDirectoryTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, input) + } + + dir, err := os.Getwd() + if err != nil { + return "", fmt.Errorf("failed to get current directory: %w", err) + } + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, dir) + } + + return dir, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go new file mode 100644 index 00000000000..e3231003825 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go @@ -0,0 +1,53 @@ +package tools + +import ( + "context" + "fmt" + "os" +) + +// DeleteDirectoryTool implements the Tool interface for deleting directories +type DeleteDirectoryTool struct{} + +func (t DeleteDirectoryTool) Name() string { + return "delete_directory" +} + +func (t DeleteDirectoryTool) Description() string { + return "Delete a directory and all its contents. Input: directory path (e.g., 'temp-folder' or './old-docs')" +} + +func (t DeleteDirectoryTool) Call(ctx context.Context, input string) (string, error) { + if input == "" { + return "", fmt.Errorf("directory path is required") + } + + // Check if directory exists + info, err := os.Stat(input) + if err != nil { + return "", fmt.Errorf("directory %s does not exist: %w", input, err) + } + + // Make sure it's a directory, not a file + if !info.IsDir() { + return "", fmt.Errorf("%s is a file, not a directory. 
Use delete_file to remove files", input) + } + + // Count contents before deletion for reporting + files, err := os.ReadDir(input) + fileCount := 0 + if err == nil { + fileCount = len(files) + } + + // Delete the directory and all contents + err = os.RemoveAll(input) + if err != nil { + return "", fmt.Errorf("failed to delete directory %s: %w", input, err) + } + + if fileCount > 0 { + return fmt.Sprintf("Successfully deleted directory: %s (contained %d items)", input, fileCount), nil + } + return fmt.Sprintf("Successfully deleted empty directory: %s", input), nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go new file mode 100644 index 00000000000..71a5b7618d1 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go @@ -0,0 +1,43 @@ +package tools + +import ( + "context" + "fmt" + "os" +) + +// DeleteFileTool implements the Tool interface for deleting files +type DeleteFileTool struct{} + +func (t DeleteFileTool) Name() string { + return "delete_file" +} + +func (t DeleteFileTool) Description() string { + return "Delete a file. Input: file path (e.g., 'temp.txt' or './docs/old-file.md')" +} + +func (t DeleteFileTool) Call(ctx context.Context, input string) (string, error) { + if input == "" { + return "", fmt.Errorf("file path is required") + } + + // Check if file exists and get info + info, err := os.Stat(input) + if err != nil { + return "", fmt.Errorf("file %s does not exist: %w", input, err) + } + + // Make sure it's a file, not a directory + if info.IsDir() { + return "", fmt.Errorf("%s is a directory, not a file. Use delete_directory to remove directories", input) + } + + // Delete the file + err = os.Remove(input) + if err != nil { + return "", fmt.Errorf("failed to delete file %s: %w", input, err) + } + + return fmt.Sprintf("Successfully deleted file: %s (%d bytes)", input, info.Size()), nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go b/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go new file mode 100644 index 00000000000..133d52c8cea --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go @@ -0,0 +1,92 @@ +package tools + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" +) + +// DirectoryListTool implements the Tool interface for listing directory contents +type DirectoryListTool struct{} + +func (t DirectoryListTool) Name() string { + return "list_directory" +} + +func (t DirectoryListTool) Description() string { + return "List files and folders in a directory. Input: directory path (use '.' for current directory)" +} + +func (t DirectoryListTool) Call(ctx context.Context, input string) (string, error) { + path := strings.TrimSpace(input) + if path == "" { + path = "." 
+ } + + // Get absolute path for clarity + absPath, err := filepath.Abs(path) + if err != nil { + return "", fmt.Errorf("failed to get absolute path for %s: %w", path, err) + } + + // Check if directory exists + info, err := os.Stat(absPath) + if err != nil { + return "", fmt.Errorf("failed to access %s: %w", absPath, err) + } + if !info.IsDir() { + return "", fmt.Errorf("%s is not a directory", absPath) + } + + // List directory contents + files, err := os.ReadDir(absPath) + if err != nil { + return "", fmt.Errorf("failed to read directory %s: %w", absPath, err) + } + + var result strings.Builder + result.WriteString(fmt.Sprintf("Contents of %s:\n", absPath)) + result.WriteString(fmt.Sprintf("Total items: %d\n\n", len(files))) + + // Separate directories and files + var dirs []string + var regularFiles []string + + for _, file := range files { + if file.IsDir() { + dirs = append(dirs, file.Name()+"/") + } else { + info, err := file.Info() + if err != nil { + regularFiles = append(regularFiles, file.Name()) + } else { + regularFiles = append(regularFiles, fmt.Sprintf("%s (%d bytes)", file.Name(), info.Size())) + } + } + } + + // Display directories first + if len(dirs) > 0 { + result.WriteString("Directories:\n") + for _, dir := range dirs { + result.WriteString(fmt.Sprintf(" 📁 %s\n", dir)) + } + result.WriteString("\n") + } + + // Then display files + if len(regularFiles) > 0 { + result.WriteString("Files:\n") + for _, file := range regularFiles { + result.WriteString(fmt.Sprintf(" 📄 %s\n", file)) + } + } + + if len(dirs) == 0 && len(regularFiles) == 0 { + result.WriteString("Directory is empty.\n") + } + + return result.String(), nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go b/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go new file mode 100644 index 00000000000..8951b35bc77 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go @@ -0,0 +1,67 @@ +package tools + +import ( + "context" + "fmt" + "os" + "strings" + "time" +) + +// FileInfoTool implements the Tool interface for getting detailed file information +type FileInfoTool struct{} + +func (t FileInfoTool) Name() string { + return "file_info" +} + +func (t FileInfoTool) Description() string { + return "Get detailed information about a file or directory. 
Input: file or directory path (e.g., 'README.md' or './docs')" +} + +func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { + if input == "" { + return "", fmt.Errorf("file or directory path is required") + } + + info, err := os.Stat(input) + if err != nil { + return "", fmt.Errorf("failed to get info for %s: %w", input, err) + } + + var result strings.Builder + result.WriteString(fmt.Sprintf("Information for: %s\n", input)) + result.WriteString("═══════════════════════════════════\n") + + // Type + if info.IsDir() { + result.WriteString("Type: Directory\n") + + // Count contents if it's a directory + if files, err := os.ReadDir(input); err == nil { + result.WriteString(fmt.Sprintf("Contents: %d items\n", len(files))) + } + } else { + result.WriteString("Type: File\n") + result.WriteString(fmt.Sprintf("Size: %d bytes\n", info.Size())) + } + + // Permissions + result.WriteString(fmt.Sprintf("Permissions: %s\n", info.Mode().String())) + + // Timestamps + result.WriteString(fmt.Sprintf("Modified: %s\n", info.ModTime().Format(time.RFC3339))) + + // Additional file details + if !info.IsDir() { + if info.Size() == 0 { + result.WriteString("Note: File is empty\n") + } else if info.Size() > 1024*1024 { + result.WriteString(fmt.Sprintf("Size (human): %.2f MB\n", float64(info.Size())/(1024*1024))) + } else if info.Size() > 1024 { + result.WriteString(fmt.Sprintf("Size (human): %.2f KB\n", float64(info.Size())/1024)) + } + } + + return result.String(), nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go b/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go new file mode 100644 index 00000000000..e87c7131b54 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go @@ -0,0 +1,43 @@ +package tools + +import ( + "context" + "fmt" + "io" + "net/http" +) + +// HTTPFetcherTool implements the Tool interface for making HTTP requests +type HTTPFetcherTool struct{} + +func (t HTTPFetcherTool) Name() string { + return "http_fetcher" +} + +func (t HTTPFetcherTool) Description() string { + return "Make HTTP GET requests to fetch content from URLs. Input should be a valid URL." +} + +func (t HTTPFetcherTool) Call(ctx context.Context, input string) (string, error) { + resp, err := http.Get(input) + if err != nil { + return "", fmt.Errorf("failed to fetch URL %s: %w", input, err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("HTTP request failed with status: %s", resp.Status) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("failed to read response body: %w", err) + } + + // Limit response size to avoid overwhelming the context + if len(body) > 5000 { + return fmt.Sprintf("Content (first 5000 chars): %s...\n[Content truncated]", string(body[:5000])), nil + } + + return string(body), nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go new file mode 100644 index 00000000000..f39d6ede31b --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go @@ -0,0 +1,62 @@ +package tools + +import ( + "context" + "fmt" + "os" + "strings" +) + +// MoveFileTool implements the Tool interface for moving/renaming files +type MoveFileTool struct{} + +func (t MoveFileTool) Name() string { + return "move_file" +} + +func (t MoveFileTool) Description() string { + return "Move or rename a file. 
Input format: 'source|destination' (e.g., 'old.txt|new.txt' or './file.txt|./folder/file.txt')"
+}
+
+func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) {
+	if input == "" {
+		return "", fmt.Errorf("input is required in format 'source|destination'")
+	}
+
+	// Split on first occurrence of '|' to separate source from destination
+	parts := strings.SplitN(input, "|", 2)
+	if len(parts) != 2 {
+		return "", fmt.Errorf("invalid input format. Use 'source|destination'")
+	}
+
+	source := strings.TrimSpace(parts[0])
+	destination := strings.TrimSpace(parts[1])
+
+	if source == "" || destination == "" {
+		return "", fmt.Errorf("both source and destination paths are required")
+	}
+
+	// Check if source exists
+	sourceInfo, err := os.Stat(source)
+	if err != nil {
+		return "", fmt.Errorf("source %s does not exist: %w", source, err)
+	}
+
+	// Check if destination already exists
+	if _, err := os.Stat(destination); err == nil {
+		return "", fmt.Errorf("destination %s already exists", destination)
+	}
+
+	// Move/rename the file
+	err = os.Rename(source, destination)
+	if err != nil {
+		return "", fmt.Errorf("failed to move %s to %s: %w", source, destination, err)
+	}
+
+	fileType := "file"
+	if sourceInfo.IsDir() {
+		fileType = "directory"
+	}
+
+	return fmt.Sprintf("Successfully moved %s from %s to %s", fileType, source, destination), nil
+}
diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go
new file mode 100644
index 00000000000..1aca867c288
--- /dev/null
+++ b/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go
@@ -0,0 +1,37 @@
+package tools
+
+import (
+	"context"
+	"fmt"
+	"os"
+)
+
+// ReadFileTool implements the Tool interface for reading file contents
+type ReadFileTool struct{}
+
+func (t ReadFileTool) Name() string {
+	return "read_file"
+}
+
+func (t ReadFileTool) Description() string {
+	return "Read the contents of a file. Input: file path (e.g., 'README.md' or './docs/setup.md')"
+}
+
+func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) {
+	if input == "" {
+		return "", fmt.Errorf("file path is required")
+	}
+
+	content, err := os.ReadFile(input)
+	if err != nil {
+		return "", fmt.Errorf("failed to read file %s: %w", input, err)
+	}
+
+	// Limit file size to avoid overwhelming context
+	if len(content) > 5000 {
+		return fmt.Sprintf("File: %s (first 5000 chars)\n%s...\n[File truncated - total size: %d bytes]",
+			input, string(content[:5000]), len(content)), nil
+	}
+
+	return fmt.Sprintf("File: %s\n%s", input, string(content)), nil
+}
diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/weather.go b/cli/azd/extensions/azd.ai.start/internal/tools/weather.go
new file mode 100644
index 00000000000..a88c14f74e0
--- /dev/null
+++ b/cli/azd/extensions/azd.ai.start/internal/tools/weather.go
@@ -0,0 +1,105 @@
+package tools
+
+import (
+	"context"
+	"fmt"
+	"math/rand"
+	"strings"
+	"time"
+)
+
+// WeatherTool implements the Tool interface for getting weather information
+type WeatherTool struct{}
+
+func (t WeatherTool) Name() string {
+	return "weather"
+}
+
+func (t WeatherTool) Description() string {
+	return "Get current weather conditions for a city. 
Input: city name (e.g., 'San Diego' or 'New York')" +} + +func (t WeatherTool) Call(ctx context.Context, input string) (string, error) { + city := strings.TrimSpace(input) + if city == "" { + return "", fmt.Errorf("city name is required") + } + + // Initialize random seed based on current time + rand.Seed(time.Now().UnixNano()) + + // Generate more realistic temperature based on city + var temperature int + cityLower := strings.ToLower(city) + + // Assign temperature ranges based on typical climate + if strings.Contains(cityLower, "san diego") || strings.Contains(cityLower, "los angeles") || + strings.Contains(cityLower, "miami") || strings.Contains(cityLower, "phoenix") { + // Warm climate cities: 65-85°F + temperature = rand.Intn(21) + 65 + } else if strings.Contains(cityLower, "seattle") || strings.Contains(cityLower, "portland") || + strings.Contains(cityLower, "chicago") || strings.Contains(cityLower, "new york") { + // Moderate climate cities: 45-75°F + temperature = rand.Intn(31) + 45 + } else if strings.Contains(cityLower, "alaska") || strings.Contains(cityLower, "minneapolis") || + strings.Contains(cityLower, "denver") { + // Cold climate cities: 25-55°F + temperature = rand.Intn(31) + 25 + } else { + // Default range for unknown cities: 50-80°F + temperature = rand.Intn(31) + 50 + } + + // Weather conditions with probabilities + conditions := []string{ + "sunny", "sunny", "sunny", "sunny", // 40% chance + "partly cloudy", "partly cloudy", "partly cloudy", // 30% chance + "cloudy", "cloudy", // 20% chance + "rainy", // 10% chance + } + condition := conditions[rand.Intn(len(conditions))] + + // Add some variety to the response format + responseTemplates := []string{ + "It's %d°F and %s in %s", + "Current weather in %s: %d°F and %s", + "The weather in %s is %d°F with %s skies", + "%s is experiencing %s weather at %d°F", + } + + template := responseTemplates[rand.Intn(len(responseTemplates))] + + var response string + if strings.Contains(template, "It's %d°F and %s in %s") { + response = fmt.Sprintf(template, temperature, condition, city) + } else if strings.Contains(template, "Current weather in %s: %d°F and %s") { + response = fmt.Sprintf(template, city, temperature, condition) + } else if strings.Contains(template, "The weather in %s is %d°F with %s skies") { + response = fmt.Sprintf(template, city, temperature, condition) + } else { + // "%s is experiencing %s weather at %d°F" + response = fmt.Sprintf(template, city, condition, temperature) + } + + // Add some additional details occasionally + if rand.Intn(3) == 0 { + extras := []string{ + "Light breeze from the west.", + "Humidity is comfortable.", + "Perfect day to be outside!", + "Visibility is excellent.", + "No precipitation expected.", + } + if condition == "rainy" { + extras = []string{ + "Light rain expected to continue.", + "Bring an umbrella!", + "Rain should clear up by evening.", + } + } + extra := extras[rand.Intn(len(extras))] + response += ". 
" + extra + } + + return response, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go new file mode 100644 index 00000000000..1020e9c7283 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go @@ -0,0 +1,101 @@ +package tools + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" +) + +// WriteFileTool implements the Tool interface for writing file contents +type WriteFileTool struct{} + +func (t WriteFileTool) Name() string { + return "write_file" +} + +func (t WriteFileTool) Description() string { + return `Write content to a file. Input format: 'filepath|content' + +For multi-line content, use literal \n for newlines: +- Single line: 'test.txt|Hello World' +- Multi-line: 'script.bicep|param name string\nparam location string\nresource myResource...' + +Example Bicep file: +'main.bicep|param name string\nparam location string\n\nresource appService ''Microsoft.Web/sites@2022-03-01'' = {\n name: name\n location: location\n kind: ''app''\n properties: {\n serverFarmId: serverFarmId\n }\n}\n\noutput appServiceId string = appService.id' + +The tool will convert \n to actual newlines automatically.` +} + +func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { + if input == "" { + return "", fmt.Errorf("input is required in format 'filepath|content'") + } + + // Split on first occurrence of '|' to separate path from content + parts := strings.SplitN(input, "|", 2) + if len(parts) != 2 { + return "", fmt.Errorf("invalid input format. Use 'filepath|content'") + } + + filePath := strings.TrimSpace(parts[0]) + content := parts[1] + + // Convert literal \n sequences to actual newlines (for agents that escape newlines) + content = strings.ReplaceAll(content, "\\n", "\n") + content = strings.ReplaceAll(content, "\\t", "\t") + + // Clean up any trailing quotes that might have been added during formatting + content = strings.TrimSuffix(content, "'") + content = strings.TrimSuffix(content, "\")") + + // Clean up any quotes around the filepath (from agent formatting) + filePath = strings.Trim(filePath, "\"'") + + if filePath == "" { + return "", fmt.Errorf("filepath cannot be empty") + } + + // Ensure the directory exists + dir := filepath.Dir(filePath) + if dir != "." && dir != "" { + if err := os.MkdirAll(dir, 0755); err != nil { + return "", fmt.Errorf("failed to create directory %s: %w", dir, err) + } + } + + // Write the file + err := os.WriteFile(filePath, []byte(content), 0644) + if err != nil { + return "", fmt.Errorf("failed to write file %s: %w", filePath, err) + } + + // Verify the file was written correctly + writtenContent, err := os.ReadFile(filePath) + if err != nil { + return "", fmt.Errorf("failed to verify written file %s: %w", filePath, err) + } + + lineCount := strings.Count(string(writtenContent), "\n") + 1 + if content != "" && !strings.HasSuffix(content, "\n") { + lineCount = strings.Count(content, "\n") + 1 + } + + return fmt.Sprintf("Successfully wrote %d bytes (%d lines) to %s. Content preview:\n%s", + len(content), lineCount, filePath, getContentPreview(content)), nil +} + +// getContentPreview returns a preview of the content for verification +func getContentPreview(content string) string { + lines := strings.Split(content, "\n") + if len(lines) <= 5 { + return content + } + + preview := strings.Join(lines[:3], "\n") + preview += fmt.Sprintf("\n... 
(%d more lines) ...\n", len(lines)-5) + preview += strings.Join(lines[len(lines)-2:], "\n") + + return preview +} diff --git a/cli/azd/extensions/azd.ai.start/internal/utils/helpers.go b/cli/azd/extensions/azd.ai.start/internal/utils/helpers.go new file mode 100644 index 00000000000..130734eff77 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/utils/helpers.go @@ -0,0 +1,41 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package utils + +import ( + "fmt" + "strings" + "time" + + "azd.ai.start/internal/session" +) + +// TruncateString truncates a string to a maximum length +func TruncateString(s string, maxLen int) string { + if len(s) <= maxLen { + return s + } + return s[:maxLen] + "..." +} + +// FormatActionsForValidation formats actions for the validation prompt +func FormatActionsForValidation(actions []session.ActionLog) string { + if len(actions) == 0 { + return "No actions executed" + } + + var formatted strings.Builder + for i, action := range actions { + status := "SUCCESS" + if !action.Success { + status = "FAILED" + } + formatted.WriteString(fmt.Sprintf("%d. Tool: %s | Input: %s | Status: %s | Duration: %v\n", + i+1, action.Tool, TruncateString(action.Input, 100), status, action.Duration.Round(time.Millisecond))) + if action.Output != "" { + formatted.WriteString(fmt.Sprintf(" Output: %s\n", TruncateString(action.Output, 200))) + } + } + return formatted.String() +} diff --git a/cli/azd/extensions/azd.ai.start/internal/validation/parser.go b/cli/azd/extensions/azd.ai.start/internal/validation/parser.go new file mode 100644 index 00000000000..2f814546798 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/validation/parser.go @@ -0,0 +1,93 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+
+package validation
+
+import (
+	"strconv"
+	"strings"
+)
+
+// ParseValidationResult parses the validation result from LLM response
+func ParseValidationResult(response string) *ValidationResult {
+	result := &ValidationResult{
+		Status:      ValidationError,
+		Explanation: "Failed to parse validation response",
+		Confidence:  0.0,
+	}
+
+	lines := strings.Split(response, "\n")
+	for _, line := range lines {
+		line = strings.TrimSpace(line)
+
+		if strings.HasPrefix(line, "STATUS:") {
+			statusStr := strings.TrimSpace(strings.TrimPrefix(line, "STATUS:"))
+			switch strings.ToUpper(statusStr) {
+			case "COMPLETE":
+				result.Status = ValidationComplete
+			case "PARTIAL":
+				result.Status = ValidationPartial
+			case "INCOMPLETE":
+				result.Status = ValidationIncomplete
+			case "ERROR":
+				result.Status = ValidationError
+			}
+		} else if strings.HasPrefix(line, "EXPLANATION:") {
+			result.Explanation = strings.TrimSpace(strings.TrimPrefix(line, "EXPLANATION:"))
+		} else if strings.HasPrefix(line, "CONFIDENCE:") {
+			confidenceStr := strings.TrimSpace(strings.TrimPrefix(line, "CONFIDENCE:"))
+			if conf, err := parseFloat(confidenceStr); err == nil {
+				result.Confidence = conf
+			}
+		}
+	}
+
+	// If we couldn't parse the status, try to infer it from the response content.
+	// Check INCOMPLETE before COMPLETE because "INCOMPLETE" contains "COMPLETE" as a substring.
+	if result.Status == ValidationError {
+		responseUpper := strings.ToUpper(response)
+		if strings.Contains(responseUpper, "INCOMPLETE") {
+			result.Status = ValidationIncomplete
+		} else if strings.Contains(responseUpper, "PARTIAL") {
+			result.Status = ValidationPartial
+		} else if strings.Contains(responseUpper, "COMPLETE") {
+			result.Status = ValidationComplete
+		}
+		result.Explanation = response
+		result.Confidence = 0.7
+	}
+
+	return result
+}
+
+// parseFloat safely parses a confidence value from a string
+func parseFloat(s string) (float64, error) {
+	conf, err := strconv.ParseFloat(strings.TrimSpace(s), 64)
+	if err != nil {
+		return 0.5, nil // Default confidence when the value cannot be parsed
+	}
+	// Clamp to the valid 0.0-1.0 confidence range
+	if conf < 0 {
+		conf = 0
+	} else if conf > 1 {
+		conf = 1
+	}
+	return conf, nil
+}
diff --git a/cli/azd/extensions/azd.ai.start/internal/validation/types.go b/cli/azd/extensions/azd.ai.start/internal/validation/types.go
new file mode 100644
index 00000000000..4b0ebcd25bc
--- /dev/null
+++ b/cli/azd/extensions/azd.ai.start/internal/validation/types.go
@@ -0,0 +1,21 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+ +package validation + +// ValidationResult represents the result of intent validation +type ValidationResult struct { + Status ValidationStatus + Explanation string + Confidence float64 +} + +// ValidationStatus represents the completion status of the original intent +type ValidationStatus string + +const ( + ValidationComplete ValidationStatus = "COMPLETE" + ValidationPartial ValidationStatus = "PARTIAL" + ValidationIncomplete ValidationStatus = "INCOMPLETE" + ValidationError ValidationStatus = "ERROR" +) diff --git a/cli/azd/extensions/azd.ai.start/internal/validation/validator.go b/cli/azd/extensions/azd.ai.start/internal/validation/validator.go new file mode 100644 index 00000000000..f9ae0311062 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/validation/validator.go @@ -0,0 +1,68 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package validation + +import ( + "context" + "fmt" + + "github.com/tmc/langchaingo/llms" + + "azd.ai.start/internal/session" + "azd.ai.start/internal/utils" +) + +// IntentValidator validates whether the original intent was fulfilled +type IntentValidator struct { + llm llms.Model +} + +// NewIntentValidator creates a new intent validator +func NewIntentValidator(llm llms.Model) *IntentValidator { + return &IntentValidator{llm: llm} +} + +// ValidateCompletion validates whether the original intent was fulfilled +func (iv *IntentValidator) ValidateCompletion( + originalIntent string, + executedActions []session.ActionLog, +) *ValidationResult { + if len(executedActions) == 0 { + return &ValidationResult{ + Status: ValidationIncomplete, + Explanation: "No actions were executed", + Confidence: 1.0, + } + } + + validationPrompt := fmt.Sprintf(` +Original User Intent: %s + +Actions Executed: +%s + +Based on the original intent and the actions that were executed, evaluate whether the user's intent was fulfilled. + +Respond with one of: COMPLETE, PARTIAL, INCOMPLETE, ERROR + +Then provide a brief explanation of your assessment. + +Format your response as: +STATUS: [COMPLETE/PARTIAL/INCOMPLETE/ERROR] +EXPLANATION: [Your explanation] +CONFIDENCE: [0.0-1.0]`, + originalIntent, + utils.FormatActionsForValidation(executedActions)) + + result, err := iv.llm.Call(context.Background(), validationPrompt) + if err != nil { + return &ValidationResult{ + Status: ValidationError, + Explanation: fmt.Sprintf("Validation failed: %s", err.Error()), + Confidence: 0.0, + } + } + + return ParseValidationResult(result) +} diff --git a/cli/azd/extensions/azd.ai.start/main.go b/cli/azd/extensions/azd.ai.start/main.go new file mode 100644 index 00000000000..026e7c944e7 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/main.go @@ -0,0 +1,30 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package main + +import ( + "context" + "os" + + "azd.ai.start/internal/cmd" + "github.com/fatih/color" +) + +func init() { + forceColorVal, has := os.LookupEnv("FORCE_COLOR") + if has && forceColorVal == "1" { + color.NoColor = false + } +} + +func main() { + // Execute the root command + ctx := context.Background() + rootCmd := cmd.NewRootCommand() + + if err := rootCmd.ExecuteContext(ctx); err != nil { + color.Red("Error: %v", err) + os.Exit(1) + } +} From ee6c099b0f192f45f1fb7a76101307c7b31b18dd Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Mon, 28 Jul 2025 18:04:42 -0700 Subject: [PATCH 018/116] azd agent --- cli/azd/extensions/azd.ai.start/go.mod | 10 +- cli/azd/extensions/azd.ai.start/go.sum | 4 + .../azd.ai.start/internal/agent/agent.go | 436 ++++-------------- .../azd.ai.start/internal/agent/factory.go | 66 --- .../agent/prompts/default_agent_prefix.txt | 19 + .../azd.ai.start/internal/agent/response.go | 25 - .../azd.ai.start/internal/agent/stats.go | 16 - .../internal/cmd/enhanced_integration.go | 29 +- .../azd.ai.start/internal/cmd/root.go | 6 + .../azd.ai.start/internal/logging/logger.go | 164 ++++--- .../internal/tools/change_directory.go | 50 +- .../azd.ai.start/internal/tools/copy_file.go | 65 ++- .../internal/tools/create_directory.go | 44 +- .../internal/tools/delete_directory.go | 48 +- .../internal/tools/delete_file.go | 41 +- .../internal/tools/directory_list.go | 32 +- .../azd.ai.start/internal/tools/file_info.go | 63 ++- .../internal/tools/http_fetcher.go | 39 +- .../azd.ai.start/internal/tools/move_file.go | 53 ++- .../azd.ai.start/internal/tools/read_file.go | 35 +- .../azd.ai.start/internal/tools/weather.go | 20 +- .../azd.ai.start/internal/tools/write_file.go | 97 ++-- .../internal/validation/validator.go | 68 --- 23 files changed, 687 insertions(+), 743 deletions(-) delete mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/factory.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt delete mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/response.go delete mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/stats.go delete mode 100644 cli/azd/extensions/azd.ai.start/internal/validation/validator.go diff --git a/cli/azd/extensions/azd.ai.start/go.mod b/cli/azd/extensions/azd.ai.start/go.mod index c840c72262d..2a66f9854b7 100644 --- a/cli/azd/extensions/azd.ai.start/go.mod +++ b/cli/azd/extensions/azd.ai.start/go.mod @@ -3,6 +3,7 @@ module azd.ai.start go 1.24.1 require ( + github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c github.com/fatih/color v1.18.0 github.com/spf13/cobra v1.9.1 github.com/tmc/langchaingo v0.1.13 @@ -12,7 +13,6 @@ require ( github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.3.1 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect - github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c // indirect github.com/dlclark/regexp2 v1.10.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/google/go-cmp v0.7.0 // indirect @@ -24,7 +24,6 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/mitchellh/copystructure v1.0.0 // indirect github.com/mitchellh/reflectwalk v1.0.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -38,20 +37,13 @@ require ( 
github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/cast v1.3.1 // indirect github.com/spf13/pflag v1.0.6 // indirect - github.com/stretchr/testify v1.10.0 // indirect github.com/yargevad/filepathx v1.0.0 // indirect - go.opentelemetry.io/otel v1.35.0 // indirect - go.opentelemetry.io/otel/metric v1.35.0 // indirect - go.opentelemetry.io/otel/trace v1.35.0 // indirect go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 // indirect golang.org/x/crypto v0.37.0 // indirect golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect golang.org/x/net v0.39.0 // indirect - golang.org/x/oauth2 v0.25.0 // indirect - golang.org/x/sync v0.13.0 // indirect golang.org/x/sys v0.32.0 // indirect golang.org/x/text v0.24.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250407143221-ac9807e6c755 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250407143221-ac9807e6c755 // indirect google.golang.org/grpc v1.71.1 // indirect google.golang.org/protobuf v1.36.6 // indirect diff --git a/cli/azd/extensions/azd.ai.start/go.sum b/cli/azd/extensions/azd.ai.start/go.sum index bc863f91c5c..3faa8263d70 100644 --- a/cli/azd/extensions/azd.ai.start/go.sum +++ b/cli/azd/extensions/azd.ai.start/go.sum @@ -228,6 +228,10 @@ go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= +go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 h1:Ss6D3hLXTM0KobyBYEAygXzFfGcjnmfEJOBgSbemCtg= diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go index ba8c284aa52..e227fe85ae1 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go @@ -5,358 +5,124 @@ package agent import ( "context" + _ "embed" "fmt" - "time" "github.com/tmc/langchaingo/agents" + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/chains" + "github.com/tmc/langchaingo/llms/openai" + "github.com/tmc/langchaingo/memory" "github.com/tmc/langchaingo/schema" "github.com/tmc/langchaingo/tools" - "azd.ai.start/internal/logging" "azd.ai.start/internal/session" - "azd.ai.start/internal/utils" - "azd.ai.start/internal/validation" + mytools "azd.ai.start/internal/tools" ) -// AzureAIAgent represents an enhanced Azure AI agent with action tracking and intent validation -type AzureAIAgent struct { - agent *agents.ConversationalAgent - executor *agents.Executor - memory schema.Memory - tools []tools.Tool - intentValidator *validation.IntentValidator - actionLogger *logging.ActionLogger - currentSession *session.ActionSession -} - -// ProcessQuery processes a user query with full action tracking and validation -func (aai *AzureAIAgent) 
ProcessQuery(ctx context.Context, userInput string) (*AgentResponse, error) { - // Start new action session - sess := session.NewActionSession(userInput) - aai.currentSession = sess - - fmt.Printf("\n🎯 Intent: %s\n", userInput) - fmt.Printf("📋 Planning and executing actions...\n") - fmt.Println("═══════════════════════════════════════") - - // Clear previous actions - aai.actionLogger.Clear() - - // Enhanced user input with explicit completion requirements - enhancedInput := fmt.Sprintf(`%s - -IMPORTANT: You must complete this task successfully. Do not stop until: -1. All required actions have been executed -2. Any files that need to be created are actually saved -3. You verify the results of your actions -4. The task is fully accomplished - -If a tool fails, analyze why and try again with corrections. If you need to create files, use the write_file tool with the complete content.`, userInput) - - // Execute with enhanced input - result, err := aai.executor.Call(ctx, map[string]any{ - "input": enhancedInput, - }) - - if err != nil { - sess.End() - fmt.Printf("❌ Execution failed: %s\n", err.Error()) - return nil, err - } - - // Get executed actions from logger and intermediate steps - executedActions := aai.actionLogger.GetActions() - for _, action := range executedActions { - sess.AddExecutedAction(action) - } - - // If no actions in logger but we have intermediate steps, extract them - if len(sess.ExecutedActions) == 0 { - if steps, ok := result["intermediateSteps"].([]schema.AgentStep); ok && len(steps) > 0 { - for _, step := range steps { - actionLog := session.ActionLog{ - Timestamp: time.Now(), - Action: step.Action.Tool, - Tool: step.Action.Tool, - Input: step.Action.ToolInput, - Output: step.Observation, - Success: true, - Duration: time.Millisecond * 100, // Approximate - } - sess.AddExecutedAction(actionLog) - } - } - } - - // Check if any actions were taken - if not, this was likely conversational - if len(sess.ExecutedActions) == 0 { - fmt.Printf("💬 No tool actions needed - appears to be conversational\n") +//go:embed prompts/default_agent_prefix.txt +var _defaultAgentPrefix string - sess.End() - validationResult := &validation.ValidationResult{ - Status: validation.ValidationComplete, - Explanation: "Conversational response - no actions required", - Confidence: 1.0, - } - sess.SetValidationResult(validationResult) - - // Display simple summary for conversational responses - fmt.Println("\n📊 Session Summary") - fmt.Println("═══════════════════════════════════════") - duration := sess.EndTime.Sub(sess.StartTime) - fmt.Printf("⏱️ Duration: %v\n", duration.Round(time.Millisecond)) - fmt.Println("\n💬 Conversational response - no tool actions needed") - fmt.Printf("🎯 Intent Status: %s (%.1f%% confidence)\n", validationResult.Status, validationResult.Confidence*100) - fmt.Println("═══════════════════════════════════════") - - return NewAgentResponse(result["output"].(string), sess, validationResult), nil - } - - // Actions were taken, so validate and potentially retry - var lastResult = result - var lastValidation *validation.ValidationResult - maxAttempts := 3 // Maximum retry attempts for incomplete tasks - - for attempt := 1; attempt <= maxAttempts; attempt++ { - // Validate intent completion with enhanced validation - fmt.Printf("\n🔍 Validating completion...\n") - validationResult := aai.intentValidator.ValidateCompletion( - userInput, - sess.ExecutedActions, - ) - lastValidation = validationResult - sess.SetValidationResult(validationResult) - - // Check if task is complete - if 
validationResult.Status == validation.ValidationComplete { - fmt.Printf("✅ Task completed successfully!\n") - break - } - - // If task is incomplete and we have more attempts, retry - if attempt < maxAttempts { - if validationResult.Status == validation.ValidationIncomplete || validationResult.Status == validation.ValidationPartial { - fmt.Printf("⚠️ Task incomplete (attempt %d/%d): %s\n", attempt, maxAttempts, validationResult.Explanation) - fmt.Printf("🔄 Analyzing what's missing and taking corrective action...\n") - - // Clear previous actions for retry - aai.actionLogger.Clear() - - // Enhanced retry with feedback about what was incomplete - retryInput := fmt.Sprintf(`%s - -IMPORTANT: You must complete this task successfully. Do not stop until: -1. All required actions have been executed -2. Any files that need to be created are actually saved -3. You verify the results of your actions -4. The task is fully accomplished - -PREVIOUS ATTEMPT ANALYSIS: The previous attempt was marked as %s. -Reason: %s - -Please analyze what was missing or incomplete and take the necessary additional actions to fully complete the task.`, - userInput, validationResult.Status, validationResult.Explanation) - - // Execute retry - retryResult, err := aai.executor.Call(ctx, map[string]any{ - "input": retryInput, - }) - - if err != nil { - fmt.Printf("❌ Retry attempt %d failed: %s\n", attempt+1, err.Error()) - if attempt == maxAttempts-1 { - sess.End() - return nil, err - } - continue - } - - lastResult = retryResult - - // Get new actions from this retry - retryActions := aai.actionLogger.GetActions() - if len(retryActions) == 0 { - if steps, ok := retryResult["intermediateSteps"].([]schema.AgentStep); ok && len(steps) > 0 { - for _, step := range steps { - actionLog := session.ActionLog{ - Timestamp: time.Now(), - Action: step.Action.Tool, - Tool: step.Action.Tool, - Input: step.Action.ToolInput, - Output: step.Observation, - Success: true, - Duration: time.Millisecond * 100, - } - retryActions = append(retryActions, actionLog) - } - } - } - - // Accumulate actions from retry - for _, action := range retryActions { - sess.AddExecutedAction(action) - } - continue - } - } else { - // This was the last attempt and still incomplete - fmt.Printf("⚠️ Task still incomplete after %d attempts: %s\n", maxAttempts, validationResult.Explanation) - fmt.Printf("💡 Consider:\n") - fmt.Printf(" - Breaking the task into smaller, more specific steps\n") - fmt.Printf(" - Checking if all required files were actually created\n") - fmt.Printf(" - Verifying tool outputs were successful\n") - } - } - - sess.End() - - // Display comprehensive summary - aai.displayCompleteSummary(sess, lastResult) - - return NewAgentResponse(lastResult["output"].(string), sess, lastValidation), nil -} - -// ProcessQueryWithRetry processes a query with automatic retry on failure -func (aai *AzureAIAgent) ProcessQueryWithRetry(ctx context.Context, userInput string, maxRetries int) (*AgentResponse, error) { - var lastErr error - var lastResponse *AgentResponse - - for attempt := 1; attempt <= maxRetries; attempt++ { - fmt.Printf("\n🔄 Attempt %d/%d\n", attempt, maxRetries) - - response, err := aai.ProcessQuery(ctx, userInput) - if err != nil { - lastErr = err - fmt.Printf("❌ Attempt %d failed: %s\n", attempt, err.Error()) - continue - } - - lastResponse = response - - // Check if task completed successfully - if response.Validation.Status == validation.ValidationComplete { - fmt.Printf("✅ Task completed successfully on attempt %d\n", attempt) - return 
response, nil - } - - if response.Validation.Status == validation.ValidationPartial { - fmt.Printf("⚠️ Partial completion on attempt %d: %s\n", attempt, response.Validation.Explanation) - } else { - fmt.Printf("❌ Task incomplete on attempt %d: %s\n", attempt, response.Validation.Explanation) - } - - // Clear memory for fresh retry - aai.ClearMemory(ctx) - } - - if lastResponse != nil { - return lastResponse, nil - } - - return nil, fmt.Errorf("all %d attempts failed, last error: %w", maxRetries, lastErr) +// AzureAIAgent represents an enhanced Azure AI agent with action tracking, intent validation, and conversation memory +type AzureAIAgent struct { + agent *agents.ConversationalAgent + executor *agents.Executor + memory schema.Memory // Maintains conversation history for context-aware responses + tools []tools.Tool + actionLogger callbacks.Handler + currentSession *session.ActionSession } -// GetSessionStats returns statistics about the current session -func (aai *AzureAIAgent) GetSessionStats() *SessionStats { - if aai.currentSession == nil { - return &SessionStats{} - } - - stats := &SessionStats{ - TotalActions: len(aai.currentSession.ExecutedActions), - SuccessfulActions: 0, - FailedActions: 0, - TotalDuration: aai.currentSession.EndTime.Sub(aai.currentSession.StartTime), +func NewAzureAIAgent(llm *openai.LLM) *AzureAIAgent { + smartMemory := memory.NewConversationBuffer( + memory.WithInputKey("input"), + memory.WithOutputKey("output"), + memory.WithHumanPrefix("Human"), + memory.WithAIPrefix("AI"), + ) + + tools := []tools.Tool{ + // Directory operations + mytools.DirectoryListTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.CreateDirectoryTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.DeleteDirectoryTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.ChangeDirectoryTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.CurrentDirectoryTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + + // File operations + mytools.ReadFileTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.WriteFileTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.CopyFileTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.MoveFileTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.DeleteFileTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.FileInfoTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + + // Other tools + mytools.HTTPFetcherTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.WeatherTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + tools.Calculator{ + CallbacksHandler: llm.CallbacksHandler, + }, } - for _, action := range aai.currentSession.ExecutedActions { - if action.Success { - stats.SuccessfulActions++ - } else { - stats.FailedActions++ - } + // 4. Create agent with memory directly integrated + agent := agents.NewConversationalAgent(llm, tools, + agents.WithPromptPrefix(_defaultAgentPrefix), + agents.WithMemory(smartMemory), + agents.WithCallbacksHandler(llm.CallbacksHandler), + ) + + // 5. 
Create executor without separate memory configuration since agent already has it + executor := agents.NewExecutor(agent, + agents.WithMaxIterations(1000), // Much higher limit for complex multi-step processes + agents.WithMemory(smartMemory), + agents.WithCallbacksHandler(llm.CallbacksHandler), + agents.WithReturnIntermediateSteps(), + ) + + return &AzureAIAgent{ + agent: agent, + executor: executor, + memory: smartMemory, + tools: tools, + actionLogger: llm.CallbacksHandler, } - - return stats -} - -// GetMemoryContent returns the current memory content for debugging -func (aai *AzureAIAgent) GetMemoryContent(ctx context.Context) (map[string]any, error) { - return aai.memory.LoadMemoryVariables(ctx, map[string]any{}) -} - -// ClearMemory clears the conversation memory -func (aai *AzureAIAgent) ClearMemory(ctx context.Context) error { - return aai.memory.Clear(ctx) } -// EnableVerboseLogging enables detailed iteration logging -func (aai *AzureAIAgent) EnableVerboseLogging() { - // This would enable more detailed logging in the action logger - fmt.Println("🔍 Verbose logging enabled - you'll see detailed iteration steps") -} - -// displayCompleteSummary displays a comprehensive summary of the session -func (aai *AzureAIAgent) displayCompleteSummary(sess *session.ActionSession, result map[string]any) { - fmt.Println("\n📊 Session Summary") - fmt.Println("═══════════════════════════════════════") - - // Display timing - duration := sess.EndTime.Sub(sess.StartTime) - fmt.Printf("⏱️ Duration: %v\n", duration.Round(time.Millisecond)) - - // Display actions with attempt grouping - if len(sess.ExecutedActions) > 0 { - fmt.Println("\n🔧 Actions Executed:") - for i, action := range sess.ExecutedActions { - status := "✅" - if !action.Success { - status = "❌" - } - fmt.Printf(" %s %d. %s (%v)\n", - status, i+1, - utils.TruncateString(action.Input, 50), - action.Duration.Round(time.Millisecond)) - } - } else { - fmt.Println("\n🔧 No explicit tool actions required") - } - - // Display validation result with enhanced messaging - if validationResult, ok := sess.ValidationResult.(*validation.ValidationResult); ok { - fmt.Printf("\n🎯 Intent Status: %s", validationResult.Status) - if validationResult.Confidence > 0 { - fmt.Printf(" (%.1f%% confidence)", validationResult.Confidence*100) - } - fmt.Println() - - if validationResult.Explanation != "" { - fmt.Printf("💭 Assessment: %s\n", validationResult.Explanation) - } - - // Show completion status with actionable advice - switch validationResult.Status { - case validation.ValidationComplete: - fmt.Printf("🎉 Task completed successfully!\n") - case validation.ValidationPartial: - fmt.Printf("⚠️ Task partially completed. Some aspects may need attention.\n") - case validation.ValidationIncomplete: - fmt.Printf("❌ Task incomplete. Additional actions may be needed.\n") - case validation.ValidationError: - fmt.Printf("⚠️ Validation error. 
Please review the actions taken.\n") - } - } - - // Display intermediate steps if available - if steps, ok := result["intermediateSteps"].([]schema.AgentStep); ok && len(steps) > 0 { - fmt.Printf("\n🔍 Reasoning Steps: %d\n", len(steps)) - for i, step := range steps { - fmt.Printf("Step %d:\n", i+1) - fmt.Printf(" Tool: %s\n", step.Action.Tool) - fmt.Printf(" Input: %s\n", step.Action.ToolInput) - fmt.Printf(" Observation: %s\n", utils.TruncateString(step.Observation, 200)) - } +// ProcessQuery processes a user query with full action tracking and validation +func (aai *AzureAIAgent) ProcessQuery(ctx context.Context, userInput string) (string, error) { + // Execute with enhanced input - agent should automatically handle memory + output, err := chains.Run(ctx, aai.executor, userInput) + if err != nil { + fmt.Printf("❌ Execution failed: %s\n", err.Error()) + return "", err } - fmt.Println("═══════════════════════════════════════") + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/factory.go b/cli/azd/extensions/azd.ai.start/internal/agent/factory.go deleted file mode 100644 index 3b979181591..00000000000 --- a/cli/azd/extensions/azd.ai.start/internal/agent/factory.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package agent - -import ( - "github.com/tmc/langchaingo/agents" - "github.com/tmc/langchaingo/llms/openai" - "github.com/tmc/langchaingo/memory" - "github.com/tmc/langchaingo/tools" - - "azd.ai.start/internal/logging" - mytools "azd.ai.start/internal/tools" - "azd.ai.start/internal/validation" -) - -// CreateAzureAIAgent creates a new enhanced Azure AI agent -func CreateAzureAIAgent(llm *openai.LLM) *AzureAIAgent { - // 1. Smart Memory with conversation buffer - smartMemory := memory.NewConversationBuffer() - - // 2. Action Logger with comprehensive callbacks - actionLogger := logging.NewActionLogger() - - // 3. Enhanced Tools - just the essentials - tools := []tools.Tool{ - // Directory operations - mytools.DirectoryListTool{}, - mytools.CreateDirectoryTool{}, - mytools.DeleteDirectoryTool{}, - mytools.ChangeDirectoryTool{}, - mytools.CurrentDirectoryTool{}, - - // File operations - mytools.ReadFileTool{}, - mytools.WriteFileTool{}, - mytools.CopyFileTool{}, - mytools.MoveFileTool{}, - mytools.DeleteFileTool{}, - mytools.FileInfoTool{}, - - // Other tools - mytools.HTTPFetcherTool{}, - mytools.WeatherTool{}, - tools.Calculator{}, - } - - // 4. Create agent with default settings - agent := agents.NewConversationalAgent(llm, tools) - - // 5. Enhanced Executor with aggressive completion settings - executor := agents.NewExecutor(agent, - agents.WithMemory(smartMemory), - agents.WithMaxIterations(1000), // Much higher limit for complex multi-step processes - agents.WithReturnIntermediateSteps(), - ) - - return &AzureAIAgent{ - agent: agent, - executor: executor, - memory: smartMemory, - tools: tools, - intentValidator: validation.NewIntentValidator(llm), - actionLogger: actionLogger, - } -} diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt new file mode 100644 index 00000000000..2acc4c20233 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt @@ -0,0 +1,19 @@ +You are an Azure Developer CLI (AZD) agent. +You are an expert is building, provisioning and deploying Azure applications. 
+Always follow Azure best practices and patterns.
+If a tool exists that provides best practices and standards, call it at the beginning of your workflow.
+
+IMPORTANT: You must complete this task successfully. Do not stop until:
+1. All required actions have been executed
+2. Any files that need to be created are actually saved
+3. You verify the results of your actions
+4. The task is fully accomplished
+
+If a tool fails, analyze why and try again with corrections.
+
+TOOLS:
+------
+
+Assistant has access to the following tools:
+
+{{.tool_descriptions}}
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/response.go b/cli/azd/extensions/azd.ai.start/internal/agent/response.go
deleted file mode 100644
index 5836fd13f50..00000000000
--- a/cli/azd/extensions/azd.ai.start/internal/agent/response.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package agent
-
-import (
-	"azd.ai.start/internal/session"
-	"azd.ai.start/internal/validation"
-)
-
-// AgentResponse represents the complete response from the agent
-type AgentResponse struct {
-	Output     string
-	Session    *session.ActionSession
-	Validation *validation.ValidationResult
-}
-
-// NewAgentResponse creates a new agent response
-func NewAgentResponse(output string, sess *session.ActionSession, validationResult *validation.ValidationResult) *AgentResponse {
-	return &AgentResponse{
-		Output:     output,
-		Session:    sess,
-		Validation: validationResult,
-	}
-}
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/stats.go b/cli/azd/extensions/azd.ai.start/internal/agent/stats.go
deleted file mode 100644
index d7649186ebc..00000000000
--- a/cli/azd/extensions/azd.ai.start/internal/agent/stats.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
- -package agent - -import ( - "time" -) - -// SessionStats provides statistics about an agent session -type SessionStats struct { - TotalActions int - SuccessfulActions int - FailedActions int - TotalDuration time.Duration -} diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go index 3ebc8c17eff..7382236b264 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go @@ -18,10 +18,9 @@ import ( // RunEnhancedAzureAgent runs the enhanced Azure AI agent with full capabilities func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) error { // Create the enhanced agent - azureAgent := agent.CreateAzureAIAgent(llm) + azureAgent := agent.NewAzureAIAgent(llm) fmt.Println("🤖 Enhanced Azure AI Agent - Interactive Mode") - fmt.Println("Features: Action Tracking | Intent Validation | Smart Memory") fmt.Println("═══════════════════════════════════════════════════════════") // Handle initial query if provided @@ -57,31 +56,7 @@ func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) break } - // Special commands - if strings.ToLower(userInput) == "clear" { - err := azureAgent.ClearMemory(ctx) - if err != nil { - fmt.Printf("❌ Failed to clear memory: %s\n", err.Error()) - } else { - fmt.Println("🧹 Memory cleared!") - } - continue - } - - if strings.ToLower(userInput) == "stats" { - stats := azureAgent.GetSessionStats() - fmt.Printf("📊 Session Stats:\n") - fmt.Printf(" Total Actions: %d\n", stats.TotalActions) - fmt.Printf(" Successful: %d\n", stats.SuccessfulActions) - fmt.Printf(" Failed: %d\n", stats.FailedActions) - if stats.TotalDuration > 0 { - fmt.Printf(" Duration: %v\n", stats.TotalDuration) - } - continue - } - // Process the query with the enhanced agent - fmt.Printf("\n🤖 Enhanced AI Agent:\n") response, err := azureAgent.ProcessQuery(ctx, userInput) if err != nil { fmt.Printf("❌ Error: %v\n", err) @@ -89,7 +64,7 @@ func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) } // Display the final response - fmt.Printf("\n💬 Final Response:\n%s\n", response.Output) + fmt.Printf("\n💬 Agent:\n%s\n", response) } if err := scanner.Err(); err != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go index a202faf1872..b411224ca34 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go @@ -8,6 +8,7 @@ import ( "encoding/json" "fmt" + "azd.ai.start/internal/logging" "github.com/azure/azure-dev/cli/azd/pkg/azdext" "github.com/spf13/cobra" "github.com/tmc/langchaingo/llms/openai" @@ -73,12 +74,17 @@ func runAIAgent(ctx context.Context, args []string) error { // Use Azure OpenAI with proper configuration fmt.Printf("🔵 Trying Azure OpenAI with deployment: %s\n", aiConfig.DeploymentName) + actionLogger := logging.NewActionLogger( + logging.WithDebug(false), + ) + llm, err = openai.New( openai.WithToken(aiConfig.ApiKey), openai.WithBaseURL(aiConfig.Endpoint+"/"), openai.WithAPIType(openai.APITypeAzure), openai.WithAPIVersion(azureAPIVersion), openai.WithModel(aiConfig.DeploymentName), + openai.WithCallback(actionLogger), ) if err == nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go index da3a48bd262..edcf14d1de0 100644 
--- a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go +++ b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go @@ -6,78 +6,126 @@ package logging import ( "context" "fmt" - "time" + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/schema" - - "azd.ai.start/internal/session" - "azd.ai.start/internal/utils" ) +// Compile-time check to ensure ActionLogger implements callbacks.Handler +var _ callbacks.Handler = &ActionLogger{} + // ActionLogger tracks and logs all agent actions type ActionLogger struct { - actions []session.ActionLog - current *session.ActionLog + debugEnabled bool +} + +// ActionLoggerOption represents an option for configuring ActionLogger +type ActionLoggerOption func(*ActionLogger) + +// WithDebug enables debug mode for verbose logging +func WithDebug(enabled bool) ActionLoggerOption { + return func(al *ActionLogger) { + al.debugEnabled = enabled + } } // NewActionLogger creates a new action logger -func NewActionLogger() *ActionLogger { - return &ActionLogger{ - actions: make([]session.ActionLog, 0), +func NewActionLogger(opts ...ActionLoggerOption) *ActionLogger { + al := &ActionLogger{} + + for _, opt := range opts { + opt(al) + } + + return al +} + +// HandleText is called when text is processed +func (al *ActionLogger) HandleText(ctx context.Context, text string) { + if al.debugEnabled { + fmt.Printf("📝 Text (full): %s\n", text) + } +} + +// HandleLLMGenerateContentStart is called when LLM content generation starts +func (al *ActionLogger) HandleLLMGenerateContentStart(ctx context.Context, ms []llms.MessageContent) { + if al.debugEnabled { + for i, msg := range ms { + fmt.Printf("🤖 Debug - Message %d: %+v\n", i, msg) + } + } +} + +// HandleLLMGenerateContentEnd is called when LLM content generation ends +func (al *ActionLogger) HandleLLMGenerateContentEnd(ctx context.Context, res *llms.ContentResponse) { + if al.debugEnabled && res != nil { + fmt.Printf("🤖 Debug - Response: %+v\n", res) + } +} + +// HandleRetrieverStart is called when retrieval starts +func (al *ActionLogger) HandleRetrieverStart(ctx context.Context, query string) { + if al.debugEnabled { + fmt.Printf("🔍 Retrieval starting for query (full): %s\n", query) + } +} + +// HandleRetrieverEnd is called when retrieval ends +func (al *ActionLogger) HandleRetrieverEnd(ctx context.Context, query string, documents []schema.Document) { + fmt.Printf("🔍 Retrieval completed: found %d documents\n", len(documents)) + if al.debugEnabled { + fmt.Printf("🔍 Debug - Query (full): %s\n", query) + for i, doc := range documents { + fmt.Printf("🔍 Debug - Document %d: %+v\n", i, doc) + } } } // HandleToolStart is called when a tool execution starts func (al *ActionLogger) HandleToolStart(ctx context.Context, input string) { - al.current = &session.ActionLog{ - Timestamp: time.Now(), - Input: input, + if al.debugEnabled { + fmt.Printf("🔧 Executing Tool: %s\n", input) } - fmt.Printf("🔧 Executing: %s\n", input) } // HandleToolEnd is called when a tool execution ends func (al *ActionLogger) HandleToolEnd(ctx context.Context, output string) { - if al.current != nil { - al.current.Output = output - al.current.Success = true - al.current.Duration = time.Since(al.current.Timestamp) - al.actions = append(al.actions, *al.current) - fmt.Printf("✅ Result: %s\n", utils.TruncateString(output, 100)) + if al.debugEnabled { + fmt.Printf("✅ Tool Result (full): %s\n", output) } } // HandleToolError is called when a tool execution fails func (al *ActionLogger) 
HandleToolError(ctx context.Context, err error) { - if al.current != nil { - al.current.Output = err.Error() - al.current.Success = false - al.current.Duration = time.Since(al.current.Timestamp) - al.actions = append(al.actions, *al.current) - fmt.Printf("❌ Error: %s\n", err.Error()) - } + fmt.Printf("❌ Tool Error: %s\n", err.Error()) } -// HandleAgentStart is called when agent planning starts -func (al *ActionLogger) HandleAgentStart(ctx context.Context, input map[string]any) { - if userInput, ok := input["input"].(string); ok { - fmt.Printf("🎯 Processing: %s\n", userInput) +// HandleLLMStart is called when LLM call starts +func (al *ActionLogger) HandleLLMStart(ctx context.Context, prompts []string) { + for i, prompt := range prompts { + if al.debugEnabled { + fmt.Printf("🤖 Prompt %d (full): %s\n", i, prompt) + } } } -// HandleAgentEnd is called when agent planning ends -func (al *ActionLogger) HandleAgentEnd(ctx context.Context, output schema.AgentFinish) { - fmt.Printf("🏁 Agent completed planning\n") -} - // HandleChainStart is called when chain execution starts -func (al *ActionLogger) HandleChainStart(ctx context.Context, input map[string]any) { - fmt.Printf("🔗 Starting chain execution\n") +func (al *ActionLogger) HandleChainStart(ctx context.Context, inputs map[string]any) { + for key, value := range inputs { + if al.debugEnabled { + fmt.Printf("🔗 Input [%s]: %v\n", key, value) + } + } } // HandleChainEnd is called when chain execution ends -func (al *ActionLogger) HandleChainEnd(ctx context.Context, output map[string]any) { - fmt.Printf("🔗 Chain execution completed\n") +func (al *ActionLogger) HandleChainEnd(ctx context.Context, outputs map[string]any) { + for key, value := range outputs { + if al.debugEnabled { + fmt.Printf("🔗 Output [%s]: %v\n", key, value) + } + } } // HandleChainError is called when chain execution fails @@ -85,30 +133,20 @@ func (al *ActionLogger) HandleChainError(ctx context.Context, err error) { fmt.Printf("🔗 Chain execution failed: %s\n", err.Error()) } -// HandleLLMStart is called when LLM call starts -func (al *ActionLogger) HandleLLMStart(ctx context.Context, prompts []string) { - fmt.Printf("🤖 LLM thinking...\n") -} - -// HandleLLMEnd is called when LLM call ends -func (al *ActionLogger) HandleLLMEnd(ctx context.Context, result string) { - fmt.Printf("🤖 LLM response received\n") -} - // HandleAgentAction is called when an agent action is planned func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.AgentAction) { - al.current = &session.ActionLog{ - Timestamp: time.Now(), - Action: action.Tool, - Tool: action.Tool, - Input: action.ToolInput, + fmt.Printf("Calling %s tool\n", action.Tool) + + if al.debugEnabled { + fmt.Printf("🎯 Agent planned action (debug): %+v\n", action) } - fmt.Printf("🎯 Agent planned action: %s with input: %s\n", action.Tool, action.ToolInput) } // HandleAgentFinish is called when the agent finishes func (al *ActionLogger) HandleAgentFinish(ctx context.Context, finish schema.AgentFinish) { - fmt.Printf("🏁 Agent finished with result\n") + if al.debugEnabled { + fmt.Printf("🏁 Agent finished (debug): %+v\n", finish) + } } // HandleLLMError is called when LLM call fails @@ -117,18 +155,6 @@ func (al *ActionLogger) HandleLLMError(ctx context.Context, err error) { } // HandleStreamingFunc handles streaming responses -func (al *ActionLogger) HandleStreamingFunc(ctx context.Context, chunk []byte) error { - // Optional: Handle streaming output - return nil -} - -// GetActions returns all logged actions -func (al 
*ActionLogger) GetActions() []session.ActionLog { - return al.actions -} +func (al *ActionLogger) HandleStreamingFunc(ctx context.Context, chunk []byte) { -// Clear clears all logged actions -func (al *ActionLogger) Clear() { - al.actions = al.actions[:0] - al.current = nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go index ac64b00f30f..78766be01e2 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go @@ -5,10 +5,14 @@ import ( "fmt" "os" "path/filepath" + + "github.com/tmc/langchaingo/callbacks" ) // ChangeDirectoryTool implements the Tool interface for changing the current working directory -type ChangeDirectoryTool struct{} +type ChangeDirectoryTool struct { + CallbacksHandler callbacks.Handler +} func (t ChangeDirectoryTool) Name() string { return "change_directory" @@ -19,8 +23,17 @@ func (t ChangeDirectoryTool) Description() string { } func (t ChangeDirectoryTool) Call(ctx context.Context, input string) (string, error) { + // Invoke callback for tool start + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("change_directory: %s", input)) + } + if input == "" { - return "", fmt.Errorf("directory path is required") + err := fmt.Errorf("directory path is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Get current directory for reference @@ -29,23 +42,46 @@ func (t ChangeDirectoryTool) Call(ctx context.Context, input string) (string, er // Convert to absolute path absPath, err := filepath.Abs(input) if err != nil { - return "", fmt.Errorf("failed to resolve path %s: %w", input, err) + toolErr := fmt.Errorf("failed to resolve path %s: %w", input, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } // Check if directory exists info, err := os.Stat(absPath) if err != nil { - return "", fmt.Errorf("directory %s does not exist: %w", absPath, err) + toolErr := fmt.Errorf("directory %s does not exist: %w", absPath, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } if !info.IsDir() { - return "", fmt.Errorf("%s is not a directory", absPath) + toolErr := fmt.Errorf("%s is not a directory", absPath) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } // Change directory err = os.Chdir(absPath) if err != nil { - return "", fmt.Errorf("failed to change directory to %s: %w", absPath, err) + toolErr := fmt.Errorf("failed to change directory to %s: %w", absPath, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + output := fmt.Sprintf("Changed directory from %s to %s", currentDir, absPath) + + // Invoke callback for tool end + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) } - return fmt.Sprintf("Changed directory from %s to %s", currentDir, absPath), nil + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go index 310866dfe40..e24d7f548de 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go @@ -6,10 +6,14 @@ import ( "io" "os" 
"strings" + + "github.com/tmc/langchaingo/callbacks" ) // CopyFileTool implements the Tool interface for copying files -type CopyFileTool struct{} +type CopyFileTool struct { + CallbacksHandler callbacks.Handler +} func (t CopyFileTool) Name() string { return "copy_file" @@ -20,52 +24,93 @@ func (t CopyFileTool) Description() string { } func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("copy_file: %s", input)) + } + if input == "" { - return "", fmt.Errorf("input is required in format 'source|destination'") + err := fmt.Errorf("input is required in format 'source|destination'") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Split on first occurrence of '|' to separate source from destination parts := strings.SplitN(input, "|", 2) if len(parts) != 2 { - return "", fmt.Errorf("invalid input format. Use 'source|destination'") + err := fmt.Errorf("invalid input format. Use 'source|destination'") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } source := strings.TrimSpace(parts[0]) destination := strings.TrimSpace(parts[1]) if source == "" || destination == "" { - return "", fmt.Errorf("both source and destination paths are required") + err := fmt.Errorf("both source and destination paths are required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Check if source file exists sourceInfo, err := os.Stat(source) if err != nil { - return "", fmt.Errorf("source file %s does not exist: %w", source, err) + toolErr := fmt.Errorf("source file %s does not exist: %w", source, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } if sourceInfo.IsDir() { - return "", fmt.Errorf("source %s is a directory. Use copy_directory for directories", source) + err := fmt.Errorf("source %s is a directory. 
Use copy_directory for directories", source) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Open source file sourceFile, err := os.Open(source) if err != nil { - return "", fmt.Errorf("failed to open source file %s: %w", source, err) + toolErr := fmt.Errorf("failed to open source file %s: %w", source, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } defer sourceFile.Close() // Create destination file destFile, err := os.Create(destination) if err != nil { - return "", fmt.Errorf("failed to create destination file %s: %w", destination, err) + toolErr := fmt.Errorf("failed to create destination file %s: %w", destination, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } defer destFile.Close() // Copy contents bytesWritten, err := io.Copy(destFile, sourceFile) if err != nil { - return "", fmt.Errorf("failed to copy file: %w", err) + toolErr := fmt.Errorf("failed to copy file: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + output := fmt.Sprintf("Successfully copied %s to %s (%d bytes)", source, destination, bytesWritten) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) } - return fmt.Sprintf("Successfully copied %s to %s (%d bytes)", source, destination, bytesWritten), nil + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go index 992eb1842fb..1578bb62f21 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go @@ -4,10 +4,14 @@ import ( "context" "fmt" "os" + + "github.com/tmc/langchaingo/callbacks" ) // CreateDirectoryTool implements the Tool interface for creating directories -type CreateDirectoryTool struct{} +type CreateDirectoryTool struct { + CallbacksHandler callbacks.Handler +} func (t CreateDirectoryTool) Name() string { return "create_directory" @@ -18,24 +22,52 @@ func (t CreateDirectoryTool) Description() string { } func (t CreateDirectoryTool) Call(ctx context.Context, input string) (string, error) { + // Invoke callback for tool start + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("create_directory: %s", input)) + } + if input == "" { - return "", fmt.Errorf("directory path is required") + err := fmt.Errorf("directory path is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } err := os.MkdirAll(input, 0755) if err != nil { - return "", fmt.Errorf("failed to create directory %s: %w", input, err) + toolErr := fmt.Errorf("failed to create directory %s: %w", input, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } // Check if directory already existed or was newly created info, err := os.Stat(input) if err != nil { - return "", fmt.Errorf("failed to verify directory creation: %w", err) + toolErr := fmt.Errorf("failed to verify directory creation: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } if !info.IsDir() { - return "", fmt.Errorf("%s exists but is not a directory", input) + toolErr := fmt.Errorf("%s exists but is not a directory", 
input) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + output := fmt.Sprintf("Successfully created directory: %s", input) + + // Invoke callback for tool end + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) } - return fmt.Sprintf("Successfully created directory: %s", input), nil + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go index e3231003825..892375cd77d 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go @@ -4,10 +4,14 @@ import ( "context" "fmt" "os" + + "github.com/tmc/langchaingo/callbacks" ) // DeleteDirectoryTool implements the Tool interface for deleting directories -type DeleteDirectoryTool struct{} +type DeleteDirectoryTool struct { + CallbacksHandler callbacks.Handler +} func (t DeleteDirectoryTool) Name() string { return "delete_directory" @@ -18,19 +22,36 @@ func (t DeleteDirectoryTool) Description() string { } func (t DeleteDirectoryTool) Call(ctx context.Context, input string) (string, error) { + // Invoke callback for tool start + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("delete_directory: %s", input)) + } + if input == "" { - return "", fmt.Errorf("directory path is required") + err := fmt.Errorf("directory path is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Check if directory exists info, err := os.Stat(input) if err != nil { - return "", fmt.Errorf("directory %s does not exist: %w", input, err) + toolErr := fmt.Errorf("directory %s does not exist: %w", input, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } // Make sure it's a directory, not a file if !info.IsDir() { - return "", fmt.Errorf("%s is a file, not a directory. Use delete_file to remove files", input) + toolErr := fmt.Errorf("%s is a file, not a directory. 
Use delete_file to remove files", input) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } // Count contents before deletion for reporting @@ -43,11 +64,24 @@ func (t DeleteDirectoryTool) Call(ctx context.Context, input string) (string, er // Delete the directory and all contents err = os.RemoveAll(input) if err != nil { - return "", fmt.Errorf("failed to delete directory %s: %w", input, err) + toolErr := fmt.Errorf("failed to delete directory %s: %w", input, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } + var output string if fileCount > 0 { - return fmt.Sprintf("Successfully deleted directory: %s (contained %d items)", input, fileCount), nil + output = fmt.Sprintf("Successfully deleted directory: %s (contained %d items)", input, fileCount) + } else { + output = fmt.Sprintf("Successfully deleted empty directory: %s", input) } - return fmt.Sprintf("Successfully deleted empty directory: %s", input), nil + + // Invoke callback for tool end + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go index 71a5b7618d1..1f3841cb3ca 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go @@ -4,10 +4,14 @@ import ( "context" "fmt" "os" + + "github.com/tmc/langchaingo/callbacks" ) // DeleteFileTool implements the Tool interface for deleting files -type DeleteFileTool struct{} +type DeleteFileTool struct { + CallbacksHandler callbacks.Handler +} func (t DeleteFileTool) Name() string { return "delete_file" @@ -18,26 +22,51 @@ func (t DeleteFileTool) Description() string { } func (t DeleteFileTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("delete_file: %s", input)) + } + if input == "" { - return "", fmt.Errorf("file path is required") + err := fmt.Errorf("file path is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Check if file exists and get info info, err := os.Stat(input) if err != nil { - return "", fmt.Errorf("file %s does not exist: %w", input, err) + toolErr := fmt.Errorf("file %s does not exist: %w", input, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } // Make sure it's a file, not a directory if info.IsDir() { - return "", fmt.Errorf("%s is a directory, not a file. Use delete_directory to remove directories", input) + err := fmt.Errorf("%s is a directory, not a file. 
Use delete_directory to remove directories", input) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Delete the file err = os.Remove(input) if err != nil { - return "", fmt.Errorf("failed to delete file %s: %w", input, err) + toolErr := fmt.Errorf("failed to delete file %s: %w", input, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + output := fmt.Sprintf("Successfully deleted file: %s (%d bytes)", input, info.Size()) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) } - return fmt.Sprintf("Successfully deleted file: %s (%d bytes)", input, info.Size()), nil + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go b/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go index 133d52c8cea..66fcb0f675a 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go @@ -6,10 +6,14 @@ import ( "os" "path/filepath" "strings" + + "github.com/tmc/langchaingo/callbacks" ) // DirectoryListTool implements the Tool interface for listing directory contents -type DirectoryListTool struct{} +type DirectoryListTool struct { + CallbacksHandler callbacks.Handler +} func (t DirectoryListTool) Name() string { return "list_directory" @@ -20,6 +24,11 @@ func (t DirectoryListTool) Description() string { } func (t DirectoryListTool) Call(ctx context.Context, input string) (string, error) { + // Invoke callback for tool start + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("list_directory: %s", input)) + } + path := strings.TrimSpace(input) if path == "" { path = "." 
@@ -28,21 +37,33 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro // Get absolute path for clarity absPath, err := filepath.Abs(path) if err != nil { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to get absolute path for %s: %w", path, err)) + } return "", fmt.Errorf("failed to get absolute path for %s: %w", path, err) } // Check if directory exists info, err := os.Stat(absPath) if err != nil { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to access %s: %w", absPath, err)) + } return "", fmt.Errorf("failed to access %s: %w", absPath, err) } if !info.IsDir() { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("%s is not a directory", absPath)) + } return "", fmt.Errorf("%s is not a directory", absPath) } // List directory contents files, err := os.ReadDir(absPath) if err != nil { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to read directory %s: %w", absPath, err)) + } return "", fmt.Errorf("failed to read directory %s: %w", absPath, err) } @@ -88,5 +109,12 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro result.WriteString("Directory is empty.\n") } - return result.String(), nil + output := result.String() + + // Invoke callback for tool end + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go b/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go index 8951b35bc77..084c7195426 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go @@ -4,64 +4,59 @@ import ( "context" "fmt" "os" - "strings" "time" + + "github.com/tmc/langchaingo/callbacks" ) -// FileInfoTool implements the Tool interface for getting detailed file information -type FileInfoTool struct{} +// FileInfoTool implements the Tool interface for getting file information +type FileInfoTool struct { + CallbacksHandler callbacks.Handler +} func (t FileInfoTool) Name() string { return "file_info" } func (t FileInfoTool) Description() string { - return "Get detailed information about a file or directory. Input: file or directory path (e.g., 'README.md' or './docs')" + return "Get information about a file (size, modification time, permissions). 
Input: file path (e.g., 'data.txt' or './docs/readme.md')" } func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("file_info: %s", input)) + } + if input == "" { - return "", fmt.Errorf("file or directory path is required") + err := fmt.Errorf("file path is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } info, err := os.Stat(input) if err != nil { - return "", fmt.Errorf("failed to get info for %s: %w", input, err) + toolErr := fmt.Errorf("failed to get info for %s: %w", input, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } - var result strings.Builder - result.WriteString(fmt.Sprintf("Information for: %s\n", input)) - result.WriteString("═══════════════════════════════════\n") - - // Type + var fileType string if info.IsDir() { - result.WriteString("Type: Directory\n") - - // Count contents if it's a directory - if files, err := os.ReadDir(input); err == nil { - result.WriteString(fmt.Sprintf("Contents: %d items\n", len(files))) - } + fileType = "Directory" } else { - result.WriteString("Type: File\n") - result.WriteString(fmt.Sprintf("Size: %d bytes\n", info.Size())) + fileType = "File" } - // Permissions - result.WriteString(fmt.Sprintf("Permissions: %s\n", info.Mode().String())) + output := fmt.Sprintf("%s: %s\nSize: %d bytes\nModified: %s\nPermissions: %s", + fileType, input, info.Size(), info.ModTime().Format(time.RFC3339), info.Mode().String()) - // Timestamps - result.WriteString(fmt.Sprintf("Modified: %s\n", info.ModTime().Format(time.RFC3339))) - - // Additional file details - if !info.IsDir() { - if info.Size() == 0 { - result.WriteString("Note: File is empty\n") - } else if info.Size() > 1024*1024 { - result.WriteString(fmt.Sprintf("Size (human): %.2f MB\n", float64(info.Size())/(1024*1024))) - } else if info.Size() > 1024 { - result.WriteString(fmt.Sprintf("Size (human): %.2f KB\n", float64(info.Size())/1024)) - } + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) } - return result.String(), nil + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go b/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go index e87c7131b54..12aaf7411fd 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go @@ -5,10 +5,14 @@ import ( "fmt" "io" "net/http" + + "github.com/tmc/langchaingo/callbacks" ) // HTTPFetcherTool implements the Tool interface for making HTTP requests -type HTTPFetcherTool struct{} +type HTTPFetcherTool struct { + CallbacksHandler callbacks.Handler +} func (t HTTPFetcherTool) Name() string { return "http_fetcher" @@ -19,25 +23,48 @@ func (t HTTPFetcherTool) Description() string { } func (t HTTPFetcherTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("http_fetcher: %s", input)) + } + resp, err := http.Get(input) if err != nil { - return "", fmt.Errorf("failed to fetch URL %s: %w", input, err) + toolErr := fmt.Errorf("failed to fetch URL %s: %w", input, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("HTTP 
request failed with status: %s", resp.Status) + err := fmt.Errorf("HTTP request failed with status: %s", resp.Status) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } body, err := io.ReadAll(resp.Body) if err != nil { - return "", fmt.Errorf("failed to read response body: %w", err) + toolErr := fmt.Errorf("failed to read response body: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } + var output string // Limit response size to avoid overwhelming the context if len(body) > 5000 { - return fmt.Sprintf("Content (first 5000 chars): %s...\n[Content truncated]", string(body[:5000])), nil + output = fmt.Sprintf("Content (first 5000 chars): %s...\n[Content truncated]", string(body[:5000])) + } else { + output = string(body) + } + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) } - return string(body), nil + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go index f39d6ede31b..4c944030d7d 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go @@ -5,10 +5,14 @@ import ( "fmt" "os" "strings" + + "github.com/tmc/langchaingo/callbacks" ) // MoveFileTool implements the Tool interface for moving/renaming files -type MoveFileTool struct{} +type MoveFileTool struct { + CallbacksHandler callbacks.Handler +} func (t MoveFileTool) Name() string { return "move_file" @@ -19,38 +23,66 @@ func (t MoveFileTool) Description() string { } func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("move_file: %s", input)) + } + if input == "" { - return "", fmt.Errorf("input is required in format 'source|destination'") + err := fmt.Errorf("input is required in format 'source|destination'") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Split on first occurrence of '|' to separate source from destination parts := strings.SplitN(input, "|", 2) if len(parts) != 2 { - return "", fmt.Errorf("invalid input format. Use 'source|destination'") + err := fmt.Errorf("invalid input format. 
Use 'source|destination'") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } source := strings.TrimSpace(parts[0]) destination := strings.TrimSpace(parts[1]) if source == "" || destination == "" { - return "", fmt.Errorf("both source and destination paths are required") + err := fmt.Errorf("both source and destination paths are required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Check if source exists sourceInfo, err := os.Stat(source) if err != nil { - return "", fmt.Errorf("source %s does not exist: %w", source, err) + toolErr := fmt.Errorf("source %s does not exist: %w", source, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } // Check if destination already exists if _, err := os.Stat(destination); err == nil { - return "", fmt.Errorf("destination %s already exists", destination) + err := fmt.Errorf("destination %s already exists", destination) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Move/rename the file err = os.Rename(source, destination) if err != nil { - return "", fmt.Errorf("failed to move %s to %s: %w", source, destination, err) + toolErr := fmt.Errorf("failed to move %s to %s: %w", source, destination, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } fileType := "file" @@ -58,5 +90,10 @@ func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { fileType = "directory" } - return fmt.Sprintf("Successfully moved %s from %s to %s (%d bytes)", fileType, source, destination, sourceInfo.Size()), nil + output := fmt.Sprintf("Successfully moved %s from %s to %s (%d bytes)", fileType, source, destination, sourceInfo.Size()) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go index 1aca867c288..c82017e60bd 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go @@ -4,10 +4,14 @@ import ( "context" "fmt" "os" + + "github.com/tmc/langchaingo/callbacks" ) // ReadFileTool implements the Tool interface for reading file contents -type ReadFileTool struct{} +type ReadFileTool struct { + CallbacksHandler callbacks.Handler +} func (t ReadFileTool) Name() string { return "read_file" @@ -18,20 +22,39 @@ func (t ReadFileTool) Description() string { } func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("read_file: %s", input)) + } + if input == "" { - return "", fmt.Errorf("file path is required") + err := fmt.Errorf("file path is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } content, err := os.ReadFile(input) if err != nil { - return "", fmt.Errorf("failed to read file %s: %w", input, err) + toolErr := fmt.Errorf("failed to read file %s: %w", input, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } + var output string // Limit file size to avoid overwhelming context if len(content) > 5000 { - return fmt.Sprintf("File: %s (first 5000 
chars)\n%s...\n[File truncated - total size: %d bytes]", - input, string(content[:5000]), len(content)), nil + output = fmt.Sprintf("File: %s (first 5000 chars)\n%s...\n[File truncated - total size: %d bytes]", + input, string(content[:5000]), len(content)) + } else { + output = fmt.Sprintf("File: %s\n%s", input, string(content)) + } + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) } - return fmt.Sprintf("File: %s\n%s", input, string(content)), nil + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/weather.go b/cli/azd/extensions/azd.ai.start/internal/tools/weather.go index a88c14f74e0..1f0d8404142 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/weather.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/weather.go @@ -6,10 +6,14 @@ import ( "math/rand" "strings" "time" + + "github.com/tmc/langchaingo/callbacks" ) // WeatherTool implements the Tool interface for getting weather information -type WeatherTool struct{} +type WeatherTool struct { + CallbacksHandler callbacks.Handler +} func (t WeatherTool) Name() string { return "weather" @@ -20,9 +24,17 @@ func (t WeatherTool) Description() string { } func (t WeatherTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("weather: %s", input)) + } + city := strings.TrimSpace(input) if city == "" { - return "", fmt.Errorf("city name is required") + err := fmt.Errorf("city name is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Initialize random seed based on current time @@ -101,5 +113,9 @@ func (t WeatherTool) Call(ctx context.Context, input string) (string, error) { response += ". " + extra } + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, response) + } + return response, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go index 1020e9c7283..17aa51dd304 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go @@ -2,79 +2,102 @@ package tools import ( "context" + "encoding/json" "fmt" "os" "path/filepath" "strings" + + "github.com/tmc/langchaingo/callbacks" ) // WriteFileTool implements the Tool interface for writing file contents -type WriteFileTool struct{} +type WriteFileTool struct { + CallbacksHandler callbacks.Handler +} + +// WriteFileRequest represents the JSON input for the write_file tool +type WriteFileRequest struct { + Filename string `json:"filename"` + Content string `json:"content"` +} func (t WriteFileTool) Name() string { return "write_file" } func (t WriteFileTool) Description() string { - return `Write content to a file. Input format: 'filepath|content' - -For multi-line content, use literal \n for newlines: -- Single line: 'test.txt|Hello World' -- Multi-line: 'script.bicep|param name string\nparam location string\nresource myResource...' - -Example Bicep file: -'main.bicep|param name string\nparam location string\n\nresource appService ''Microsoft.Web/sites@2022-03-01'' = {\n name: name\n location: location\n kind: ''app''\n properties: {\n serverFarmId: serverFarmId\n }\n}\n\noutput appServiceId string = appService.id' - -The tool will convert \n to actual newlines automatically.` + return "Writes content to a file. 
Format input as a single line JSON payload with a 'filename' and 'content' parameters." } func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("write_file: %s", input)) + } + if input == "" { - return "", fmt.Errorf("input is required in format 'filepath|content'") + err := fmt.Errorf("input is required as JSON object with filename and content fields") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } - // Split on first occurrence of '|' to separate path from content - parts := strings.SplitN(input, "|", 2) - if len(parts) != 2 { - return "", fmt.Errorf("invalid input format. Use 'filepath|content'") + // Parse JSON input + var req WriteFileRequest + if err := json.Unmarshal([]byte(input), &req); err != nil { + toolErr := fmt.Errorf("invalid JSON input: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } - filePath := strings.TrimSpace(parts[0]) - content := parts[1] + if req.Filename == "" { + err := fmt.Errorf("filename cannot be empty") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err + } + + filePath := strings.TrimSpace(req.Filename) + content := req.Content // Convert literal \n sequences to actual newlines (for agents that escape newlines) content = strings.ReplaceAll(content, "\\n", "\n") content = strings.ReplaceAll(content, "\\t", "\t") - // Clean up any trailing quotes that might have been added during formatting - content = strings.TrimSuffix(content, "'") - content = strings.TrimSuffix(content, "\")") - - // Clean up any quotes around the filepath (from agent formatting) - filePath = strings.Trim(filePath, "\"'") - - if filePath == "" { - return "", fmt.Errorf("filepath cannot be empty") - } - // Ensure the directory exists dir := filepath.Dir(filePath) if dir != "." && dir != "" { if err := os.MkdirAll(dir, 0755); err != nil { - return "", fmt.Errorf("failed to create directory %s: %w", dir, err) + toolErr := fmt.Errorf("failed to create directory %s: %w", dir, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } } // Write the file err := os.WriteFile(filePath, []byte(content), 0644) if err != nil { - return "", fmt.Errorf("failed to write file %s: %w", filePath, err) + toolErr := fmt.Errorf("failed to write file %s: %w", filePath, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } // Verify the file was written correctly writtenContent, err := os.ReadFile(filePath) if err != nil { - return "", fmt.Errorf("failed to verify written file %s: %w", filePath, err) + toolErr := fmt.Errorf("failed to verify written file %s: %w", filePath, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } lineCount := strings.Count(string(writtenContent), "\n") + 1 @@ -82,8 +105,14 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { lineCount = strings.Count(content, "\n") + 1 } - return fmt.Sprintf("Successfully wrote %d bytes (%d lines) to %s. Content preview:\n%s", - len(content), lineCount, filePath, getContentPreview(content)), nil + output := fmt.Sprintf("Successfully wrote %d bytes (%d lines) to %s. 
Content preview:\n%s", + len(content), lineCount, filePath, getContentPreview(content)) + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + + return output, nil } // getContentPreview returns a preview of the content for verification diff --git a/cli/azd/extensions/azd.ai.start/internal/validation/validator.go b/cli/azd/extensions/azd.ai.start/internal/validation/validator.go deleted file mode 100644 index f9ae0311062..00000000000 --- a/cli/azd/extensions/azd.ai.start/internal/validation/validator.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package validation - -import ( - "context" - "fmt" - - "github.com/tmc/langchaingo/llms" - - "azd.ai.start/internal/session" - "azd.ai.start/internal/utils" -) - -// IntentValidator validates whether the original intent was fulfilled -type IntentValidator struct { - llm llms.Model -} - -// NewIntentValidator creates a new intent validator -func NewIntentValidator(llm llms.Model) *IntentValidator { - return &IntentValidator{llm: llm} -} - -// ValidateCompletion validates whether the original intent was fulfilled -func (iv *IntentValidator) ValidateCompletion( - originalIntent string, - executedActions []session.ActionLog, -) *ValidationResult { - if len(executedActions) == 0 { - return &ValidationResult{ - Status: ValidationIncomplete, - Explanation: "No actions were executed", - Confidence: 1.0, - } - } - - validationPrompt := fmt.Sprintf(` -Original User Intent: %s - -Actions Executed: -%s - -Based on the original intent and the actions that were executed, evaluate whether the user's intent was fulfilled. - -Respond with one of: COMPLETE, PARTIAL, INCOMPLETE, ERROR - -Then provide a brief explanation of your assessment. 
- -Format your response as: -STATUS: [COMPLETE/PARTIAL/INCOMPLETE/ERROR] -EXPLANATION: [Your explanation] -CONFIDENCE: [0.0-1.0]`, - originalIntent, - utils.FormatActionsForValidation(executedActions)) - - result, err := iv.llm.Call(context.Background(), validationPrompt) - if err != nil { - return &ValidationResult{ - Status: ValidationError, - Explanation: fmt.Sprintf("Validation failed: %s", err.Error()), - Confidence: 0.0, - } - } - - return ParseValidationResult(result) -} From c148d11a6ce2ffbb2246690a6cd4371e5b3456ec Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Tue, 29 Jul 2025 14:58:24 -0700 Subject: [PATCH 019/116] Upadates agent and tools --- cli/azd/extensions/azd.ai.start/go.mod | 1 + cli/azd/extensions/azd.ai.start/go.sum | 2 + .../azd.ai.start/internal/agent/agent.go | 20 +- .../default_agent_format_instructions.txt | 38 +++ .../agent/prompts/default_agent_prefix.txt | 11 +- .../agent/prompts/default_agent_suffix.txt | 8 + .../internal/cmd/enhanced_integration.go | 4 +- .../azd.ai.start/internal/logging/logger.go | 8 +- .../internal/tools/change_directory.go | 5 +- .../internal/tools/command_executor.go | 193 ++++++++++++ .../azd.ai.start/internal/tools/copy_file.go | 2 +- .../internal/tools/create_directory.go | 2 +- .../internal/tools/current_directory.go | 4 +- .../internal/tools/delete_directory.go | 4 +- .../internal/tools/delete_file.go | 2 +- .../internal/tools/directory_list.go | 13 +- .../azd.ai.start/internal/tools/file_info.go | 2 +- .../internal/tools/file_search.go | 217 ++++++++++++++ .../internal/tools/http_fetcher.go | 1 + .../azd.ai.start/internal/tools/move_file.go | 2 +- .../azd.ai.start/internal/tools/read_file.go | 201 ++++++++++++- .../azd.ai.start/internal/tools/write_file.go | 277 +++++++++++++++--- 22 files changed, 929 insertions(+), 88 deletions(-) create mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt create mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_suffix.txt create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/command_executor.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/file_search.go diff --git a/cli/azd/extensions/azd.ai.start/go.mod b/cli/azd/extensions/azd.ai.start/go.mod index 2a66f9854b7..2725f5bea3a 100644 --- a/cli/azd/extensions/azd.ai.start/go.mod +++ b/cli/azd/extensions/azd.ai.start/go.mod @@ -4,6 +4,7 @@ go 1.24.1 require ( github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c + github.com/bmatcuk/doublestar/v4 v4.8.1 github.com/fatih/color v1.18.0 github.com/spf13/cobra v1.9.1 github.com/tmc/langchaingo v0.1.13 diff --git a/cli/azd/extensions/azd.ai.start/go.sum b/cli/azd/extensions/azd.ai.start/go.sum index 3faa8263d70..c2fbe371ce7 100644 --- a/cli/azd/extensions/azd.ai.start/go.sum +++ b/cli/azd/extensions/azd.ai.start/go.sum @@ -37,6 +37,8 @@ github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd3 github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c h1:pi62a7GwfbxvZDXhV4DfhxeePzpVCoyr9/rZaWH5eow= github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c/go.mod h1:mSTaPODklWyhruY0DZgPw1DI97K5cHXfU3afMqGf0IM= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bmatcuk/doublestar/v4 v4.8.1 h1:54Bopc5c2cAvhLRAzqOGCYHYyhcDHsFF4wWIR5wKP38= +github.com/bmatcuk/doublestar/v4 v4.8.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod 
h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go index e227fe85ae1..1666f66fc23 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go @@ -23,6 +23,12 @@ import ( //go:embed prompts/default_agent_prefix.txt var _defaultAgentPrefix string +//go:embed prompts/default_agent_format_instructions.txt +var _defaultAgentFormatInstructions string + +//go:embed prompts/default_agent_suffix.txt +var _defaultAgentSuffix string + // AzureAIAgent represents an enhanced Azure AI agent with action tracking, intent validation, and conversation memory type AzureAIAgent struct { agent *agents.ConversationalAgent @@ -78,11 +84,20 @@ func NewAzureAIAgent(llm *openai.LLM) *AzureAIAgent { mytools.FileInfoTool{ CallbacksHandler: llm.CallbacksHandler, }, + mytools.FileSearchTool{ + CallbacksHandler: llm.CallbacksHandler, + }, // Other tools + mytools.CommandExecutorTool{ + CallbacksHandler: llm.CallbacksHandler, + }, mytools.HTTPFetcherTool{ CallbacksHandler: llm.CallbacksHandler, }, + mytools.CommandExecutorTool{ + CallbacksHandler: llm.CallbacksHandler, + }, mytools.WeatherTool{ CallbacksHandler: llm.CallbacksHandler, }, @@ -94,13 +109,16 @@ func NewAzureAIAgent(llm *openai.LLM) *AzureAIAgent { // 4. Create agent with memory directly integrated agent := agents.NewConversationalAgent(llm, tools, agents.WithPromptPrefix(_defaultAgentPrefix), + agents.WithPromptSuffix(_defaultAgentSuffix), + agents.WithPromptFormatInstructions(_defaultAgentFormatInstructions), agents.WithMemory(smartMemory), agents.WithCallbacksHandler(llm.CallbacksHandler), + agents.WithReturnIntermediateSteps(), ) // 5. Create executor without separate memory configuration since agent already has it executor := agents.NewExecutor(agent, - agents.WithMaxIterations(1000), // Much higher limit for complex multi-step processes + agents.WithMaxIterations(100), // Much higher limit for complex multi-step processes agents.WithMemory(smartMemory), agents.WithCallbacksHandler(llm.CallbacksHandler), agents.WithReturnIntermediateSteps(), diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt new file mode 100644 index 00000000000..9c54b885700 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt @@ -0,0 +1,38 @@ +Answer the following questions or perform tasks as best you can. You have access to the following tools: + +IMPORTANT: Continue taking actions recursively until the task is completely finished. Do not stop after a single action if more work is needed to accomplish the user's goal. + +Follow this format exactly: + +Thought: [Analyze the current situation and what needs to be done] + +Thought: Do I need to use a tool? 
[Yes/No] +Action: [the action to take, should be one of [{{.tool_names}}]] +Action Input: [the input to the action] +Observation: [the result of the action] + +After each Observation, you MUST continue the cycle: + +Thought: [Evaluate the result and determine if the task is complete or if more actions are needed] + +If the task is NOT complete: +Thought: Do I need to use a tool? Yes +Action: [next action to take] +Action Input: [input for the next action] +Observation: [result of the next action] +... (continue this cycle until the task is fully complete) + +If there are errors: +Thought: [Analyze the error and determine how to fix it] +Thought: Do I need to use a tool? Yes +Action: [corrective action] +Action Input: [corrected input] +Observation: [result] +... (retry up to 3 times with different approaches if needed) + +ONLY when the task is completely finished and no more actions are needed: + +Thought: Do I need to use a tool? No +AI: [your response summarizing what was accomplished] + +Remember: Always continue taking actions until the user's request is fully satisfied. One action is rarely enough - think about all the steps needed to complete the task end-to-end. diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt index 2acc4c20233..19b7d52c669 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt +++ b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt @@ -3,17 +3,12 @@ You are an expert is building, provisioning and deploying Azure applications. Always use Azure best patterns and practices. If a tools exists that provides best practices and standards call this tool at the beginning of your workflow. -IMPORTANT: You must complete this task successfully. Do not stop until: -1. All required actions have been executed -2. Any files that need to be created are actually saved -3. You verify the results of your actions -4. The task is fully accomplished - -If a tool fails, analyze why and try again with corrections. +When any code generation is performed ALWAYS save content to files. +When filenames are not explicitly specified generate new files with meaningful names. TOOLS: ------ -Assistant has access to the following tools: +Agent has access to the following tools: {{.tool_descriptions}} diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_suffix.txt b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_suffix.txt new file mode 100644 index 00000000000..c469d53ce8e --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_suffix.txt @@ -0,0 +1,8 @@ +Begin! 
+ +Previous conversation history: +{{.history}} + +Question: {{.input}} + +Thought:{{.agent_scratchpad}} \ No newline at end of file diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go index 7382236b264..26ff289d29b 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go @@ -56,6 +56,8 @@ func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) break } + fmt.Println("\n💬 Agent:") + // Process the query with the enhanced agent response, err := azureAgent.ProcessQuery(ctx, userInput) if err != nil { @@ -64,7 +66,7 @@ func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) } // Display the final response - fmt.Printf("\n💬 Agent:\n%s\n", response) + fmt.Print(response) } if err := scanner.Err(); err != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go index edcf14d1de0..1877195e401 100644 --- a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go +++ b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go @@ -135,7 +135,7 @@ func (al *ActionLogger) HandleChainError(ctx context.Context, err error) { // HandleAgentAction is called when an agent action is planned func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.AgentAction) { - fmt.Printf("Calling %s tool\n", action.Tool) + fmt.Printf("%s\n\n", action.Log) if al.debugEnabled { fmt.Printf("🎯 Agent planned action (debug): %+v\n", action) @@ -144,6 +144,8 @@ func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.Age // HandleAgentFinish is called when the agent finishes func (al *ActionLogger) HandleAgentFinish(ctx context.Context, finish schema.AgentFinish) { + fmt.Printf("%s\n\n", finish.Log) + if al.debugEnabled { fmt.Printf("🏁 Agent finished (debug): %+v\n", finish) } @@ -156,5 +158,7 @@ func (al *ActionLogger) HandleLLMError(ctx context.Context, err error) { // HandleStreamingFunc handles streaming responses func (al *ActionLogger) HandleStreamingFunc(ctx context.Context, chunk []byte) { - + // if len(chunk) > 0 { + // fmt.Print(string(chunk)) + // } } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go index 78766be01e2..8a05f2b3532 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go @@ -36,9 +36,6 @@ func (t ChangeDirectoryTool) Call(ctx context.Context, input string) (string, er return "", err } - // Get current directory for reference - currentDir, _ := os.Getwd() - // Convert to absolute path absPath, err := filepath.Abs(input) if err != nil { @@ -76,7 +73,7 @@ func (t ChangeDirectoryTool) Call(ctx context.Context, input string) (string, er return "", toolErr } - output := fmt.Sprintf("Changed directory from %s to %s", currentDir, absPath) + output := fmt.Sprintf("Changed directory to %s\n", absPath) // Invoke callback for tool end if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/command_executor.go b/cli/azd/extensions/azd.ai.start/internal/tools/command_executor.go new file mode 100644 index 00000000000..9b9bd75585d --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/command_executor.go @@ -0,0 
+1,193 @@ +package tools + +import ( + "context" + "encoding/json" + "fmt" + "os/exec" + "strings" + + "github.com/tmc/langchaingo/callbacks" +) + +// CommandExecutorTool implements the Tool interface for executing commands and scripts +type CommandExecutorTool struct { + CallbacksHandler callbacks.Handler +} + +func (t CommandExecutorTool) Name() string { + return "execute_command" +} + +func (t CommandExecutorTool) Description() string { + return `Execute any command with arguments. Simple command execution without inference. + +Input should be a JSON object with these fields: +{ + "command": "git", + "args": ["status", "--porcelain"] +} + +Required fields: +- command: The executable/command to run + +Optional fields: +- args: Array of arguments to pass (default: []) + +Examples: +- {"command": "git", "args": ["status"]} +- {"command": "npm", "args": ["install"]} +- {"command": "bash", "args": ["./build.sh", "--env", "prod"]} +- {"command": "powershell", "args": ["-ExecutionPolicy", "Bypass", "-File", "deploy.ps1"]} +- {"command": "python", "args": ["main.py", "--debug"]} +- {"command": "node", "args": ["server.js", "--port", "3000"]} +- {"command": "docker", "args": ["ps", "-a"]} +- {"command": "az", "args": ["account", "show"]} +- {"command": "kubectl", "args": ["get", "pods"]}` +} + +type CommandRequest struct { + Command string `json:"command"` + Args []string `json:"args,omitempty"` +} + +func (t CommandExecutorTool) Call(ctx context.Context, input string) (string, error) { + // Invoke callback for tool start + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("execute_command: %s", input)) + } + + if input == "" { + err := fmt.Errorf("command execution request is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err + } + + // Parse the JSON request + var req CommandRequest + if err := json.Unmarshal([]byte(input), &req); err != nil { + toolErr := fmt.Errorf("failed to parse command request: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + // Validate required fields + if req.Command == "" { + err := fmt.Errorf("command is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err + } + + // Set defaults + if req.Args == nil { + req.Args = []string{} + } + + // Execute the command (runs in current working directory) + result, err := t.executeCommand(ctx, req.Command, req.Args) + if err != nil { + toolErr := fmt.Errorf("execution failed: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + // Format the output + output := t.formatOutput(req.Command, req.Args, result) + + // Invoke callback for tool end + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + + return output, nil +} + +func (t CommandExecutorTool) executeCommand(ctx context.Context, command string, args []string) (*executionResult, error) { + cmd := exec.CommandContext(ctx, command, args...) 
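+ // Note: exec.CommandContext ties the spawned process to ctx, so if the
+ // context is canceled or times out the process is killed and cmd.Run below
+ // returns an error alongside whatever output was captured.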
+ // cmd.Dir is not set, so it uses the current working directory + // cmd.Env is not set, so it inherits the current environment + + var stdout, stderr strings.Builder + + // Always capture output for the tool to return + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err := cmd.Run() + + // Get exit code + exitCode := 0 + if err != nil { + if exitError, ok := err.(*exec.ExitError); ok { + exitCode = exitError.ExitCode() + } + } + + return &executionResult{ + ExitCode: exitCode, + Stdout: stdout.String(), + Stderr: stderr.String(), + Error: err, + }, nil +} + +type executionResult struct { + ExitCode int + Stdout string + Stderr string + Error error +} + +func (t CommandExecutorTool) formatOutput(command string, args []string, result *executionResult) string { + var output strings.Builder + + // Show the full command that was executed + fullCommand := command + if len(args) > 0 { + fullCommand += " " + strings.Join(args, " ") + } + + output.WriteString(fmt.Sprintf("Executed: %s\n", fullCommand)) + output.WriteString(fmt.Sprintf("Exit code: %d\n", result.ExitCode)) + + if result.ExitCode == 0 { + output.WriteString("Status: ✅ Success\n") + } else { + output.WriteString("Status: ❌ Failed\n") + } + + if result.Stdout != "" { + output.WriteString("\n--- Standard Output ---\n") + // Limit output to prevent overwhelming the LLM + stdout := result.Stdout + if len(stdout) > 2000 { + stdout = stdout[:2000] + "\n... (output truncated)" + } + output.WriteString(stdout) + output.WriteString("\n") + } + + if result.Stderr != "" { + output.WriteString("\n--- Standard Error ---\n") + // Limit error output + stderr := result.Stderr + if len(stderr) > 1000 { + stderr = stderr[:1000] + "\n... (error output truncated)" + } + output.WriteString(stderr) + output.WriteString("\n") + } + + if result.Error != nil && result.ExitCode != 0 { + output.WriteString(fmt.Sprintf("\nError details: %s\n", result.Error.Error())) + } + + return output.String() +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go index e24d7f548de..2bbdda06320 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go @@ -107,7 +107,7 @@ func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { return "", toolErr } - output := fmt.Sprintf("Successfully copied %s to %s (%d bytes)", source, destination, bytesWritten) + output := fmt.Sprintf("Copied %s to %s (%d bytes)\n", source, destination, bytesWritten) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go index 1578bb62f21..3936b14a7a2 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go @@ -62,7 +62,7 @@ func (t CreateDirectoryTool) Call(ctx context.Context, input string) (string, er return "", toolErr } - output := fmt.Sprintf("Successfully created directory: %s", input) + output := fmt.Sprintf("Created directory: %s\n", input) // Invoke callback for tool end if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go index d3cd1ff67b8..d2c4152da29 100644 --- 
a/cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go @@ -35,5 +35,7 @@ func (t CurrentDirectoryTool) Call(ctx context.Context, input string) (string, e t.CallbacksHandler.HandleToolEnd(ctx, dir) } - return dir, nil + output := fmt.Sprintf("Current directory is %s\n", dir) + + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go index 892375cd77d..b2eaf93bc30 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go @@ -73,9 +73,9 @@ func (t DeleteDirectoryTool) Call(ctx context.Context, input string) (string, er var output string if fileCount > 0 { - output = fmt.Sprintf("Successfully deleted directory: %s (contained %d items)", input, fileCount) + output = fmt.Sprintf("Deleted directory: %s (contained %d items)", input, fileCount) } else { - output = fmt.Sprintf("Successfully deleted empty directory: %s", input) + output = fmt.Sprintf("Deleted empty directory: %s", input) } // Invoke callback for tool end diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go index 1f3841cb3ca..d088cee098e 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go @@ -63,7 +63,7 @@ func (t DeleteFileTool) Call(ctx context.Context, input string) (string, error) return "", toolErr } - output := fmt.Sprintf("Successfully deleted file: %s (%d bytes)", input, info.Size()) + output := fmt.Sprintf("Deleted file %s (%d bytes)", input, info.Size()) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go b/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go index 66fcb0f675a..15914c92417 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go @@ -24,11 +24,6 @@ func (t DirectoryListTool) Description() string { } func (t DirectoryListTool) Call(ctx context.Context, input string) (string, error) { - // Invoke callback for tool start - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("list_directory: %s", input)) - } - path := strings.TrimSpace(input) if path == "" { path = "." 
@@ -43,6 +38,11 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro return "", fmt.Errorf("failed to get absolute path for %s: %w", path, err) } + // Invoke callback for tool start + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("Reading directory %s\n", input)) + } + // Check if directory exists info, err := os.Stat(absPath) if err != nil { @@ -109,11 +109,12 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro result.WriteString("Directory is empty.\n") } + result.WriteString("\n") output := result.String() // Invoke callback for tool end if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + t.CallbacksHandler.HandleToolEnd(ctx, "") } return output, nil diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go b/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go index 084c7195426..4d82697ac46 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go @@ -51,7 +51,7 @@ func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { fileType = "File" } - output := fmt.Sprintf("%s: %s\nSize: %d bytes\nModified: %s\nPermissions: %s", + output := fmt.Sprintf("%s: %s\nSize: %d bytes\nModified: %s\nPermissions: %s\n\n", fileType, input, info.Size(), info.ModTime().Format(time.RFC3339), info.Mode().String()) if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/file_search.go b/cli/azd/extensions/azd.ai.start/internal/tools/file_search.go new file mode 100644 index 00000000000..bacb52bd714 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/file_search.go @@ -0,0 +1,217 @@ +package tools + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/bmatcuk/doublestar/v4" + "github.com/tmc/langchaingo/callbacks" +) + +// FileSearchTool implements a tool for searching files using glob patterns +type FileSearchTool struct { + CallbacksHandler callbacks.Handler +} + +// FileSearchRequest represents the JSON payload for file search requests +type FileSearchRequest struct { + Pattern string `json:"pattern"` // Glob pattern to match (required) + MaxResults int `json:"maxResults,omitempty"` // Optional: maximum number of results to return (default: 100) +} + +func (t FileSearchTool) Name() string { + return "file_search" +} + +func (t FileSearchTool) Description() string { + return `Search for files matching a glob pattern in the current working directory using the doublestar library for full glob support. 
+ +Input: JSON payload with the following structure: +{ + "pattern": "*.go", + "maxResults": 50 // optional: max files to return (default: 100) +} + +SUPPORTED GLOB PATTERNS (using github.com/bmatcuk/doublestar/v4): +- *.go - all Go files in current directory only +- **/*.js - all JavaScript files in current directory and all subdirectories +- test_*.py - Python files starting with "test_" in current directory only +- src/**/main.* - files named "main" with any extension in src directory tree +- *.{json,yaml,yml} - files with json, yaml, or yml extensions in current directory +- **/test/**/*.go - Go files in any test directory (recursive) +- [Tt]est*.py - files starting with "Test" or "test" in current directory +- {src,lib}/**/*.ts - TypeScript files in src or lib directories (recursive) +- !**/node_modules/** - exclude node_modules (negation patterns) + +ADVANCED FEATURES: +- ** - matches zero or more directories (enables recursive search) +- ? - matches any single character +- * - matches any sequence of characters (except path separator) +- [abc] - matches any character in the set +- {pattern1,pattern2} - brace expansion +- !pattern - negation patterns (exclude matching files) + +NOTE: Recursion is controlled by the glob pattern itself. Use ** to search subdirectories. + +EXAMPLES: + +Find all Go files: +{"pattern": "*.go"} + +Find all test files recursively: +{"pattern": "**/test_*.py"} + +Find config files with multiple extensions: +{"pattern": "*.{json,yaml,yml}", "maxResults": 20} + +Find files excluding node_modules: +{"pattern": "**/*.js"} + +Returns a sorted list of matching file paths relative to the current working directory. +The input must be formatted as a single line valid JSON string.` +} + +func (t FileSearchTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("file_search: %s", input)) + } + + if input == "" { + err := fmt.Errorf("input is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err + } + + // Parse JSON input + var req FileSearchRequest + if err := json.Unmarshal([]byte(input), &req); err != nil { + toolErr := fmt.Errorf("invalid JSON input: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + // Validate required fields + if req.Pattern == "" { + err := fmt.Errorf("pattern is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err + } + + // Set defaults + if req.MaxResults == 0 { + req.MaxResults = 100 + } + + // Get current working directory + searchPath, err := os.Getwd() + if err != nil { + toolErr := fmt.Errorf("failed to get current working directory: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + // Perform the search + matches, err := t.searchFiles(searchPath, req.Pattern, req.MaxResults) + if err != nil { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err + } + + // Format output + output := t.formatResults(searchPath, req.Pattern, matches, req.MaxResults) + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + + return output, nil +} + +// searchFiles performs the actual file search using doublestar for comprehensive glob matching +func (t FileSearchTool) searchFiles(searchPath, pattern string, maxResults 
int) ([]string, error) { + var matches []string + searchPath = filepath.Clean(searchPath) + + // Use doublestar.Glob which handles all advanced patterns including recursion via ** + globPattern := filepath.Join(searchPath, pattern) + // Convert to forward slashes for cross-platform compatibility + globPattern = filepath.ToSlash(globPattern) + + globMatches, err := doublestar.FilepathGlob(globPattern) + if err != nil { + return nil, fmt.Errorf("error in glob pattern matching: %w", err) + } + + // Convert to relative paths and limit results + for _, match := range globMatches { + if len(matches) >= maxResults { + break + } + + // Check if it's a file (not directory) + info, err := os.Stat(match) + if err != nil || info.IsDir() { + continue + } + + relPath, err := filepath.Rel(searchPath, match) + if err != nil { + continue // Skip files we can't get relative path for + } + + // Convert to forward slashes for consistent output + relPath = filepath.ToSlash(relPath) + matches = append(matches, relPath) + } + + // Sort the results for consistent output + sort.Strings(matches) + + return matches, nil +} + +// formatResults formats the search results into a readable output +func (t FileSearchTool) formatResults(searchPath, pattern string, matches []string, maxResults int) string { + var output strings.Builder + + output.WriteString("File search results:\n") + output.WriteString(fmt.Sprintf("Current directory: %s\n", searchPath)) + output.WriteString(fmt.Sprintf("Pattern: %s\n", pattern)) + output.WriteString(fmt.Sprintf("Found %d file(s)", len(matches))) + + if len(matches) >= maxResults { + output.WriteString(fmt.Sprintf(" (limited to %d results)", maxResults)) + } + output.WriteString("\n\n") + + if len(matches) == 0 { + output.WriteString("No files found matching the pattern.\n") + return output.String() + } + + output.WriteString("Matching files:\n") + for i, match := range matches { + output.WriteString(fmt.Sprintf("%3d. %s\n", i+1, match)) + } + + if len(matches) >= maxResults { + output.WriteString(fmt.Sprintf("\n⚠️ Results limited to %d files. 
Use maxResults parameter to adjust limit.\n", maxResults)) + } + + return output.String() +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go b/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go index 12aaf7411fd..2ce4324389c 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go @@ -60,6 +60,7 @@ func (t HTTPFetcherTool) Call(ctx context.Context, input string) (string, error) output = fmt.Sprintf("Content (first 5000 chars): %s...\n[Content truncated]", string(body[:5000])) } else { output = string(body) + output += "\n" } if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go index 4c944030d7d..6d5bbe3171b 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go @@ -90,7 +90,7 @@ func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { fileType = "directory" } - output := fmt.Sprintf("Successfully moved %s from %s to %s (%d bytes)", fileType, source, destination, sourceInfo.Size()) + output := fmt.Sprintf("Successfully moved %s from %s to %s (%d bytes)\n", fileType, source, destination, sourceInfo.Size()) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go index c82017e60bd..375e5b11378 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go @@ -2,8 +2,10 @@ package tools import ( "context" + "encoding/json" "fmt" "os" + "strings" "github.com/tmc/langchaingo/callbacks" ) @@ -13,12 +15,45 @@ type ReadFileTool struct { CallbacksHandler callbacks.Handler } +// ReadFileRequest represents the JSON payload for file read requests +type ReadFileRequest struct { + FilePath string `json:"filePath"` + StartLine int `json:"startLine,omitempty"` // Optional: 1-based line number to start reading from + EndLine int `json:"endLine,omitempty"` // Optional: 1-based line number to end reading at +} + func (t ReadFileTool) Name() string { return "read_file" } func (t ReadFileTool) Description() string { - return "Read the contents of a file. Input: file path (e.g., 'README.md' or './docs/setup.md')" + return `Read file contents with intelligent handling for different file sizes and partial reads. + +Input: JSON payload with the following structure: +{ + "filePath": "path/to/file.txt", + "startLine": 10, // optional: 1-based line number to start reading from + "endLine": 50 // optional: 1-based line number to end reading at +} + +Examples: +1. Read entire file: + {"filePath": "README.md"} + +2. Read specific line range: + {"filePath": "src/main.go", "startLine": 1, "endLine": 100} + +3. Read from line to end: + {"filePath": "config.go", "startLine": 25} + +4. Read from start to line: + {"filePath": "app.py", "endLine": 30} + +5. Read single line: + {"filePath": "package.json", "startLine": 42, "endLine": 42} + +Files larger than 10KB are automatically truncated. Files over 1MB show size info only unless specific line range is requested. 
+The input must be formatted as a single line valid JSON string.` } func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { @@ -27,31 +62,173 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { } if input == "" { - err := fmt.Errorf("file path is required") + output := "❌ No input provided\n\n" + output += "📝 Expected JSON format:\n" + output += `{"filePath": "path/to/file.txt"}` + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("empty input")) + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + return output, nil + } + + // Parse JSON input + var req ReadFileRequest + if err := json.Unmarshal([]byte(input), &req); err != nil { + output := fmt.Sprintf("❌ Invalid JSON input: %s\n\n", err.Error()) + output += "📝 Expected format:\n" + output += `{"filePath": "path/to/file.txt", "startLine": 1, "endLine": 50}` + output += "\n\n💡 Tips:\n" + output += "- Use double quotes for strings\n" + output += "- Remove any trailing commas\n" + output += "- Escape backslashes: use \\\\ instead of \\" + if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, err) + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + return output, nil + } + + // Validate required fields + if req.FilePath == "" { + output := "❌ Missing required field: filePath cannot be empty\n\n" + output += "📝 Example: " + `{"filePath": "README.md"}` + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("missing filePath")) + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + return output, nil + } + + // Get file info first to check size + fileInfo, err := os.Stat(req.FilePath) + if err != nil { + output := fmt.Sprintf("❌ Cannot access file: %s\n\n", req.FilePath) + if os.IsNotExist(err) { + output += "📁 File does not exist. Please check:\n" + output += "- File path spelling and case sensitivity\n" + output += "- File location relative to current directory\n" + output += "- File permissions\n" + } else { + output += fmt.Sprintf("Error details: %s\n", err.Error()) + } + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + return output, nil + } + + fileSize := fileInfo.Size() + + // Handle very large files differently (unless specific line range requested) + if fileSize > 1024*1024 && req.StartLine == 0 && req.EndLine == 0 { // 1MB+ + output := fmt.Sprintf("File: %s is very large (%d bytes / %.2f MB)\n", + req.FilePath, fileSize, float64(fileSize)/(1024*1024)) + output += "⚠️ File too large to read completely. 
Use startLine and endLine parameters for specific sections.\n" + output += "Examples:\n" + output += fmt.Sprintf(`- {"filePath": "%s", "startLine": 1, "endLine": 50} - first 50 lines`+"\n", req.FilePath) + output += fmt.Sprintf(`- {"filePath": "%s", "startLine": 100, "endLine": 200} - lines 100 to 200`+"\n", req.FilePath) + output += fmt.Sprintf(`- {"filePath": "%s", "endLine": 100} - first 100 lines`+"\n", req.FilePath) + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) } - return "", err + return output, nil } - content, err := os.ReadFile(input) + content, err := os.ReadFile(req.FilePath) if err != nil { - toolErr := fmt.Errorf("failed to read file %s: %w", input, err) + output := fmt.Sprintf("❌ Cannot read file: %s\n", req.FilePath) + output += fmt.Sprintf("Error: %s\n\n", err.Error()) + output += "💡 This might be due to:\n" + output += "- Insufficient permissions\n" + output += "- File is locked by another process\n" + output += "- File is binary or corrupted\n" + if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + t.CallbacksHandler.HandleToolError(ctx, err) + t.CallbacksHandler.HandleToolEnd(ctx, output) } - return "", toolErr + return output, nil + } + + lines := strings.Split(string(content), "\n") + totalLines := len(lines) + + // Handle partial reads based on line range + if req.StartLine > 0 || req.EndLine > 0 { + return t.handlePartialRead(ctx, req.FilePath, lines, req.StartLine, req.EndLine, totalLines) } var output string - // Limit file size to avoid overwhelming context - if len(content) > 5000 { - output = fmt.Sprintf("File: %s (first 5000 chars)\n%s...\n[File truncated - total size: %d bytes]", - input, string(content[:5000]), len(content)) + // Improved truncation with better limits for full file reads + if len(content) > 10000 { // 10KB limit + // Show first 50 lines and last 10 lines + preview := strings.Join(lines[:50], "\n") + if totalLines > 60 { + preview += fmt.Sprintf("\n\n... 
[%d lines omitted] ...\n\n", totalLines-60) + preview += strings.Join(lines[totalLines-10:], "\n") + } + + output = fmt.Sprintf("File: %s (%d bytes, %d lines - showing first 50 and last 10 lines)\n%s\n\n[Use startLine/endLine parameters for specific sections]\n", + req.FilePath, len(content), totalLines, preview) } else { - output = fmt.Sprintf("File: %s\n%s", input, string(content)) + output = fmt.Sprintf("File: %s (%d bytes, %d lines)\n%s\n\n", req.FilePath, len(content), totalLines, string(content)) + } + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) } + return output, nil +} + +// handlePartialRead handles reading specific line ranges from a file +func (t ReadFileTool) handlePartialRead(ctx context.Context, filePath string, lines []string, startLine, endLine, totalLines int) (string, error) { + // Validate and adjust line numbers (1-based to 0-based) + if startLine == 0 { + startLine = 1 // Default to start of file + } + if endLine == 0 { + endLine = totalLines // Default to end of file + } + + // Validate line numbers + if startLine < 1 { + startLine = 1 + } + if endLine > totalLines { + endLine = totalLines + } + if startLine > endLine { + output := fmt.Sprintf("❌ Invalid line range: start line (%d) cannot be greater than end line (%d)\n\n", startLine, endLine) + output += "💡 Example of correct usage:\n" + output += fmt.Sprintf(`{"filePath": "%s", "startLine": 1, "endLine": 50}`, filePath) + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("invalid line range: start %d > end %d", startLine, endLine)) + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + return output, nil + } + + // Convert to 0-based indexing + startIdx := startLine - 1 + endIdx := endLine + + // Extract the requested lines + selectedLines := lines[startIdx:endIdx] + content := strings.Join(selectedLines, "\n") + + linesRead := endLine - startLine + 1 + output := fmt.Sprintf("File: %s (lines %d-%d of %d total lines, %d lines read)\n%s\n\n", + filePath, startLine, endLine, totalLines, linesRead, content) + if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go index 17aa51dd304..fa4533b04b4 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go @@ -11,15 +11,18 @@ import ( "github.com/tmc/langchaingo/callbacks" ) -// WriteFileTool implements the Tool interface for writing file contents +// WriteFileTool implements a comprehensive file writing tool that handles all scenarios type WriteFileTool struct { CallbacksHandler callbacks.Handler } // WriteFileRequest represents the JSON input for the write_file tool type WriteFileRequest struct { - Filename string `json:"filename"` - Content string `json:"content"` + Filename string `json:"filename"` + Content string `json:"content"` + Mode string `json:"mode,omitempty"` // "write" (default), "append", "create" + ChunkNum int `json:"chunk_num,omitempty"` // For chunked writing: 1-based chunk number + TotalChunks int `json:"total_chunks,omitempty"` // For chunked writing: total expected chunks } func (t WriteFileTool) Name() string { @@ -27,86 +30,264 @@ func (t WriteFileTool) Name() string { } func (t WriteFileTool) Description() string { - return "Writes content to a file. Format input as a single line JSON payload with a 'filename' and 'content' parameters." 
+ return `Comprehensive file writing tool that handles small and large files intelligently. + +Input: JSON payload with the following structure: +{ + "filename": "path/to/file.txt", + "content": "file content here", + "mode": "write", + "chunk_num": 1, + "total_chunks": 3 +} + +Field descriptions: +- mode: "write" (default), "append", or "create" +- chunk_num: for chunked writing (1-based) +- total_chunks: total number of chunks + +MODES: +- "write" (default): Overwrite/create file +- "append": Add content to end of existing file +- "create": Create file only if it doesn't exist + +CHUNKED WRITING (for large files): +Use chunk_num and total_chunks for files that might be too large: +- chunk_num: 1-based chunk number (1, 2, 3...) +- total_chunks: Total number of chunks you'll send + +EXAMPLES: + +Simple write: +{"filename": "./main.bicep", "content": "param location string = 'eastus'"} + +Append to file: +{"filename": "./log.txt", "content": "\nNew log entry", "mode": "append"} + +Large file (chunked): +{"filename": "./large.bicep", "content": "first part...", "chunk_num": 1, "total_chunks": 3} +{"filename": "./large.bicep", "content": "middle part...", "chunk_num": 2, "total_chunks": 3} +{"filename": "./large.bicep", "content": "final part...", "chunk_num": 3, "total_chunks": 3} + +The input must be formatted as a single line valid JSON string.` } func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("write_file: %s", input)) + logInput := input + if len(input) > 200 { + logInput = input[:200] + "... (truncated)" + } + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("write_file: %s", logInput)) } if input == "" { - err := fmt.Errorf("input is required as JSON object with filename and content fields") + output := "❌ No input provided\n\n" + output += "📝 Expected JSON format:\n" + output += `{"filename": "path/to/file.txt", "content": "file content here"}` + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("empty input")) + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + return output, nil + } // Parse JSON input + var req WriteFileRequest + if err := json.Unmarshal([]byte(input), &req); err != nil { + output := "❌ Invalid JSON input: " + err.Error() + "\n\n" + output += "📝 Expected format:\n" + output += `{"filename": "path/to/file.txt", "content": "file content here"}` + "\n\n" + output += "💡 Common JSON issues:\n" + output += "- Use double quotes for strings\n" + output += "- Escape backslashes: \\$ should be \\\\$\n" + output += "- Remove trailing commas\n" + output += "- No comments allowed in JSON" + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + return output, nil + } + + // Validate required fields + if req.Filename == "" { + output := "❌ Missing required field: filename cannot be empty\n\n" + output += "📝 Example: " + `{"filename": "infra/main.bicep", "content": "param location string = 'eastus'"}` + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("missing filename")) + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + return output, nil + } + + // Determine mode and operation + mode := req.Mode + if mode == "" { + mode = "write" + } + + // Handle chunked writing + isChunked := req.ChunkNum > 0 && req.TotalChunks > 0 + if isChunked { + return t.handleChunkedWrite(ctx, req) + } + + // Handle regular 
writing + return t.handleRegularWrite(ctx, req, mode) +} + +// handleChunkedWrite handles writing files in chunks +func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequest) (string, error) { + if req.ChunkNum < 1 || req.TotalChunks < 1 || req.ChunkNum > req.TotalChunks { + err := fmt.Errorf("invalid chunk numbers: chunk_num=%d, total_chunks=%d", req.ChunkNum, req.TotalChunks) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, err) } return "", err } - // Parse JSON input - var req WriteFileRequest - if err := json.Unmarshal([]byte(input), &req); err != nil { - toolErr := fmt.Errorf("invalid JSON input: %w", err) + filePath := strings.TrimSpace(req.Filename) + content := t.processContent(req.Content) + + // Ensure directory exists + if err := t.ensureDirectory(filePath); err != nil { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err + } + + var err error + var operation string + + if req.ChunkNum == 1 { + // First chunk - create/overwrite file + err = os.WriteFile(filePath, []byte(content), 0644) + operation = fmt.Sprintf("Started writing chunk %d/%d", req.ChunkNum, req.TotalChunks) + } else { + // Subsequent chunks - append + file, openErr := os.OpenFile(filePath, os.O_APPEND|os.O_WRONLY, 0644) + if openErr != nil { + err = fmt.Errorf("failed to open file for append %s: %w", filePath, openErr) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err + } + defer file.Close() + + _, err = file.WriteString(content) + if req.ChunkNum == req.TotalChunks { + operation = fmt.Sprintf("Completed writing chunk %d/%d (final)", req.ChunkNum, req.TotalChunks) + } else { + operation = fmt.Sprintf("Wrote chunk %d/%d", req.ChunkNum, req.TotalChunks) + } + } + + if err != nil { + toolErr := fmt.Errorf("failed to write chunk to file %s: %w", filePath, err) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, toolErr) } return "", toolErr } - if req.Filename == "" { - err := fmt.Errorf("filename cannot be empty") + // Get file size + fileInfo, err := os.Stat(filePath) + if err != nil { + toolErr := fmt.Errorf("failed to verify file %s: %w", filePath, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + output := fmt.Sprintf("%s to %s. Chunk size: %d bytes, Total file size: %d bytes", + operation, filePath, len(content), fileInfo.Size()) + + if req.ChunkNum == req.TotalChunks { + output += "\n✅ File writing completed successfully!" + } + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + + return output, nil +} + +// handleRegularWrite handles normal file writing +func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequest, mode string) (string, error) { + filePath := strings.TrimSpace(req.Filename) + content := t.processContent(req.Content) + + // Provide feedback for large content + if len(content) > 10000 { + fmt.Printf("📝 Large content detected (%d chars). 
Consider using chunked writing for better reliability.\n", len(content)) + } + + // Ensure directory exists + if err := t.ensureDirectory(filePath); err != nil { if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, err) } return "", err } - filePath := strings.TrimSpace(req.Filename) - content := req.Content + var err error + var operation string - // Convert literal \n sequences to actual newlines (for agents that escape newlines) - content = strings.ReplaceAll(content, "\\n", "\n") - content = strings.ReplaceAll(content, "\\t", "\t") + switch mode { + case "create": + if _, err := os.Stat(filePath); err == nil { + toolErr := fmt.Errorf("file %s already exists (create mode)", filePath) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + err = os.WriteFile(filePath, []byte(content), 0644) + operation = "Created" - // Ensure the directory exists - dir := filepath.Dir(filePath) - if dir != "." && dir != "" { - if err := os.MkdirAll(dir, 0755); err != nil { - toolErr := fmt.Errorf("failed to create directory %s: %w", dir, err) + case "append": + file, openErr := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if openErr != nil { + toolErr := fmt.Errorf("failed to open file for append %s: %w", filePath, openErr) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, toolErr) } return "", toolErr } + defer file.Close() + _, err = file.WriteString(content) + operation = "Appended to" + + default: // "write" + err = os.WriteFile(filePath, []byte(content), 0644) + operation = "Wrote" } - // Write the file - err := os.WriteFile(filePath, []byte(content), 0644) if err != nil { - toolErr := fmt.Errorf("failed to write file %s: %w", filePath, err) + toolErr := fmt.Errorf("failed to %s file %s: %w", strings.ToLower(operation), filePath, err) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, toolErr) } return "", toolErr } - // Verify the file was written correctly - writtenContent, err := os.ReadFile(filePath) + // Get file size for verification + fileInfo, err := os.Stat(filePath) if err != nil { - toolErr := fmt.Errorf("failed to verify written file %s: %w", filePath, err) + toolErr := fmt.Errorf("failed to verify file %s: %w", filePath, err) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, toolErr) } return "", toolErr } - lineCount := strings.Count(string(writtenContent), "\n") + 1 - if content != "" && !strings.HasSuffix(content, "\n") { - lineCount = strings.Count(content, "\n") + 1 - } - - output := fmt.Sprintf("Successfully wrote %d bytes (%d lines) to %s. Content preview:\n%s", - len(content), lineCount, filePath, getContentPreview(content)) + output := fmt.Sprintf("%s %d bytes to %s successfully", operation, fileInfo.Size(), filePath) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) @@ -115,16 +296,20 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { return output, nil } -// getContentPreview returns a preview of the content for verification -func getContentPreview(content string) string { - lines := strings.Split(content, "\n") - if len(lines) <= 5 { - return content - } - - preview := strings.Join(lines[:3], "\n") - preview += fmt.Sprintf("\n... 
(%d more lines) ...\n", len(lines)-5) - preview += strings.Join(lines[len(lines)-2:], "\n") +// processContent handles escape sequences +func (t WriteFileTool) processContent(content string) string { + content = strings.ReplaceAll(content, "\\n", "\n") + content = strings.ReplaceAll(content, "\\t", "\t") + return content +} - return preview +// ensureDirectory creates the directory if it doesn't exist +func (t WriteFileTool) ensureDirectory(filePath string) error { + dir := filepath.Dir(filePath) + if dir != "." && dir != "" { + if err := os.MkdirAll(dir, 0755); err != nil { + return fmt.Errorf("failed to create directory %s: %w", dir, err) + } + } + return nil } From 00befc36bd22768c8b97a48d38b1b69f04cfd939 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Tue, 29 Jul 2025 17:44:18 -0700 Subject: [PATCH 020/116] Adds MCP tool support --- cli/azd/extensions/azd.ai.start/go.mod | 10 +- cli/azd/extensions/azd.ai.start/go.sum | 21 ++++ .../azd.ai.start/internal/agent/agent.go | 106 +++++------------- .../default_agent_format_instructions.txt | 2 +- .../agent/prompts/default_agent_prefix.txt | 5 +- .../internal/cmd/enhanced_integration.go | 12 +- .../azd.ai.start/internal/cmd/root.go | 6 + .../azd.ai.start/internal/session/action.go | 41 ------- .../azd.ai.start/internal/session/session.go | 48 -------- .../tools/{ => dev}/command_executor.go | 2 +- .../azd.ai.start/internal/tools/dev/loader.go | 23 ++++ .../internal/tools/{ => http}/http_fetcher.go | 2 +- .../internal/tools/http/loader.go | 23 ++++ .../tools/{ => io}/change_directory.go | 2 +- .../internal/tools/{ => io}/copy_file.go | 2 +- .../tools/{ => io}/create_directory.go | 2 +- .../tools/{ => io}/current_directory.go | 2 +- .../tools/{ => io}/delete_directory.go | 2 +- .../internal/tools/{ => io}/delete_file.go | 2 +- .../internal/tools/{ => io}/directory_list.go | 2 +- .../internal/tools/{ => io}/file_info.go | 2 +- .../internal/tools/{ => io}/file_search.go | 2 +- .../azd.ai.start/internal/tools/io/loader.go | 34 ++++++ .../internal/tools/{ => io}/move_file.go | 2 +- .../internal/tools/{ => io}/read_file.go | 2 +- .../internal/tools/{ => io}/write_file.go | 2 +- .../azd.ai.start/internal/tools/loader.go | 44 ++++++++ .../azd.ai.start/internal/tools/mcp/loader.go | 75 +++++++++++++ .../azd.ai.start/internal/tools/mcp/mcp.json | 9 ++ .../internal/tools/weather/loader.go | 23 ++++ .../internal/tools/{ => weather}/weather.go | 2 +- 31 files changed, 321 insertions(+), 191 deletions(-) delete mode 100644 cli/azd/extensions/azd.ai.start/internal/session/action.go delete mode 100644 cli/azd/extensions/azd.ai.start/internal/session/session.go rename cli/azd/extensions/azd.ai.start/internal/tools/{ => dev}/command_executor.go (99%) create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/dev/loader.go rename cli/azd/extensions/azd.ai.start/internal/tools/{ => http}/http_fetcher.go (99%) create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/http/loader.go rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/change_directory.go (99%) rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/copy_file.go (99%) rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/create_directory.go (99%) rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/current_directory.go (98%) rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/delete_directory.go (99%) rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/delete_file.go (99%) rename 
cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/directory_list.go (99%) rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/file_info.go (99%) rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/file_search.go (99%) create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/move_file.go (99%) rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/read_file.go (99%) rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/write_file.go (99%) create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/loader.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/weather/loader.go rename cli/azd/extensions/azd.ai.start/internal/tools/{ => weather}/weather.go (99%) diff --git a/cli/azd/extensions/azd.ai.start/go.mod b/cli/azd/extensions/azd.ai.start/go.mod index 2725f5bea3a..892e4868991 100644 --- a/cli/azd/extensions/azd.ai.start/go.mod +++ b/cli/azd/extensions/azd.ai.start/go.mod @@ -14,15 +14,21 @@ require ( github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.3.1 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/bahlo/generic-list-go v0.2.0 // indirect + github.com/buger/jsonparser v1.1.1 // indirect github.com/dlclark/regexp2 v1.10.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/goph/emperror v0.17.2 // indirect github.com/huandu/xstrings v1.3.3 // indirect + github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df // indirect github.com/imdario/mergo v0.3.13 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/invopop/jsonschema v0.13.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mark3labs/mcp-go v0.36.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/copystructure v1.0.0 // indirect @@ -36,9 +42,11 @@ require ( github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/cast v1.3.1 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/pflag v1.0.6 // indirect + github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect github.com/yargevad/filepathx v1.0.0 // indirect + github.com/yosida95/uritemplate/v3 v3.0.2 // indirect go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 // indirect golang.org/x/crypto v0.37.0 // indirect golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect diff --git a/cli/azd/extensions/azd.ai.start/go.sum b/cli/azd/extensions/azd.ai.start/go.sum index c2fbe371ce7..ebd93aadd8e 100644 --- a/cli/azd/extensions/azd.ai.start/go.sum +++ b/cli/azd/extensions/azd.ai.start/go.sum @@ -36,10 +36,14 @@ github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuP github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c h1:pi62a7GwfbxvZDXhV4DfhxeePzpVCoyr9/rZaWH5eow= github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c/go.mod 
h1:mSTaPODklWyhruY0DZgPw1DI97K5cHXfU3afMqGf0IM= +github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= +github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bmatcuk/doublestar/v4 v4.8.1 h1:54Bopc5c2cAvhLRAzqOGCYHYyhcDHsFF4wWIR5wKP38= github.com/bmatcuk/doublestar/v4 v4.8.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= @@ -120,11 +124,16 @@ github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df h1:4lTJXCZw16BF0BCzrQ1LUzlMW4+2OwBkkYj1/bRybhY= +github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df/go.mod h1:oL2JAtsIp/1vnVy4UG4iDzL8SZwkOzqvRL3YR9PGPjs= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E= +github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= @@ -141,6 +150,12 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80 h1:6Yzfa6GP0rIo/kULo2bwGEkFvCePZ3qHDDTC3/J9Swo= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mark3labs/mcp-go v0.32.0 h1:fgwmbfL2gbd67obg57OfV2Dnrhs1HtSdlY/i5fn7MU8= +github.com/mark3labs/mcp-go v0.32.0/go.mod h1:rXqOudj/djTORU/ThxYx8fqEVj/5pvTuuebQ2RC7uk4= +github.com/mark3labs/mcp-go v0.36.0 h1:rIZaijrRYPeSbJG8/qNDe0hWlGrCJ7FWHNMz2SQpTis= 
+github.com/mark3labs/mcp-go v0.36.0/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= @@ -184,6 +199,8 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= @@ -203,10 +220,14 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tmc/langchaingo v0.1.13 h1:rcpMWBIi2y3B90XxfE4Ao8dhCQPVDMaNPnN5cGB1CaA= github.com/tmc/langchaingo v0.1.13/go.mod h1:vpQ5NOIhpzxDfTZK9B6tf2GM/MoaHewPWM5KXXGh7hg= +github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= +github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc= github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA= +github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= +github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181 h1:K+bMSIx9A7mLES1rtG+qKduLIXq40DAzYHtb0XuCukA= gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181/go.mod h1:dzYhVIwWCtzPAa4QP98wfB9+mzt33MSmM8wsKiMi2ow= diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go index 1666f66fc23..2c301976d50 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go @@ -9,15 +9,13 @@ import ( "fmt" "github.com/tmc/langchaingo/agents" - "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/chains" "github.com/tmc/langchaingo/llms/openai" "github.com/tmc/langchaingo/memory" - "github.com/tmc/langchaingo/schema" "github.com/tmc/langchaingo/tools" - "azd.ai.start/internal/session" - mytools "azd.ai.start/internal/tools" + localtools "azd.ai.start/internal/tools" + mcptools "azd.ai.start/internal/tools/mcp" ) //go:embed prompts/default_agent_prefix.txt @@ -31,15 +29,10 @@ var _defaultAgentSuffix string // AzureAIAgent represents an enhanced Azure AI agent with action tracking, intent validation, and conversation memory type AzureAIAgent struct { - 
agent *agents.ConversationalAgent - executor *agents.Executor - memory schema.Memory // Maintains conversation history for context-aware responses - tools []tools.Tool - actionLogger callbacks.Handler - currentSession *session.ActionSession + executor *agents.Executor } -func NewAzureAIAgent(llm *openai.LLM) *AzureAIAgent { +func NewAzureAIAgent(llm *openai.LLM) (*AzureAIAgent, error) { smartMemory := memory.NewConversationBuffer( memory.WithInputKey("input"), memory.WithOutputKey("output"), @@ -47,67 +40,23 @@ func NewAzureAIAgent(llm *openai.LLM) *AzureAIAgent { memory.WithAIPrefix("AI"), ) - tools := []tools.Tool{ - // Directory operations - mytools.DirectoryListTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.CreateDirectoryTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.DeleteDirectoryTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.ChangeDirectoryTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.CurrentDirectoryTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - - // File operations - mytools.ReadFileTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.WriteFileTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.CopyFileTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.MoveFileTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.DeleteFileTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.FileInfoTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.FileSearchTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - - // Other tools - mytools.CommandExecutorTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.HTTPFetcherTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.CommandExecutorTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.WeatherTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - tools.Calculator{ - CallbacksHandler: llm.CallbacksHandler, - }, + toolLoaders := []localtools.ToolLoader{ + localtools.NewLocalToolsLoader(llm.CallbacksHandler), + mcptools.NewMcpToolsLoader(llm.CallbacksHandler), + } + + allTools := []tools.Tool{} + + for _, toolLoader := range toolLoaders { + categoryTools, err := toolLoader.LoadTools() + if err != nil { + return nil, err + } + allTools = append(allTools, categoryTools...) } // 4. 
Create agent with memory directly integrated - agent := agents.NewConversationalAgent(llm, tools, + agent := agents.NewConversationalAgent(llm, allTools, agents.WithPromptPrefix(_defaultAgentPrefix), agents.WithPromptSuffix(_defaultAgentSuffix), agents.WithPromptFormatInstructions(_defaultAgentFormatInstructions), @@ -125,22 +74,21 @@ func NewAzureAIAgent(llm *openai.LLM) *AzureAIAgent { ) return &AzureAIAgent{ - agent: agent, - executor: executor, - memory: smartMemory, - tools: tools, - actionLogger: llm.CallbacksHandler, - } + executor: executor, + }, nil } // ProcessQuery processes a user query with full action tracking and validation -func (aai *AzureAIAgent) ProcessQuery(ctx context.Context, userInput string) (string, error) { +func (aai *AzureAIAgent) ProcessQuery(ctx context.Context, userInput string) error { // Execute with enhanced input - agent should automatically handle memory - output, err := chains.Run(ctx, aai.executor, userInput) + _, err := chains.Run(ctx, aai.executor, userInput, + chains.WithMaxTokens(800), + chains.WithTemperature(0.3), + ) if err != nil { fmt.Printf("❌ Execution failed: %s\n", err.Error()) - return "", err + return err } - return output, nil + return nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt index 9c54b885700..1caac90782b 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt +++ b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt @@ -33,6 +33,6 @@ Observation: [result] ONLY when the task is completely finished and no more actions are needed: Thought: Do I need to use a tool? No -AI: [your response summarizing what was accomplished] +AI: [briefly summarize your response without all the details from your observations] Remember: Always continue taking actions until the user's request is fully satisfied. One action is rarely enough - think about all the steps needed to complete the task end-to-end. diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt index 19b7d52c669..0102a07432f 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt +++ b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt @@ -1,7 +1,10 @@ You are an Azure Developer CLI (AZD) agent. You are an expert is building, provisioning and deploying Azure applications. Always use Azure best patterns and practices. -If a tools exists that provides best practices and standards call this tool at the beginning of your workflow. + +Before starting your initial task, review the available tools. +If any tools exist for best practices, invoke them to learn more. +Incorporate learned best practices in your work. When any code generation is performed ALWAYS save content to files. When filenames are not explicitly specified generate new files with meaningful names.
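For context on the agent.go hunk above: the hard-coded tool list is replaced by ToolLoader implementations (the ToolLoader interface and the local/MCP loaders appear later in this patch under internal/tools). The following is a rough sketch only, not part of this patch, of how an additional loader could be appended to the toolLoaders slice in NewAzureAIAgent; GreeterTool and GreeterToolsLoader are hypothetical names used purely for illustration.

package example

import (
	"context"

	"github.com/tmc/langchaingo/callbacks"
	"github.com/tmc/langchaingo/tools"
)

// GreeterTool is a hypothetical tool showing the minimal shape langchaingo expects
// of a tool: Name, Description, and Call.
type GreeterTool struct {
	CallbacksHandler callbacks.Handler
}

func (t GreeterTool) Name() string        { return "greeter" }
func (t GreeterTool) Description() string { return "Returns a greeting. Input: a name." }

func (t GreeterTool) Call(ctx context.Context, input string) (string, error) {
	return "Hello, " + input, nil
}

// GreeterToolsLoader satisfies the ToolLoader interface introduced by this patch
// (LoadTools() ([]tools.Tool, error)), so a caller could append it to the
// toolLoaders slice in NewAzureAIAgent alongside the local and MCP loaders.
type GreeterToolsLoader struct {
	callbackHandler callbacks.Handler
}

func NewGreeterToolsLoader(handler callbacks.Handler) *GreeterToolsLoader {
	return &GreeterToolsLoader{callbackHandler: handler}
}

func (l *GreeterToolsLoader) LoadTools() ([]tools.Tool, error) {
	return []tools.Tool{
		&GreeterTool{CallbacksHandler: l.callbackHandler},
	}, nil
}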
diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go index 26ff289d29b..cef02cdc1c7 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go @@ -18,7 +18,10 @@ import ( // RunEnhancedAzureAgent runs the enhanced Azure AI agent with full capabilities func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) error { // Create the enhanced agent - azureAgent := agent.NewAzureAIAgent(llm) + azureAgent, err := agent.NewAzureAIAgent(llm) + if err != nil { + return err + } fmt.Println("🤖 Enhanced Azure AI Agent - Interactive Mode") fmt.Println("═══════════════════════════════════════════════════════════") @@ -56,17 +59,16 @@ func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) break } - fmt.Println("\n💬 Agent:") + fmt.Printf("\n-------------------------------------------\n") // Process the query with the enhanced agent - response, err := azureAgent.ProcessQuery(ctx, userInput) + err := azureAgent.ProcessQuery(ctx, userInput) if err != nil { fmt.Printf("❌ Error: %v\n", err) continue } - // Display the final response - fmt.Print(response) + fmt.Printf("\n-------------------------------------------\n") } if err := scanner.Err(); err != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go index b411224ca34..e9fb57a2cac 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go @@ -64,6 +64,12 @@ func runAIAgent(ctx context.Context, args []string) error { return fmt.Errorf("failed to unmarshal AI model configuration: %w", err) } + _, _ = azdClient.Prompt().Confirm(ctx, &azdext.ConfirmRequest{ + Options: &azdext.ConfirmOptions{ + Message: "Ready?", + }, + }) + // Common deployment names to try azureAPIVersion := "2024-02-15-preview" diff --git a/cli/azd/extensions/azd.ai.start/internal/session/action.go b/cli/azd/extensions/azd.ai.start/internal/session/action.go deleted file mode 100644 index 1111c2c4fed..00000000000 --- a/cli/azd/extensions/azd.ai.start/internal/session/action.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package session - -import ( - "time" -) - -// ActionLog represents a single action taken by the agent -type ActionLog struct { - Timestamp time.Time - Action string - Tool string - Input string - Output string - Success bool - Duration time.Duration -} - -// NewActionLog creates a new action log -func NewActionLog(tool, input string) *ActionLog { - return &ActionLog{ - Timestamp: time.Now(), - Tool: tool, - Action: tool, - Input: input, - } -} - -// SetOutput sets the output and success status for the action -func (al *ActionLog) SetOutput(output string, success bool) { - al.Output = output - al.Success = success - al.Duration = time.Since(al.Timestamp) -} - -// SetDuration sets the duration for the action -func (al *ActionLog) SetDuration(duration time.Duration) { - al.Duration = duration -} diff --git a/cli/azd/extensions/azd.ai.start/internal/session/session.go b/cli/azd/extensions/azd.ai.start/internal/session/session.go deleted file mode 100644 index 44f0156a912..00000000000 --- a/cli/azd/extensions/azd.ai.start/internal/session/session.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package session - -import ( - "time" -) - -// ActionSession tracks the current conversation session and actions -type ActionSession struct { - InitialIntent string - PlannedActions []string - ExecutedActions []ActionLog - ValidationResult interface{} // Use interface{} to avoid circular dependency - StartTime time.Time - EndTime time.Time -} - -// NewActionSession creates a new action session -func NewActionSession(initialIntent string) *ActionSession { - return &ActionSession{ - InitialIntent: initialIntent, - PlannedActions: []string{}, - ExecutedActions: []ActionLog{}, - StartTime: time.Now(), - } -} - -// Start marks the session as started -func (as *ActionSession) Start() { - as.StartTime = time.Now() -} - -// End marks the session as ended -func (as *ActionSession) End() { - as.EndTime = time.Now() -} - -// AddExecutedAction adds an executed action to the session -func (as *ActionSession) AddExecutedAction(action ActionLog) { - as.ExecutedActions = append(as.ExecutedActions, action) -} - -// SetValidationResult sets the validation result for the session -func (as *ActionSession) SetValidationResult(result interface{}) { - as.ValidationResult = result -} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/command_executor.go b/cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/command_executor.go rename to cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go index 9b9bd75585d..410db725505 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/command_executor.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go @@ -1,4 +1,4 @@ -package tools +package dev import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/dev/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/dev/loader.go new file mode 100644 index 00000000000..655d52a42a8 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/dev/loader.go @@ -0,0 +1,23 @@ +package dev + +import ( + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/tools" +) + +// DevToolLoader loads development-related tools +type DevToolsLoader struct { + callbacksHandler callbacks.Handler +} + +func NewDevToolsLoader(callbacksHandler callbacks.Handler) *DevToolsLoader { 
+ return &DevToolsLoader{ + callbacksHandler: callbacksHandler, + } +} + +func (l *DevToolsLoader) LoadTools() ([]tools.Tool, error) { + return []tools.Tool{ + &CommandExecutorTool{CallbacksHandler: l.callbacksHandler}, + }, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go b/cli/azd/extensions/azd.ai.start/internal/tools/http/http_fetcher.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go rename to cli/azd/extensions/azd.ai.start/internal/tools/http/http_fetcher.go index 2ce4324389c..cbd3628506b 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/http/http_fetcher.go @@ -1,4 +1,4 @@ -package tools +package http import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/http/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/http/loader.go new file mode 100644 index 00000000000..2233455e3e8 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/http/loader.go @@ -0,0 +1,23 @@ +package http + +import ( + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/tools" +) + +// HttpToolsLoader loads HTTP-related tools +type HttpToolsLoader struct { + callbackHandler callbacks.Handler +} + +func NewHttpToolsLoader(callbackHandler callbacks.Handler) *HttpToolsLoader { + return &HttpToolsLoader{ + callbackHandler: callbackHandler, + } +} + +func (l *HttpToolsLoader) LoadTools() ([]tools.Tool, error) { + return []tools.Tool{ + &HTTPFetcherTool{CallbacksHandler: l.callbackHandler}, + }, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/change_directory.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/change_directory.go index 8a05f2b3532..48094b919ff 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/change_directory.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go index 2bbdda06320..2db2eae1c1c 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/create_directory.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/create_directory.go index 3936b14a7a2..d100e7aa834 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/create_directory.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/current_directory.go similarity index 98% rename from 
cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/current_directory.go index d2c4152da29..59169eb24e9 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/current_directory.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_directory.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/delete_directory.go index b2eaf93bc30..72714f379b9 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_directory.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_file.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/delete_file.go index d088cee098e..b893bb1ee29 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_file.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go index 15914c92417..37458cf4531 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/file_info.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go index 4d82697ac46..000828aa827 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/file_search.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_search.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/file_search.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/file_search.go index bacb52bd714..8b1d8b2ab20 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/file_search.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_search.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go new file mode 100644 index 00000000000..6818542a308 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go @@ -0,0 +1,34 @@ +package io + +import ( + "github.com/tmc/langchaingo/callbacks" + 
"github.com/tmc/langchaingo/tools" +) + +// IoToolsLoader loads IO-related tools +type IoToolsLoader struct { + callbackHandler callbacks.Handler +} + +func NewIoToolsLoader(callbackHandler callbacks.Handler) *IoToolsLoader { + return &IoToolsLoader{ + callbackHandler: callbackHandler, + } +} + +func (l *IoToolsLoader) LoadTools() ([]tools.Tool, error) { + return []tools.Tool{ + &CurrentDirectoryTool{CallbacksHandler: l.callbackHandler}, + &ChangeDirectoryTool{CallbacksHandler: l.callbackHandler}, + &DirectoryListTool{CallbacksHandler: l.callbackHandler}, + &CreateDirectoryTool{CallbacksHandler: l.callbackHandler}, + &DeleteDirectoryTool{CallbacksHandler: l.callbackHandler}, + &ReadFileTool{CallbacksHandler: l.callbackHandler}, + &WriteFileTool{CallbacksHandler: l.callbackHandler}, + &CopyFileTool{CallbacksHandler: l.callbackHandler}, + &MoveFileTool{CallbacksHandler: l.callbackHandler}, + &DeleteFileTool{CallbacksHandler: l.callbackHandler}, + &FileInfoTool{CallbacksHandler: l.callbackHandler}, + &FileSearchTool{CallbacksHandler: l.callbackHandler}, + }, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/move_file.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/move_file.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/move_file.go index 6d5bbe3171b..68db771d144 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/move_file.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/read_file.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/read_file.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/read_file.go index 375e5b11378..5983316aae9 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/read_file.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/write_file.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go index fa4533b04b4..757111b4127 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/loader.go new file mode 100644 index 00000000000..ae2da0253c1 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/loader.go @@ -0,0 +1,44 @@ +package tools + +import ( + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/tools" + + "azd.ai.start/internal/tools/dev" + "azd.ai.start/internal/tools/io" +) + +// ToolLoader provides an interface for loading tools from different categories +type ToolLoader interface { + LoadTools() ([]tools.Tool, error) +} + +type LocalToolsLoader struct { + loaders []ToolLoader + callbackHandler callbacks.Handler +} + +func NewLocalToolsLoader(callbackHandler callbacks.Handler) *LocalToolsLoader { + return &LocalToolsLoader{ + loaders: []ToolLoader{ + 
dev.NewDevToolsLoader(callbackHandler), + io.NewIoToolsLoader(callbackHandler), + }, + callbackHandler: callbackHandler, + } +} + +// LoadLocalTools loads all tools from all categories with the provided callback handler +func (l *LocalToolsLoader) LoadTools() ([]tools.Tool, error) { + var allTools []tools.Tool + + for _, loader := range l.loaders { + categoryTools, err := loader.LoadTools() + if err != nil { + return nil, err + } + allTools = append(allTools, categoryTools...) + } + + return allTools, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go new file mode 100644 index 00000000000..5aee8932b06 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go @@ -0,0 +1,75 @@ +package mcp + +import ( + "encoding/json" + "fmt" + + _ "embed" + + langchaingo_mcp_adapter "github.com/i2y/langchaingo-mcp-adapter" + "github.com/mark3labs/mcp-go/client" + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/tools" +) + +//go:embed mcp.json +var _mcpJson string + +// McpConfig represents the overall MCP configuration structure +type McpConfig struct { + Servers map[string]ServerConfig `json:"servers"` +} + +// ServerConfig represents an individual server configuration +type ServerConfig struct { + Type string `json:"type"` + Command string `json:"command"` + Args []string `json:"args,omitempty"` + Env []string `json:"env,omitempty"` +} + +type McpToolsLoader struct { + callbackHandler callbacks.Handler +} + +func NewMcpToolsLoader(callbackHandler callbacks.Handler) *McpToolsLoader { + return &McpToolsLoader{ + callbackHandler: callbackHandler, + } +} + +func (l *McpToolsLoader) LoadTools() ([]tools.Tool, error) { + // Deserialize the embedded mcp.json configuration + var config McpConfig + if err := json.Unmarshal([]byte(_mcpJson), &config); err != nil { + return nil, fmt.Errorf("failed to parse mcp.json: %w", err) + } + + var allTools []tools.Tool + + // Iterate through each server configuration + for serverName, serverConfig := range config.Servers { + // Create MCP client for the server using stdio + mcpClient, err := client.NewStdioMCPClient(serverConfig.Command, serverConfig.Env, serverConfig.Args...) + if err != nil { + return nil, fmt.Errorf("failed to create MCP client for server %s: %w", serverName, err) + } + + // Create the adapter + adapter, err := langchaingo_mcp_adapter.New(mcpClient) + if err != nil { + return nil, fmt.Errorf("failed to create adapter for server %s: %w", serverName, err) + } + + // Get all tools from MCP server + mcpTools, err := adapter.Tools() + if err != nil { + return nil, fmt.Errorf("failed to get tools from server %s: %w", serverName, err) + } + + // Add the tools to our collection + allTools = append(allTools, mcpTools...) 
+ } + + return allTools, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json new file mode 100644 index 00000000000..efca4416be8 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json @@ -0,0 +1,9 @@ +{ + "servers": { + "Azure": { + "type": "stdio", + "command": "azmcp", + "args": ["server", "start"] + } + } +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/weather/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/weather/loader.go new file mode 100644 index 00000000000..ce283e18fb3 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/weather/loader.go @@ -0,0 +1,23 @@ +package weather + +import ( + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/tools" +) + +// WeatherToolsLoader loads weather-related tools +type WeatherToolsLoader struct { + callbackHandler callbacks.Handler +} + +func NewWeatherToolsLoader(callbackHandler callbacks.Handler) *WeatherToolsLoader { + return &WeatherToolsLoader{ + callbackHandler: callbackHandler, + } +} + +func (l *WeatherToolsLoader) LoadTools() ([]tools.Tool, error) { + return []tools.Tool{ + &WeatherTool{CallbacksHandler: l.callbackHandler}, + }, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/weather.go b/cli/azd/extensions/azd.ai.start/internal/tools/weather/weather.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/weather.go rename to cli/azd/extensions/azd.ai.start/internal/tools/weather/weather.go index 1f0d8404142..0f8837c5124 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/weather.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/weather/weather.go @@ -1,4 +1,4 @@ -package tools +package weather import ( "context" From 08d9e326bf1eef0acce8cb2fd113c722dd73b17d Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Wed, 30 Jul 2025 13:31:14 -0700 Subject: [PATCH 021/116] UX updates, tool JSON payloads --- .../azd.ai.start/internal/agent/agent.go | 2 - .../default_agent_format_instructions.txt | 10 +- .../internal/cmd/enhanced_integration.go | 17 +- .../azd.ai.start/internal/cmd/root.go | 26 ++- .../azd.ai.start/internal/logging/logger.go | 121 ++++++----- .../internal/tools/io/directory_list.go | 191 ++++++++++++++---- .../internal/tools/io/file_info.go | 41 +++- .../internal/tools/io/file_search.go | 59 +++--- .../internal/tools/io/read_file.go | 161 +++++++++++++-- .../internal/tools/io/write_file.go | 130 +++++++++--- .../azd.ai.start/internal/tools/mcp/mcp.json | 14 +- 11 files changed, 572 insertions(+), 200 deletions(-) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go index 2c301976d50..a5822a677c2 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go @@ -6,7 +6,6 @@ package agent import ( "context" _ "embed" - "fmt" "github.com/tmc/langchaingo/agents" "github.com/tmc/langchaingo/chains" @@ -86,7 +85,6 @@ func (aai *AzureAIAgent) ProcessQuery(ctx context.Context, userInput string) err chains.WithTemperature(0.3), ) if err != nil { - fmt.Printf("❌ Execution failed: %s\n", err.Error()) return err } diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt index 1caac90782b..f7db6dd7e21 100644 --- 
a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt +++ b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt @@ -16,6 +16,7 @@ After each Observation, you MUST continue the cycle: Thought: [Evaluate the result and determine if the task is complete or if more actions are needed] If the task is NOT complete: + Thought: Do I need to use a tool? Yes Action: [next action to take] Action Input: [input for the next action] @@ -23,6 +24,7 @@ Observation: [result of the next action] ... (continue this cycle until the task is fully complete) If there are errors: + Thought: [Analyze the error and determine how to fix it] Thought: Do I need to use a tool? Yes Action: [corrective action] @@ -30,9 +32,9 @@ Action Input: [corrected input] Observation: [result] ... (retry up to 3 times with different approaches if needed) -ONLY when the task is completely finished and no more actions are needed: +Remember: Always continue taking actions until the user's request is fully satisfied. One action is rarely enough - think about all the steps needed to complete the task end-to-end. + +When you are done answering the questions and performing all your tasks you MUST use the following format: Thought: Do I need to use a tool? No -AI: [briefly summarize your response without all the details from your observations] - -Remember: Always continue taking actions until the user's request is fully satisfied. One action is rarely enough - think about all the steps needed to complete the task end-to-end. +AI: [briefly summarize your response without all the details from your observations] \ No newline at end of file diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go index cef02cdc1c7..dad0371268a 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go @@ -10,6 +10,7 @@ import ( "os" "strings" + "github.com/fatih/color" "github.com/tmc/langchaingo/llms/openai" "azd.ai.start/internal/agent" @@ -40,13 +41,16 @@ func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) if initialQuery != "" { userInput = initialQuery initialQuery = "" // Clear after first use - fmt.Printf("💬 You: %s\n", userInput) + color.Cyan("💬 You: %s\n", userInput) } else { - fmt.Print("\n💬 You: ") + fmt.Print(color.CyanString("\n💬 You: ")) + color.Set(color.FgCyan) // Set blue color for user input if !scanner.Scan() { - break // EOF or error + color.Unset() // Reset color + break // EOF or error } userInput = strings.TrimSpace(scanner.Text()) + color.Unset() // Reset color after input } // Check for exit commands @@ -59,16 +63,11 @@ func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) break } - fmt.Printf("\n-------------------------------------------\n") - // Process the query with the enhanced agent err := azureAgent.ProcessQuery(ctx, userInput) if err != nil { - fmt.Printf("❌ Error: %v\n", err) - continue + return err } - - fmt.Printf("\n-------------------------------------------\n") } if err := scanner.Err(); err != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go index e9fb57a2cac..d57df89a2ea 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go @@ -7,6 +7,7 @@ import ( 
"context" "encoding/json" "fmt" + "os" "azd.ai.start/internal/logging" "github.com/azure/azure-dev/cli/azd/pkg/azdext" @@ -15,6 +16,8 @@ import ( ) func NewRootCommand() *cobra.Command { + var debug bool + rootCmd := &cobra.Command{ Use: "azd ai.chat [options]", Short: "Enables interactive AI agent through AZD", @@ -24,10 +27,12 @@ func NewRootCommand() *cobra.Command { DisableDefaultCmd: true, }, RunE: func(cmd *cobra.Command, args []string) error { - return runAIAgent(cmd.Context(), args) + return runAIAgent(cmd.Context(), args, debug) }, } + rootCmd.Flags().BoolVar(&debug, "debug", false, "Enable debug logging") + return rootCmd } @@ -38,7 +43,7 @@ type AiModelConfig struct { } // runAIAgent creates and runs the enhanced AI agent using LangChain Go -func runAIAgent(ctx context.Context, args []string) error { +func runAIAgent(ctx context.Context, args []string, debug bool) error { // Create a new context that includes the AZD access token ctx = azdext.WithAccessToken(ctx) @@ -64,11 +69,16 @@ func runAIAgent(ctx context.Context, args []string) error { return fmt.Errorf("failed to unmarshal AI model configuration: %w", err) } - _, _ = azdClient.Prompt().Confirm(ctx, &azdext.ConfirmRequest{ - Options: &azdext.ConfirmOptions{ - Message: "Ready?", - }, - }) + if debug { + defaultValue := true + + _, _ = azdClient.Prompt().Confirm(ctx, &azdext.ConfirmRequest{ + Options: &azdext.ConfirmOptions{ + Message: fmt.Sprintf("Ready? (PID: %d - You can attach a debugger now)", os.Getpid()), + DefaultValue: &defaultValue, + }, + }) + } // Common deployment names to try azureAPIVersion := "2024-02-15-preview" @@ -81,7 +91,7 @@ func runAIAgent(ctx context.Context, args []string) error { fmt.Printf("🔵 Trying Azure OpenAI with deployment: %s\n", aiConfig.DeploymentName) actionLogger := logging.NewActionLogger( - logging.WithDebug(false), + logging.WithDebug(debug), ) llm, err = openai.New( diff --git a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go index 1877195e401..d148b9273b2 100644 --- a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go +++ b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go @@ -5,8 +5,11 @@ package logging import ( "context" - "fmt" + "encoding/json" + "regexp" + "strings" + "github.com/fatih/color" "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/schema" @@ -43,122 +46,132 @@ func NewActionLogger(opts ...ActionLoggerOption) *ActionLogger { // HandleText is called when text is processed func (al *ActionLogger) HandleText(ctx context.Context, text string) { - if al.debugEnabled { - fmt.Printf("📝 Text (full): %s\n", text) - } } // HandleLLMGenerateContentStart is called when LLM content generation starts func (al *ActionLogger) HandleLLMGenerateContentStart(ctx context.Context, ms []llms.MessageContent) { - if al.debugEnabled { - for i, msg := range ms { - fmt.Printf("🤖 Debug - Message %d: %+v\n", i, msg) - } - } } // HandleLLMGenerateContentEnd is called when LLM content generation ends func (al *ActionLogger) HandleLLMGenerateContentEnd(ctx context.Context, res *llms.ContentResponse) { - if al.debugEnabled && res != nil { - fmt.Printf("🤖 Debug - Response: %+v\n", res) + // Parse and print thoughts as "THOUGHT: " from content + // IF thought contains: "Do I need to use a tool?", omit this thought. 
+ + for _, choice := range res.Choices { + content := choice.Content + + if al.debugEnabled { + color.HiBlack("\nHandleLLMGenerateContentEnd\n%s\n", content) + } + + // Find all "Thought:" patterns and extract the content that follows + thoughtRegex := regexp.MustCompile(`(?i)thought:\s*(.*)`) + matches := thoughtRegex.FindAllStringSubmatch(content, -1) + + for _, match := range matches { + if len(match) > 1 { + thought := strings.TrimSpace(match[1]) + if thought != "" { + // Skip thoughts that contain "Do I need to use a tool?" + if !strings.Contains(strings.ToLower(thought), "do i need to use a tool?") { + color.White("\n🤖 Agent: %s\n", thought) + } + } + } + } } } // HandleRetrieverStart is called when retrieval starts func (al *ActionLogger) HandleRetrieverStart(ctx context.Context, query string) { - if al.debugEnabled { - fmt.Printf("🔍 Retrieval starting for query (full): %s\n", query) - } } // HandleRetrieverEnd is called when retrieval ends func (al *ActionLogger) HandleRetrieverEnd(ctx context.Context, query string, documents []schema.Document) { - fmt.Printf("🔍 Retrieval completed: found %d documents\n", len(documents)) - if al.debugEnabled { - fmt.Printf("🔍 Debug - Query (full): %s\n", query) - for i, doc := range documents { - fmt.Printf("🔍 Debug - Document %d: %+v\n", i, doc) - } - } } // HandleToolStart is called when a tool execution starts func (al *ActionLogger) HandleToolStart(ctx context.Context, input string) { - if al.debugEnabled { - fmt.Printf("🔧 Executing Tool: %s\n", input) - } } // HandleToolEnd is called when a tool execution ends func (al *ActionLogger) HandleToolEnd(ctx context.Context, output string) { - if al.debugEnabled { - fmt.Printf("✅ Tool Result (full): %s\n", output) - } } // HandleToolError is called when a tool execution fails func (al *ActionLogger) HandleToolError(ctx context.Context, err error) { - fmt.Printf("❌ Tool Error: %s\n", err.Error()) + color.Red("\nTool Error: %s\n", err.Error()) } // HandleLLMStart is called when LLM call starts func (al *ActionLogger) HandleLLMStart(ctx context.Context, prompts []string) { - for i, prompt := range prompts { - if al.debugEnabled { - fmt.Printf("🤖 Prompt %d (full): %s\n", i, prompt) - } - } } // HandleChainStart is called when chain execution starts func (al *ActionLogger) HandleChainStart(ctx context.Context, inputs map[string]any) { - for key, value := range inputs { - if al.debugEnabled { - fmt.Printf("🔗 Input [%s]: %v\n", key, value) - } - } } // HandleChainEnd is called when chain execution ends func (al *ActionLogger) HandleChainEnd(ctx context.Context, outputs map[string]any) { - for key, value := range outputs { - if al.debugEnabled { - fmt.Printf("🔗 Output [%s]: %v\n", key, value) - } - } } // HandleChainError is called when chain execution fails func (al *ActionLogger) HandleChainError(ctx context.Context, err error) { - fmt.Printf("🔗 Chain execution failed: %s\n", err.Error()) + color.Red("\n%s\n", err.Error()) } // HandleAgentAction is called when an agent action is planned func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.AgentAction) { - fmt.Printf("%s\n\n", action.Log) - + // Print "Calling " + // Inspect action.ToolInput. Attempt to parse input as JSON + // If is valid JSON and contains a param 'filename' then print filename. 
+ // example: "Calling read_file " if al.debugEnabled { - fmt.Printf("🎯 Agent planned action (debug): %+v\n", action) + color.HiBlack("\nHandleAgentAction\n%s\n", action.Log) + } + + var toolInput map[string]interface{} + if err := json.Unmarshal([]byte(action.ToolInput), &toolInput); err == nil { + // Successfully parsed JSON, check for filename parameter + if filename, ok := toolInput["filename"]; ok { + if filenameStr, ok := filename.(string); ok { + color.Green("\n🤖 Agent: Calling %s %s\n", action.Tool, filenameStr) + return + } + } + // JSON parsed but no filename found, use fallback format + color.Green("\n🤖 Agent: Calling %s tool\n", action.Tool) + } else { + // JSON parsing failed, show the input as text + color.Green("\n🤖 Agent: Calling %s tool with %s\n", action.Tool, action.ToolInput) } } // HandleAgentFinish is called when the agent finishes func (al *ActionLogger) HandleAgentFinish(ctx context.Context, finish schema.AgentFinish) { - fmt.Printf("%s\n\n", finish.Log) - + // Find summary from format "AI: " + // Print: if al.debugEnabled { - fmt.Printf("🏁 Agent finished (debug): %+v\n", finish) + color.HiBlack("\nHandleAgentFinish\n%s\n", finish.Log) + } + + // Use regex to find AI summary, capturing everything after "AI:" (including multi-line) + // The (?s) flag makes . match newlines, (.+) captures everything after "AI:" + aiRegex := regexp.MustCompile(`(?is)AI:\s*(.+)`) + matches := aiRegex.FindStringSubmatch(finish.Log) + + if len(matches) > 1 { + summary := strings.TrimSpace(matches[1]) + color.White("\n🤖 Agent: %s\n", summary) } + // If "AI:" not found, don't print anything } // HandleLLMError is called when LLM call fails func (al *ActionLogger) HandleLLMError(ctx context.Context, err error) { - fmt.Printf("🤖 LLM error: %s\n", err.Error()) + color.Red("\nLLM Error: %s\n", err.Error()) } // HandleStreamingFunc handles streaming responses func (al *ActionLogger) HandleStreamingFunc(ctx context.Context, chunk []byte) { - // if len(chunk) > 0 { - // fmt.Print(string(chunk)) - // } } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go index 37458cf4531..f02e8e8df5e 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go @@ -2,6 +2,7 @@ package io import ( "context" + "encoding/json" "fmt" "os" "path/filepath" @@ -15,102 +16,204 @@ type DirectoryListTool struct { CallbacksHandler callbacks.Handler } +// ErrorResponse represents a JSON error response structure that can be reused across all tools +type ErrorResponse struct { + Error bool `json:"error"` + Message string `json:"message"` +} + func (t DirectoryListTool) Name() string { return "list_directory" } func (t DirectoryListTool) Description() string { - return "List files and folders in a directory. Input: directory path (use '.' for current directory)" + return `List files and folders in a directory. +Input: JSON object with required 'path' field: {"path": ".", "includeHidden": false} +Returns: JSON with directory contents including file names, types, and sizes. +The input must be formatted as a single line valid JSON string.` } func (t DirectoryListTool) Call(ctx context.Context, input string) (string, error) { - path := strings.TrimSpace(input) - if path == "" { - path = "." 
+ // Parse JSON input + type InputParams struct { + Path string `json:"path"` + IncludeHidden bool `json:"includeHidden,omitempty"` } - // Get absolute path for clarity - absPath, err := filepath.Abs(path) - if err != nil { + var params InputParams + + // Clean the input first + cleanInput := strings.TrimSpace(input) + + // Parse as JSON - this is now required + if err := json.Unmarshal([]byte(cleanInput), ¶ms); err != nil { + errorResponse := ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"path\": \".\", \"include_hidden\": false}", err.Error()), + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to get absolute path for %s: %w", path, err)) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to parse JSON input: %w", err)) } - return "", fmt.Errorf("failed to get absolute path for %s: %w", path, err) + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } - // Invoke callback for tool start + // Validate required path field + if params.Path == "" { + params.Path = "." + } + + path := strings.TrimSpace(params.Path) + + // Add debug logging if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("Reading directory %s\n", input)) + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("Processing JSON input: path='%s', include_hidden=%v", path, params.IncludeHidden)) + } + + // Get absolute path for clarity - handle "." explicitly to avoid potential issues + var absPath string + var err error + + if path == "." { + // Explicitly get current working directory instead of relying on filepath.Abs(".") + absPath, err = os.Getwd() + if err != nil { + errorResponse := ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Failed to get current working directory: %s", err.Error()), + } + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to get current working directory: %w", err)) + } + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil + } + } else { + absPath, err = filepath.Abs(path) + if err != nil { + errorResponse := ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Failed to get absolute path for %s: %s", path, err.Error()), + } + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to get absolute path for %s: %w", path, err)) + } + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil + } + } + + // Invoke callback for tool execution start + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("Reading directory %s (absolute: %s)", path, absPath)) } // Check if directory exists + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("Checking if directory exists: '%s'", absPath)) + } + info, err := os.Stat(absPath) if err != nil { + var message string + if os.IsNotExist(err) { + message = fmt.Sprintf("Directory does not exist: %s", absPath) + } else { + message = fmt.Sprintf("Failed to access %s: %s (original input: '%s', cleaned path: '%s')", absPath, err.Error(), input, path) + } + + errorResponse := ErrorResponse{ + Error: true, + Message: message, + } if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to access %s: %w", absPath, err)) } - return "", fmt.Errorf("failed to access %s: %w", absPath, err) + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + 
return string(jsonData), nil } + if !info.IsDir() { + errorResponse := ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Path is not a directory: %s", absPath), + } if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("%s is not a directory", absPath)) } - return "", fmt.Errorf("%s is not a directory", absPath) + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } // List directory contents files, err := os.ReadDir(absPath) if err != nil { + errorResponse := ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Failed to read directory %s: %s", absPath, err.Error()), + } if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to read directory %s: %w", absPath, err)) } - return "", fmt.Errorf("failed to read directory %s: %w", absPath, err) + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } - var result strings.Builder - result.WriteString(fmt.Sprintf("Contents of %s:\n", absPath)) - result.WriteString(fmt.Sprintf("Total items: %d\n\n", len(files))) + // Prepare JSON response structure + type FileInfo struct { + Name string `json:"name"` + Type string `json:"type"` + Size int64 `json:"size,omitempty"` + IsDir bool `json:"isDirectory"` + } - // Separate directories and files - var dirs []string - var regularFiles []string + type DirectoryResponse struct { + Path string `json:"path"` + TotalItems int `json:"totalItems"` + Items []FileInfo `json:"items"` + } + + var items []FileInfo for _, file := range files { + fileInfo := FileInfo{ + Name: file.Name(), + IsDir: file.IsDir(), + } + if file.IsDir() { - dirs = append(dirs, file.Name()+"/") + fileInfo.Type = "directory" } else { - info, err := file.Info() - if err != nil { - regularFiles = append(regularFiles, file.Name()) - } else { - regularFiles = append(regularFiles, fmt.Sprintf("%s (%d bytes)", file.Name(), info.Size())) + fileInfo.Type = "file" + if info, err := file.Info(); err == nil { + fileInfo.Size = info.Size() } } - } - // Display directories first - if len(dirs) > 0 { - result.WriteString("Directories:\n") - for _, dir := range dirs { - result.WriteString(fmt.Sprintf(" 📁 %s\n", dir)) - } - result.WriteString("\n") + items = append(items, fileInfo) } - // Then display files - if len(regularFiles) > 0 { - result.WriteString("Files:\n") - for _, file := range regularFiles { - result.WriteString(fmt.Sprintf(" 📄 %s\n", file)) - } + response := DirectoryResponse{ + Path: absPath, + TotalItems: len(files), + Items: items, } - if len(dirs) == 0 && len(regularFiles) == 0 { - result.WriteString("Directory is empty.\n") + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + errorResponse := ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Failed to marshal JSON response: %s", err.Error()), + } + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to marshal JSON response: %w", err)) + } + errorJsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(errorJsonData), nil } - result.WriteString("\n") - output := result.String() + output := string(jsonData) // Invoke callback for tool end if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go index 000828aa827..98528d50b91 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go +++ 
b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go @@ -2,6 +2,7 @@ package io import ( "context" + "encoding/json" "fmt" "os" "time" @@ -19,7 +20,7 @@ func (t FileInfoTool) Name() string { } func (t FileInfoTool) Description() string { - return "Get information about a file (size, modification time, permissions). Input: file path (e.g., 'data.txt' or './docs/readme.md')" + return "Get information about a file (size, modification time, permissions). Input: file path (e.g., 'data.txt' or './docs/readme.md'). Returns JSON with file information." } func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { @@ -44,15 +45,45 @@ func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { return "", toolErr } + // Prepare JSON response structure + type FileInfoResponse struct { + Path string `json:"path"` + Name string `json:"name"` + Type string `json:"type"` + IsDirectory bool `json:"isDirectory"` + Size int64 `json:"size"` + ModifiedTime time.Time `json:"modifiedTime"` + Permissions string `json:"permissions"` + } + var fileType string if info.IsDir() { - fileType = "Directory" + fileType = "directory" } else { - fileType = "File" + fileType = "file" + } + + response := FileInfoResponse{ + Path: input, + Name: info.Name(), + Type: fileType, + IsDirectory: info.IsDir(), + Size: info.Size(), + ModifiedTime: info.ModTime(), + Permissions: info.Mode().String(), + } + + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } - output := fmt.Sprintf("%s: %s\nSize: %d bytes\nModified: %s\nPermissions: %s\n\n", - fileType, input, info.Size(), info.ModTime().Format(time.RFC3339), info.Mode().String()) + output := string(jsonData) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/file_search.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_search.go index 8b1d8b2ab20..dd7a7a0de9c 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/file_search.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_search.go @@ -7,7 +7,6 @@ import ( "os" "path/filepath" "sort" - "strings" "github.com/bmatcuk/doublestar/v4" "github.com/tmc/langchaingo/callbacks" @@ -37,6 +36,8 @@ Input: JSON payload with the following structure: "maxResults": 50 // optional: max files to return (default: 100) } +Returns JSON with search results and metadata. 
+ SUPPORTED GLOB PATTERNS (using github.com/bmatcuk/doublestar/v4): - *.go - all Go files in current directory only - **/*.js - all JavaScript files in current directory and all subdirectories @@ -132,8 +133,15 @@ func (t FileSearchTool) Call(ctx context.Context, input string) (string, error) return "", err } - // Format output - output := t.formatResults(searchPath, req.Pattern, matches, req.MaxResults) + // Format output as JSON + output, err := t.formatResults(searchPath, req.Pattern, matches, req.MaxResults) + if err != nil { + toolErr := fmt.Errorf("failed to format results: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) @@ -185,33 +193,32 @@ func (t FileSearchTool) searchFiles(searchPath, pattern string, maxResults int) return matches, nil } -// formatResults formats the search results into a readable output -func (t FileSearchTool) formatResults(searchPath, pattern string, matches []string, maxResults int) string { - var output strings.Builder - - output.WriteString("File search results:\n") - output.WriteString(fmt.Sprintf("Current directory: %s\n", searchPath)) - output.WriteString(fmt.Sprintf("Pattern: %s\n", pattern)) - output.WriteString(fmt.Sprintf("Found %d file(s)", len(matches))) - - if len(matches) >= maxResults { - output.WriteString(fmt.Sprintf(" (limited to %d results)", maxResults)) +// formatResults formats the search results into a JSON response +func (t FileSearchTool) formatResults(searchPath, pattern string, matches []string, maxResults int) (string, error) { + // Prepare JSON response structure + type FileSearchResponse struct { + CurrentDirectory string `json:"currentDirectory"` + Pattern string `json:"pattern"` + TotalFound int `json:"totalFound"` + MaxResults int `json:"maxResults"` + ResultsLimited bool `json:"resultsLimited"` + Matches []string `json:"matches"` } - output.WriteString("\n\n") - if len(matches) == 0 { - output.WriteString("No files found matching the pattern.\n") - return output.String() + response := FileSearchResponse{ + CurrentDirectory: searchPath, + Pattern: pattern, + TotalFound: len(matches), + MaxResults: maxResults, + ResultsLimited: len(matches) >= maxResults, + Matches: matches, } - output.WriteString("Matching files:\n") - for i, match := range matches { - output.WriteString(fmt.Sprintf("%3d. %s\n", i+1, match)) - } - - if len(matches) >= maxResults { - output.WriteString(fmt.Sprintf("\n⚠️ Results limited to %d files. 
Use maxResults parameter to adjust limit.\n", maxResults)) + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + return "", fmt.Errorf("failed to marshal JSON response: %w", err) } - return output.String() + return string(jsonData), nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/read_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/read_file.go index 5983316aae9..0890e127e76 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/read_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/read_file.go @@ -6,6 +6,7 @@ import ( "fmt" "os" "strings" + "time" "github.com/tmc/langchaingo/callbacks" ) @@ -22,12 +23,39 @@ type ReadFileRequest struct { EndLine int `json:"endLine,omitempty"` // Optional: 1-based line number to end reading at } +// ReadFileResponse represents the JSON output for the read_file tool +type ReadFileResponse struct { + Success bool `json:"success"` + FilePath string `json:"filePath"` + Content string `json:"content"` + IsTruncated bool `json:"isTruncated"` + IsPartial bool `json:"isPartial"` + LineRange *LineRange `json:"lineRange,omitempty"` + FileInfo ReadFileInfo `json:"fileInfo"` + Message string `json:"message,omitempty"` +} + +// LineRange represents the range of lines read +type LineRange struct { + StartLine int `json:"startLine"` + EndLine int `json:"endLine"` + TotalLines int `json:"totalLines"` + LinesRead int `json:"linesRead"` +} + +// ReadFileInfo represents file metadata for read operations +type ReadFileInfo struct { + Size int64 `json:"size"` + ModifiedTime time.Time `json:"modifiedTime"` + Permissions string `json:"permissions"` +} + func (t ReadFileTool) Name() string { return "read_file" } func (t ReadFileTool) Description() string { - return `Read file contents with intelligent handling for different file sizes and partial reads. + return `Read file contents with intelligent handling for different file sizes and partial reads. Returns JSON response with file content and metadata. Input: JSON payload with the following structure: { @@ -127,14 +155,30 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { // Handle very large files differently (unless specific line range requested) if fileSize > 1024*1024 && req.StartLine == 0 && req.EndLine == 0 { // 1MB+ - output := fmt.Sprintf("File: %s is very large (%d bytes / %.2f MB)\n", - req.FilePath, fileSize, float64(fileSize)/(1024*1024)) - output += "⚠️ File too large to read completely. Use startLine and endLine parameters for specific sections.\n" - output += "Examples:\n" - output += fmt.Sprintf(`- {"filePath": "%s", "startLine": 1, "endLine": 50} - first 50 lines`+"\n", req.FilePath) - output += fmt.Sprintf(`- {"filePath": "%s", "startLine": 100, "endLine": 200} - lines 100 to 200`+"\n", req.FilePath) - output += fmt.Sprintf(`- {"filePath": "%s", "endLine": 100} - first 100 lines`+"\n", req.FilePath) + response := ReadFileResponse{ + Success: false, + FilePath: req.FilePath, + Content: "", + IsTruncated: false, + IsPartial: false, + FileInfo: ReadFileInfo{ + Size: fileSize, + ModifiedTime: fileInfo.ModTime(), + Permissions: fileInfo.Mode().String(), + }, + Message: fmt.Sprintf("File is very large (%.2f MB). 
Use startLine and endLine parameters for specific sections.", float64(fileSize)/(1024*1024)), + } + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + output := string(jsonData) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) } @@ -162,10 +206,13 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { // Handle partial reads based on line range if req.StartLine > 0 || req.EndLine > 0 { - return t.handlePartialRead(ctx, req.FilePath, lines, req.StartLine, req.EndLine, totalLines) + return t.handlePartialRead(ctx, req.FilePath, lines, req.StartLine, req.EndLine, totalLines, fileInfo) } - var output string + var finalContent string + var isTruncated bool + var message string + // Improved truncation with better limits for full file reads if len(content) > 10000 { // 10KB limit // Show first 50 lines and last 10 lines @@ -174,13 +221,39 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { preview += fmt.Sprintf("\n\n... [%d lines omitted] ...\n\n", totalLines-60) preview += strings.Join(lines[totalLines-10:], "\n") } - - output = fmt.Sprintf("File: %s (%d bytes, %d lines - showing first 50 and last 10 lines)\n%s\n\n[Use startLine/endLine parameters for specific sections]\n", - req.FilePath, len(content), totalLines, preview) + finalContent = preview + isTruncated = true + message = "Large file truncated - showing first 50 and last 10 lines" } else { - output = fmt.Sprintf("File: %s (%d bytes, %d lines)\n%s\n\n", req.FilePath, len(content), totalLines, string(content)) + finalContent = string(content) + isTruncated = false + message = "File read successfully" + } + + response := ReadFileResponse{ + Success: true, + FilePath: req.FilePath, + Content: finalContent, + IsTruncated: isTruncated, + IsPartial: false, + FileInfo: ReadFileInfo{ + Size: fileSize, + ModifiedTime: fileInfo.ModTime(), + Permissions: fileInfo.Mode().String(), + }, + Message: message, + } + + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } + output := string(jsonData) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) } @@ -189,7 +262,7 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { } // handlePartialRead handles reading specific line ranges from a file -func (t ReadFileTool) handlePartialRead(ctx context.Context, filePath string, lines []string, startLine, endLine, totalLines int) (string, error) { +func (t ReadFileTool) handlePartialRead(ctx context.Context, filePath string, lines []string, startLine, endLine, totalLines int, fileInfo os.FileInfo) (string, error) { // Validate and adjust line numbers (1-based to 0-based) if startLine == 0 { startLine = 1 // Default to start of file @@ -206,10 +279,30 @@ func (t ReadFileTool) handlePartialRead(ctx context.Context, filePath string, li endLine = totalLines } if startLine > endLine { - output := fmt.Sprintf("❌ Invalid line range: start line (%d) cannot be greater than end line (%d)\n\n", startLine, endLine) - output += "💡 Example of correct usage:\n" - output += fmt.Sprintf(`{"filePath": "%s", "startLine": 1, "endLine": 50}`, 
filePath) + response := ReadFileResponse{ + Success: false, + FilePath: filePath, + Content: "", + IsTruncated: false, + IsPartial: false, + FileInfo: ReadFileInfo{ + Size: fileInfo.Size(), + ModifiedTime: fileInfo.ModTime(), + Permissions: fileInfo.Mode().String(), + }, + Message: fmt.Sprintf("Invalid line range: start line (%d) cannot be greater than end line (%d)", startLine, endLine), + } + + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + output := string(jsonData) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("invalid line range: start %d > end %d", startLine, endLine)) t.CallbacksHandler.HandleToolEnd(ctx, output) @@ -226,9 +319,37 @@ func (t ReadFileTool) handlePartialRead(ctx context.Context, filePath string, li content := strings.Join(selectedLines, "\n") linesRead := endLine - startLine + 1 - output := fmt.Sprintf("File: %s (lines %d-%d of %d total lines, %d lines read)\n%s\n\n", - filePath, startLine, endLine, totalLines, linesRead, content) + response := ReadFileResponse{ + Success: true, + FilePath: filePath, + Content: content, + IsTruncated: false, + IsPartial: true, + LineRange: &LineRange{ + StartLine: startLine, + EndLine: endLine, + TotalLines: totalLines, + LinesRead: linesRead, + }, + FileInfo: ReadFileInfo{ + Size: fileInfo.Size(), + ModifiedTime: fileInfo.ModTime(), + Permissions: fileInfo.Mode().String(), + }, + Message: fmt.Sprintf("Successfully read %d lines (%d-%d) from file", linesRead, startLine, endLine), + } + + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + output := string(jsonData) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go index 757111b4127..f0413af9e75 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go @@ -7,6 +7,7 @@ import ( "os" "path/filepath" "strings" + "time" "github.com/tmc/langchaingo/callbacks" ) @@ -20,9 +21,35 @@ type WriteFileTool struct { type WriteFileRequest struct { Filename string `json:"filename"` Content string `json:"content"` - Mode string `json:"mode,omitempty"` // "write" (default), "append", "create" - ChunkNum int `json:"chunk_num,omitempty"` // For chunked writing: 1-based chunk number - TotalChunks int `json:"total_chunks,omitempty"` // For chunked writing: total expected chunks + Mode string `json:"mode,omitempty"` // "write" (default), "append", "create" + ChunkNum int `json:"chunkNum,omitempty"` // For chunked writing: 1-based chunk number + TotalChunks int `json:"totalChunks,omitempty"` // For chunked writing: total expected chunks +} + +// WriteFileResponse represents the JSON output for the write_file tool +type WriteFileResponse struct { + Success bool `json:"success"` + Operation string `json:"operation"` + FilePath string `json:"filePath"` + BytesWritten int `json:"bytesWritten"` + IsChunked bool `json:"isChunked"` + ChunkInfo *ChunkInfo `json:"chunkInfo,omitempty"` + FileInfo FileInfoDetails 
`json:"fileInfo"` + Message string `json:"message,omitempty"` +} + +// ChunkInfo represents chunked writing details +type ChunkInfo struct { + ChunkNumber int `json:"chunkNumber"` + TotalChunks int `json:"totalChunks"` + IsComplete bool `json:"isComplete"` +} + +// FileInfoDetails represents file metadata +type FileInfoDetails struct { + Size int64 `json:"size"` + ModifiedTime time.Time `json:"modifiedTime"` + Permissions string `json:"permissions"` } func (t WriteFileTool) Name() string { @@ -30,21 +57,21 @@ func (t WriteFileTool) Name() string { } func (t WriteFileTool) Description() string { - return `Comprehensive file writing tool that handles small and large files intelligently. + return `Comprehensive file writing tool that handles small and large files intelligently. Returns JSON response with operation details. Input: JSON payload with the following structure: { "filename": "path/to/file.txt", "content": "file content here", "mode": "write", - "chunk_num": 1, - "total_chunks": 3 + "chunkNum": 1, + "totalChunks": 3 } Field descriptions: - mode: "write" (default), "append", or "create" -- chunk_num: for chunked writing (1-based) -- total_chunks: total number of chunks +- chunkNum: for chunked writing (1-based) +- totalChunks: total number of chunks MODES: - "write" (default): Overwrite/create file @@ -52,9 +79,9 @@ MODES: - "create": Create file only if it doesn't exist CHUNKED WRITING (for large files): -Use chunk_num and total_chunks for files that might be too large: -- chunk_num: 1-based chunk number (1, 2, 3...) -- total_chunks: Total number of chunks you'll send +Use chunkNum and totalChunks for files that might be too large: +- chunkNum: 1-based chunk number (1, 2, 3...) +- totalChunks: Total number of chunks you'll send EXAMPLES: @@ -65,9 +92,9 @@ Append to file: {"filename": "./log.txt", "content": "\nNew log entry", "mode": "append"} Large file (chunked): -{"filename": "./large.bicep", "content": "first part...", "chunk_num": 1, "total_chunks": 3} -{"filename": "./large.bicep", "content": "middle part...", "chunk_num": 2, "total_chunks": 3} -{"filename": "./large.bicep", "content": "final part...", "chunk_num": 3, "total_chunks": 3} +{"filename": "./large.bicep", "content": "first part...", "chunkNum": 1, "totalChunks": 3} +{"filename": "./large.bicep", "content": "middle part...", "chunkNum": 2, "totalChunks": 3} +{"filename": "./large.bicep", "content": "final part...", "chunkNum": 3, "totalChunks": 3} The input must be formatted as a single line valid JSON string.` } @@ -141,7 +168,7 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { // handleChunkedWrite handles writing files in chunks func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequest) (string, error) { if req.ChunkNum < 1 || req.TotalChunks < 1 || req.ChunkNum > req.TotalChunks { - err := fmt.Errorf("invalid chunk numbers: chunk_num=%d, total_chunks=%d", req.ChunkNum, req.TotalChunks) + err := fmt.Errorf("invalid chunk numbers: chunkNum=%d, totalChunks=%d", req.ChunkNum, req.TotalChunks) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, err) } @@ -165,7 +192,7 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ if req.ChunkNum == 1 { // First chunk - create/overwrite file err = os.WriteFile(filePath, []byte(content), 0644) - operation = fmt.Sprintf("Started writing chunk %d/%d", req.ChunkNum, req.TotalChunks) + operation = "write" } else { // Subsequent chunks - append file, openErr := 
os.OpenFile(filePath, os.O_APPEND|os.O_WRONLY, 0644) @@ -179,11 +206,7 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ defer file.Close() _, err = file.WriteString(content) - if req.ChunkNum == req.TotalChunks { - operation = fmt.Sprintf("Completed writing chunk %d/%d (final)", req.ChunkNum, req.TotalChunks) - } else { - operation = fmt.Sprintf("Wrote chunk %d/%d", req.ChunkNum, req.TotalChunks) - } + operation = "append" } if err != nil { @@ -194,7 +217,7 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ return "", toolErr } - // Get file size + // Get file info fileInfo, err := os.Stat(filePath) if err != nil { toolErr := fmt.Errorf("failed to verify file %s: %w", filePath, err) @@ -204,13 +227,43 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ return "", toolErr } - output := fmt.Sprintf("%s to %s. Chunk size: %d bytes, Total file size: %d bytes", - operation, filePath, len(content), fileInfo.Size()) + // Create JSON response + response := WriteFileResponse{ + Success: true, + Operation: operation, + FilePath: filePath, + BytesWritten: len(content), + IsChunked: true, + ChunkInfo: &ChunkInfo{ + ChunkNumber: req.ChunkNum, + TotalChunks: req.TotalChunks, + IsComplete: req.ChunkNum == req.TotalChunks, + }, + FileInfo: FileInfoDetails{ + Size: fileInfo.Size(), + ModifiedTime: fileInfo.ModTime(), + Permissions: fileInfo.Mode().String(), + }, + } if req.ChunkNum == req.TotalChunks { - output += "\n✅ File writing completed successfully!" + response.Message = "File writing completed successfully" + } else { + response.Message = fmt.Sprintf("Chunk %d/%d written successfully", req.ChunkNum, req.TotalChunks) + } + + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } + output := string(jsonData) + if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) } @@ -287,7 +340,32 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ return "", toolErr } - output := fmt.Sprintf("%s %d bytes to %s successfully", operation, fileInfo.Size(), filePath) + // Create JSON response + response := WriteFileResponse{ + Success: true, + Operation: operation, + FilePath: filePath, + BytesWritten: len(content), + IsChunked: false, + FileInfo: FileInfoDetails{ + Size: fileInfo.Size(), + ModifiedTime: fileInfo.ModTime(), + Permissions: fileInfo.Mode().String(), + }, + Message: fmt.Sprintf("File %s successfully", strings.ToLower(operation)), + } + + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + output := string(jsonData) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json index efca4416be8..dd2078ee03e 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json +++ b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json @@ -1,9 +1,19 @@ { "servers": { - "Azure": { + "bestpractices": { "type": "stdio", "command": "azmcp", - "args": ["server", 
"start"] + "args": ["server", "start", "--namespace", "bestpractices"] + }, + "subscription": { + "type": "stdio", + "command": "azmcp", + "args": ["server", "start", "--namespace", "subscription"] + }, + "deploy": { + "type": "stdio", + "command": "azmcp", + "args": ["server", "start", "--namespace", "deploy"] } } } From 95feb332751f54a2e34da54325e7cfa4ec5e7a60 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Wed, 30 Jul 2025 17:16:50 -0700 Subject: [PATCH 022/116] Adds c2c similar tools --- .../default_agent_format_instructions.txt | 2 +- .../internal/cmd/enhanced_integration.go | 2 +- .../azd.ai.start/internal/logging/logger.go | 60 +- .../tools/azd/azd_iac_generation_rules.go | 28 + .../internal/tools/azd/azd_plan_init.go | 28 + .../internal/tools/azd/azd_yaml_schema.go | 28 + .../azd.ai.start/internal/tools/azd/loader.go | 25 + .../azd/prompts/azd_iac_generation_rules.md | 195 ++ .../tools/azd/prompts/azd_plan_init.md | 267 +++ .../tools/azd/prompts/azd_yaml_schema.md | 18 + .../tools/azd/prompts/azure.yaml.json | 1819 +++++++++++++++++ .../internal/tools/azd/prompts/prompts.go | 14 + .../internal/tools/common/types.go | 7 + .../internal/tools/dev/command_executor.go | 189 +- .../internal/tools/io/copy_file.go | 140 +- .../internal/tools/io/create_directory.go | 4 + .../internal/tools/io/delete_directory.go | 4 + .../internal/tools/io/delete_file.go | 4 + .../internal/tools/io/directory_list.go | 21 +- .../internal/tools/io/file_info.go | 4 + .../internal/tools/io/move_file.go | 3 + .../azd.ai.start/internal/tools/loader.go | 2 + .../azd.ai.start/internal/tools/mcp/mcp.json | 14 +- 23 files changed, 2739 insertions(+), 139 deletions(-) create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_yaml_schema.md create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azure.yaml.json create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/common/types.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt index f7db6dd7e21..d66dcb43d88 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt +++ b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt @@ -34,7 +34,7 @@ Observation: [result] Remember: Always continue taking actions until the user's request is fully satisfied. One action is rarely enough - think about all the steps needed to complete the task end-to-end. -When you are done answering the questions and performing all your tasks you MUST use the following format: +When you are done answering the questions and performing all your tasks you MUST ALWAYS use the following format: Thought: Do I need to use a tool? 
No AI: [briefly summarize your response without all the details from your observations] \ No newline at end of file diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go index dad0371268a..7e436b76e6d 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go @@ -66,7 +66,7 @@ func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) // Process the query with the enhanced agent err := azureAgent.ProcessQuery(ctx, userInput) if err != nil { - return err + continue } } diff --git a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go index d148b9273b2..aa693cc7f6b 100644 --- a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go +++ b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go @@ -6,6 +6,7 @@ package logging import ( "context" "encoding/json" + "fmt" "regexp" "strings" @@ -65,7 +66,9 @@ func (al *ActionLogger) HandleLLMGenerateContentEnd(ctx context.Context, res *ll } // Find all "Thought:" patterns and extract the content that follows - thoughtRegex := regexp.MustCompile(`(?i)thought:\s*(.*)`) + // (?is) flags: i=case insensitive, s=dot matches newlines + // .*? is non-greedy to stop at the first occurrence of next pattern or end + thoughtRegex := regexp.MustCompile(`(?is)thought:\s*(.*?)(?:\n\s*(?:action|final answer|observation|ai):|$)`) matches := thoughtRegex.FindAllStringSubmatch(content, -1) for _, match := range matches { @@ -120,6 +123,14 @@ func (al *ActionLogger) HandleChainError(ctx context.Context, err error) { color.Red("\n%s\n", err.Error()) } +// truncateString truncates a string to maxLen characters and adds "..." if truncated +func truncateString(s string, maxLen int) string { + if len(s) > maxLen { + return s[:maxLen-3] + "..." 
+ } + return s +} + // HandleAgentAction is called when an agent action is planned func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.AgentAction) { // Print "Calling " @@ -132,18 +143,47 @@ func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.Age var toolInput map[string]interface{} if err := json.Unmarshal([]byte(action.ToolInput), &toolInput); err == nil { - // Successfully parsed JSON, check for filename parameter - if filename, ok := toolInput["filename"]; ok { - if filenameStr, ok := filename.(string); ok { - color.Green("\n🤖 Agent: Calling %s %s\n", action.Tool, filenameStr) - return + // Successfully parsed JSON, create comma-delimited key-value pairs + excludedKeys := map[string]bool{"content": true} + var params []string + + for key, value := range toolInput { + if excludedKeys[key] { + continue + } + + var valueStr string + switch v := value.(type) { + case []interface{}: + // Handle arrays by joining with spaces + var strSlice []string + for _, item := range v { + strSlice = append(strSlice, strings.TrimSpace(string(fmt.Sprintf("%v", item)))) + } + valueStr = strings.Join(strSlice, " ") + default: + valueStr = strings.TrimSpace(fmt.Sprintf("%v", v)) + } + + if valueStr != "" { + params = append(params, fmt.Sprintf("%s: %s", key, valueStr)) } } - // JSON parsed but no filename found, use fallback format - color.Green("\n🤖 Agent: Calling %s tool\n", action.Tool) + + var paramStr string + if len(params) > 0 { + paramStr = strings.Join(params, ", ") + } else { + paramStr = "tool" + } + + paramStr = truncateString(paramStr, 100) + output := fmt.Sprintf("\n🤖 Agent: Calling %s tool with %s\n", action.Tool, paramStr) + color.Green(output) } else { - // JSON parsing failed, show the input as text - color.Green("\n🤖 Agent: Calling %s tool with %s\n", action.Tool, action.ToolInput) + // JSON parsing failed, show the input as text with truncation + toolInput := truncateString(action.ToolInput, 100) + color.Green("\n🤖 Agent: Calling %s tool with %s\n", action.Tool, toolInput) } } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go new file mode 100644 index 00000000000..f67c067e820 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go @@ -0,0 +1,28 @@ +package azd + +import ( + "context" + + "azd.ai.start/internal/tools/azd/prompts" + "github.com/tmc/langchaingo/tools" +) + +var _ tools.Tool = &AzdIacGenerationRulesTool{} + +type AzdIacGenerationRulesTool struct { +} + +func (t *AzdIacGenerationRulesTool) Name() string { + return "azd_iac_generation_rules" +} + +func (t *AzdIacGenerationRulesTool) Description() string { + return ` + Gets the infrastructure as code (IaC) rules and best practices and patterns to use when generating bicep files and modules for use within AZD. 
+ Input: empty string + ` +} + +func (t *AzdIacGenerationRulesTool) Call(ctx context.Context, input string) (string, error) { + return prompts.AzdIacRulesPrompt, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go new file mode 100644 index 00000000000..1e648939d2b --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go @@ -0,0 +1,28 @@ +package azd + +import ( + "context" + + "azd.ai.start/internal/tools/azd/prompts" + "github.com/tmc/langchaingo/tools" +) + +var _ tools.Tool = &AzdPlanInitTool{} + +type AzdPlanInitTool struct { +} + +func (t *AzdPlanInitTool) Name() string { + return "azd_plan_init" +} + +func (t *AzdPlanInitTool) Description() string { + return ` + Gets the required workflow steps and best practices and patterns for initializing or migrating an application to use AZD. + Input: empty string + ` +} + +func (t *AzdPlanInitTool) Call(ctx context.Context, input string) (string, error) { + return prompts.AzdPlanInitPrompt, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go new file mode 100644 index 00000000000..db83ddf3d08 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go @@ -0,0 +1,28 @@ +package azd + +import ( + "context" + + "azd.ai.start/internal/tools/azd/prompts" + "github.com/tmc/langchaingo/tools" +) + +var _ tools.Tool = &AzdYamlSchemaTool{} + +type AzdYamlSchemaTool struct { +} + +func (t *AzdYamlSchemaTool) Name() string { + return "azd_yaml_schema" +} + +func (t *AzdYamlSchemaTool) Description() string { + return ` + Gets the Azure YAML JSON schema file specification and structure for azure.yaml configuration files used in AZD. + Input: empty string + ` +} + +func (t *AzdYamlSchemaTool) Call(ctx context.Context, input string) (string, error) { + return prompts.AzdYamlSchemaPrompt, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go new file mode 100644 index 00000000000..648d70bb569 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go @@ -0,0 +1,25 @@ +package azd + +import ( + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/tools" +) + +// AzdToolsLoader loads AZD-related tools +type AzdToolsLoader struct { + callbackHandler callbacks.Handler +} + +func NewAzdToolsLoader(callbackHandler callbacks.Handler) *AzdToolsLoader { + return &AzdToolsLoader{ + callbackHandler: callbackHandler, + } +} + +func (l *AzdToolsLoader) LoadTools() ([]tools.Tool, error) { + return []tools.Tool{ + &AzdPlanInitTool{}, + &AzdIacGenerationRulesTool{}, + &AzdYamlSchemaTool{}, + }, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md new file mode 100644 index 00000000000..dd89e2586b7 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md @@ -0,0 +1,195 @@ +# Infrastructure as Code (IaC) Generation Rules for Azure Developer CLI (AZD) + +This document provides comprehensive rules and guidelines for generating Bicep Infrastructure as Code files and modules for AZD projects. Follow these rules strictly when generating Azure infrastructure code. 
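+
+As a quick orientation before the detailed rules: the Azure Verified Modules (AVM) rule below generally means module declarations reference published modules from the public Bicep registry rather than hand-written resources. The following is a hedged sketch only; the version tag is illustrative, and `location`/`tags` are assumed to be parameters of the surrounding resource-group-scoped module, as required by the Module Parameters rule below.
+
+```bicep
+// Hedged sketch of consuming an Azure Verified Module (AVM) from the public
+// Bicep registry. The version tag (0.9.0) is illustrative; pin to a current
+// published AVM release when generating real infrastructure.
+module keyVault 'br/public:avm/res/key-vault/vault:0.9.0' = {
+  name: 'key-vault'
+  params: {
+    name: 'kv-${uniqueString(resourceGroup().id)}'
+    location: location
+    tags: tags
+  }
+}
+```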
+ +## Core Generation Rules + +### File Structure and Organization + +- **REQUIRED**: Place all IaC files in the `./infra` folder within an AZD project +- **REQUIRED**: Name the main deployment file `main.bicep` - this is the primary deployment target +- **REQUIRED**: The root level `main.bicep` must be a subscription level deployment using `targetScope = 'subscription'` +- **REQUIRED**: The main.bicep file must create a resource group as the primary container for all resources +- **REQUIRED**: Pass the resource group scope to all child modules that deploy resources +- **REQUIRED**: Create modular, reusable Bicep files instead of monolithic templates +- **RECOMMENDED**: Organize modules by resource type or logical grouping + +### Azure Best Practices Compliance + +- **REQUIRED**: Follow Azure Well-Architected Framework principles +- **REQUIRED**: Use Bicep best practices including proper parameter validation and resource dependencies +- **REQUIRED**: Leverage Azure Verified Modules (AVM) when available - always check for existing AVM modules before creating custom ones +- **REQUIRED**: Implement least-privilege access principles + +### Naming Conventions + +- **REQUIRED**: Use consistent naming pattern: `{resourcePrefix}-{name}-{uniqueHash}` +- **REQUIRED**: Generate unique hash using combination of environment name, subscription ID, and resource group name +- **EXAMPLE**: `app-myservice-h3x9k2` where `h3x9k2` is generated from env/subscription/rg +- **FORBIDDEN**: Hard-code tenant IDs, subscription IDs, or resource group names + +### Module Parameters + +- **REQUIRED**: Every module must accept these standard parameters: + - `name` (string): Base name for the resource + - `location` (string): Azure region for deployment + - `tags` (object): Resource tags for governance +- **REQUIRED**: Modules that deploy Azure resources must use `targetScope = 'resourceGroup'` and be called with the resource group scope from main.bicep +- **REQUIRED**: Provide intelligent defaults for optional parameters +- **REQUIRED**: Use parameter decorators for validation (e.g., `@minLength`, `@allowed`) +- **RECOMMENDED**: Group related parameters using objects when appropriate + +### Tagging Strategy + +- **REQUIRED**: Tag resource groups with `azd-env-name: {environment-name}` +- **REQUIRED**: Tag hosting resources with `azd-service-name: {service-name}` +- **RECOMMENDED**: Include additional governance tags (cost center, owner, etc.) + +### Security and Compliance + +- **FORBIDDEN**: Hard-code secrets, connection strings, or sensitive values +- **REQUIRED**: Use Key Vault references for secrets +- **REQUIRED**: Enable diagnostic settings and logging where applicable +- **REQUIRED**: Follow principle of least privilege for managed identities + +### Quality Assurance + +- **REQUIRED**: Validate all generated Bicep code using Bicep CLI +- **REQUIRED**: Address all warnings and errors before considering code complete +- **REQUIRED**: Test deployment in a sandbox environment when possible + +## Supported Azure Services + +### Primary Hosting Resources (Choose One) + +1. **Azure Container Apps** ⭐ **(PREFERRED)** + - Best for containerized applications + - Built-in scaling and networking + - Supports both HTTP and background services + +2. **Azure App Service** + - Best for web applications and APIs + - Supports multiple runtime stacks + - Built-in CI/CD integration + +3. 
**Azure Function Apps** + - Best for serverless and event-driven workloads + - Multiple hosting plans available + - Trigger-based execution model + +4. **Azure Static Web Apps** + - Best for frontend applications + - Built-in GitHub/Azure DevOps integration + - Free tier available + +5. **Azure Kubernetes Service (AKS)** + - Best for complex containerized workloads + - Full Kubernetes capabilities + - Requires advanced configuration + +### Essential Supporting Resources + +**REQUIRED** - Include these resources in most AZD applications: + +- **Log Analytics Workspace** + - Central logging and monitoring + - Required for Application Insights + - Enable diagnostic settings for all resources + +- **Application Insights** + - Application performance monitoring + - Dependency tracking and telemetry + - Link to Log Analytics workspace + +- **Azure Key Vault** + - Secure storage for secrets, keys, and certificates + - Use managed identity for access + - Enable soft delete and purge protection + +**CONDITIONAL** - Include based on application requirements: + +- **Azure Container Registry** (for container-based apps) +- **Azure Service Bus** (for messaging scenarios) +- **Azure Cosmos DB** (for NoSQL data storage) +- **Azure SQL Database** (for relational data storage) +- **Azure Storage Account** (for blob/file storage) +- **Azure Cache for Redis** (for caching scenarios) + +## Code Generation Examples + +### Main.bicep Structure Template + +```bicep +targetScope = 'subscription' + +@description('Name of the environment') +param environmentName string + +@description('Location for all resources') +param location string + +@description('Tags to apply to all resources') +param tags object = {} + +// Generate unique suffix for resource names +var resourceSuffix = take(uniqueString(subscription().id, environmentName, location), 6) +var resourceGroupName = 'rg-${environmentName}-${resourceSuffix}' + +// Create the resource group +resource resourceGroup 'Microsoft.Resources/resourceGroups@2021-04-01' = { + name: resourceGroupName + location: location + tags: union(tags, { + 'azd-env-name': environmentName + }) +} + +// Example module deployment with resource group scope +module appService 'modules/app-service.bicep' = { + name: 'app-service' + scope: resourceGroup + params: { + name: 'myapp' + location: location + tags: tags + } +} +``` + +### Child Module Structure Template + +```bicep +targetScope = 'resourceGroup' + +@description('Base name for all resources') +param name string + +@description('Location for all resources') +param location string = resourceGroup().location + +@description('Tags to apply to all resources') +param tags object = {} + +// Generate unique suffix for resource names +var resourceSuffix = take(uniqueString(subscription().id, resourceGroup().name, name), 6) +var resourceName = '${name}-${resourceSuffix}' + +// Resource definitions here... 
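+
+// Illustrative only, not part of the required template: one possible resource
+// definition that follows the naming and tagging rules above. A Log Analytics
+// workspace is assumed here purely as an example of a supporting resource.
+resource logAnalytics 'Microsoft.OperationalInsights/workspaces@2022-10-01' = {
+  name: 'log-${resourceName}'
+  location: location
+  tags: tags
+  properties: {
+    sku: {
+      name: 'PerGB2018'
+    }
+    retentionInDays: 30
+  }
+}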
+``` + +## Validation Checklist + +Before completing code generation, verify: + +- [ ] All files are in `./infra` folder +- [ ] `main.bicep` exists as primary deployment file with subscription scope +- [ ] Resource group is created in `main.bicep` and properly tagged +- [ ] All child modules use `targetScope = 'resourceGroup'` and receive resource group scope +- [ ] All resources use consistent naming convention +- [ ] Required tags are applied correctly +- [ ] No hard-coded secrets or identifiers +- [ ] Parameters have appropriate validation +- [ ] Bicep CLI validation passes without errors +- [ ] AVM modules are used where available +- [ ] Supporting resources are included as needed +- [ ] Security best practices are followed diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md new file mode 100644 index 00000000000..d8f7a391f68 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md @@ -0,0 +1,267 @@ +# AZD Application Initialization and Migration Plan + +This document provides a comprehensive, step-by-step plan for initializing or migrating applications to use Azure Developer CLI (AZD). Follow these steps sequentially to ensure successful AZD adoption. + +## Executive Summary + +Transform any application into an AZD-compatible project by: + +1. Analyzing the current codebase and architecture +2. Identifying all application components and dependencies +3. Generating required configuration and infrastructure files +4. Establishing the AZD environment structure + +## Phase 1: Discovery and Analysis + +### Step 1: Deep File System Analysis + +**REQUIRED ACTIONS:** + +- Scan all files in the current working directory recursively +- Document file structure, programming languages, and frameworks detected +- Identify configuration files (package.json, requirements.txt, pom.xml, etc.) 
+- Locate any existing Docker files, docker-compose files, or containerization configs +- Find database configuration files and connection strings +- Identify API endpoints, service definitions, and application entry points + +**OUTPUT:** Complete inventory of all discoverable application artifacts + +### Step 2: Component Classification + +**REQUIRED ACTIONS:** + +- Categorize each discovered component into one of these types: + - **Web Applications** (frontend, SPA, static sites) + - **API Services** (REST APIs, GraphQL, gRPC services) + - **Background Services** (workers, processors, scheduled jobs) + - **Databases** (relational, NoSQL, caching) + - **Messaging Systems** (queues, topics, event streams) + - **AI/ML Components** (models, inference endpoints, training jobs) + - **Supporting Services** (authentication, logging, monitoring) + +**OUTPUT:** Structured component inventory with classifications + +### Step 3: Dependency Mapping + +**REQUIRED ACTIONS:** + +- Map inter-component dependencies and communication patterns +- Identify external service dependencies (third-party APIs, SaaS services) +- Document data flow between components +- Identify shared resources and configuration + +**OUTPUT:** Component dependency graph and communication matrix + +## Phase 2: Architecture Planning and Azure Service Selection + +### Application Component Planning + +For each identified application component, execute the following analysis: + +**REQUIRED ANALYSIS:** + +- **Hosting Platform Selection:** + - **Azure Container Apps** (PREFERRED for microservices and containerized apps) + - **Azure App Service** (for web apps and APIs with specific runtime requirements) + - **Azure Functions** (for serverless and event-driven components) + - **Azure Static Web Apps** (for frontend applications and SPAs) + - **Azure Kubernetes Service** (for complex orchestration requirements) + +- **Containerization Assessment:** + - Determine if component can run in Docker container + - If Dockerfile doesn't exist, plan Docker container strategy + - Identify base images and runtime requirements + - Document port mappings and environment variables + +- **Configuration Requirements:** + - Identify environment-specific settings + - Map secrets and sensitive configuration + - Document connection strings and service endpoints + - Plan configuration injection strategy + +**OUTPUT:** Hosting strategy and containerization plan for each component + +### Database Component Planning + +For components using persistent data storage: + +**REQUIRED ANALYSIS:** + +- **Azure Database Service Selection:** + - **Azure SQL Database** (for relational data with SQL Server compatibility) + - **Azure Database for PostgreSQL** (for PostgreSQL workloads) + - **Azure Database for MySQL** (for MySQL workloads) + - **Azure Cosmos DB** (for NoSQL, multi-model data) + - **Azure Cache for Redis** (for caching and session storage) + +- **Migration Strategy:** + - Assess current database schema and data + - Plan data migration approach + - Identify backup and recovery requirements + - Document connection string patterns + +**OUTPUT:** Database hosting plan and migration strategy + +### Messaging Component Planning + +For components using asynchronous communication: + +**REQUIRED ANALYSIS:** + +- **Azure Messaging Service Selection:** + - **Azure Service Bus** (for reliable enterprise messaging) + - **Azure Event Hubs** (for high-throughput event streaming) + - **Azure Event Grid** (for event-driven architectures) + - **Azure Storage Queues** (for simple 
queue scenarios) + +- **Integration Planning:** + - Map message flows and routing + - Identify message schemas and formats + - Plan dead letter handling and error scenarios + - Document scaling and throughput requirements + +**OUTPUT:** Messaging architecture and integration plan + +### AI Component Planning + +For components using artificial intelligence or machine learning: + +**REQUIRED ANALYSIS:** + +- **Azure AI Service Selection:** + - **Azure OpenAI Service** (for GPT models and cognitive services) + - **Azure AI Services** (for vision, speech, language processing) + - **Azure Machine Learning** (for custom ML models and training) + - **Azure Cognitive Search** (for intelligent search capabilities) + +- **Model and Data Requirements:** + - Identify required AI models and versions + - Document input/output data formats + - Plan model deployment and scaling strategy + - Assess training data and pipeline requirements + +**OUTPUT:** AI service architecture and deployment plan + +## Phase 3: File Generation and Configuration + +### Step 1: Generate azure.yaml Configuration + +**REQUIRED ACTIONS:** + +- Create `azure.yaml` file in the root directory +- Define all services with appropriate hosting configurations +- Specify build and deployment instructions for each service +- Configure environment variable mappings +- Reference infrastructure templates correctly + +**TEMPLATE STRUCTURE:** + +```yaml +name: {project-name} +services: + {service-name}: + project: ./path/to/service + host: {hosting-type} + # Additional service-specific configuration +``` + +### Step 2: Generate Infrastructure as Code Files + +**REQUIRED ACTIONS:** + +- Create `./infra` directory structure +- Generate `main.bicep` as primary deployment template +- Create modular Bicep files for each resource type +- **CRITICAL:** Follow all rules from AZD IaC Generation Rules document +- Implement proper naming conventions and tagging strategies +- Include supporting resources (Log Analytics, Application Insights, Key Vault) + +### Step 3: Generate Container Configurations + +**REQUIRED ACTIONS:** + +- Create Dockerfile for each containerizable component +- Use appropriate base images for detected programming languages +- Configure health checks and startup commands +- Set proper working directories and file permissions +- Optimize for production deployment + +### Step 4: Generate Architecture Documentation + +**REQUIRED ACTIONS:** + +- Create `azd-arch-plan.md` with comprehensive analysis +- Document all discovered components and their relationships +- Include architecture diagrams (text-based or mermaid) +- Explain Azure service selections and rationale +- Provide deployment and operational guidance + +**DOCUMENT STRUCTURE:** + +- Executive Summary +- Application Architecture Overview +- Component Analysis +- Azure Service Mapping +- Infrastructure Design +- Deployment Strategy +- Operational Considerations + +## Phase 4: Environment Initialization + +### Step 1: Create AZD Environment + +**REQUIRED ACTIONS:** + +- Execute: `azd env new {directory-name}-dev` +- Use current working directory name as environment name base +- Configure environment-specific settings +- Validate environment configuration + +### Step 2: Validation and Testing + +**REQUIRED ACTIONS:** + +- Run `azd package` to validate service configurations +- Execute `azd provision --dry-run` to test infrastructure templates +- Verify all Bicep files compile without errors +- Check all referenced files and paths exist +- Validate environment variable 
configurations + +## Success Criteria + +The migration is successful when: + +- [ ] All application components are identified and classified +- [ ] `azure.yaml` file is valid and complete +- [ ] All infrastructure files are generated and error-free +- [ ] Required Dockerfiles are created for containerizable components +- [ ] `azd-arch-plan.md` provides comprehensive documentation +- [ ] AZD environment is initialized and validated +- [ ] `azd package` completes without errors +- [ ] `azd provision --dry-run` validates successfully + +## Common Patterns and Best Practices + +### For Multi-Service Applications + +- Use Azure Container Apps for microservices architecture +- Implement shared infrastructure (networking, monitoring) +- Configure service-to-service communication properly + +### For Data-Intensive Applications + +- Co-locate compute and data services in same region +- Implement proper connection pooling and caching +- Configure backup and disaster recovery + +### For AI-Enabled Applications + +- Separate AI services from main application logic +- Implement proper error handling for AI service calls +- Plan for model updates and versioning + +### For High-Availability Applications + +- Configure multiple availability zones +- Implement health checks and auto-scaling +- Plan for disaster recovery scenarios diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_yaml_schema.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_yaml_schema.md new file mode 100644 index 00000000000..9701dd9c5ac --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_yaml_schema.md @@ -0,0 +1,18 @@ +# Azure YAML Schema + +This document contains the JSON schema specification for the azure.yaml configuration file used in Azure Developer CLI (AZD) projects. + +## Schema Content + + + +The azure.yaml file is the main configuration file for AZD projects and defines: + +- Project metadata +- Services configuration +- Infrastructure settings +- Hooks and workflows +- Environment variables +- And other project-specific settings + +This schema helps validate and provide IntelliSense support for azure.yaml files in various editors and tools. diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azure.yaml.json b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azure.yaml.json new file mode 100644 index 00000000000..747fd7fa649 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azure.yaml.json @@ -0,0 +1,1819 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://raw.githubusercontent.com/Azure/azure-dev/main/schemas/v1.0/azure.yaml.json", + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "minLength": 2, + "title": "Name of the application", + "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$", + "description": "The application name. Only lowercase letters, numbers, and hyphens (-) are allowed. The name must start and end with a letter or number." + }, + "resourceGroup": { + "type": "string", + "minLength": 3, + "maxLength": 64, + "title": "Name of the Azure resource group", + "description": "When specified will override the resource group name used for infrastructure provisioning. Supports environment variable substitution." 
+ }, + "metadata": { + "type": "object", + "properties": { + "template": { + "type": "string", + "title": "Identifier of the template from which the application was created. Optional.", + "examples": [ + "todo-nodejs-mongo@0.0.1-beta" + ] + } + } + }, + "infra": { + "type": "object", + "title": "The infrastructure configuration used for the application", + "description": "Optional. Provides additional configuration for Azure infrastructure provisioning.", + "additionalProperties": true, + "properties": { + "provider": { + "type": "string", + "title": "Type of infrastructure provisioning provider", + "description": "Optional. The infrastructure provisioning provider used to provision the Azure resources for the application. (Default: bicep)", + "enum": [ + "bicep", + "terraform" + ] + }, + "path": { + "type": "string", + "title": "Path to the location that contains Azure provisioning templates", + "description": "Optional. The relative folder path to the Azure provisioning templates for the specified provider. (Default: infra)" + }, + "module": { + "type": "string", + "title": "Name of the default module within the Azure provisioning templates", + "description": "Optional. The name of the Azure provisioning module used when provisioning resources. (Default: main)" + } + } + }, + "services": { + "type": "object", + "title": "Definition of services that comprise the application", + "minProperties": 1, + "additionalProperties": { + "type": "object", + "additionalProperties": false, + "required": [ + "host" + ], + "properties": { + "apiVersion": { + "type": "string", + "title": "Resource provider API version for deployments", + "description": "Optional. The resource provider API version to use for the service. If not specified, the default SDK API version is used. Only valid when host is 'containerapp'." + }, + "resourceGroup": { + "type": "string", + "title": "Name of the Azure resource group that contains the resource", + "description": "By default, the CLI will discover the Azure resource within the default resource group. When specified, the CLI will instead find the Azure resource within the specified resource group. Supports environment variable substitution." + }, + "resourceName": { + "type": "string", + "title": "Name of the Azure resource that implements the service", + "description": "By default, the CLI will discover the Azure resource with tag 'azd-service-name' set to the current service's name. When specified, the CLI will instead find the Azure resource with the matching resource name. Supports environment variable substitution." + }, + "project": { + "type": "string", + "title": "Path to the service source code directory" + }, + "image": { + "type": "string", + "title": "Optional. The source image to be used for the container image instead of building from source. Supports environment variable substitution.", + "description": "If omitted, container image will be built from source specified in the 'project' property. Setting both 'project' and 'image' is invalid." + }, + "host": { + "type": "string", + "title": "Required. 
The type of Azure resource used for service implementation", + "description": "The Azure service that will be used as the target for deployment operations for the service.", + "enum": [ + "appservice", + "containerapp", + "function", + "springapp", + "staticwebapp", + "aks", + "ai.endpoint" + ] + }, + "language": { + "type": "string", + "title": "Service implementation language", + "enum": [ + "dotnet", + "csharp", + "fsharp", + "py", + "python", + "js", + "ts", + "java", + "docker" + ] + }, + "module": { + "type": "string", + "title": "(DEPRECATED) Path of the infrastructure module used to deploy the service relative to the root infra folder", + "description": "If omitted, the CLI will assume the module name is the same as the service name. This property will be deprecated in a future release." + }, + "dist": { + "type": "string", + "title": "Relative path to service deployment artifacts" + }, + "docker": { + "$ref": "#/definitions/docker" + }, + "k8s": { + "$ref": "#/definitions/aksOptions" + }, + "config": { + "type": "object", + "additionalProperties": true + }, + "hooks": { + "type": "object", + "title": "Service level hooks", + "description": "Hooks should match `service` event names prefixed with `pre` or `post` depending on when the script should execute. When specifying paths they should be relative to the service path.", + "additionalProperties": false, + "properties": { + "predeploy": { + "title": "pre deploy hook", + "description": "Runs before the service is deployed to Azure", + "$ref": "#/definitions/hooks" + }, + "postdeploy": { + "title": "post deploy hook", + "description": "Runs after the service is deployed to Azure", + "$ref": "#/definitions/hooks" + }, + "prerestore": { + "title": "pre restore hook", + "description": "Runs before the service dependencies are restored", + "$ref": "#/definitions/hooks" + }, + "postrestore": { + "title": "post restore hook", + "description": "Runs after the service dependencies are restored", + "$ref": "#/definitions/hooks" + }, + "prebuild": { + "title": "pre build hook", + "description": "Runs before the service is built", + "$ref": "#/definitions/hooks" + }, + "postbuild": { + "title": "post build hook", + "description": "Runs after the service is built", + "$ref": "#/definitions/hooks" + }, + "prepackage": { + "title": "pre package hook", + "description": "Runs before the service is deployment package is created", + "$ref": "#/definitions/hooks" + }, + "postpackage": { + "title": "post package hook", + "description": "Runs after the service is deployment package is created", + "$ref": "#/definitions/hooks" + } + } + } + }, + "allOf": [ + { + "if": { + "properties": { + "host": { + "const": "containerapp" + } + } + }, + "then": { + "anyOf": [ + { + "required": [ + "image" + ], + "properties": { + "language": false + }, + "not": { + "required": [ + "project" + ] + } + }, + { + "required": [ + "project" + ], + "not": { + "required": [ + "image" + ] + } + } + ] + } + }, + { + "if": { + "not": { + "properties": { + "host": { + "const": "containerapp" + } + } + } + }, + "then": { + "properties": { + "image": false + } + } + }, + { + "if": { + "not": { + "properties": { + "host": { + "enum": [ + "containerapp", + "aks", + "ai.endpoint" + ] + } + } + } + }, + "then": { + "required": [ + "project", + "language" + ], + "properties": { + "docker": false + } + } + }, + { + "if": { + "properties": { + "host": { + "const": "ai.endpoint" + } + } + }, + "then": { + "required": [ + "config" + ], + "properties": { + "config": { + "$ref": 
"#/definitions/aiEndpointConfig", + "title": "The Azure AI endpoint configuration.", + "description": "Required. Provides additional configuration for Azure AI online endpoint deployment." + } + } + } + }, + { + "if": { + "not": { + "properties": { + "host": { + "enum": [ + "aks" + ] + } + } + } + }, + "then": { + "properties": { + "k8s": false + } + } + }, + { + "if": { + "properties": { + "language": { + "const": "java" + } + } + }, + "then": { + "properties": { + "dist": { + "type": "string", + "description": "Optional. The path to the directory containing a single Java archive file (.jar/.ear/.war), or the path to the specific Java archive file to be included in the deployment artifact. If omitted, the CLI will detect the output directory based on the build system in-use. For maven, the default output directory 'target' is assumed." + } + } + } + }, + { + "if": { + "not": { + "properties": { + "host": { + "const": "containerapp" + } + } + } + }, + "then": { + "properties": { + "apiVersion": false + } + } + }, + { + "properties": { + "dist": { + "type": "string", + "description": "Optional. The CLI will use files under this path to create the deployment artifact (ZIP file). If omitted, all files under service project directory will be included." + } + } + } + ] + } + }, + "resources": { + "type": "object", + "additionalProperties": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "title": "Type of resource", + "description": "The type of resource to be created. (Example: db.postgres)", + "enum": [ + "db.postgres", + "db.mysql", + "db.redis", + "db.mongo", + "db.cosmos", + "ai.openai.model", + "ai.project", + "ai.search", + "host.containerapp", + "host.appservice", + "messaging.eventhubs", + "messaging.servicebus", + "storage", + "keyvault" + ] + }, + "uses": { + "type": "array", + "title": "Other resources that this resource uses", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "existing": { + "type": "boolean", + "title": "An existing resource for referencing purposes", + "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. 
(Default: false)", + "default": false + } + }, + "allOf": [ + { + "if": { + "properties": { + "type": { + "const": "host.appservice" + } + } + }, + "then": { + "$ref": "#/definitions/appServiceResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "host.containerapp" + } + } + }, + "then": { + "$ref": "#/definitions/containerAppResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "ai.openai.model" + } + } + }, + "then": { + "$ref": "#/definitions/aiModelResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "ai.project" + } + } + }, + "then": { + "$ref": "#/definitions/aiProjectResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "ai.search" + } + } + }, + "then": { + "$ref": "#/definitions/aiSearchResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "db.postgres" + } + } + }, + "then": { + "$ref": "#/definitions/genericDbResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "db.mysql" + } + } + }, + "then": { + "$ref": "#/definitions/genericDbResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "db.redis" + } + } + }, + "then": { + "$ref": "#/definitions/genericDbResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "db.mongo" + } + } + }, + "then": { + "$ref": "#/definitions/genericDbResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "db.cosmos" + } + } + }, + "then": { + "$ref": "#/definitions/cosmosDbResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "messaging.eventhubs" + } + } + }, + "then": { + "$ref": "#/definitions/eventHubsResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "messaging.servicebus" + } + } + }, + "then": { + "$ref": "#/definitions/serviceBusResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "storage" + } + } + }, + "then": { + "$ref": "#/definitions/storageAccountResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "keyvault" + } + } + }, + "then": { + "$ref": "#/definitions/keyVaultResource" + } + } + ] + } + }, + "pipeline": { + "type": "object", + "title": "Definition of continuous integration pipeline", + "properties": { + "provider": { + "type": "string", + "title": "Type of pipeline provider", + "description": "Optional. The pipeline provider to be used for continuous integration. (Default: github)", + "enum": [ + "github", + "azdo" + ] + }, + "variables": { + "type": "array", + "title": "Optional. List of azd environment variables to be used in the pipeline as variables.", + "description": "If variable is found on azd environment, it is set as a variable for the pipeline.", + "items": { + "type": "string" + } + }, + "secrets": { + "type": "array", + "title": "Optional. List of azd environment variables to be used in the pipeline as secrets.", + "description": "If variable is found on azd environment, it is set as a secret for the pipeline.", + "items": { + "type": "string" + } + } + } + }, + "hooks": { + "type": "object", + "title": "Command level hooks", + "description": "Hooks should match `azd` command names prefixed with `pre` or `post` depending on when the script should execute. 
When specifying paths they should be relative to the project path.", + "additionalProperties": false, + "properties": { + "preprovision": { + "title": "pre provision hook", + "description": "Runs before the `provision` command", + "$ref": "#/definitions/hooks" + }, + "postprovision": { + "title": "post provision hook", + "description": "Runs after the `provision` command", + "$ref": "#/definitions/hooks" + }, + "preinfracreate": { + "title": "pre infra create hook", + "description": "Runs before the `infra create` or `provision` commands", + "$ref": "#/definitions/hooks" + }, + "postinfracreate": { + "title": "post infra create hook", + "description": "Runs after the `infra create` or `provision` commands", + "$ref": "#/definitions/hooks" + }, + "preinfradelete": { + "title": "pre infra delete hook", + "description": "Runs before the `infra delete` or `down` commands", + "$ref": "#/definitions/hooks" + }, + "postinfradelete": { + "title": "post infra delete hook", + "description": "Runs after the `infra delete` or `down` commands", + "$ref": "#/definitions/hooks" + }, + "predown": { + "title": "pre down hook", + "description": "Runs before the `infra delete` or `down` commands", + "$ref": "#/definitions/hooks" + }, + "postdown": { + "title": "post down hook", + "description": "Runs after the `infra delete` or `down` commands", + "$ref": "#/definitions/hooks" + }, + "preup": { + "title": "pre up hook", + "description": "Runs before the `up` command", + "$ref": "#/definitions/hooks" + }, + "postup": { + "title": "post up hook", + "description": "Runs after the `up` command", + "$ref": "#/definitions/hooks" + }, + "prepackage": { + "title": "pre package hook", + "description": "Runs before the `package` command", + "$ref": "#/definitions/hooks" + }, + "postpackage": { + "title": "post package hook", + "description": "Runs after the `package` command", + "$ref": "#/definitions/hooks" + }, + "predeploy": { + "title": "pre deploy hook", + "description": "Runs before the `deploy` command", + "$ref": "#/definitions/hooks" + }, + "postdeploy": { + "title": "post deploy hook", + "description": "Runs after the `deploy` command", + "$ref": "#/definitions/hooks" + }, + "prerestore": { + "title": "pre restore hook", + "description": "Runs before the `restore` command", + "$ref": "#/definitions/hooks" + }, + "postrestore": { + "title": "post restore hook", + "description": "Runs after the `restore` command", + "$ref": "#/definitions/hooks" + } + } + }, + "requiredVersions": { + "type": "object", + "additionalProperties": false, + "properties": { + "azd": { + "type": "string", + "title": "A range of supported versions of `azd` for this project", + "description": "A range of supported versions of `azd` for this project. If the version of `azd` is outside this range, the project will fail to load. Optional (allows all versions if absent).", + "examples": [ + ">= 0.6.0-beta.3" + ] + } + } + }, + "state": { + "type": "object", + "title": "The state configuration used for the project.", + "description": "Optional. Provides additional configuration for state management.", + "additionalProperties": false, + "properties": { + "remote": { + "type": "object", + "additionalProperties": false, + "title": "The remote state configuration.", + "description": "Optional. Provides additional configuration for remote state management such as Azure Blob Storage.", + "required": [ + "backend" + ], + "properties": { + "backend": { + "type": "string", + "title": "The remote state backend type.", + "description": "Optional. 
The remote state backend type. (Default: AzureBlobStorage)", + "default": "AzureBlobStorage", + "enum": [ + "AzureBlobStorage" + ] + }, + "config": { + "type": "object", + "additionalProperties": true + } + }, + "allOf": [ + { + "if": { + "properties": { + "backend": { + "const": "AzureBlobStorage" + } + } + }, + "then": { + "required": [ + "config" + ], + "properties": { + "config": { + "$ref": "#/definitions/azureBlobStorageConfig" + } + } + } + } + ] + } + } + }, + "platform": { + "type": "object", + "title": "The platform configuration used for the project.", + "description": "Optional. Provides additional configuration for platform specific features such as Azure Dev Center.", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "title": "The platform type.", + "description": "Required. The platform type. (Example: devcenter)", + "enum": [ + "devcenter" + ] + }, + "config": { + "type": "object", + "additionalProperties": true + } + }, + "allOf": [ + { + "if": { + "properties": { + "type": { + "const": "devcenter" + } + } + }, + "then": { + "properties": { + "config": { + "$ref": "#/definitions/azureDevCenterConfig" + } + } + } + } + ] + }, + "workflows": { + "type": "object", + "title": "The workflows configuration used for the project.", + "description": "Optional. Provides additional configuration for workflows such as override azd up behavior.", + "additionalProperties": false, + "properties": { + "up": { + "title": "The up workflow configuration", + "description": "When specified will override the default behavior for the azd up workflow. Common use cases include changing the order of the provision, package and deploy commands.", + "$ref": "#/definitions/workflow" + } + } + }, + "cloud": { + "type": "object", + "title": "The cloud configuration used for the project.", + "description": "Optional. Provides additional configuration for deploying to sovereign clouds such as Azure Government. The default cloud is AzureCloud.", + "additionalProperties": false, + "properties": { + "name": { + "enum": [ + "AzureCloud", + "AzureChinaCloud", + "AzureUSGovernment" + ] + } + } + } + }, + "definitions": { + "hooks": { + "anyOf": [ + { + "$ref": "#/definitions/hook" + }, + { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/hook" + } + } + ] + }, + "hook": { + "type": "object", + "additionalProperties": false, + "properties": { + "shell": { + "type": "string", + "title": "Type of shell to execute scripts", + "description": "Optional. The type of shell to use for the hook. (Default: sh)", + "enum": [ + "sh", + "pwsh" + ], + "default": "sh" + }, + "run": { + "type": "string", + "title": "Required. The inline script or relative path of your scripts from the project or service path", + "description": "When specifying an inline script you also must specify the `shell` to use. This is automatically inferred when using paths." + }, + "continueOnError": { + "type": "boolean", + "default": false, + "title": "Whether or not a script error will halt the azd command", + "description": "Optional. When set to true will continue to run the command even after a script error has occurred. (Default: false)" + }, + "interactive": { + "type": "boolean", + "default": false, + "title": "Whether the script will run in interactive mode", + "description": "Optional. When set to true will bind the script to stdin, stdout & stderr of the running console. 
(Default: false)" + }, + "windows": { + "title": "The hook configuration used for Windows environments", + "description": "When specified overrides the hook configuration when executed in Windows environments", + "default": null, + "$ref": "#/definitions/hook" + }, + "posix": { + "title": "The hook configuration used for POSIX (Linux & MacOS) environments", + "description": "When specified overrides the hook configuration when executed in POSIX environments", + "default": null, + "$ref": "#/definitions/hook" + }, + "secrets": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "Optional. Map of azd environment variables to hook secrets.", + "description": "If variable was set as a secret in the environment, the secret value will be passed to the hook.", + "examples": [ + { + "WITH_SECRET_VALUE": "ENV_VAR_WITH_SECRET" + } + ] + } + }, + "allOf": [ + { + "if": { + "allOf": [ + { + "required": [ + "windows" + ] + }, + { + "required": [ + "posix" + ] + } + ] + }, + "then": { + "properties": { + "run": false, + "shell": false, + "interactive": false, + "continueOnError": false, + "secrets": false + } + } + }, + { + "if": { + "anyOf": [ + { + "required": [ + "interactive" + ] + }, + { + "required": [ + "continueOnError" + ] + }, + { + "required": [ + "secrets" + ] + }, + { + "required": [ + "shell" + ] + } + ] + }, + "then": { + "required": [ + "run" + ] + } + } + ] + }, + "docker": { + "type": "object", + "description": "This is only applicable when `host` is `containerapp` or `aks`", + "additionalProperties": false, + "properties": { + "path": { + "type": "string", + "title": "The path to the Dockerfile", + "description": "Path to the Dockerfile is relative to your service", + "default": "./Dockerfile" + }, + "context": { + "type": "string", + "title": "The docker build context", + "description": "When specified overrides the default context", + "default": "." + }, + "platform": { + "type": "string", + "title": "The platform target", + "default": "amd64" + }, + "registry": { + "type": "string", + "title": "Optional. The container registry to push the image to.", + "description": "If omitted, will default to value of AZURE_CONTAINER_REGISTRY_ENDPOINT environment variable. Supports environment variable substitution." + }, + "image": { + "type": "string", + "title": "Optional. The name that will be applied to the built container image.", + "description": "If omitted, will default to the '{appName}/{serviceName}-{environmentName}'. Supports environment variable substitution." + }, + "tag": { + "type": "string", + "title": "The tag that will be applied to the built container image.", + "description": "If omitted, will default to 'azd-deploy-{unix time (seconds)}'. Supports environment variable substitution. For example, to generate unique tags for a given release: myapp/myimage:${DOCKER_IMAGE_TAG}" + }, + "buildArgs": { + "type": "array", + "title": "Optional. Build arguments to pass to the docker build command", + "description": "Build arguments to pass to the docker build command.", + "items": { + "type": "string" + } + }, + "remoteBuild": { + "type": "boolean", + "title": "Optional. Whether to build the image remotely", + "description": "If set to true, the image will be built remotely using the Azure Container Registry remote build feature. If set to false, the image will be built locally using Docker." + } + } + }, + "aksOptions": { + "type": "object", + "title": "Optional. 
The Azure Kubernetes Service (AKS) configuration options", + "additionalProperties": false, + "properties": { + "deploymentPath": { + "type": "string", + "title": "Optional. The relative path from the service path to the k8s deployment manifests. (Default: manifests)", + "description": "When set it will override the default deployment path location for k8s deployment manifests.", + "default": "manifests" + }, + "namespace": { + "type": "string", + "title": "Optional. The k8s namespace of the deployed resources. (Default: Project name)", + "description": "When specified a new k8s namespace will be created if it does not already exist" + }, + "deployment": { + "type": "object", + "title": "Optional. The k8s deployment configuration", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "title": "Optional. The name of the k8s deployment resource to use during deployment. (Default: Service name)", + "description": "Used during deployment to ensure if the k8s deployment rollout has been completed. If not set will search for a deployment resource in the same namespace that contains the service name." + } + } + }, + "service": { + "type": "object", + "title": "Optional. The k8s service configuration", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "title": "Optional. The name of the k8s service resource to use as the default service endpoint. (Default: Service name)", + "description": "Used when determining endpoints for the default service resource. If not set will search for a deployment resource in the same namespace that contains the service name." + } + } + }, + "ingress": { + "type": "object", + "title": "Optional. The k8s ingress configuration", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "title": "Optional. The name of the k8s ingress resource to use as the default service endpoint. (Default: Service name)", + "description": "Used when determining endpoints for the default ingress resource. If not set will search for a deployment resource in the same namespace that contains the service name." + }, + "relativePath": { + "type": "string", + "title": "Optional. The relative path to the service from the root of your ingress controller.", + "description": "When set will be appended to the root of your ingress resource path." + } + } + }, + "helm": { + "type": "object", + "title": "Optional. The helm configuration", + "additionalProperties": false, + "properties": { + "repositories": { + "type": "array", + "title": "Optional. The helm repositories to add", + "description": "When set will add the helm repositories to the helm client.", + "minItems": 1, + "items": { + "type": "object", + "additionalProperties": false, + "required": [ + "name", + "url" + ], + "properties": { + "name": { + "type": "string", + "title": "The name of the helm repository", + "description": "The name of the helm repository to add." + }, + "url": { + "type": "string", + "title": "The url of the helm repository", + "description": "The url of the helm repository to add." + } + } + } + }, + "releases": { + "type": "array", + "title": "Optional. 
The helm releases to install", + "description": "When set will install the helm releases to the k8s cluster.", + "minItems": 1, + "items": { + "type": "object", + "additionalProperties": false, + "required": [ + "name", + "chart" + ], + "properties": { + "name": { + "type": "string", + "title": "The name of the helm release", + "description": "The name of the helm release to install." + }, + "chart": { + "type": "string", + "title": "The name of the helm chart", + "description": "The name of the helm chart to install." + }, + "version": { + "type": "string", + "title": "The version of the helm chart", + "description": "The version of the helm chart to install." + }, + "namespace": { + "type": "string", + "title": "Optional. The k8s namespace to install the helm chart", + "description": "When set will install the helm chart to the specified namespace. Defaults to the service namespace." + }, + "values": { + "type": "string", + "title": "Optional. Relative path from service to a values.yaml to pass to the helm chart", + "description": "When set will pass the values to the helm chart." + } + } + } + } + } + }, + "kustomize": { + "type": "object", + "title": "Optional. The kustomize configuration", + "additionalProperties": false, + "properties": { + "dir": { + "type": "string", + "title": "Optional. The relative path to the kustomize directory.", + "description": "When set will use the kustomize directory to deploy to the k8s cluster. Supports environment variable substitution." + }, + "edits": { + "type": "array", + "title": "Optional. The kustomize edits to apply before deployment.", + "description": "When set will apply the edits to the kustomize directory before deployment. Supports environment variable substitution.", + "items": { + "type": "string" + } + }, + "env": { + "type": "object", + "title": "Optional. The environment key/value pairs used to generate a .env file.", + "description": "When set will generate a .env file in the kustomize directory. Values support environment variable substitution.", + "additionalProperties": { + "type": [ + "string", + "boolean", + "number" + ] + } + } + } + } + } + }, + "azureBlobStorageConfig": { + "type": "object", + "title": "The Azure Blob Storage remote state backend configuration.", + "description": "Optional. Provides additional configuration for remote state management such as Azure Blob Storage.", + "additionalProperties": false, + "required": [ + "accountName" + ], + "properties": { + "accountName": { + "type": "string", + "title": "The Azure Storage account name.", + "description": "Required. The Azure Storage account name." + }, + "containerName": { + "type": "string", + "title": "The Azure Storage container name.", + "description": "Optional. The Azure Storage container name. Defaults to project name if not specified." + }, + "endpoint": { + "type": "string", + "title": "The Azure Storage endpoint.", + "description": "Optional. The Azure Storage endpoint. (Default: blob.core.windows.net)" + } + } + }, + "azureDevCenterConfig": { + "type": "object", + "title": "The dev center configuration used for the project.", + "description": "Optional. Provides additional project configuration for Azure Dev Center integration.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "title": "The name of the Azure Dev Center", + "description": "Optional. Used as the default dev center for this project." 
+ }, + "project": { + "type": "string", + "title": "The name of the Azure Dev Center project.", + "description": "Optional. Used as the default dev center project for this project." + }, + "catalog": { + "type": "string", + "title": "The name of the Azure Dev Center catalog.", + "description": "Optional. Used as the default dev center catalog for this project." + }, + "environmentDefinition": { + "type": "string", + "title": "The name of the Dev Center catalog environment definition.", + "description": "Optional. Used as the default dev center environment definition for this project." + }, + "environmentType": { + "type": "string", + "title": "The Dev Center project environment type used for the deployment environment.", + "description": "Optional. Used as the default environment type for this project." + } + } + }, + "workflow": { + "anyOf": [ + { + "type": "object", + "additionalProperties": false, + "required": [ + "steps" + ], + "properties": { + "steps": { + "type": "array", + "title": "The steps to execute in the workflow", + "description": "The steps to execute in the workflow. (Example: provision, package, deploy)", + "minItems": 1, + "items": { + "type": "object", + "$ref": "#/definitions/workflowStep" + } + } + } + }, + { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/workflowStep" + } + } + ] + }, + "workflowStep": { + "properties": { + "azd": { + "title": "The azd command configuration", + "description": "The azd command configuration to execute. (Example: up)", + "$ref": "#/definitions/azdCommand" + } + } + }, + "azdCommand": { + "anyOf": [ + { + "type": "string", + "title": "The azd command to execute", + "description": "The name and args of the azd command to execute. (Example: deploy --all)" + }, + { + "type": "object", + "additionalProperties": false, + "required": [ + "args" + ], + "properties": { + "args": { + "type": "array", + "title": "The arguments or flags to pass to the azd command", + "description": "The arguments to pass to the azd command. (Example: --all)", + "minItems": 1 + } + } + } + ] + }, + "aiComponentConfig": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name of the AI component.", + "description": "Optional. When omitted AZD will generate a name based on the component type and the service name. Supports environment variable substitution." + }, + "path": { + "type": "string", + "title": "Path to the AI component configuration file or path.", + "description": "Required. The path to the AI component configuration file or path to the AI component source code." + }, + "overrides": { + "type": "object", + "title": "A map of key value pairs used to override the AI component configuration.", + "description": "Optional. Supports environment variable substitution.", + "additionalProperties": { + "type": "string" + } + } + }, + "required": [ + "path" + ] + }, + "aiDeploymentConfig": { + "allOf": [ + { + "$ref": "#/definitions/aiComponentConfig" + }, + { + "type": "object", + "properties": { + "environment": { + "type": "object", + "title": "A map of key/value pairs to set as environment variables for the deployment.", + "description": "Optional. 
Values support OS & AZD environment variable substitution.", + "additionalProperties": { + "type": "string" + } + } + } + } + ] + }, + "aiEndpointConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "workspace": { + "type": "string", + "title": "The name of the AI Studio project workspace.", + "description": "Optional. When omitted AZD will use the value specified in the 'AZUREAI_PROJECT_NAME' environment variable. Supports environment variable substitution." + }, + "flow": { + "$ref": "#/definitions/aiComponentConfig", + "title": "The Azure AI Studio Prompt Flow configuration.", + "description": "Optional. When omitted a prompt flow will not be created." + }, + "environment": { + "$ref": "#/definitions/aiComponentConfig", + "title": "The Azure AI Studio custom environment configuration.", + "description": "Optional. When omitted a custom environment will not be created." + }, + "model": { + "$ref": "#/definitions/aiComponentConfig", + "title": "The Azure AI Studio model configuration.", + "description": "Optional. When omitted a model will not be created." + }, + "deployment": { + "$ref": "#/definitions/aiDeploymentConfig", + "title": "The Azure AI Studio online endpoint deployment configuration.", + "description": "Required. A new online endpoint deployment will be created and traffic will automatically be shifted to the new deployment upon successful completion." + } + }, + "required": [ + "deployment" + ] + }, + "appServiceResource": { + "type": "object", + "description": "An Azure App Service web app.", + "additionalProperties": false, + "required": [ + "port", + "runtime" + ], + "properties": { + "type": { + "type": "string", + "const": "host.appservice" + }, + "uses": { + "type": "array", + "title": "Other resources that this resource uses", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "port": { + "type": "integer", + "title": "Port that the web app listens on", + "description": "Optional. The port that the web app listens on. (Default: 80)" + }, + "env": { + "type": "array", + "title": "Environment variables to set for the web app", + "items": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "title": "Name of the environment variable" + }, + "value": { + "type": "string", + "title": "Value of the environment variable. Supports environment variable substitution." + }, + "secret": { + "type": "string", + "title": "Secret value of the environment variable. Supports environment variable substitution." + } + } + } + }, + "runtime": { + "type": "object", + "title": "Runtime stack configuration", + "description": "Required. The language runtime configuration for the App Service web app.", + "required": [ + "stack", + "version" + ], + "properties": { + "stack": { + "type": "string", + "title": "Language runtime stack", + "description": "Required. The language runtime stack.", + "enum": [ + "node", + "python" + ] + }, + "version": { + "type": "string", + "title": "Runtime stack version", + "description": "Required. The language runtime version. Format varies by stack. (Example: '22-lts' for Node, '3.13' for Python)" + } + } + }, + "startupCommand": { + "type": "string", + "title": "Startup command", + "description": "Optional. Startup command that will be run as part of web app startup." 
+ } + } + }, + "containerAppResource": { + "type": "object", + "description": "A Docker-based container app.", + "additionalProperties": false, + "required": [ + "port" + ], + "properties": { + "type": { + "type": "string", + "const": "host.containerapp" + }, + "uses": { + "type": "array", + "title": "Other resources that this resource uses", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "port": { + "type": "integer", + "title": "Port that the container app listens on", + "description": "Optional. The port that the container app listens on. (Default: 80)" + }, + "env": { + "type": "array", + "title": "Environment variables to set for the container app", + "items": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "title": "Name of the environment variable" + }, + "value": { + "type": "string", + "title": "Value of the environment variable. Supports environment variable substitution." + }, + "secret": { + "type": "string", + "title": "Secret value of the environment variable. Supports environment variable substitution." + } + } + } + } + } + }, + "aiModelResource": { + "type": "object", + "description": "A deployed, ready-to-use AI model.", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "const": "ai.openai.model" + }, + "existing": { + "type": "boolean", + "title": "An existing resource for referencing purposes", + "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", + "default": false + }, + "model": { + "type": "object", + "description": "The underlying AI model.", + "additionalProperties": false, + "required": [ + "name", + "version" + ], + "properties": { + "name": { + "type": "string", + "title": "The name of the AI model.", + "description": "Required. The name of the AI model." + }, + "version": { + "type": "string", + "title": "The version of the AI model.", + "description": "Required. The version of the AI model." + } + } + } + }, + "allOf": [ + { + "if": { + "properties": { + "existing": { + "const": false + } + } + }, + "then": { + "required": [ + "model" + ] + } + } + ] + }, + "aiProjectResource": { + "type": "object", + "description": "An Azure AI Foundry project with models.", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "const": "ai.project" + }, + "existing": { + "type": "boolean", + "title": "An existing resource for referencing purposes", + "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", + "default": false + }, + "models": { + "type": "array", + "title": "AI models to deploy", + "description": "Optional. The AI models to be deployed as part of the AI project.", + "minItems": 1, + "items": { + "type": "object", + "additionalProperties": false, + "required": [ + "name", + "version", + "format", + "sku" + ], + "properties": { + "name": { + "type": "string", + "title": "The name of the AI model.", + "description": "Required. The name of the AI model." + }, + "version": { + "type": "string", + "title": "The version of the AI model.", + "description": "Required. The version of the AI model." + }, + "format": { + "type": "string", + "title": "The format of the AI model.", + "description": "Required. The format of the AI model. 
(Example: Microsoft, OpenAI)" + }, + "sku": { + "type": "object", + "title": "The SKU configuration for the AI model.", + "description": "Required. The SKU details for the AI model.", + "additionalProperties": false, + "required": [ + "name", + "usageName", + "capacity" + ], + "properties": { + "name": { + "type": "string", + "title": "The name of the SKU.", + "description": "Required. The name of the SKU. (Example: GlobalStandard)" + }, + "usageName": { + "type": "string", + "title": "The usage name of the SKU.", + "description": "Required. The usage name of the SKU for billing purposes. (Example: AIServices.GlobalStandard.MaaS, OpenAI.GlobalStandard.gpt-4o-mini)" + }, + "capacity": { + "type": "integer", + "title": "The capacity of the SKU.", + "description": "Required. The capacity of the SKU." + } + } + } + } + } + } + } + }, + "aiSearchResource": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "const": "ai.search" + }, + "existing": { + "type": "boolean", + "title": "An existing resource for referencing purposes", + "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", + "default": false + } + } + }, + "genericDbResource": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "title": "Type of resource", + "description": "The type of resource to be created. (Example: db.postgres)", + "enum": [ + "db.postgres", + "db.redis", + "db.mysql", + "db.mongo" + ] + } + } + }, + "cosmosDbResource": { + "type": "object", + "description": "A deployed, ready-to-use Azure Cosmos DB for NoSQL database.", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "const": "db.cosmos" + }, + "containers": { + "type": "array", + "title": "Containers", + "description": "Containers to be created to store data. Each container stores a collection of items.", + "items": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "title": "Container name.", + "description": "Required. The name of the container." + }, + "partitionKeys": { + "type": "array", + "title": "Partition keys.", + "description": "Required. The partition key(s) used to distribute data across partitions. The ordering of keys matters. By default, a single partition key '/id' is naturally a great choice for most applications.", + "minLength": 1, + "maxLength": 3, + "items": { + "type": "string" + } + } + } + } + } + } + }, + "eventHubsResource": { + "type": "object", + "description": "An Azure Event Hubs namespace.", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "const": "messaging.eventhubs" + }, + "existing": { + "type": "boolean", + "title": "An existing resource for referencing purposes", + "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. 
(Default: false)", + "default": false + }, + "hubs": { + "type": "array", + "title": "Hubs to create in the Event Hubs namespace", + "additionalProperties": false, + "items": { + "type": "string" + }, + "uniqueItems": true + } + } + }, + "serviceBusResource": { + "type": "object", + "description": "An Azure Service Bus namespace.", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "const": "messaging.servicebus" + }, + "existing": { + "type": "boolean", + "title": "An existing resource for referencing purposes", + "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", + "default": false + }, + "queues": { + "type": "array", + "title": "Queues to create in the Service Bus namespace", + "additionalProperties": false, + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "topics": { + "type": "array", + "title": "Topics to create in the Service Bus namespace", + "additionalProperties": false, + "items": { + "type": "string" + }, + "uniqueItems": true + } + } + }, + "storageAccountResource": { + "type": "object", + "description": "A deployed, ready-to-use Azure Storage Account.", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "const": "storage" + }, + "existing": { + "type": "boolean", + "title": "An existing resource for referencing purposes", + "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", + "default": false + }, + "containers": { + "type": "array", + "title": "Azure Storage Account container names.", + "description": "The container names of Azure Storage Account.", + "items": { + "type": "string", + "title": "Azure Storage Account container name", + "description": "The container name of Azure Storage Account." + } + } + } + }, + "keyVaultResource": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "const": "keyvault" + }, + "existing": { + "type": "boolean", + "title": "An existing resource for referencing purposes", + "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. 
(Default: false)", + "default": false + } + } + } + } +} \ No newline at end of file diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go new file mode 100644 index 00000000000..05a9bc1619f --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go @@ -0,0 +1,14 @@ +package prompts + +import ( + _ "embed" +) + +//go:embed azd_plan_init.md +var AzdPlanInitPrompt string + +//go:embed azd_iac_generation_rules.md +var AzdIacRulesPrompt string + +//go:embed azure.yaml.json +var AzdYamlSchemaPrompt string diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/common/types.go b/cli/azd/extensions/azd.ai.start/internal/tools/common/types.go new file mode 100644 index 00000000000..47f14eea64e --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/common/types.go @@ -0,0 +1,7 @@ +package common + +// ErrorResponse represents a JSON error response structure that can be reused across all tools +type ErrorResponse struct { + Error bool `json:"error"` + Message string `json:"message"` +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go b/cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go index 410db725505..e1fc36a05f3 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go @@ -4,9 +4,12 @@ import ( "context" "encoding/json" "fmt" + "os" "os/exec" + "runtime" "strings" + "azd.ai.start/internal/tools/common" "github.com/tmc/langchaingo/callbacks" ) @@ -20,7 +23,7 @@ func (t CommandExecutorTool) Name() string { } func (t CommandExecutorTool) Description() string { - return `Execute any command with arguments. Simple command execution without inference. + return `Execute any command with arguments through the system shell for better compatibility. 
Input should be a JSON object with these fields: { @@ -34,10 +37,19 @@ Required fields: Optional fields: - args: Array of arguments to pass (default: []) +Returns a JSON response with execution details: +- Success responses include: command, fullCommand, exitCode, success, stdout, stderr +- Error responses include: error (true), message + +The tool automatically uses the appropriate shell: +- Windows: cmd.exe /C for built-in commands and proper path resolution +- Unix/Linux/macOS: sh -c for POSIX compatibility + Examples: - {"command": "git", "args": ["status"]} - {"command": "npm", "args": ["install"]} -- {"command": "bash", "args": ["./build.sh", "--env", "prod"]} +- {"command": "dir"} (Windows built-in command) +- {"command": "ls", "args": ["-la"]} (Unix command) - {"command": "powershell", "args": ["-ExecutionPolicy", "Bypass", "-File", "deploy.ps1"]} - {"command": "python", "args": ["main.py", "--debug"]} - {"command": "node", "args": ["server.js", "--port", "3000"]} @@ -51,6 +63,15 @@ type CommandRequest struct { Args []string `json:"args,omitempty"` } +type CommandResponse struct { + Command string `json:"command"` + FullCommand string `json:"fullCommand"` + ExitCode int `json:"exitCode"` + Success bool `json:"success"` + Stdout string `json:"stdout,omitempty"` + Stderr string `json:"stderr,omitempty"` +} + func (t CommandExecutorTool) Call(ctx context.Context, input string) (string, error) { // Invoke callback for tool start if t.CallbacksHandler != nil { @@ -58,30 +79,42 @@ func (t CommandExecutorTool) Call(ctx context.Context, input string) (string, er } if input == "" { - err := fmt.Errorf("command execution request is required") + errorResponse := common.ErrorResponse{ + Error: true, + Message: "command execution request is required", + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("command execution request is required")) } - return "", err + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } // Parse the JSON request var req CommandRequest if err := json.Unmarshal([]byte(input), &req); err != nil { - toolErr := fmt.Errorf("failed to parse command request: %w", err) + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("failed to parse command request: %s", err.Error()), + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to parse command request: %w", err)) } - return "", toolErr + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } // Validate required fields if req.Command == "" { - err := fmt.Errorf("command is required") + errorResponse := common.ErrorResponse{ + Error: true, + Message: "command is required", + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("command is required")) } - return "", err + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } // Set defaults @@ -92,28 +125,66 @@ func (t CommandExecutorTool) Call(ctx context.Context, input string) (string, er // Execute the command (runs in current working directory) result, err := t.executeCommand(ctx, req.Command, req.Args) if err != nil { - toolErr := fmt.Errorf("execution failed: %w", err) + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("execution failed: %s", err.Error()), + 
} if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("execution failed: %w", err)) } - return "", toolErr + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } - // Format the output - output := t.formatOutput(req.Command, req.Args, result) + // Create the success response (even if command had non-zero exit code) + response := t.createSuccessResponse(req.Command, req.Args, result) + + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("failed to marshal JSON response: %s", err.Error()), + } + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to marshal JSON response: %w", err)) + } + errorJsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(errorJsonData), nil + } // Invoke callback for tool end if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + t.CallbacksHandler.HandleToolEnd(ctx, string(jsonData)) } - return output, nil + return string(jsonData), nil } func (t CommandExecutorTool) executeCommand(ctx context.Context, command string, args []string) (*executionResult, error) { - cmd := exec.CommandContext(ctx, command, args...) - // cmd.Dir is not set, so it uses the current working directory - // cmd.Env is not set, so it inherits the current environment + // Handle shell-specific command execution for better compatibility + var cmd *exec.Cmd + + if runtime.GOOS == "windows" { + // On Windows, use cmd.exe to handle built-in commands and path resolution + allArgs := append([]string{"/C", command}, args...) + cmd = exec.CommandContext(ctx, "cmd", allArgs...) + } else { + // On Unix-like systems, use sh for better command resolution + fullCommand := command + if len(args) > 0 { + fullCommand += " " + strings.Join(args, " ") + } + cmd = exec.CommandContext(ctx, "sh", "-c", fullCommand) + } + + // Set working directory explicitly to current directory + if wd, err := os.Getwd(); err == nil { + cmd.Dir = wd + } + + // Inherit environment variables + cmd.Env = os.Environ() var stdout, stderr strings.Builder @@ -122,11 +193,18 @@ func (t CommandExecutorTool) executeCommand(ctx context.Context, command string, cmd.Stderr = &stderr err := cmd.Run() - // Get exit code + // Get exit code and determine if this is a system error vs command error exitCode := 0 + var cmdError error + if err != nil { if exitError, ok := err.(*exec.ExitError); ok { + // Command ran but exited with non-zero code - this is normal exitCode = exitError.ExitCode() + cmdError = nil // Don't treat non-zero exit as a system error + } else { + // System error (command not found, permission denied, etc.) 
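+ // With the sh/cmd wrappers above, this branch typically means the shell itself could not be launched; a failing target command surfaces as a non-zero exit code instead.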
+ cmdError = err } } @@ -134,60 +212,41 @@ func (t CommandExecutorTool) executeCommand(ctx context.Context, command string, ExitCode: exitCode, Stdout: stdout.String(), Stderr: stderr.String(), - Error: err, - }, nil + Error: cmdError, // Only system errors, not command exit codes + }, cmdError // Return system errors to caller } -type executionResult struct { - ExitCode int - Stdout string - Stderr string - Error error -} - -func (t CommandExecutorTool) formatOutput(command string, args []string, result *executionResult) string { - var output strings.Builder - - // Show the full command that was executed +func (t CommandExecutorTool) createSuccessResponse(command string, args []string, result *executionResult) CommandResponse { + // Create full command string fullCommand := command if len(args) > 0 { fullCommand += " " + strings.Join(args, " ") } - output.WriteString(fmt.Sprintf("Executed: %s\n", fullCommand)) - output.WriteString(fmt.Sprintf("Exit code: %d\n", result.ExitCode)) - - if result.ExitCode == 0 { - output.WriteString("Status: ✅ Success\n") - } else { - output.WriteString("Status: ❌ Failed\n") + // Limit output to prevent overwhelming the response + stdout := result.Stdout + if len(stdout) > 2000 { + stdout = stdout[:2000] + "\n... (output truncated)" } - if result.Stdout != "" { - output.WriteString("\n--- Standard Output ---\n") - // Limit output to prevent overwhelming the LLM - stdout := result.Stdout - if len(stdout) > 2000 { - stdout = stdout[:2000] + "\n... (output truncated)" - } - output.WriteString(stdout) - output.WriteString("\n") + stderr := result.Stderr + if len(stderr) > 1000 { + stderr = stderr[:1000] + "\n... (error output truncated)" } - if result.Stderr != "" { - output.WriteString("\n--- Standard Error ---\n") - // Limit error output - stderr := result.Stderr - if len(stderr) > 1000 { - stderr = stderr[:1000] + "\n... (error output truncated)" - } - output.WriteString(stderr) - output.WriteString("\n") - } - - if result.Error != nil && result.ExitCode != 0 { - output.WriteString(fmt.Sprintf("\nError details: %s\n", result.Error.Error())) + return CommandResponse{ + Command: command, + FullCommand: fullCommand, + ExitCode: result.ExitCode, + Success: result.ExitCode == 0, + Stdout: stdout, + Stderr: stderr, } +} - return output.String() +type executionResult struct { + ExitCode int + Stdout string + Stderr string + Error error } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go index 2db2eae1c1c..ba48734dd51 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go @@ -2,11 +2,13 @@ package io import ( "context" + "encoding/json" "fmt" "io" "os" "strings" + "azd.ai.start/internal/tools/common" "github.com/tmc/langchaingo/callbacks" ) @@ -20,96 +22,160 @@ func (t CopyFileTool) Name() string { } func (t CopyFileTool) Description() string { - return "Copy a file to a new location. Input format: 'source|destination' (e.g., 'file.txt|backup.txt' or './docs/readme.md|./backup/readme.md')" + return `Copy a file to a new location. +Input: JSON object with required 'source' and 'destination' fields: {"source": "file.txt", "destination": "backup.txt"} +Returns: JSON with copy operation details or error information. 
+The input must be formatted as a single line valid JSON string.` } func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("copy_file: %s", input)) + // Parse JSON input + type InputParams struct { + Source string `json:"source"` + Destination string `json:"destination"` } - if input == "" { - err := fmt.Errorf("input is required in format 'source|destination'") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + var params InputParams + + // Clean the input first + cleanInput := strings.TrimSpace(input) + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("copy_file: %s", cleanInput)) } - // Split on first occurrence of '|' to separate source from destination - parts := strings.SplitN(input, "|", 2) - if len(parts) != 2 { - err := fmt.Errorf("invalid input format. Use 'source|destination'") + // Parse as JSON - this is now required + if err := json.Unmarshal([]byte(cleanInput), ¶ms); err != nil { + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"source\": \"file.txt\", \"destination\": \"backup.txt\"}", err.Error()), + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to parse JSON input: %w", err)) } - return "", err + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } - source := strings.TrimSpace(parts[0]) - destination := strings.TrimSpace(parts[1]) + source := strings.TrimSpace(params.Source) + destination := strings.TrimSpace(params.Destination) if source == "" || destination == "" { - err := fmt.Errorf("both source and destination paths are required") + errorResponse := common.ErrorResponse{ + Error: true, + Message: "Both source and destination paths are required", + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("both source and destination paths are required")) } - return "", err + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } // Check if source file exists sourceInfo, err := os.Stat(source) if err != nil { - toolErr := fmt.Errorf("source file %s does not exist: %w", source, err) + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Source file %s does not exist: %s", source, err.Error()), + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("source file %s does not exist: %w", source, err)) } - return "", toolErr + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } if sourceInfo.IsDir() { - err := fmt.Errorf("source %s is a directory. Use copy_directory for directories", source) + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Source %s is a directory. Use copy_directory for directories", source), + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("source %s is a directory. 
Use copy_directory for directories", source)) } - return "", err + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } // Open source file sourceFile, err := os.Open(source) if err != nil { - toolErr := fmt.Errorf("failed to open source file %s: %w", source, err) + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Failed to open source file %s: %s", source, err.Error()), + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to open source file %s: %w", source, err)) } - return "", toolErr + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } defer sourceFile.Close() // Create destination file destFile, err := os.Create(destination) if err != nil { - toolErr := fmt.Errorf("failed to create destination file %s: %w", destination, err) + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Failed to create destination file %s: %s", destination, err.Error()), + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to create destination file %s: %w", destination, err)) } - return "", toolErr + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } defer destFile.Close() // Copy contents bytesWritten, err := io.Copy(destFile, sourceFile) if err != nil { - toolErr := fmt.Errorf("failed to copy file: %w", err) + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Failed to copy file: %s", err.Error()), + } + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to copy file: %w", err)) + } + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil + } + + // Prepare JSON response structure + type CopyResponse struct { + Success bool `json:"success"` + Source string `json:"source"` + Destination string `json:"destination"` + BytesCopied int64 `json:"bytesCopied"` + Message string `json:"message"` + } + + response := CopyResponse{ + Success: true, + Source: source, + Destination: destination, + BytesCopied: bytesWritten, + Message: fmt.Sprintf("Successfully copied %s to %s (%d bytes)", source, destination, bytesWritten), + } + + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Failed to marshal JSON response: %s", err.Error()), + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to marshal JSON response: %w", err)) } - return "", toolErr + errorJsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(errorJsonData), nil } - output := fmt.Sprintf("Copied %s to %s (%d bytes)\n", source, destination, bytesWritten) + output := string(jsonData) if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + t.CallbacksHandler.HandleToolEnd(ctx, fmt.Sprintf("Copied %s to %s (%d bytes)", source, destination, bytesWritten)) } return output, nil diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/create_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/create_directory.go index d100e7aa834..c79ac8d46c6 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/create_directory.go +++ 
b/cli/azd/extensions/azd.ai.start/internal/tools/io/create_directory.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "strings" "github.com/tmc/langchaingo/callbacks" ) @@ -27,6 +28,9 @@ func (t CreateDirectoryTool) Call(ctx context.Context, input string) (string, er t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("create_directory: %s", input)) } + input = strings.TrimPrefix(input, `"`) + input = strings.TrimSuffix(input, `"`) + if input == "" { err := fmt.Errorf("directory path is required") if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_directory.go index 72714f379b9..7afb090f868 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_directory.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "strings" "github.com/tmc/langchaingo/callbacks" ) @@ -27,6 +28,9 @@ func (t DeleteDirectoryTool) Call(ctx context.Context, input string) (string, er t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("delete_directory: %s", input)) } + input = strings.TrimPrefix(input, `"`) + input = strings.TrimSuffix(input, `"`) + if input == "" { err := fmt.Errorf("directory path is required") if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_file.go index b893bb1ee29..57c51b415de 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_file.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "strings" "github.com/tmc/langchaingo/callbacks" ) @@ -26,6 +27,9 @@ func (t DeleteFileTool) Call(ctx context.Context, input string) (string, error) t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("delete_file: %s", input)) } + input = strings.TrimPrefix(input, `"`) + input = strings.TrimSuffix(input, `"`) + if input == "" { err := fmt.Errorf("file path is required") if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go index f02e8e8df5e..c0b4e09ee2e 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go @@ -8,6 +8,7 @@ import ( "path/filepath" "strings" + "azd.ai.start/internal/tools/common" "github.com/tmc/langchaingo/callbacks" ) @@ -16,12 +17,6 @@ type DirectoryListTool struct { CallbacksHandler callbacks.Handler } -// ErrorResponse represents a JSON error response structure that can be reused across all tools -type ErrorResponse struct { - Error bool `json:"error"` - Message string `json:"message"` -} - func (t DirectoryListTool) Name() string { return "list_directory" } @@ -47,7 +42,7 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro // Parse as JSON - this is now required if err := json.Unmarshal([]byte(cleanInput), ¶ms); err != nil { - errorResponse := ErrorResponse{ + errorResponse := common.ErrorResponse{ Error: true, Message: fmt.Sprintf("Invalid JSON input: %s. 
Expected format: {\"path\": \".\", \"include_hidden\": false}", err.Error()), } @@ -78,7 +73,7 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro // Explicitly get current working directory instead of relying on filepath.Abs(".") absPath, err = os.Getwd() if err != nil { - errorResponse := ErrorResponse{ + errorResponse := common.ErrorResponse{ Error: true, Message: fmt.Sprintf("Failed to get current working directory: %s", err.Error()), } @@ -91,7 +86,7 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro } else { absPath, err = filepath.Abs(path) if err != nil { - errorResponse := ErrorResponse{ + errorResponse := common.ErrorResponse{ Error: true, Message: fmt.Sprintf("Failed to get absolute path for %s: %s", path, err.Error()), } @@ -122,7 +117,7 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro message = fmt.Sprintf("Failed to access %s: %s (original input: '%s', cleaned path: '%s')", absPath, err.Error(), input, path) } - errorResponse := ErrorResponse{ + errorResponse := common.ErrorResponse{ Error: true, Message: message, } @@ -134,7 +129,7 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro } if !info.IsDir() { - errorResponse := ErrorResponse{ + errorResponse := common.ErrorResponse{ Error: true, Message: fmt.Sprintf("Path is not a directory: %s", absPath), } @@ -148,7 +143,7 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro // List directory contents files, err := os.ReadDir(absPath) if err != nil { - errorResponse := ErrorResponse{ + errorResponse := common.ErrorResponse{ Error: true, Message: fmt.Sprintf("Failed to read directory %s: %s", absPath, err.Error()), } @@ -202,7 +197,7 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro // Convert to JSON jsonData, err := json.MarshalIndent(response, "", " ") if err != nil { - errorResponse := ErrorResponse{ + errorResponse := common.ErrorResponse{ Error: true, Message: fmt.Sprintf("Failed to marshal JSON response: %s", err.Error()), } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go index 98528d50b91..afc5a0aca15 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "os" + "strings" "time" "github.com/tmc/langchaingo/callbacks" @@ -28,6 +29,9 @@ func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("file_info: %s", input)) } + input = strings.TrimPrefix(input, `"`) + input = strings.TrimSuffix(input, `"`) + if input == "" { err := fmt.Errorf("file path is required") if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/move_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/move_file.go index 68db771d144..51c12488774 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/move_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/move_file.go @@ -27,6 +27,9 @@ func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("move_file: %s", input)) } + input = strings.TrimPrefix(input, `"`) + input = strings.TrimSuffix(input, `"`) + if input == "" { err := fmt.Errorf("input is required in format 
'source|destination'") if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/loader.go index ae2da0253c1..3a2dab5c83a 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/loader.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/loader.go @@ -4,6 +4,7 @@ import ( "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" + "azd.ai.start/internal/tools/azd" "azd.ai.start/internal/tools/dev" "azd.ai.start/internal/tools/io" ) @@ -21,6 +22,7 @@ type LocalToolsLoader struct { func NewLocalToolsLoader(callbackHandler callbacks.Handler) *LocalToolsLoader { return &LocalToolsLoader{ loaders: []ToolLoader{ + azd.NewAzdToolsLoader(callbackHandler), dev.NewDevToolsLoader(callbackHandler), io.NewIoToolsLoader(callbackHandler), }, diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json index dd2078ee03e..efca4416be8 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json +++ b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json @@ -1,19 +1,9 @@ { "servers": { - "bestpractices": { + "Azure": { "type": "stdio", "command": "azmcp", - "args": ["server", "start", "--namespace", "bestpractices"] - }, - "subscription": { - "type": "stdio", - "command": "azmcp", - "args": ["server", "start", "--namespace", "subscription"] - }, - "deploy": { - "type": "stdio", - "command": "azmcp", - "args": ["server", "start", "--namespace", "deploy"] + "args": ["server", "start"] } } } From 9a36c51c6535444a4cf262cafe222ce8490c9ddd Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Wed, 30 Jul 2025 17:30:27 -0700 Subject: [PATCH 023/116] Wire up noop sampling handler --- .../azd.ai.start/internal/tools/mcp/loader.go | 35 +++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go index 5aee8932b06..51464e0229d 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go @@ -1,6 +1,7 @@ package mcp import ( + "context" "encoding/json" "fmt" @@ -8,6 +9,8 @@ import ( langchaingo_mcp_adapter "github.com/i2y/langchaingo-mcp-adapter" "github.com/mark3labs/mcp-go/client" + "github.com/mark3labs/mcp-go/client/transport" + "github.com/mark3labs/mcp-go/mcp" "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" ) @@ -28,6 +31,14 @@ type ServerConfig struct { Env []string `json:"env,omitempty"` } +type McpSamplingHandler struct { +} + +func (h *McpSamplingHandler) CreateMessage(ctx context.Context, request mcp.CreateMessageRequest) (*mcp.CreateMessageResult, error) { + // TODO: implement sampling handler + return &mcp.CreateMessageResult{}, nil +} + type McpToolsLoader struct { callbackHandler callbacks.Handler } @@ -50,9 +61,29 @@ func (l *McpToolsLoader) LoadTools() ([]tools.Tool, error) { // Iterate through each server configuration for serverName, serverConfig := range config.Servers { // Create MCP client for the server using stdio - mcpClient, err := client.NewStdioMCPClient(serverConfig.Command, serverConfig.Env, serverConfig.Args...) + samplingHandler := &McpSamplingHandler{} + stdioTransport := transport.NewStdio(serverConfig.Command, serverConfig.Env, serverConfig.Args...) 
+ mcpClient := client.NewClient(stdioTransport, client.WithSamplingHandler(samplingHandler)) + + ctx := context.Background() + + if err := mcpClient.Start(ctx); err != nil { + return nil, err + } + + // Initialize the connection + _, err := mcpClient.Initialize(ctx, mcp.InitializeRequest{ + Params: mcp.InitializeParams{ + ProtocolVersion: mcp.LATEST_PROTOCOL_VERSION, + ClientInfo: mcp.Implementation{ + Name: "azd-agent-host", + Version: "1.0.0", + }, + Capabilities: mcp.ClientCapabilities{}, + }, + }) if err != nil { - return nil, fmt.Errorf("failed to create MCP client for server %s: %w", serverName, err) + return nil, err } // Create the adapter From 8960505418ed3d8e7e09cc7822907c520106adfd Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Thu, 31 Jul 2025 13:18:11 -0700 Subject: [PATCH 024/116] Adds sampling handler --- .../azd.ai.start/internal/agent/agent.go | 5 +- .../azd.ai.start/internal/logging/logger.go | 37 ++++-- .../azd.ai.start/internal/tools/mcp/loader.go | 31 +---- .../internal/tools/mcp/sampling_handler.go | 111 ++++++++++++++++++ 4 files changed, 149 insertions(+), 35 deletions(-) create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go index a5822a677c2..d3a38403d4b 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go @@ -39,9 +39,12 @@ func NewAzureAIAgent(llm *openai.LLM) (*AzureAIAgent, error) { memory.WithAIPrefix("AI"), ) + // Create sampling handler for MCP + samplingHandler := mcptools.NewMcpSamplingHandler(llm) + toolLoaders := []localtools.ToolLoader{ localtools.NewLocalToolsLoader(llm.CallbacksHandler), - mcptools.NewMcpToolsLoader(llm.CallbacksHandler), + mcptools.NewMcpToolsLoader(llm.CallbacksHandler, samplingHandler), } allTools := []tools.Tool{} diff --git a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go index aa693cc7f6b..ee56ceb3625 100644 --- a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go +++ b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go @@ -155,12 +155,29 @@ func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.Age var valueStr string switch v := value.(type) { case []interface{}: + // Skip empty arrays + if len(v) == 0 { + continue + } // Handle arrays by joining with spaces var strSlice []string for _, item := range v { strSlice = append(strSlice, strings.TrimSpace(string(fmt.Sprintf("%v", item)))) } valueStr = strings.Join(strSlice, " ") + case map[string]interface{}: + // Skip empty maps + if len(v) == 0 { + continue + } + valueStr = strings.TrimSpace(fmt.Sprintf("%v", v)) + case string: + // Skip empty strings + trimmed := strings.TrimSpace(v) + if trimmed == "" { + continue + } + valueStr = trimmed default: valueStr = strings.TrimSpace(fmt.Sprintf("%v", v)) } @@ -173,17 +190,23 @@ func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.Age var paramStr string if len(params) > 0 { paramStr = strings.Join(params, ", ") + paramStr = truncateString(paramStr, 100) + output := fmt.Sprintf("\n🤖 Agent: Calling %s tool with %s\n", action.Tool, paramStr) + color.Green(output) } else { - paramStr = "tool" + output := fmt.Sprintf("\n🤖 Agent: Calling %s tool\n", action.Tool) + color.Green(output) } - - paramStr = truncateString(paramStr, 100) - output := fmt.Sprintf("\n🤖 Agent: 
Calling %s tool with %s\n", action.Tool, paramStr) - color.Green(output) } else { // JSON parsing failed, show the input as text with truncation - toolInput := truncateString(action.ToolInput, 100) - color.Green("\n🤖 Agent: Calling %s tool with %s\n", action.Tool, toolInput) + toolInput := strings.TrimSpace(action.ToolInput) + if toolInput == "" { + output := fmt.Sprintf("\n🤖 Agent: Calling %s tool\n", action.Tool) + color.Green(output) + } else { + toolInput = truncateString(toolInput, 100) + color.Green("\n🤖 Agent: Calling %s tool with %s\n", action.Tool, toolInput) + } } } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go index 51464e0229d..e30c3fb5e0e 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go @@ -10,7 +10,6 @@ import ( langchaingo_mcp_adapter "github.com/i2y/langchaingo-mcp-adapter" "github.com/mark3labs/mcp-go/client" "github.com/mark3labs/mcp-go/client/transport" - "github.com/mark3labs/mcp-go/mcp" "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" ) @@ -31,21 +30,15 @@ type ServerConfig struct { Env []string `json:"env,omitempty"` } -type McpSamplingHandler struct { -} - -func (h *McpSamplingHandler) CreateMessage(ctx context.Context, request mcp.CreateMessageRequest) (*mcp.CreateMessageResult, error) { - // TODO: implement sampling handler - return &mcp.CreateMessageResult{}, nil -} - type McpToolsLoader struct { callbackHandler callbacks.Handler + samplingHandler client.SamplingHandler } -func NewMcpToolsLoader(callbackHandler callbacks.Handler) *McpToolsLoader { +func NewMcpToolsLoader(callbackHandler callbacks.Handler, samplingHandler client.SamplingHandler) *McpToolsLoader { return &McpToolsLoader{ callbackHandler: callbackHandler, + samplingHandler: samplingHandler, } } @@ -61,9 +54,8 @@ func (l *McpToolsLoader) LoadTools() ([]tools.Tool, error) { // Iterate through each server configuration for serverName, serverConfig := range config.Servers { // Create MCP client for the server using stdio - samplingHandler := &McpSamplingHandler{} stdioTransport := transport.NewStdio(serverConfig.Command, serverConfig.Env, serverConfig.Args...) 
- mcpClient := client.NewClient(stdioTransport, client.WithSamplingHandler(samplingHandler)) + mcpClient := client.NewClient(stdioTransport, client.WithSamplingHandler(l.samplingHandler)) ctx := context.Background() @@ -71,21 +63,6 @@ func (l *McpToolsLoader) LoadTools() ([]tools.Tool, error) { return nil, err } - // Initialize the connection - _, err := mcpClient.Initialize(ctx, mcp.InitializeRequest{ - Params: mcp.InitializeParams{ - ProtocolVersion: mcp.LATEST_PROTOCOL_VERSION, - ClientInfo: mcp.Implementation{ - Name: "azd-agent-host", - Version: "1.0.0", - }, - Capabilities: mcp.ClientCapabilities{}, - }, - }) - if err != nil { - return nil, err - } - // Create the adapter adapter, err := langchaingo_mcp_adapter.New(mcpClient) if err != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go new file mode 100644 index 00000000000..d9505abf102 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go @@ -0,0 +1,111 @@ +package mcp + +import ( + "context" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/tmc/langchaingo/llms" +) + +type McpSamplingHandler struct { + llm llms.Model +} + +func NewMcpSamplingHandler(llm llms.Model) *McpSamplingHandler { + return &McpSamplingHandler{ + llm: llm, + } +} + +func (h *McpSamplingHandler) CreateMessage(ctx context.Context, request mcp.CreateMessageRequest) (*mcp.CreateMessageResult, error) { + messages := []llms.MessageContent{} + for _, inputMessage := range request.Messages { + // Map MCP Role to langchaingo ChatMessageType + var chatMessageType llms.ChatMessageType + switch inputMessage.Role { + case mcp.RoleAssistant: + chatMessageType = llms.ChatMessageTypeAI + case mcp.RoleUser: + chatMessageType = llms.ChatMessageTypeHuman + default: + // Fallback for unknown roles + chatMessageType = llms.ChatMessageTypeHuman + } + + // Handle Content field - it's defined as 'any' in MCP SamplingMessage + var parts []llms.ContentPart + switch content := inputMessage.Content.(type) { + case string: + // Simple text content + parts = []llms.ContentPart{ + llms.TextContent{ + Text: content, + }, + } + case []interface{}: + // Array of content parts (could be text, images, etc.) + for _, part := range content { + if textPart, ok := part.(string); ok { + parts = append(parts, llms.TextContent{ + Text: textPart, + }) + } + // Could add support for other content types here (images, etc.) 
+ } + case map[string]interface{}: + // Map content - convert each key/value pair to text content + for key, value := range content { + parts = append(parts, llms.TextContent{ + Text: fmt.Sprintf("%s: %v", key, value), + }) + } + default: + // Fallback: convert to string + parts = []llms.ContentPart{ + llms.TextContent{ + Text: fmt.Sprintf("%v", content), + }, + } + } + + messages = append(messages, llms.MessageContent{ + Role: chatMessageType, + Parts: parts, + }) + } + + res, err := h.llm.GenerateContent(ctx, messages) + if err != nil { + return nil, err + } + + // Transform langchaingo response back to MCP format + // Get model name from hints if available + modelName := "" + if request.ModelPreferences != nil && len(request.ModelPreferences.Hints) > 0 { + modelName = request.ModelPreferences.Hints[0].Name + } + + if len(res.Choices) == 0 { + return &mcp.CreateMessageResult{ + SamplingMessage: mcp.SamplingMessage{ + Role: mcp.RoleAssistant, + Content: "", + }, + Model: modelName, + StopReason: "no_choices", + }, nil + } + + // Use the first choice + choice := res.Choices[0] + + return &mcp.CreateMessageResult{ + SamplingMessage: mcp.SamplingMessage{ + Role: mcp.RoleAssistant, + Content: choice.Content, + }, + Model: modelName, + }, nil +} From de487a4da95eb1b5b71a4178b1044b88a796aeb6 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Thu, 31 Jul 2025 18:25:42 -0700 Subject: [PATCH 025/116] Fixed sampling --- cli/azd/extensions/azd.ai.start/README.md | 33 ---- cli/azd/extensions/azd.ai.start/USAGE.md | 50 ------ .../azd.ai.start/internal/agent/agent.go | 83 +++++++-- .../internal/cmd/enhanced_integration.go | 17 +- .../azd.ai.start/internal/cmd/root.go | 39 ++++- .../azd.ai.start/internal/logging/logger.go | 2 +- .../internal/tools/mcp/sampling_handler.go | 157 +++++++++++------- .../azd.ai.start/internal/utils/helpers.go | 41 ----- .../internal/validation/parser.go | 93 ----------- .../azd.ai.start/internal/validation/types.go | 21 --- 10 files changed, 200 insertions(+), 336 deletions(-) delete mode 100644 cli/azd/extensions/azd.ai.start/README.md delete mode 100644 cli/azd/extensions/azd.ai.start/USAGE.md delete mode 100644 cli/azd/extensions/azd.ai.start/internal/utils/helpers.go delete mode 100644 cli/azd/extensions/azd.ai.start/internal/validation/parser.go delete mode 100644 cli/azd/extensions/azd.ai.start/internal/validation/types.go diff --git a/cli/azd/extensions/azd.ai.start/README.md b/cli/azd/extensions/azd.ai.start/README.md deleted file mode 100644 index 9ff29633ea4..00000000000 --- a/cli/azd/extensions/azd.ai.start/README.md +++ /dev/null @@ -1,33 +0,0 @@ -# Node.js Express App - -This is a simple Node.js application using Express with a basic routing setup. - -## Project Structure - -``` -. -├── app.js -├── package.json -├── README.md -└── routes - └── index.js -``` - -## Getting Started - -1. Install dependencies: - ```bash - npm install - ``` -2. Start the server: - ```bash - npm start - ``` -3. Visit [http://localhost:3000](http://localhost:3000) in your browser. - -## Features -- Express server setup -- Modular routing - -## License -ISC \ No newline at end of file diff --git a/cli/azd/extensions/azd.ai.start/USAGE.md b/cli/azd/extensions/azd.ai.start/USAGE.md deleted file mode 100644 index 7218badc825..00000000000 --- a/cli/azd/extensions/azd.ai.start/USAGE.md +++ /dev/null @@ -1,50 +0,0 @@ -# Azure AI Agent - Multi-turn Chat Demo - -Your Azure AI Agent now supports two modes: - -## 1. 
Single Query Mode -For one-time questions, pass the query as arguments: -```bash -azd.ai.start.exe "How do I deploy a Node.js app to Azure?" -``` - -## 2. Interactive Chat Mode -For multi-turn conversations, run without arguments: -```bash -azd.ai.start.exe -``` - -In interactive mode, you'll see: -- 🤖 Welcome message with instructions -- 💬 You: prompt for your input -- 🤖 AI Agent: responses with context awareness -- Type 'exit' or 'quit' to end the session -- Maintains conversation history for context - -### Features: -- ✅ **Context Aware**: Remembers previous messages in the conversation -- ✅ **Azure Focused**: Specialized for Azure development tasks -- ✅ **Easy Exit**: Type 'exit', 'quit', or Ctrl+C to quit -- ✅ **Memory Management**: Keeps last 10 exchanges to prevent context overflow -- ✅ **Error Handling**: Gracefully handles errors and continues the conversation - -### Example Interactive Session: -``` -🤖 Azure AI Agent - Interactive Chat Mode -Type 'exit', 'quit', or press Ctrl+C to exit -═══════════════════════════════════════════════ - -💬 You: What is Azure App Service? - -🤖 AI Agent: Azure App Service is a platform-as-a-service (PaaS)... - -💬 You: How do I deploy to it? - -🤖 AI Agent: Based on our previous discussion about App Service... - -💬 You: exit - -👋 Goodbye! Thanks for using Azure AI Agent! -``` - -The agent maintains conversation context, so follow-up questions work naturally! diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go index d3a38403d4b..fc1cd9e73cb 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go @@ -9,11 +9,13 @@ import ( "github.com/tmc/langchaingo/agents" "github.com/tmc/langchaingo/chains" - "github.com/tmc/langchaingo/llms/openai" + "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/memory" "github.com/tmc/langchaingo/tools" + "azd.ai.start/internal/logging" localtools "azd.ai.start/internal/tools" + "azd.ai.start/internal/tools/mcp" mcptools "azd.ai.start/internal/tools/mcp" ) @@ -26,12 +28,42 @@ var _defaultAgentFormatInstructions string //go:embed prompts/default_agent_suffix.txt var _defaultAgentSuffix string -// AzureAIAgent represents an enhanced Azure AI agent with action tracking, intent validation, and conversation memory -type AzureAIAgent struct { - executor *agents.Executor +// AzdAiAgent represents an enhanced AZD Copilot agent with action tracking, intent validation, and conversation memory +type AzdAiAgent struct { + debug bool + defaultModel llms.Model + samplingModel llms.Model + executor *agents.Executor } -func NewAzureAIAgent(llm *openai.LLM) (*AzureAIAgent, error) { +type AgentOption func(*AzdAiAgent) + +func WithDebug(debug bool) AgentOption { + return func(agent *AzdAiAgent) { + agent.debug = debug + } +} + +func WithSamplingModel(model llms.Model) AgentOption { + return func(agent *AzdAiAgent) { + agent.samplingModel = model + } +} + +func NewAzdAiAgent(llm llms.Model, opts ...AgentOption) (*AzdAiAgent, error) { + azdAgent := &AzdAiAgent{ + defaultModel: llm, + samplingModel: llm, + } + + for _, opt := range opts { + opt(azdAgent) + } + + actionLogger := logging.NewActionLogger( + logging.WithDebug(azdAgent.debug), + ) + smartMemory := memory.NewConversationBuffer( memory.WithInputKey("input"), memory.WithOutputKey("output"), @@ -40,48 +72,63 @@ func NewAzureAIAgent(llm *openai.LLM) (*AzureAIAgent, error) { ) // Create sampling handler for MCP - samplingHandler := 
mcptools.NewMcpSamplingHandler(llm) + samplingHandler := mcptools.NewMcpSamplingHandler( + azdAgent.samplingModel, + mcp.WithDebug(azdAgent.debug), + ) toolLoaders := []localtools.ToolLoader{ - localtools.NewLocalToolsLoader(llm.CallbacksHandler), - mcptools.NewMcpToolsLoader(llm.CallbacksHandler, samplingHandler), + localtools.NewLocalToolsLoader(actionLogger), + mcptools.NewMcpToolsLoader(actionLogger, samplingHandler), } allTools := []tools.Tool{} + // Define block list of excluded tools + excludedTools := map[string]bool{ + "extension_az": true, + "extension_azd": true, + // Add more excluded tools here as needed + } + for _, toolLoader := range toolLoaders { categoryTools, err := toolLoader.LoadTools() if err != nil { return nil, err } - allTools = append(allTools, categoryTools...) + + // Filter out excluded tools + for _, tool := range categoryTools { + if !excludedTools[tool.Name()] { + allTools = append(allTools, tool) + } + } } // 4. Create agent with memory directly integrated - agent := agents.NewConversationalAgent(llm, allTools, + conversationAgent := agents.NewConversationalAgent(llm, allTools, agents.WithPromptPrefix(_defaultAgentPrefix), agents.WithPromptSuffix(_defaultAgentSuffix), agents.WithPromptFormatInstructions(_defaultAgentFormatInstructions), agents.WithMemory(smartMemory), - agents.WithCallbacksHandler(llm.CallbacksHandler), + agents.WithCallbacksHandler(actionLogger), agents.WithReturnIntermediateSteps(), ) // 5. Create executor without separate memory configuration since agent already has it - executor := agents.NewExecutor(agent, - agents.WithMaxIterations(100), // Much higher limit for complex multi-step processes + executor := agents.NewExecutor(conversationAgent, + agents.WithMaxIterations(500), // Much higher limit for complex multi-step processes agents.WithMemory(smartMemory), - agents.WithCallbacksHandler(llm.CallbacksHandler), + agents.WithCallbacksHandler(actionLogger), agents.WithReturnIntermediateSteps(), ) - return &AzureAIAgent{ - executor: executor, - }, nil + azdAgent.executor = executor + return azdAgent, nil } // ProcessQuery processes a user query with full action tracking and validation -func (aai *AzureAIAgent) ProcessQuery(ctx context.Context, userInput string) error { +func (aai *AzdAiAgent) ProcessQuery(ctx context.Context, userInput string) error { // Execute with enhanced input - agent should automatically handle memory _, err := chains.Run(ctx, aai.executor, userInput, chains.WithMaxTokens(800), diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go index 7e436b76e6d..3a27dc4643c 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go @@ -11,20 +11,13 @@ import ( "strings" "github.com/fatih/color" - "github.com/tmc/langchaingo/llms/openai" "azd.ai.start/internal/agent" ) -// RunEnhancedAzureAgent runs the enhanced Azure AI agent with full capabilities -func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) error { - // Create the enhanced agent - azureAgent, err := agent.NewAzureAIAgent(llm) - if err != nil { - return err - } - - fmt.Println("🤖 Enhanced Azure AI Agent - Interactive Mode") +// RunEnhancedAgentLoop runs the enhanced AZD Copilot agent with full capabilities +func RunEnhancedAgentLoop(ctx context.Context, agent *agent.AzdAiAgent, args []string) error { + fmt.Println("🤖 AZD Copilot - Interactive Mode") 
fmt.Println("═══════════════════════════════════════════════════════════") // Handle initial query if provided @@ -59,12 +52,12 @@ func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) } if strings.ToLower(userInput) == "exit" || strings.ToLower(userInput) == "quit" { - fmt.Println("👋 Goodbye! Thanks for using the Enhanced Azure AI Agent!") + fmt.Println("👋 Goodbye! Thanks for using AZD Copilot!") break } // Process the query with the enhanced agent - err := azureAgent.ProcessQuery(ctx, userInput) + err := agent.ProcessQuery(ctx, userInput) if err != nil { continue } diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go index d57df89a2ea..4031d40b7f5 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go @@ -9,6 +9,7 @@ import ( "fmt" "os" + "azd.ai.start/internal/agent" "azd.ai.start/internal/logging" "github.com/azure/azure-dev/cli/azd/pkg/azdext" "github.com/spf13/cobra" @@ -83,18 +84,17 @@ func runAIAgent(ctx context.Context, args []string, debug bool) error { // Common deployment names to try azureAPIVersion := "2024-02-15-preview" - var llm *openai.LLM + var defaultModel *openai.LLM + var samplingModel *openai.LLM + + actionLogger := logging.NewActionLogger(logging.WithDebug(debug)) // Try different deployment names if aiConfig.Endpoint != "" && aiConfig.ApiKey != "" { // Use Azure OpenAI with proper configuration fmt.Printf("🔵 Trying Azure OpenAI with deployment: %s\n", aiConfig.DeploymentName) - actionLogger := logging.NewActionLogger( - logging.WithDebug(debug), - ) - - llm, err = openai.New( + defaultModel, err = openai.New( openai.WithToken(aiConfig.ApiKey), openai.WithBaseURL(aiConfig.Endpoint+"/"), openai.WithAPIType(openai.APITypeAzure), @@ -108,12 +108,33 @@ func runAIAgent(ctx context.Context, args []string, debug bool) error { } else { fmt.Printf("❌ Failed with deployment %s: %v\n", aiConfig.DeploymentName, err) } + + samplingModel, err = openai.New( + openai.WithToken(aiConfig.ApiKey), + openai.WithBaseURL(aiConfig.Endpoint+"/"), + openai.WithAPIType(openai.APITypeAzure), + openai.WithAPIVersion(azureAPIVersion), + openai.WithModel(aiConfig.DeploymentName), + ) + + if err != nil { + return err + } + } + + // Create the enhanced agent + azdAgent, err := agent.NewAzdAiAgent(defaultModel, + agent.WithSamplingModel(samplingModel), + agent.WithDebug(debug), + ) + if err != nil { + return err } - if llm == nil { + if defaultModel == nil { return fmt.Errorf("failed to connect to any Azure OpenAI deployment") } - // Use the enhanced Azure AI agent with full capabilities - return RunEnhancedAzureAgent(ctx, llm, args) + // Use the enhanced AZD Copilot agent with full capabilities + return RunEnhancedAgentLoop(ctx, azdAgent, args) } diff --git a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go index ee56ceb3625..e3f9b64e0e4 100644 --- a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go +++ b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go @@ -68,7 +68,7 @@ func (al *ActionLogger) HandleLLMGenerateContentEnd(ctx context.Context, res *ll // Find all "Thought:" patterns and extract the content that follows // (?is) flags: i=case insensitive, s=dot matches newlines // .*? 
is non-greedy to stop at the first occurrence of next pattern or end - thoughtRegex := regexp.MustCompile(`(?is)thought:\s*(.*?)(?:\n\s*(?:action|final answer|observation|ai):|$)`) + thoughtRegex := regexp.MustCompile(`(?is)thought:\s*(.*?)(?:\n\s*(?:action|final answer|observation|ai|thought):|$)`) matches := thoughtRegex.FindAllStringSubmatch(content, -1) for _, match := range matches { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go index d9505abf102..ffd948429c6 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go @@ -2,110 +2,151 @@ package mcp import ( "context" + "encoding/json" "fmt" + "strings" + "github.com/fatih/color" "github.com/mark3labs/mcp-go/mcp" "github.com/tmc/langchaingo/llms" ) type McpSamplingHandler struct { - llm llms.Model + llm llms.Model + debug bool } -func NewMcpSamplingHandler(llm llms.Model) *McpSamplingHandler { - return &McpSamplingHandler{ +type SamplingHandlerOption func(*McpSamplingHandler) + +func WithDebug(debug bool) SamplingHandlerOption { + return func(h *McpSamplingHandler) { + h.debug = debug + } +} + +func NewMcpSamplingHandler(llm llms.Model, opts ...SamplingHandlerOption) *McpSamplingHandler { + handler := &McpSamplingHandler{ llm: llm, } + + for _, opt := range opts { + opt(handler) + } + + return handler +} + +// cleanContent converts literal line break escape sequences to actual line break characters +func (h *McpSamplingHandler) cleanContent(content string) string { + // Replace literal escape sequences with actual control characters + // Handle Windows-style \r\n first (most common), then individual ones + content = strings.ReplaceAll(content, "\\r\\n", "\r\n") + content = strings.ReplaceAll(content, "\\n", "\n") + content = strings.ReplaceAll(content, "\\r", "\r") + return content } func (h *McpSamplingHandler) CreateMessage(ctx context.Context, request mcp.CreateMessageRequest) (*mcp.CreateMessageResult, error) { - messages := []llms.MessageContent{} - for _, inputMessage := range request.Messages { - // Map MCP Role to langchaingo ChatMessageType - var chatMessageType llms.ChatMessageType - switch inputMessage.Role { - case mcp.RoleAssistant: - chatMessageType = llms.ChatMessageTypeAI - case mcp.RoleUser: - chatMessageType = llms.ChatMessageTypeHuman - default: - // Fallback for unknown roles - chatMessageType = llms.ChatMessageTypeHuman + if h.debug { + requestJson, err := json.MarshalIndent(request, "", " ") + if err != nil { + return nil, err } - // Handle Content field - it's defined as 'any' in MCP SamplingMessage + color.HiBlack("\nSamplingStart\n%s\n", requestJson) + } + + messages := []llms.MessageContent{} + for _, msg := range request.Messages { var parts []llms.ContentPart - switch content := inputMessage.Content.(type) { + + switch content := msg.Content.(type) { + case mcp.TextContent: + parts = append(parts, llms.TextPart(h.cleanContent(content.Text))) case string: // Simple text content - parts = []llms.ContentPart{ - llms.TextContent{ - Text: content, - }, + parts = append(parts, llms.TextPart(h.cleanContent(content))) + case map[string]interface{}: + // Map content - convert each key/value pair to text content + for key, value := range content { + if key == "text" { + parts = append(parts, llms.TextPart(h.cleanContent(fmt.Sprintf("%v", value)))) + break + } } case []interface{}: // Array of content parts 
(could be text, images, etc.) for _, part := range content { if textPart, ok := part.(string); ok { - parts = append(parts, llms.TextContent{ - Text: textPart, - }) + parts = append(parts, llms.TextPart(h.cleanContent(textPart))) } - // Could add support for other content types here (images, etc.) - } - case map[string]interface{}: - // Map content - convert each key/value pair to text content - for key, value := range content { - parts = append(parts, llms.TextContent{ - Text: fmt.Sprintf("%s: %v", key, value), - }) } + default: // Fallback: convert to string - parts = []llms.ContentPart{ - llms.TextContent{ - Text: fmt.Sprintf("%v", content), - }, - } + parts = append(parts, llms.TextPart(h.cleanContent(fmt.Sprintf("%v", content)))) } messages = append(messages, llms.MessageContent{ - Role: chatMessageType, + Role: llms.ChatMessageTypeAI, Parts: parts, }) } + if h.debug { + inputJson, err := json.MarshalIndent(messages, "", " ") + if err != nil { + return nil, err + } + + color.HiBlack("\nSamplingLLMContent\n%s\n", inputJson) + } + res, err := h.llm.GenerateContent(ctx, messages) if err != nil { - return nil, err + return &mcp.CreateMessageResult{ + SamplingMessage: mcp.SamplingMessage{ + Role: mcp.RoleAssistant, + Content: llms.TextPart(err.Error()), + }, + Model: "llm-delegated", + StopReason: "error", + }, nil } - // Transform langchaingo response back to MCP format - // Get model name from hints if available - modelName := "" - if request.ModelPreferences != nil && len(request.ModelPreferences.Hints) > 0 { - modelName = request.ModelPreferences.Hints[0].Name - } + var samplingResponse *mcp.CreateMessageResult if len(res.Choices) == 0 { - return &mcp.CreateMessageResult{ + samplingResponse = &mcp.CreateMessageResult{ SamplingMessage: mcp.SamplingMessage{ Role: mcp.RoleAssistant, - Content: "", + Content: llms.TextPart(""), }, - Model: modelName, + Model: "llm-delegated", StopReason: "no_choices", - }, nil + } + } else { + // Use the first choice + choice := res.Choices[0] + + samplingResponse = &mcp.CreateMessageResult{ + SamplingMessage: mcp.SamplingMessage{ + Role: mcp.RoleAssistant, + Content: llms.TextPart(choice.Content), + }, + Model: "llm-delegated", + StopReason: "endTurn", + } } - // Use the first choice - choice := res.Choices[0] + if h.debug { + responseJson, err := json.MarshalIndent(samplingResponse, "", " ") + if err != nil { + return nil, err + } + + color.HiBlack("\nSamplingEnd\n%s\n", responseJson) + } - return &mcp.CreateMessageResult{ - SamplingMessage: mcp.SamplingMessage{ - Role: mcp.RoleAssistant, - Content: choice.Content, - }, - Model: modelName, - }, nil + return samplingResponse, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/utils/helpers.go b/cli/azd/extensions/azd.ai.start/internal/utils/helpers.go deleted file mode 100644 index 130734eff77..00000000000 --- a/cli/azd/extensions/azd.ai.start/internal/utils/helpers.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package utils - -import ( - "fmt" - "strings" - "time" - - "azd.ai.start/internal/session" -) - -// TruncateString truncates a string to a maximum length -func TruncateString(s string, maxLen int) string { - if len(s) <= maxLen { - return s - } - return s[:maxLen] + "..." 
-} - -// FormatActionsForValidation formats actions for the validation prompt -func FormatActionsForValidation(actions []session.ActionLog) string { - if len(actions) == 0 { - return "No actions executed" - } - - var formatted strings.Builder - for i, action := range actions { - status := "SUCCESS" - if !action.Success { - status = "FAILED" - } - formatted.WriteString(fmt.Sprintf("%d. Tool: %s | Input: %s | Status: %s | Duration: %v\n", - i+1, action.Tool, TruncateString(action.Input, 100), status, action.Duration.Round(time.Millisecond))) - if action.Output != "" { - formatted.WriteString(fmt.Sprintf(" Output: %s\n", TruncateString(action.Output, 200))) - } - } - return formatted.String() -} diff --git a/cli/azd/extensions/azd.ai.start/internal/validation/parser.go b/cli/azd/extensions/azd.ai.start/internal/validation/parser.go deleted file mode 100644 index 2f814546798..00000000000 --- a/cli/azd/extensions/azd.ai.start/internal/validation/parser.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package validation - -import ( - "strings" -) - -// ParseValidationResult parses the validation result from LLM response -func ParseValidationResult(response string) *ValidationResult { - result := &ValidationResult{ - Status: ValidationError, - Explanation: "Failed to parse validation response", - Confidence: 0.0, - } - - lines := strings.Split(response, "\n") - for _, line := range lines { - line = strings.TrimSpace(line) - - if strings.HasPrefix(line, "STATUS:") { - statusStr := strings.TrimSpace(strings.TrimPrefix(line, "STATUS:")) - switch strings.ToUpper(statusStr) { - case "COMPLETE": - result.Status = ValidationComplete - case "PARTIAL": - result.Status = ValidationPartial - case "INCOMPLETE": - result.Status = ValidationIncomplete - case "ERROR": - result.Status = ValidationError - } - } else if strings.HasPrefix(line, "EXPLANATION:") { - result.Explanation = strings.TrimSpace(strings.TrimPrefix(line, "EXPLANATION:")) - } else if strings.HasPrefix(line, "CONFIDENCE:") { - confidenceStr := strings.TrimSpace(strings.TrimPrefix(line, "CONFIDENCE:")) - if conf, err := parseFloat(confidenceStr); err == nil { - result.Confidence = conf - } - } - } - - // If we couldn't parse the status, try to infer from the response content - if result.Status == ValidationError { - responseUpper := strings.ToUpper(response) - if strings.Contains(responseUpper, "COMPLETE") { - result.Status = ValidationComplete - } else if strings.Contains(responseUpper, "PARTIAL") { - result.Status = ValidationPartial - } else if strings.Contains(responseUpper, "INCOMPLETE") { - result.Status = ValidationIncomplete - } - result.Explanation = response - result.Confidence = 0.7 - } - - return result -} - -// parseFloat safely parses a float from string -func parseFloat(s string) (float64, error) { - // Simple float parsing for confidence values - s = strings.TrimSpace(s) - if s == "1" || s == "1.0" { - return 1.0, nil - } else if s == "0" || s == "0.0" { - return 0.0, nil - } else if strings.HasPrefix(s, "0.") { - // Simple decimal parsing for common cases - switch s { - case "0.1": - return 0.1, nil - case "0.2": - return 0.2, nil - case "0.3": - return 0.3, nil - case "0.4": - return 0.4, nil - case "0.5": - return 0.5, nil - case "0.6": - return 0.6, nil - case "0.7": - return 0.7, nil - case "0.8": - return 0.8, nil - case "0.9": - return 0.9, nil - } - } - return 0.5, nil // Default confidence -} diff --git 
a/cli/azd/extensions/azd.ai.start/internal/validation/types.go b/cli/azd/extensions/azd.ai.start/internal/validation/types.go deleted file mode 100644 index 4b0ebcd25bc..00000000000 --- a/cli/azd/extensions/azd.ai.start/internal/validation/types.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package validation - -// ValidationResult represents the result of intent validation -type ValidationResult struct { - Status ValidationStatus - Explanation string - Confidence float64 -} - -// ValidationStatus represents the completion status of the original intent -type ValidationStatus string - -const ( - ValidationComplete ValidationStatus = "COMPLETE" - ValidationPartial ValidationStatus = "PARTIAL" - ValidationIncomplete ValidationStatus = "INCOMPLETE" - ValidationError ValidationStatus = "ERROR" -) From 9cffc50ba4ec417828c4eccef02c1a2a6f5e7c55 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Fri, 1 Aug 2025 14:30:46 -0700 Subject: [PATCH 026/116] Adds azd helper tools --- .../default_agent_format_instructions.txt | 2 +- .../tools/azd/azd_architecture_planning.go | 31 ++ .../tools/azd/azd_azure_yaml_generation.go | 31 ++ .../tools/azd/azd_discovery_analysis.go | 31 ++ .../tools/azd/azd_docker_generation.go | 31 ++ .../tools/azd/azd_iac_generation_rules.go | 3 +- .../azd/azd_infrastructure_generation.go | 31 ++ .../internal/tools/azd/azd_plan_init.go | 3 +- .../tools/azd/azd_project_validation.go | 37 +++ .../internal/tools/azd/azd_yaml_schema.go | 3 +- .../azd.ai.start/internal/tools/azd/loader.go | 15 + .../internal/tools/azd/prompts/README.md | 199 ++++++++++++ .../azd/prompts/azd_architecture_planning.md | 165 ++++++++++ .../azd/prompts/azd_azure_yaml_generation.md | 200 ++++++++++++ .../azd/prompts/azd_discovery_analysis.md | 200 ++++++++++++ .../azd/prompts/azd_docker_generation.md | 174 +++++++++++ .../azd/prompts/azd_iac_generation_rules.md | 22 ++ .../prompts/azd_infrastructure_generation.md | 159 ++++++++++ .../tools/azd/prompts/azd_plan_init.md | 291 +++++------------- .../azd/prompts/azd_project_validation.md | 181 +++++++++++ .../internal/tools/azd/prompts/prompts.go | 15 + .../azd.ai.start/internal/tools/io/loader.go | 2 +- .../internal/tools/io/write_file.go | 150 +++------ 23 files changed, 1646 insertions(+), 330 deletions(-) create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_architecture_planning.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_azure_yaml_generation.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_discovery_analysis.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_docker_generation.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_infrastructure_generation.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_project_validation.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/README.md create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_architecture_planning.md create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_azure_yaml_generation.md create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_discovery_analysis.md create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_docker_generation.md create mode 100644 
cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_infrastructure_generation.md create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_project_validation.md diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt index d66dcb43d88..4ff35663ba8 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt +++ b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt @@ -34,7 +34,7 @@ Observation: [result] Remember: Always continue taking actions until the user's request is fully satisfied. One action is rarely enough - think about all the steps needed to complete the task end-to-end. -When you are done answering the questions and performing all your tasks you MUST ALWAYS use the following format: +When you are done or handing control back to the user you MUST ALWAYS use the following format: Thought: Do I need to use a tool? No AI: [briefly summarize your response without all the details from your observations] \ No newline at end of file diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_architecture_planning.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_architecture_planning.go new file mode 100644 index 00000000000..44894270ea0 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_architecture_planning.go @@ -0,0 +1,31 @@ +package azd + +import ( + "context" + + "azd.ai.start/internal/tools/azd/prompts" + "github.com/tmc/langchaingo/tools" +) + +var _ tools.Tool = &AzdArchitecturePlanningTool{} + +type AzdArchitecturePlanningTool struct { +} + +func (t *AzdArchitecturePlanningTool) Name() string { + return "azd_architecture_planning" +} + +func (t *AzdArchitecturePlanningTool) Description() string { + return ` + Performs Azure service selection and architecture planning for applications preparing for Azure Developer CLI (AZD) initialization. + This is Phase 2 of the AZD migration process that maps components to Azure services, plans hosting strategies, + and designs infrastructure architecture based on discovery results. + + Input: "./azd-arch-plan.md" + ` +} + +func (t *AzdArchitecturePlanningTool) Call(ctx context.Context, input string) (string, error) { + return prompts.AzdArchitecturePlanningPrompt, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_azure_yaml_generation.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_azure_yaml_generation.go new file mode 100644 index 00000000000..ea2ae2e26f2 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_azure_yaml_generation.go @@ -0,0 +1,31 @@ +package azd + +import ( + "context" + + "azd.ai.start/internal/tools/azd/prompts" + "github.com/tmc/langchaingo/tools" +) + +var _ tools.Tool = &AzdAzureYamlGenerationTool{} + +type AzdAzureYamlGenerationTool struct { +} + +func (t *AzdAzureYamlGenerationTool) Name() string { + return "azd_azure_yaml_generation" +} + +func (t *AzdAzureYamlGenerationTool) Description() string { + return ` + Generates the azure.yaml configuration file for Azure Developer CLI (AZD) projects. + This specialized tool focuses on creating service definitions, hosting configurations, + and deployment instructions. Can be used independently for service configuration updates. 
+ + Input: "./azd-arch-plan.md" + ` +} + +func (t *AzdAzureYamlGenerationTool) Call(ctx context.Context, input string) (string, error) { + return prompts.AzdAzureYamlGenerationPrompt, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_discovery_analysis.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_discovery_analysis.go new file mode 100644 index 00000000000..db865d67398 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_discovery_analysis.go @@ -0,0 +1,31 @@ +package azd + +import ( + "context" + + "azd.ai.start/internal/tools/azd/prompts" + "github.com/tmc/langchaingo/tools" +) + +var _ tools.Tool = &AzdDiscoveryAnalysisTool{} + +type AzdDiscoveryAnalysisTool struct { +} + +func (t *AzdDiscoveryAnalysisTool) Name() string { + return "azd_discovery_analysis" +} + +func (t *AzdDiscoveryAnalysisTool) Description() string { + return ` + Performs comprehensive discovery and analysis of applications to prepare them for Azure Developer CLI (AZD) initialization. + This is Phase 1 of the AZD migration process that analyzes codebase, identifies components and dependencies, + and creates a foundation for architecture planning. + + Input: "./azd-arch-plan.md" + ` +} + +func (t *AzdDiscoveryAnalysisTool) Call(ctx context.Context, input string) (string, error) { + return prompts.AzdDiscoveryAnalysisPrompt, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_docker_generation.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_docker_generation.go new file mode 100644 index 00000000000..89ddea37bae --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_docker_generation.go @@ -0,0 +1,31 @@ +package azd + +import ( + "context" + + "azd.ai.start/internal/tools/azd/prompts" + "github.com/tmc/langchaingo/tools" +) + +var _ tools.Tool = &AzdDockerGenerationTool{} + +type AzdDockerGenerationTool struct { +} + +func (t *AzdDockerGenerationTool) Name() string { + return "azd_docker_generation" +} + +func (t *AzdDockerGenerationTool) Description() string { + return ` + Generates Dockerfiles and container configurations for Azure Developer CLI (AZD) projects. + This specialized tool focuses on containerization requirements, creating optimized Dockerfiles + for different programming languages, and configuring container-specific settings for Azure hosting. + + Input: "./azd-arch-plan.md" + ` +} + +func (t *AzdDockerGenerationTool) Call(ctx context.Context, input string) (string, error) { + return prompts.AzdDockerGenerationPrompt, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go index f67c067e820..47e2c5a738e 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go @@ -19,7 +19,8 @@ func (t *AzdIacGenerationRulesTool) Name() string { func (t *AzdIacGenerationRulesTool) Description() string { return ` Gets the infrastructure as code (IaC) rules and best practices and patterns to use when generating bicep files and modules for use within AZD. 
- Input: empty string + + Input: "./azd-arch-plan.md" ` } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_infrastructure_generation.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_infrastructure_generation.go new file mode 100644 index 00000000000..b93c5960369 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_infrastructure_generation.go @@ -0,0 +1,31 @@ +package azd + +import ( + "context" + + "azd.ai.start/internal/tools/azd/prompts" + "github.com/tmc/langchaingo/tools" +) + +var _ tools.Tool = &AzdInfrastructureGenerationTool{} + +type AzdInfrastructureGenerationTool struct { +} + +func (t *AzdInfrastructureGenerationTool) Name() string { + return "azd_infrastructure_generation" +} + +func (t *AzdInfrastructureGenerationTool) Description() string { + return ` + Generates Bicep infrastructure templates for Azure Developer CLI (AZD) projects. + This specialized tool focuses on creating modular Bicep templates, parameter files, + and implementing Azure security and operational best practices for infrastructure as code. + + Input: "./azd-arch-plan.md" + ` +} + +func (t *AzdInfrastructureGenerationTool) Call(ctx context.Context, input string) (string, error) { + return prompts.AzdInfrastructureGenerationPrompt, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go index 1e648939d2b..005ebafb441 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go @@ -19,7 +19,8 @@ func (t *AzdPlanInitTool) Name() string { func (t *AzdPlanInitTool) Description() string { return ` Gets the required workflow steps and best practices and patterns for initializing or migrating an application to use AZD. - Input: empty string + + Input: "./azd-arch-plan.md" ` } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_project_validation.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_project_validation.go new file mode 100644 index 00000000000..de7639839a5 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_project_validation.go @@ -0,0 +1,37 @@ +package azd + +import ( + "context" + _ "embed" + + "github.com/tmc/langchaingo/tools" +) + +//go:embed prompts/azd_project_validation.md +var azdProjectValidationPrompt string + +// AzdProjectValidationTool validates an AZD project by running comprehensive checks on all components +// including azure.yaml schema validation, Bicep template validation, environment setup, packaging, +// and deployment preview. +type AzdProjectValidationTool struct{} + +// Name returns the name of the tool. +func (t *AzdProjectValidationTool) Name() string { + return "azd_project_validation" +} + +// Description returns the description of the tool. +func (t *AzdProjectValidationTool) Description() string { + return ` + Validates an AZD project by running comprehensive checks on all components including azure.yaml schema validation, Bicep template validation, environment setup, packaging, and deployment preview. + + Input: "./azd-arch-plan.md"` +} + +// Call executes the tool with the given arguments. +func (t *AzdProjectValidationTool) Call(ctx context.Context, args string) (string, error) { + return azdProjectValidationPrompt, nil +} + +// Ensure AzdProjectValidationTool implements the Tool interface. 
+var _ tools.Tool = (*AzdProjectValidationTool)(nil) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go index db83ddf3d08..850091db4ea 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go @@ -19,7 +19,8 @@ func (t *AzdYamlSchemaTool) Name() string { func (t *AzdYamlSchemaTool) Description() string { return ` Gets the Azure YAML JSON schema file specification and structure for azure.yaml configuration files used in AZD. - Input: empty string + + Input: ` } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go index 648d70bb569..b4ac9a4de31 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go @@ -18,7 +18,22 @@ func NewAzdToolsLoader(callbackHandler callbacks.Handler) *AzdToolsLoader { func (l *AzdToolsLoader) LoadTools() ([]tools.Tool, error) { return []tools.Tool{ + // Original orchestrating tool &AzdPlanInitTool{}, + + // Core workflow tools (use in sequence) + &AzdDiscoveryAnalysisTool{}, + &AzdArchitecturePlanningTool{}, + + // Focused file generation tools (use as needed) + &AzdAzureYamlGenerationTool{}, + &AzdInfrastructureGenerationTool{}, + &AzdDockerGenerationTool{}, + + // Validation tool (final step) + &AzdProjectValidationTool{}, + + // Supporting tools &AzdIacGenerationRulesTool{}, &AzdYamlSchemaTool{}, }, nil diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/README.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/README.md new file mode 100644 index 00000000000..01c5a3ab0dd --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/README.md @@ -0,0 +1,199 @@ +# AZD Modular Tools Overview + +This document provides an overview of the modular AZD initialization tools that replace the monolithic `azd_plan_init` tool. Each tool is designed to be used independently or as part of a complete AZD migration workflow. + +## Tool Structure + +The AZD initialization process has been broken down into focused, modular tools: + +### 1. Discovery and Analysis Tool (`azd_discovery_analysis`) + +**Purpose:** Analyze applications and identify components and dependencies +**Use When:** Starting a new AZD migration or need to understand an existing codebase +**Output:** Component inventory and dependency mapping in `azd-arch-plan.md` + +### 2. Architecture Planning Tool (`azd_architecture_planning`) + +**Purpose:** Select Azure services and plan hosting strategies +**Use When:** You have discovered components and need to plan Azure service mapping +**Prerequisites:** Completed discovery and analysis +**Output:** Architecture decisions and service selections in `azd-arch-plan.md` + +### 3. Azure.yaml Generation Tool (`azd_azure_yaml_generation`) + +**Purpose:** Generate azure.yaml service configuration file +**Use When:** You need to create or update just the service definitions +**Prerequisites:** Understanding of application services and hosting requirements +**Output:** Valid `azure.yaml` file + +### 4. 
Infrastructure Generation Tool (`azd_infrastructure_generation`) + +**Purpose:** Generate Bicep infrastructure templates +**Use When:** You need to create or update just the infrastructure components +**Prerequisites:** Architecture decisions about Azure services +**Output:** Complete Bicep template structure + +### 5. Docker Generation Tool (`azd_docker_generation`) + +**Purpose:** Generate Dockerfiles and container configurations +**Use When:** You need containerization for your services +**Prerequisites:** Understanding of application services and containerization needs +**Output:** Optimized Dockerfiles and .dockerignore files + +### 6. Project Validation Tool (`azd_project_validation`) + +**Purpose:** Validate the complete AZD project setup and configuration +**Use When:** All files are generated and you need to validate the setup +**Prerequisites:** All configuration files generated +**Output:** Validation report and ready-to-deploy confirmation + +## Complete Workflow + +For a full AZD migration, use the tools in this sequence: + +``` +1. azd_discovery_analysis + ↓ +2. azd_architecture_planning + ↓ +3a. azd_azure_yaml_generation +3b. azd_infrastructure_generation +3c. azd_docker_generation (if containerization needed) + ↓ +4. azd_project_validation +``` + +## Selective Usage + +You can also use individual tools for specific tasks: + +**Generate only azure.yaml:** +``` +azd_discovery_analysis → azd_azure_yaml_generation +``` + +**Generate only infrastructure:** +``` +azd_architecture_planning → azd_infrastructure_generation +``` + +**Add containerization:** +``` +azd_docker_generation (based on existing analysis) +``` + +**Validate existing project:** +``` +azd_project_validation (for validation and testing) +``` + +## Central Planning Document + +All tools use `azd-arch-plan.md` as the central planning document: + +- **Created by:** Discovery and Analysis tool +- **Updated by:** All subsequent tools +- **Purpose:** Track progress, document decisions, and maintain project state +- **Location:** Current working directory + +## Key Features + +### Modular Design +- Each tool has a specific responsibility +- Tools can be used independently or together +- Clear prerequisites and outputs +- Consistent documentation patterns + +### Azure Best Practices +- All tools implement Azure best practices +- Security-first approach +- Cost optimization considerations +- Operational excellence patterns + +### LLM Optimized +- Clear, actionable instructions +- Structured output formats +- Comprehensive validation steps +- Troubleshooting guidance + +### Progress Tracking +- Checkboxes for completed actions +- Clear success criteria +- Validation requirements +- Next step guidance + +## Tool Selection Guide + +**Use the Discovery Tool when:** +- Starting a new AZD migration +- Don't understand the application structure +- Need to document existing architecture +- Want to identify all components and dependencies + +**Use the Architecture Planning Tool when:** +- Have component inventory +- Need to select Azure services +- Planning hosting strategies +- Designing infrastructure architecture + +**Use the File Generation Tool when:** +- Have architecture decisions +- Need to create all AZD files +- Want complete project setup +- Ready to implement infrastructure + +**Use the Environment Initialization Tool when:** +- All files are generated +- Ready to create AZD environment +- Need to validate complete setup +- Preparing for deployment + +**Use the Azure.yaml Generation Tool when:** +- Only need service 
configuration +- Updating existing azure.yaml +- Working with known service requirements +- Quick service definition setup + +**Use the Infrastructure Generation Tool when:** +- Only need Bicep templates +- Updating existing infrastructure +- Working with specific Azure service requirements +- Advanced infrastructure customization + +## Benefits of Modular Approach + +### For Users +- **Faster iterations:** Update only what you need +- **Better understanding:** Focus on one aspect at a time +- **Reduced complexity:** Smaller, focused tasks +- **Flexible workflow:** Use tools in different orders based on needs + +### For LLMs +- **Clearer context:** Each tool has specific scope +- **Better accuracy:** Focused instructions reduce errors +- **Improved validation:** Tool-specific validation steps +- **Enhanced troubleshooting:** Targeted problem resolution + +### For Maintenance +- **Easier updates:** Modify individual tools without affecting others +- **Better testing:** Test each tool independently +- **Clearer documentation:** Each tool is self-contained +- **Improved reusability:** Tools can be repurposed for different scenarios + +## Migration from Original Tool + +If you were using the original `azd_plan_init` tool, here's how to migrate: + +**Original Phase 1 (Discovery and Analysis):** +Use `azd_discovery_analysis` tool + +**Original Phase 2 (Architecture Planning):** +Use `azd_architecture_planning` tool + +**Original Phase 3 (File Generation):** +Use `azd_azure_yaml_generation` + `azd_infrastructure_generation` + `azd_docker_generation` for focused file generation + +**Original Phase 4 (Project Validation):** +Use `azd_project_validation` tool for final validation and setup verification + +The modular tools provide the same functionality with improved focus and flexibility. diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_architecture_planning.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_architecture_planning.md new file mode 100644 index 00000000000..b85778ecb24 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_architecture_planning.md @@ -0,0 +1,165 @@ +# AZD Architecture Planning Tool + +This tool performs Azure service selection and architecture planning for Azure Developer CLI (AZD) initialization. This is Phase 2 of the AZD migration process. + +## Overview + +Use discovery results to select appropriate Azure services, plan hosting strategies, and design infrastructure architecture. + +**IMPORTANT:** Before starting, review the `azd-arch-plan.md` file in your current working directory to understand discovered components and dependencies from the discovery phase. 
+ +## Success Criteria + +- [ ] Azure service selections made for all components +- [ ] Hosting strategies defined for each service +- [ ] Containerization plans documented +- [ ] Infrastructure architecture designed +- [ ] Ready to proceed to file generation phase + +## Azure Service Selection + +**REQUIRED ANALYSIS:** + +For each discovered application component, select the most appropriate Azure hosting platform: + +### Azure Container Apps (PREFERRED) + +**Use for:** Microservices, containerized applications, event-driven workloads +**Benefits:** Auto-scaling, managed Kubernetes, simplified deployment +**Consider when:** Component can be containerized, needs elastic scaling + +### Azure App Service + +**Use for:** Web applications, REST APIs with specific runtime needs +**Benefits:** Managed platform, built-in CI/CD, easy SSL/custom domains +**Consider when:** Need specific runtime versions, Windows-specific features + +### Azure Functions + +**Use for:** Event processing, scheduled tasks, lightweight APIs +**Benefits:** Serverless, automatic scaling, pay-per-execution +**Consider when:** Event-driven processing, stateless operations + +### Azure Static Web Apps + +**Use for:** Frontend SPAs, static sites, JAMstack applications +**Benefits:** Global CDN, built-in authentication, API integration +**Consider when:** Static content, minimal backend requirements + +## Selection Criteria + +**REQUIRED ANALYSIS:** + +For each discovered component, consider: + +- Scalability requirements and traffic patterns +- Runtime and platform needs +- Operational complexity preferences +- Cost considerations +- Team expertise and preferences + +## Containerization Planning + +**REQUIRED ASSESSMENT:** + +For each component, determine: + +- **Containerization Feasibility:** Can it run in Docker? Windows-specific dependencies? 
+- **Docker Strategy:** Base image selection, port mappings, environment variables +- **Resource Requirements:** CPU, memory, storage needs +- **Health Check Strategy:** Endpoint patterns for monitoring + +## Data Storage Planning + +**REQUIRED ANALYSIS:** + +Select appropriate Azure database services: + +### Azure SQL Database + +**Use for:** SQL Server compatibility, complex queries, ACID compliance +**Consider when:** Relational data model, existing SQL Server applications + +### Azure Database for PostgreSQL/MySQL + +**Use for:** PostgreSQL/MySQL workloads, web applications +**Consider when:** Specific database engine compatibility required + +### Azure Cosmos DB + +**Use for:** NoSQL requirements, global scale, flexible schemas +**Consider when:** Multiple data models, global distribution needed + +### Azure Cache for Redis + +**Use for:** Application caching, session storage, real-time analytics +**Consider when:** Performance optimization, session management + +## Messaging and Integration Planning + +**REQUIRED ANALYSIS:** + +Select messaging services based on patterns: + +### Azure Service Bus + +**Use for:** Enterprise messaging, guaranteed delivery, complex routing +**Consider when:** Reliable messaging, enterprise scenarios + +### Azure Event Hubs + +**Use for:** High-throughput event streaming, telemetry ingestion +**Consider when:** Big data scenarios, real-time analytics + +### Azure Event Grid + +**Use for:** Event-driven architectures, reactive programming +**Consider when:** Decoupled systems, serverless architectures + +## Update Architecture Documentation + +**REQUIRED ACTIONS:** + +Update `azd-arch-plan.md` with: + +### Azure Service Mapping Table + +```markdown +| Component | Current Tech | Azure Service | Rationale | +|-----------|-------------|---------------|-----------| +| Web App | React | Static Web Apps | Frontend SPA | +| API Service | Node.js | Container Apps | Microservice architecture | +| Database | PostgreSQL | Azure Database for PostgreSQL | Existing dependency | +``` + +### Hosting Strategy Summary + +- Document hosting decisions for each component +- Include containerization plans where applicable +- Note resource requirements and scaling strategies + +### Infrastructure Architecture + +- Resource group organization strategy +- Networking and security design approach +- Monitoring and logging strategy +- Integration patterns between services + +### Next Steps Checklist + +- [ ] Azure service selected for each component with rationale +- [ ] Hosting strategies defined +- [ ] Containerization plans documented +- [ ] Data storage strategies planned +- [ ] Ready to proceed to file generation phase + +## Next Phase + +After completing architecture planning, proceed to the appropriate file generation tool: + +- Use `azd_azure_yaml_generation` tool for azure.yaml configuration +- Use `azd_infrastructure_generation` tool for Bicep templates +- Use `azd_docker_generation` tool for container configurations +- Use `azd_project_validation` tool for final project validation + +**IMPORTANT:** Keep `azd-arch-plan.md` updated as the central reference for all architecture decisions. This document guides subsequent phases and serves as implementation documentation. 
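Tying the documentation requirements above together, a hosting-strategy entry in `azd-arch-plan.md` might look like the following sketch (the component name, sizing, and dependency are purely illustrative assumptions, not details from this change):

```markdown
#### API Service (Azure Container Apps)

- Containerize: yes (multi-stage Dockerfile, port 3000, `/health` probe)
- Resources: 0.5 vCPU / 1 GiB memory, scale 1-3 replicas on HTTP load
- Depends on: Azure Database for PostgreSQL (connection string supplied via environment)
```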
diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_azure_yaml_generation.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_azure_yaml_generation.md new file mode 100644 index 00000000000..84a7618ea0a --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_azure_yaml_generation.md @@ -0,0 +1,200 @@ +# AZD Azure.yaml Generation Tool + +This specialized tool generates the `azure.yaml` configuration file for Azure Developer CLI (AZD) projects. + +## Overview + +Generate a valid `azure.yaml` configuration file with proper service hosting, build, and deployment settings. + +**IMPORTANT:** Before starting, check if `azd-arch-plan.md` exists in your current working directory. If it exists, review it to understand previous analysis and architecture decisions. Use the existing `azd_yaml_schema` tool for schema validation. + +## Success Criteria + +- [ ] Valid `azure.yaml` file created in root directory +- [ ] All application services properly configured +- [ ] Service hosting configurations match Azure service selections +- [ ] Build and deployment instructions complete +- [ ] File validates against AZD schema (use `azd_yaml_schema` tool) + +## Service Analysis Requirements + +**REQUIRED ACTIONS:** + +1. **Identify Application Services:** + - Frontend applications (React, Angular, Vue.js, static sites) + - Backend services (REST APIs, microservices, GraphQL, gRPC) + - Function-based services (Azure Functions) + - Background services and workers + +2. **Determine Hosting Requirements:** + - **Container Apps:** Microservices, APIs, containerized web apps + - **App Service:** Traditional web applications, APIs + - **Static Web Apps:** Frontend SPAs, static sites + - **Functions:** Event-driven, serverless workloads + +3. **Analyze Build Requirements:** + - Programming language and framework + - Package manager (npm, pip, dotnet, maven) + - Build commands and output directories + - Dependency management needs + +## Azure.yaml Configuration Requirements + +**REQUIRED ACTIONS:** + +Create a complete `azure.yaml` file in the root directory following these patterns: + +### Basic Structure Requirements + +**IMPORTANT:** Use the `azd_yaml_schema` tool for complete schema definition, structure requirements, and validation rules. 
+ +Basic structure: + +```yaml +name: [project-name] +services: + # Service configurations +infra: + provider: bicep + path: infra +``` + +### Service Configuration Patterns + +**Azure Container Apps (for microservices, APIs, containerized apps):** + +```yaml +services: + api: + project: ./src/api + language: js + host: containerapp + docker: + path: ./src/api/Dockerfile +``` + +**Azure App Service (for traditional web apps):** + +```yaml +services: + webapp: + project: ./src/webapp + language: js + host: appservice +``` + +**Azure Functions (for serverless workloads):** + +```yaml +services: + functions: + project: ./src/functions + language: js + host: function +``` + +**Azure Static Web Apps (for SPAs, static sites):** + +```yaml +services: + frontend: + project: ./src/frontend + language: js + host: staticwebapp + dist: build +``` + +### Advanced Configuration Options + +**Environment Variables:** + +```yaml +services: + api: + env: + - name: NODE_ENV + value: production + - name: DATABASE_URL + value: "{{ .Env.DATABASE_URL }}" +``` + +**Custom Build Commands:** + +```yaml +services: + frontend: + hooks: + prebuild: + posix: npm install + build: + posix: npm run build +``` + +## Configuration Requirements + +**CRITICAL REQUIREMENTS:** + +- Service names must be valid Azure resource names (alphanumeric, hyphens only) +- All `project` paths must point to existing directories +- All `docker.path` references must point to existing Dockerfiles +- Host types must be: `containerapp`, `appservice`, `function`, or `staticwebapp` +- Language must match detected programming language +- `dist` paths must match build output directories + +## Validation Requirements + +**VALIDATION STEPS:** + +1. **Schema Validation:** Use `azd_yaml_schema` tool for authoritative schema validation +2. **Path Validation:** Ensure all referenced paths exist +3. **Configuration Testing:** Run `azd show` to test service discovery + +**Validation Commands:** + +```bash +# Validate configuration +azd config show + +# Test service discovery +azd show +``` + +## Common Patterns + +**Multi-Service Microservices:** + +- Frontend: Static Web App +- APIs: Container Apps with Dockerfiles +- Background Services: Container Apps or Functions + +**Full-Stack Application:** + +- Frontend: Static Web App +- Backend: Container App or App Service + +**Serverless Application:** + +- Frontend: Static Web App +- APIs: Azure Functions + +## Update Documentation + +**REQUIRED ACTIONS:** + +Update `azd-arch-plan.md` with: + +- Generated azure.yaml location and schema version +- Service configuration table (service, type, host, language, path) +- Hosting strategy summary by Azure service type +- Build and deployment configuration decisions +- Docker configuration details +- Validation results + +## Next Steps + +After azure.yaml generation is complete: + +1. Validate configuration using `azd_yaml_schema` tool +2. Test service discovery with `azd show` + +**IMPORTANT:** Reference existing tools for specific functionality. Use `azd_yaml_schema` for schema validation. 
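To tie the individual patterns above together, a combined configuration for a hypothetical full-stack project might look like the sketch below (the project name, paths, and `dist` folder are illustrative assumptions, not taken from this change):

```yaml
name: contoso-shop
services:
  frontend:
    project: ./src/frontend
    language: js
    host: staticwebapp
    dist: build
  api:
    project: ./src/api
    language: js
    host: containerapp
    docker:
      path: ./src/api/Dockerfile
infra:
  provider: bicep
  path: infra
```

Once assembled, validate the file with the `azd_yaml_schema` tool and confirm service discovery with `azd show`, as described above.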
diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_discovery_analysis.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_discovery_analysis.md new file mode 100644 index 00000000000..10ff9e4e49c --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_discovery_analysis.md @@ -0,0 +1,200 @@ +# AZD Application Discovery and Analysis Tool + +This tool performs comprehensive discovery and analysis of applications to prepare them for Azure Developer CLI (AZD) initialization. This is Phase 1 of the AZD migration process. + +Always use Azure best practices with intelligent defaults. + +## Overview + +This tool analyzes your current codebase and architecture to: +1. Identify all application components and dependencies +2. Classify components by type and hosting requirements +3. Map dependencies and communication patterns +4. Provide foundation for architecture planning + +**IMPORTANT:** Before starting, check if `azd-arch-plan.md` exists in your current working directory. If it exists, review it to understand what analysis has already been completed and build upon that work. + +## Success Criteria + +The discovery and analysis is successful when: + +- [ ] Complete file system inventory is documented +- [ ] All application components are identified and classified +- [ ] Component dependencies are mapped +- [ ] Results are documented in `azd-arch-plan.md` +- [ ] Ready to proceed to architecture planning phase + +## Step 1: Deep File System Analysis + +**REQUIRED ACTIONS:** + +- Scan all files in the current working directory recursively +- Document file structure, programming languages, and frameworks detected +- Identify configuration files (package.json, requirements.txt, pom.xml, etc.) +- Locate any existing Docker files, docker-compose files, or containerization configs +- Find database configuration files and connection strings +- Identify API endpoints, service definitions, and application entry points +- Look for existing CI/CD pipeline files (.github/workflows, azure-pipelines.yml, etc.) +- Identify documentation files (README.md, API docs, architecture docs) + +**ANALYSIS QUESTIONS TO ANSWER:** + +- What programming languages and frameworks are used? +- What build systems and package managers are in use? +- Are there existing containerization configurations? +- What ports and endpoints are exposed? +- What external dependencies are required? +- Are there existing deployment or infrastructure configurations? 
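As a rough, non-authoritative illustration, many of the questions above can be answered with a quick scan such as the following (the file patterns are examples only; adjust them to the repository at hand):

```bash
# Locate dependency manifests while skipping vendored folders (illustrative patterns)
find . -type f \( -name "package.json" -o -name "requirements.txt" -o -name "pom.xml" \
  -o -name "*.csproj" -o -name "go.mod" \) -not -path "*/node_modules/*"

# Look for existing container and CI/CD configuration
find . -maxdepth 3 \( -iname "Dockerfile*" -o -iname "docker-compose*.yml" \)
ls .github/workflows azure-pipelines.yml 2>/dev/null
```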
+ +**OUTPUT:** Complete inventory of all discoverable application artifacts + +## Step 2: Component Classification + +**REQUIRED ACTIONS:** + +Categorize each discovered component into one of these types: + +- **Web Applications** (frontend, SPA, static sites) + - React, Angular, Vue.js applications + - Static HTML/CSS/JavaScript sites + - Server-rendered web applications + +- **API Services** (REST APIs, GraphQL, gRPC services) + - RESTful web APIs + - GraphQL endpoints + - gRPC services + - Microservices + +- **Background Services** (workers, processors, scheduled jobs) + - Message queue processors + - Scheduled task runners + - Data processing pipelines + - Event handlers + +- **Databases** (relational, NoSQL, caching) + - SQL Server, PostgreSQL, MySQL databases + - NoSQL databases (MongoDB, CosmosDB) + - Caching layers (Redis, Memcached) + - Database migration scripts + +- **Messaging Systems** (queues, topics, event streams) + - Message queues + - Event streaming platforms + - Pub/sub systems + +- **AI/ML Components** (models, inference endpoints, training jobs) + - Machine learning models + - AI inference endpoints + - Training pipelines + - Data preprocessing services + +- **Supporting Services** (authentication, logging, monitoring) + - Authentication services + - Logging aggregators + - Monitoring and metrics + - Configuration services + +**CLASSIFICATION CRITERIA:** + +For each component, determine: +- Primary function and responsibility +- Runtime requirements +- Scalability needs +- Security considerations +- Integration points + +**OUTPUT:** Structured component inventory with classifications + +## Step 3: Dependency Mapping + +**REQUIRED ACTIONS:** + +- Map inter-component dependencies and communication patterns +- Identify external service dependencies (third-party APIs, SaaS services) +- Document data flow between components +- Identify shared resources and configuration +- Analyze network communication requirements +- Document authentication and authorization flows + +**DEPENDENCY ANALYSIS:** + +- **Internal Dependencies:** How components communicate with each other +- **External Dependencies:** Third-party services, APIs, databases +- **Data Dependencies:** Shared databases, file systems, caches +- **Configuration Dependencies:** Shared settings, secrets, environment variables +- **Runtime Dependencies:** Required services for startup and operation + +**COMMUNICATION PATTERNS TO IDENTIFY:** + +- Synchronous HTTP/HTTPS calls +- Asynchronous messaging +- Database connections +- File system access +- Caching patterns +- Authentication flows + +**OUTPUT:** Component dependency graph and communication matrix + +## Step 4: Generate Discovery Report + +**REQUIRED ACTIONS:** + +Create or update `azd-arch-plan.md` with the following sections: + +```markdown +# AZD Architecture Plan + +## Discovery and Analysis Results + +### Application Overview +- [Summary of application type and purpose] +- [Key technologies and frameworks identified] +- [Overall architecture pattern (monolith, microservices, etc.)] + +### Component Inventory +[For each component discovered:] +- **Component Name:** [name] +- **Type:** [classification] +- **Technology:** [language/framework] +- **Location:** [file path/directory] +- **Purpose:** [brief description] +- **Entry Points:** [how component is accessed] +- **Configuration:** [key config files] + +### Dependency Map +[Visual or text representation of dependencies] +- **Component A** → **Component B** (HTTP API) +- **Component B** → **Database** (SQL 
connection) +- **Component A** → **External API** (REST calls) + +### External Dependencies +- [List of third-party services] +- [Required environment variables] +- [External configuration requirements] + +### Next Steps +- [ ] Review discovery results +- [ ] Proceed to architecture planning phase +- [ ] Use `azd_architecture_planning` tool +``` + +## Validation and Next Steps + +**VALIDATION CHECKLIST:** + +- [ ] All major application components identified +- [ ] Component types and technologies documented +- [ ] Dependencies mapped and understood +- [ ] External services and APIs catalogued +- [ ] `azd-arch-plan.md` created or updated with findings + +**NEXT PHASE:** + +After completing this discovery phase, proceed to the **Architecture Planning** phase using the `azd_architecture_planning` tool. This next phase will use your discovery results to: + +- Select appropriate Azure services for each component +- Plan hosting strategies and containerization +- Design infrastructure architecture +- Prepare for configuration file generation + +**IMPORTANT:** Keep the `azd-arch-plan.md` file updated throughout the process as it serves as the central planning document for your AZD migration. diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_docker_generation.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_docker_generation.md new file mode 100644 index 00000000000..38091d58d9a --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_docker_generation.md @@ -0,0 +1,174 @@ +# AZD Docker Generation Tool + +This specialized tool generates Dockerfiles and container configurations for Azure Developer CLI (AZD) projects. + +## Overview + +Generate optimized Dockerfiles for different programming languages and frameworks with Azure Container Apps best practices. + +**IMPORTANT:** Before starting, check if `azd-arch-plan.md` exists in your current working directory. If it exists, review it to understand discovered services and containerization requirements. + +## Success Criteria + +- [ ] Dockerfiles created for all containerizable services +- [ ] .dockerignore files generated for build optimization +- [ ] Health checks and security configurations implemented +- [ ] Multi-stage builds used where appropriate +- [ ] Azure Container Apps best practices followed + +## Containerization Requirements Analysis + +**REQUIRED ACTIONS:** + +1. **Identify Containerization Candidates:** + - Microservices and APIs (REST, GraphQL, gRPC) + - Web applications needing runtime flexibility + - Background services and workers + - Custom applications with specific runtime requirements + +2. **Services That Don't Need Containerization:** + - Static websites (use Azure Static Web Apps) + - Azure Functions (serverless, managed runtime) + - Database services (use managed Azure databases) + +3. **Language and Framework Detection:** + - Programming language (Node.js, Python, .NET, Java, Go, etc.) + - Framework type (Express, FastAPI, ASP.NET Core, Spring Boot) + - Build requirements (npm, pip, dotnet, maven, gradle) + - Runtime dependencies and port configurations +- **Programming language** (Node.js, Python, .NET, Java, Go, etc.) 
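Before moving on to generation, it can help to see a concrete target. Below is a hypothetical multi-stage Node.js Dockerfile that follows the language-specific requirements in the next section; the `dist` output folder, port 3000, and `/health` endpoint are assumptions about the application, not details taken from this change:

```dockerfile
# ---- Build stage ----
FROM node:18-alpine AS build
WORKDIR /app
# Copy dependency manifests first to maximize layer caching
COPY package*.json ./
RUN npm ci
COPY . .
RUN npm run build

# ---- Runtime stage ----
FROM node:18-alpine
WORKDIR /app
ENV NODE_ENV=production
COPY package*.json ./
# Install only production dependencies in the final image
RUN npm ci --only=production
# Copy compiled output from the build stage (output folder is app-specific)
COPY --from=build /app/dist ./dist
# Run as a non-root user, per the security requirements below
RUN addgroup -S nodejs && adduser -S nodejs -G nodejs
USER nodejs
EXPOSE 3000
# Assumes the app serves a /health endpoint as described in this guide
HEALTHCHECK --interval=30s --timeout=3s \
  CMD wget -qO- http://localhost:3000/health || exit 1
CMD ["npm", "start"]
```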
+ +## Dockerfile Generation Requirements + +**REQUIRED ACTIONS:** + +For each containerizable service, generate optimized Dockerfiles following these patterns: + +### Language-Specific Requirements + +**Node.js Applications:** +- Use `node:18-alpine` base image +- Implement multi-stage build (build + runtime) +- Copy package*.json first for layer caching +- Use `npm ci --only=production` +- Create non-root user (`nodejs`) +- Expose appropriate port (typically 3000) +- Include health check endpoint +- Use `CMD ["npm", "start"]` + +**Python Applications:** +- Use `python:3.11-slim` base image +- Set environment variables: `PYTHONDONTWRITEBYTECODE=1`, `PYTHONUNBUFFERED=1` +- Copy requirements.txt first for caching +- Use `pip install --no-cache-dir` +- Create non-root user (`appuser`) +- Expose appropriate port (typically 8000) +- Include health check endpoint +- Use appropriate startup command (uvicorn, gunicorn, etc.) + +**.NET Applications:** +- Use `mcr.microsoft.com/dotnet/sdk:8.0` for build stage +- Use `mcr.microsoft.com/dotnet/aspnet:8.0` for runtime +- Multi-stage build: restore → build → publish → runtime +- Copy .csproj first for layer caching +- Create non-root user (`appuser`) +- Expose port 8080 (standard for .NET in containers) +- Include health check endpoint +- Use `ENTRYPOINT ["dotnet", "AppName.dll"]` + +**Java/Spring Boot Applications:** +- Use `openjdk:17-jdk-slim` for build, `openjdk:17-jre-slim` for runtime +- Copy pom.xml/build.gradle first for dependency caching +- Multi-stage build pattern +- Create non-root user (`appuser`) +- Expose port 8080 +- Include actuator health check +- Use `CMD ["java", "-jar", "app.jar"]` + +## Security and Best Practices + +**CRITICAL REQUIREMENTS:** + +- **Always use non-root users** in production stage +- **Use minimal base images** (alpine, slim variants) +- **Implement multi-stage builds** to reduce image size +- **Include health check endpoints** for Container Apps +- **Set proper working directories** and file permissions +- **Use layer caching** by copying dependency files first +- **Never include secrets** in container images + +## .dockerignore Requirements + +**REQUIRED ACTIONS:** + +Create .dockerignore files with these patterns: + +**Universal Exclusions:** +- Version control: `.git`, `.gitignore` +- Documentation: `README.md`, `*.md` +- IDE files: `.vscode/`, `.idea/`, `*.swp` +- OS files: `.DS_Store`, `Thumbs.db` +- Docker files: `Dockerfile*`, `.dockerignore`, `docker-compose*.yml` +- Build artifacts and logs + +**Language-Specific Exclusions:** +- **Node.js:** `node_modules/`, `npm-debug.log*`, `coverage/`, `dist/` +- **Python:** `__pycache__/`, `*.pyc`, `venv/`, `.pytest_cache/`, `dist/` +- **.NET:** `bin/`, `obj/`, `*.user`, `packages/`, `.vs/` +- **Java:** `target/`, `*.class`, `.mvn/repository` + +## Health Check Implementation + +**REQUIRED ACTIONS:** + +Each containerized service must include a health check endpoint: + +- **Endpoint:** `/health` (standard convention) +- **Response:** JSON with status and timestamp +- **HTTP Status:** 200 for healthy, 503 for unhealthy +- **Timeout:** 3 seconds maximum response time +- **Content:** `{"status": "healthy", "timestamp": "ISO-8601"}` + +## Container Optimization + +**REQUIRED OPTIMIZATIONS:** + +- Use multi-stage builds to exclude build tools from production images +- Copy package/dependency files before source code for better caching +- Combine RUN commands to reduce layers +- Clean package manager caches in same RUN command +- Use specific versions for base images 
(avoid `latest`) +- Set resource limits appropriate for Azure Container Apps + +## Validation and Testing + +**VALIDATION REQUIREMENTS:** + +- All Dockerfiles must build successfully: `docker build -t test-image .` +- Containers must run with non-root users +- Health checks must respond correctly +- Images should be optimized for size (use `docker images` to verify) +- Container startup time should be reasonable (<30 seconds) + +## Update Documentation + +**REQUIRED ACTIONS:** + +Update `azd-arch-plan.md` with: + +- List of generated Dockerfiles and their languages +- Container configurations (ports, health checks, users) +- Security implementations (non-root users, minimal images) +- Build optimizations applied +- Local testing commands + +## Next Steps + +After Docker generation is complete: + +1. Test all containers build successfully locally +2. Integrate Dockerfile paths into `azure.yaml` service definitions +3. Configure Container Apps infrastructure to use these images +4. Set up Azure Container Registry for image storage + +**IMPORTANT:** Reference existing tools for schema validation. For azure.yaml updates, use the `azd_azure_yaml_generation` tool. For infrastructure setup, use the `azd_infrastructure_generation` tool. diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md index dd89e2586b7..bff5ab8418d 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md @@ -8,6 +8,7 @@ This document provides comprehensive rules and guidelines for generating Bicep I - **REQUIRED**: Place all IaC files in the `./infra` folder within an AZD project - **REQUIRED**: Name the main deployment file `main.bicep` - this is the primary deployment target +- **REQUIRED**: Create a `main.parameters.json` file alongside `main.bicep` containing all parameter defaults for the Bicep deployment - **REQUIRED**: The root level `main.bicep` must be a subscription level deployment using `targetScope = 'subscription'` - **REQUIRED**: The main.bicep file must create a resource group as the primary container for all resources - **REQUIRED**: Pass the resource group scope to all child modules that deploy resources @@ -156,6 +157,26 @@ module appService 'modules/app-service.bicep' = { } ``` +### Main.parameters.json Structure Template + +```json +{ + "$schema": "https://schema.management.azure.com/schemas/2018-05-01/subscriptionDeploymentParameters.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "environmentName": { + "value": "${AZURE_ENV_NAME}" + }, + "location": { + "value": "${AZURE_LOCATION}" + }, + "tags": { + "value": {} + } + } +} +``` + ### Child Module Structure Template ```bicep @@ -183,6 +204,7 @@ Before completing code generation, verify: - [ ] All files are in `./infra` folder - [ ] `main.bicep` exists as primary deployment file with subscription scope +- [ ] `main.parameters.json` exists alongside `main.bicep` with parameter defaults - [ ] Resource group is created in `main.bicep` and properly tagged - [ ] All child modules use `targetScope = 'resourceGroup'` and receive resource group scope - [ ] All resources use consistent naming convention diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_infrastructure_generation.md 
b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_infrastructure_generation.md new file mode 100644 index 00000000000..e7ff88ef55c --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_infrastructure_generation.md @@ -0,0 +1,159 @@ +# AZD Infrastructure Generation Tool + +This specialized tool generates Bicep infrastructure templates for Azure Developer CLI (AZD) projects. + +## Overview + +Generate modular Bicep templates following Azure security and operational best practices. + +**IMPORTANT:** +- Before starting, check if `azd-arch-plan.md` exists to understand architecture decisions +- **Use the `azd_iac_generation_rules` tool for complete IaC rules, naming conventions, and best practices** + +## Success Criteria + +- [ ] Complete Bicep template structure created in `./infra` directory +- [ ] All templates compile without errors (`az bicep build --file infra/main.bicep`) +- [ ] Infrastructure supports all services defined in `azure.yaml` +- [ ] Follows all rules from `azd_iac_generation_rules` tool +- [ ] Parameter files configured appropriately + +## Requirements Analysis + +**REQUIRED ACTIONS:** + +1. **Review IaC Rules:** Use `azd_iac_generation_rules` tool to get complete file structure, naming conventions, and compliance requirements + +2. **Analyze Infrastructure Needs:** + - Map services from `azure.yaml` to required Azure resources + - Identify shared resources (Log Analytics, Container Registry, Key Vault) + - Determine connectivity and security requirements + +3. **Service Infrastructure Mapping:** + - **Container Apps:** Environment, Log Analytics, Container Registry, App Insights, Managed Identity + - **App Service:** Service Plan, App Service, App Insights + - **Functions:** Function App, Storage Account, App Insights + - **Static Web Apps:** Static Web App resource + - **Database:** SQL/CosmosDB/PostgreSQL with appropriate SKUs + +## Generation Workflow + +**REQUIRED ACTIONS:** + +1. **Create Directory Structure:** + Follow structure from `azd_iac_generation_rules` tool: + ``` + ./infra/ + ├── main.bicep + ├── main.parameters.json + ├── modules/ + └── [additional files per rules] + ``` + +2. **Generate Main Template:** + - Use subscription-level scope (`targetScope = 'subscription'`) + - Create resource group with proper tagging + - Deploy modules conditionally based on service requirements + - Follow naming conventions from IaC rules tool + +3. **Generate Module Templates:** + - Create focused modules for each service type + - Use resource group scope for all modules + - Accept standardized parameters (environmentName, location, tags) + - Output connection information for applications + +4. 
**Generate Parameter Files:** + - Provide sensible defaults for all parameters + - Use parameter references for environment-specific values + - Include all required parameters from IaC rules + +``` +./infra/ +├── main.bicep # Primary deployment template +├── main.parameters.json # Default parameters +├── modules/ +│ ├── container-apps.bicep +│ ├── app-service.bicep +│ ├── functions.bicep +│ ├── database.bicep +│ ├── storage.bicep +│ ├── keyvault.bicep +│ └── monitoring.bicep +└── resources.bicep # Shared resources +``` + +## Template Requirements + +### Main Template (main.bicep) + +**CRITICAL REQUIREMENTS:** + +- Use `targetScope = 'subscription'` +- Accept standardized parameters: `environmentName`, `location`, `principalId` +- Include feature flags for conditional deployment (e.g., `deployDatabase`) +- Create resource group with proper tagging (`azd-env-name`, `azd-provisioned`) +- Call modules conditionally based on feature flags +- Output connection strings and service endpoints + +### Module Templates + +## Generate Infrastructure Files + +**WORKFLOW REQUIREMENTS:** + +1. **Create Directory Structure:** + + ```text + ./infra/ + ├── main.bicep + ├── main.parameters.json + ├── modules/ + └── [service-specific modules] + ``` + +2. **Generate Main Template (main.bicep):** + - Use `targetScope = 'subscription'` + - Create resource group with proper tagging + - Deploy modules conditionally based on service requirements + +3. **Generate Module Templates:** + - Create focused modules for each service type + - Use standardized parameters (`environmentName`, `location`, `tags`) + - Output connection information for applications + +4. **Generate Parameter Files:** + - Provide sensible defaults for all parameters + - Use parameter references for environment-specific values + +## Validation and Testing + +**VALIDATION REQUIREMENTS:** + +- All Bicep templates must compile without errors: `az bicep build --file infra/main.bicep` +- Validate deployment: `az deployment sub validate --template-file infra/main.bicep` +- Test with AZD: `azd provision --dry-run` +- Use existing tools for schema validation (reference `azd_yaml_schema` tool for azure.yaml validation) + +## Update Documentation + +**REQUIRED ACTIONS:** + +Update `azd-arch-plan.md` with: + +- List of generated infrastructure files +- Resource naming conventions used +- Security configurations implemented +- Parameter requirements +- Output variables available +- Validation results + +## Next Steps + +After infrastructure generation is complete: + +1. Validate all templates compile successfully +2. Test deployment with `azd provision --dry-run` +3. Deploy with `azd provision` (creates resources) +4. Proceed to application deployment with `azd deploy` + +**IMPORTANT:** Reference existing tools instead of duplicating functionality. For azure.yaml validation, use the `azd_yaml_schema` tool. For Bicep best practices, follow the AZD IaC Generation Rules document. diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md index d8f7a391f68..5859bf69840 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md @@ -1,267 +1,124 @@ # AZD Application Initialization and Migration Plan -This document provides a comprehensive, step-by-step plan for initializing or migrating applications to use Azure Developer CLI (AZD). 
Follow these steps sequentially to ensure successful AZD adoption. +This document provides a comprehensive, step-by-step plan for initializing or migrating applications to use Azure Developer CLI (AZD). This is the orchestrating tool that guides you through using the specialized AZD tools. -## Executive Summary - -Transform any application into an AZD-compatible project by: - -1. Analyzing the current codebase and architecture -2. Identifying all application components and dependencies -3. Generating required configuration and infrastructure files -4. Establishing the AZD environment structure - -## Phase 1: Discovery and Analysis - -### Step 1: Deep File System Analysis - -**REQUIRED ACTIONS:** - -- Scan all files in the current working directory recursively -- Document file structure, programming languages, and frameworks detected -- Identify configuration files (package.json, requirements.txt, pom.xml, etc.) -- Locate any existing Docker files, docker-compose files, or containerization configs -- Find database configuration files and connection strings -- Identify API endpoints, service definitions, and application entry points - -**OUTPUT:** Complete inventory of all discoverable application artifacts - -### Step 2: Component Classification - -**REQUIRED ACTIONS:** - -- Categorize each discovered component into one of these types: - - **Web Applications** (frontend, SPA, static sites) - - **API Services** (REST APIs, GraphQL, gRPC services) - - **Background Services** (workers, processors, scheduled jobs) - - **Databases** (relational, NoSQL, caching) - - **Messaging Systems** (queues, topics, event streams) - - **AI/ML Components** (models, inference endpoints, training jobs) - - **Supporting Services** (authentication, logging, monitoring) - -**OUTPUT:** Structured component inventory with classifications - -### Step 3: Dependency Mapping +**IMPORTANT: Before starting any workflow, ALWAYS check if `azd-arch-plan.md` exists in the current directory and review it to understand current progress, previous decisions, and what work has already been completed. This prevents duplicate work and ensures continuity.** -**REQUIRED ACTIONS:** +Always use Azure best practices with intelligent defaults. -- Map inter-component dependencies and communication patterns -- Identify external service dependencies (third-party APIs, SaaS services) -- Document data flow between components -- Identify shared resources and configuration - -**OUTPUT:** Component dependency graph and communication matrix - -## Phase 2: Architecture Planning and Azure Service Selection - -### Application Component Planning - -For each identified application component, execute the following analysis: +## Executive Summary -**REQUIRED ANALYSIS:** +Transform any application into an AZD-compatible project using a structured approach with specialized tools. Each tool has a focused responsibility and builds upon the previous phase to create a complete AZD deployment. 
-- **Hosting Platform Selection:** - - **Azure Container Apps** (PREFERRED for microservices and containerized apps) - - **Azure App Service** (for web apps and APIs with specific runtime requirements) - - **Azure Functions** (for serverless and event-driven components) - - **Azure Static Web Apps** (for frontend applications and SPAs) - - **Azure Kubernetes Service** (for complex orchestration requirements) +## Success Criteria -- **Containerization Assessment:** - - Determine if component can run in Docker container - - If Dockerfile doesn't exist, plan Docker container strategy - - Identify base images and runtime requirements - - Document port mappings and environment variables +The migration is successful when: -- **Configuration Requirements:** - - Identify environment-specific settings - - Map secrets and sensitive configuration - - Document connection strings and service endpoints - - Plan configuration injection strategy +- [ ] All application components are identified and classified +- [ ] `azure.yaml` file is valid and complete +- [ ] All infrastructure files are generated and error-free +- [ ] Required Dockerfiles are created for containerizable components +- [ ] `azd-arch-plan.md` provides comprehensive documentation +- [ ] AZD environment is initialized and configured +- [ ] **All validation checks pass (use `azd_project_validation` tool)** -**OUTPUT:** Hosting strategy and containerization plan for each component +## Complete Workflow Guide -### Database Component Planning +### Phase 1: Review Existing Progress -For components using persistent data storage: +Check if the file `azd-arch-plan.md` exists in the current directory and review it to understand current progress, previous decisions, and what work has already been completed. This prevents duplicate work and ensures continuity. -**REQUIRED ANALYSIS:** +- If file exists: Review thoroughly and skip completed phases +- If file doesn't exist: Proceed to Phase 2 -- **Azure Database Service Selection:** - - **Azure SQL Database** (for relational data with SQL Server compatibility) - - **Azure Database for PostgreSQL** (for PostgreSQL workloads) - - **Azure Database for MySQL** (for MySQL workloads) - - **Azure Cosmos DB** (for NoSQL, multi-model data) - - **Azure Cache for Redis** (for caching and session storage) +### Phase 2: Discovery and Analysis -- **Migration Strategy:** - - Assess current database schema and data - - Plan data migration approach - - Identify backup and recovery requirements - - Document connection string patterns +**Tool:** `azd_discovery_analysis` -**OUTPUT:** Database hosting plan and migration strategy +Scans files recursively, documents structure/languages/frameworks, identifies entry points, maps dependencies, and creates component inventory in `azd-arch-plan.md`. -### Messaging Component Planning +### Phase 3: Architecture Planning and Azure Service Selection -For components using asynchronous communication: +**Tool:** `azd_architecture_planning` -**REQUIRED ANALYSIS:** +Maps components to Azure services, plans hosting strategies, designs database/messaging architecture, and creates containerization strategies. Updates `azd-arch-plan.md`. 
-- **Azure Messaging Service Selection:** - - **Azure Service Bus** (for reliable enterprise messaging) - - **Azure Event Hubs** (for high-throughput event streaming) - - **Azure Event Grid** (for event-driven architectures) - - **Azure Storage Queues** (for simple queue scenarios) +### Phase 4: File Generation -- **Integration Planning:** - - Map message flows and routing - - Identify message schemas and formats - - Plan dead letter handling and error scenarios - - Document scaling and throughput requirements +Generate all necessary AZD files using these focused tools (most projects need all three): -**OUTPUT:** Messaging architecture and integration plan +#### 1. Generate Azure.yaml Configuration -### AI Component Planning +**Tool:** `azd_azure_yaml_generation` (Required for all AZD projects) -For components using artificial intelligence or machine learning: +#### 2. Generate Infrastructure Templates -**REQUIRED ANALYSIS:** +**Tool:** `azd_infrastructure_generation` (Required for all AZD projects) -- **Azure AI Service Selection:** - - **Azure OpenAI Service** (for GPT models and cognitive services) - - **Azure AI Services** (for vision, speech, language processing) - - **Azure Machine Learning** (for custom ML models and training) - - **Azure Cognitive Search** (for intelligent search capabilities) +#### 3. Generate Docker Configurations -- **Model and Data Requirements:** - - Identify required AI models and versions - - Document input/output data formats - - Plan model deployment and scaling strategy - - Assess training data and pipeline requirements +**Tool:** `azd_docker_generation` (Required for containerizable services) -**OUTPUT:** AI service architecture and deployment plan +**Use in sequence:** azure.yaml → infrastructure → docker -## Phase 3: File Generation and Configuration +### Phase 5: Project Validation and Environment Setup -### Step 1: Generate azure.yaml Configuration +**Tool:** `azd_project_validation` -**REQUIRED ACTIONS:** +Validates azure.yaml against schema, compiles Bicep templates, ensures AZD environment exists, tests packaging, validates deployment with preview, and provides readiness confirmation. -- Create `azure.yaml` file in the root directory -- Define all services with appropriate hosting configurations -- Specify build and deployment instructions for each service -- Configure environment variable mappings -- Reference infrastructure templates correctly +## Usage Patterns -**TEMPLATE STRUCTURE:** +### Complete New Project Migration -```yaml -name: {project-name} -services: - {service-name}: - project: ./path/to/service - host: {hosting-type} - # Additional service-specific configuration +```text +1. Review existing azd-arch-plan.md (Phase 1) +2. azd_discovery_analysis +3. azd_architecture_planning +4. azd_azure_yaml_generation +5. azd_infrastructure_generation +6. azd_docker_generation (if containerization needed) +7. 
azd_project_validation ``` -### Step 2: Generate Infrastructure as Code Files +### Update Existing AZD Project -**REQUIRED ACTIONS:** - -- Create `./infra` directory structure -- Generate `main.bicep` as primary deployment template -- Create modular Bicep files for each resource type -- **CRITICAL:** Follow all rules from AZD IaC Generation Rules document -- Implement proper naming conventions and tagging strategies -- Include supporting resources (Log Analytics, Application Insights, Key Vault) - -### Step 3: Generate Container Configurations - -**REQUIRED ACTIONS:** - -- Create Dockerfile for each containerizable component -- Use appropriate base images for detected programming languages -- Configure health checks and startup commands -- Set proper working directories and file permissions -- Optimize for production deployment - -### Step 4: Generate Architecture Documentation - -**REQUIRED ACTIONS:** - -- Create `azd-arch-plan.md` with comprehensive analysis -- Document all discovered components and their relationships -- Include architecture diagrams (text-based or mermaid) -- Explain Azure service selections and rationale -- Provide deployment and operational guidance - -**DOCUMENT STRUCTURE:** - -- Executive Summary -- Application Architecture Overview -- Component Analysis -- Azure Service Mapping -- Infrastructure Design -- Deployment Strategy -- Operational Considerations - -## Phase 4: Environment Initialization - -### Step 1: Create AZD Environment - -**REQUIRED ACTIONS:** - -- Execute: `azd env new {directory-name}-dev` -- Use current working directory name as environment name base -- Configure environment-specific settings -- Validate environment configuration - -### Step 2: Validation and Testing - -**REQUIRED ACTIONS:** +```text +1. Review existing azd-arch-plan.md (Phase 1) +2. azd_azure_yaml_generation → azd_infrastructure_generation → azd_docker_generation → azd_project_validation +``` -- Run `azd package` to validate service configurations -- Execute `azd provision --dry-run` to test infrastructure templates -- Verify all Bicep files compile without errors -- Check all referenced files and paths exist -- Validate environment variable configurations +### Quick Service Addition -## Success Criteria +```text +1. Review existing azd-arch-plan.md (Phase 1) +2. azd_discovery_analysis → azd_azure_yaml_generation → azd_docker_generation → azd_project_validation +``` -The migration is successful when: +## Central Planning Document -- [ ] All application components are identified and classified -- [ ] `azure.yaml` file is valid and complete -- [ ] All infrastructure files are generated and error-free -- [ ] Required Dockerfiles are created for containerizable components -- [ ] `azd-arch-plan.md` provides comprehensive documentation -- [ ] AZD environment is initialized and validated -- [ ] `azd package` completes without errors -- [ ] `azd provision --dry-run` validates successfully +**CRITICAL:** `azd-arch-plan.md` is the central coordination file that tracks progress, documents decisions, and maintains project state. Always review this file before starting any tool to understand current progress and avoid duplicate work. 
-## Common Patterns and Best Practices +## Supporting Resources -### For Multi-Service Applications +### Schema and Validation -- Use Azure Container Apps for microservices architecture -- Implement shared infrastructure (networking, monitoring) -- Configure service-to-service communication properly +- Use `azd_yaml_schema` tool to get complete azure.yaml schema information +- Use `azd_iac_generation_rules` tool for Infrastructure as Code best practices -### For Data-Intensive Applications +### Troubleshooting -- Co-locate compute and data services in same region -- Implement proper connection pooling and caching -- Configure backup and disaster recovery +Each tool includes: -### For AI-Enabled Applications +- Validation checklists +- Testing commands +- Common issues and solutions +- Next step guidance -- Separate AI services from main application logic -- Implement proper error handling for AI service calls -- Plan for model updates and versioning +## Getting Started -### For High-Availability Applications +**Standard workflow:** +1. Review existing `azd-arch-plan.md` (Phase 1) +2. `azd_discovery_analysis` → `azd_architecture_planning` → File generation tools → `azd_project_validation` -- Configure multiple availability zones -- Implement health checks and auto-scaling -- Plan for disaster recovery scenarios +Keep `azd-arch-plan.md` updated throughout the process as the central coordination document. diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_project_validation.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_project_validation.md new file mode 100644 index 00000000000..e8e06172fa9 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_project_validation.md @@ -0,0 +1,181 @@ +# AZD Project Validation Tool + +Validates an AZD project by running comprehensive checks on all components including azure.yaml schema validation, Bicep template validation, environment setup, packaging, and deployment preview. + +## Purpose + +This tool performs end-to-end validation of an AZD project to ensure all components are properly configured and the project is ready for deployment. It centralizes all validation logic to provide a single source of truth for project readiness. + +## Validation Workflow + +### 1. Azure.yaml Schema Validation + +**REQUIRED ACTION:** +Use the `azd_yaml_schema` tool to validate the azure.yaml file against the official schema. + +**Validation Steps:** + +- Check if `azure.yaml` exists in current directory +- Validate schema compliance using `azd_yaml_schema` tool +- Report any schema violations or missing required fields +- Verify service definitions and configurations + +### 2. Bicep Template Validation + +**REQUIRED ACTIONS:** + +1. **Find Bicep Files:** Scan `./infra` directory for `.bicep` files +2. **Compile Templates:** Run `az bicep build --file --stdout` for each template +3. **Validate Syntax:** Ensure all templates compile without errors +4. **Check Dependencies:** Verify module references and parameter passing + +**Commands to Run:** + +```powershell +# Compile main template +az bicep build --file ./infra/main.bicep + +# Validate deployment (requires Azure CLI login) +az deployment sub validate --template-file ./infra/main.bicep --parameters ./infra/main.parameters.json --location +``` + +### 3. AZD Environment Validation + +**REQUIRED ACTIONS:** + +1. **Check Environment Exists:** Run `azd env list` to see available environments +2. 
**Create Environment if Missing:**
+   - If no environments exist, run `azd env new <environment-name>`
+   - Use current directory name as environment name (sanitized)
+3. **Verify Environment Selection:** Ensure an environment is currently selected
+
+**Commands to Run:**
+
+```powershell
+# List existing environments
+azd env list
+
+# Create new environment if none exist (replace <environment-name> with directory name)
+azd env new <environment-name>
+
+# Select environment if not already selected
+azd env select <environment-name>
+```
+
+### 4. Package Validation
+
+**REQUIRED ACTION:**
+Run `azd package` to validate all services can be packaged successfully.
+
+**Validation Steps:**
+
+- Verify all service source paths are valid
+- Check Docker builds complete successfully (for containerized services)
+- Ensure all build artifacts are created
+- Validate package manifests
+
+**Command to Run:**
+
+```powershell
+azd package
+```
+
+### 5. Deployment Preview Validation
+
+**REQUIRED ACTION:**
+Run `azd provision --preview` to validate infrastructure deployment without actually creating resources.
+
+**Validation Steps:**
+
+- Verify Azure authentication is working
+- Check resource group creation plan
+- Validate all Bicep modules deploy correctly
+- Ensure parameter values are properly resolved
+- Confirm no deployment conflicts
+
+**Command to Run:**
+
+```powershell
+azd provision --preview
+```
+
+## Success Criteria
+
+The project validation is successful when:
+
+- [ ] `azure.yaml` passes schema validation
+- [ ] All Bicep templates compile without errors or warnings
+- [ ] AZD environment exists and is properly configured
+- [ ] `azd package` completes without errors or warnings
+- [ ] `azd provision --preview` completes without errors or warnings
+- [ ] All service configurations are valid
+- [ ] No missing dependencies or configuration issues
+
+## Error Handling
+
+### Common Issues and Solutions
+
+**Azure.yaml Schema Errors:**
+
+- Use `azd_yaml_schema` tool to get correct schema format
+- Check service names match directory structure
+- Verify all required fields are present
+
+**Bicep Compilation Errors:**
+
+- Check module paths and parameter names
+- Verify resource naming conventions follow Azure requirements
+- Ensure all required parameters have values
+
+**Environment Issues:**
+
+- Run `azd auth login` if authentication fails
+- Check Azure subscription access and permissions
+- Verify location parameter is valid Azure region
+
+**Package Errors:**
+
+- Check service source paths in azure.yaml
+- Verify Docker builds work locally for containerized services
+- Ensure all build dependencies are available
+
+**Provision Preview Errors:**
+
+- Verify Azure subscription has sufficient permissions
+- Check resource quotas and limits
+- Ensure resource names are globally unique where required
+
+## Update Documentation
+
+**REQUIRED ACTIONS:**
+
+Update `azd-arch-plan.md` with:
+
+- Validation results for each component
+- Any issues found and resolutions applied
+- Environment configuration details
+- Deployment preview summary
+- Project readiness status
+
+## Next Steps
+
+After successful validation:
+
+1. **Deploy Infrastructure:** Run `azd provision` to create Azure resources
+2. **Deploy Applications:** Run `azd deploy` to deploy services
+3. **Complete Deployment:** Run `azd up` to provision and deploy in one step
+4. **Monitor Deployment:** Use `azd monitor` to check application health
+5.
**View Logs:** Use `azd logs` to view deployment and runtime logs + +### Production Preparation + +For production deployment: + +- Create production environment: `azd env new -prod` +- Configure production-specific settings and secrets +- Set up monitoring, alerting, and backup procedures +- Document operational procedures and runbooks + +**DEPLOYMENT READY:** Your AZD migration is complete and ready for deployment with `azd up`. + +**IMPORTANT:** This tool centralizes all validation logic. Other tools should reference this tool for validation rather than duplicating validation steps. diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go index 05a9bc1619f..c8f7752cba1 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go @@ -12,3 +12,18 @@ var AzdIacRulesPrompt string //go:embed azure.yaml.json var AzdYamlSchemaPrompt string + +//go:embed azd_discovery_analysis.md +var AzdDiscoveryAnalysisPrompt string + +//go:embed azd_architecture_planning.md +var AzdArchitecturePlanningPrompt string + +//go:embed azd_azure_yaml_generation.md +var AzdAzureYamlGenerationPrompt string + +//go:embed azd_infrastructure_generation.md +var AzdInfrastructureGenerationPrompt string + +//go:embed azd_docker_generation.md +var AzdDockerGenerationPrompt string diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go index 6818542a308..5ddc49c749c 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go @@ -24,7 +24,7 @@ func (l *IoToolsLoader) LoadTools() ([]tools.Tool, error) { &CreateDirectoryTool{CallbacksHandler: l.callbackHandler}, &DeleteDirectoryTool{CallbacksHandler: l.callbackHandler}, &ReadFileTool{CallbacksHandler: l.callbackHandler}, - &WriteFileTool{CallbacksHandler: l.callbackHandler}, + &WriteFileTool{}, &CopyFileTool{CallbacksHandler: l.callbackHandler}, &MoveFileTool{CallbacksHandler: l.callbackHandler}, &DeleteFileTool{CallbacksHandler: l.callbackHandler}, diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go index f0413af9e75..bc1ac20d9b8 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go @@ -9,13 +9,11 @@ import ( "strings" "time" - "github.com/tmc/langchaingo/callbacks" + "azd.ai.start/internal/tools/common" ) // WriteFileTool implements a comprehensive file writing tool that handles all scenarios -type WriteFileTool struct { - CallbacksHandler callbacks.Handler -} +type WriteFileTool struct{} // WriteFileRequest represents the JSON input for the write_file tool type WriteFileRequest struct { @@ -99,54 +97,44 @@ Large file (chunked): The input must be formatted as a single line valid JSON string.` } -func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { - if t.CallbacksHandler != nil { - logInput := input - if len(input) > 200 { - logInput = input[:200] + "... 
(truncated)" - } - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("write_file: %s", logInput)) +// createErrorResponse creates a JSON error response +func (t WriteFileTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() + } + + errorResp := common.ErrorResponse{ + Error: true, + Message: message, + } + + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + + return fallbackMsg, nil } + output := string(jsonData) + + return output, nil +} + +func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { if input == "" { - output := "❌ No input provided\n\n" - output += "📝 Expected JSON format:\n" - output += `{"filename": "path/to/file.txt", "content": "file content here"}` + return t.createErrorResponse(fmt.Errorf("empty input"), "No input provided.") + } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("empty input")) - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil - } // Parse JSON input + // Parse JSON input var req WriteFileRequest if err := json.Unmarshal([]byte(input), &req); err != nil { - output := "❌ Invalid JSON input: " + err.Error() + "\n\n" - output += "📝 Expected format:\n" - output += `{"filename": "path/to/file.txt", "content": "file content here"}` + "\n\n" - output += "💡 Common JSON issues:\n" - output += "- Use double quotes for strings\n" - output += "- Escape backslashes: \\$ should be \\\\$\n" - output += "- Remove trailing commas\n" - output += "- No comments allowed in JSON" - - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil + return t.createErrorResponse(err, "Invalid JSON input") } // Validate required fields if req.Filename == "" { - output := "❌ Missing required field: filename cannot be empty\n\n" - output += "📝 Example: " + `{"filename": "infra/main.bicep", "content": "param location string = 'eastus'"}` - - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("missing filename")) - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil + return t.createErrorResponse(fmt.Errorf("missing filename"), "Missing required field: filename cannot be empty.") } // Determine mode and operation @@ -168,11 +156,7 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { // handleChunkedWrite handles writing files in chunks func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequest) (string, error) { if req.ChunkNum < 1 || req.TotalChunks < 1 || req.ChunkNum > req.TotalChunks { - err := fmt.Errorf("invalid chunk numbers: chunkNum=%d, totalChunks=%d", req.ChunkNum, req.TotalChunks) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("invalid chunk numbers: chunkNum=%d, totalChunks=%d", req.ChunkNum, req.TotalChunks), fmt.Sprintf("Invalid chunk numbers: chunkNum=%d, totalChunks=%d. 
ChunkNum must be between 1 and totalChunks", req.ChunkNum, req.TotalChunks)) } filePath := strings.TrimSpace(req.Filename) @@ -180,10 +164,7 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ // Ensure directory exists if err := t.ensureDirectory(filePath); err != nil { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(err, fmt.Sprintf("Failed to create directory for file %s: %s", filePath, err.Error())) } var err error @@ -197,11 +178,7 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ // Subsequent chunks - append file, openErr := os.OpenFile(filePath, os.O_APPEND|os.O_WRONLY, 0644) if openErr != nil { - err = fmt.Errorf("failed to open file for append %s: %w", filePath, openErr) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(openErr, fmt.Sprintf("Failed to open file for append %s: %s", filePath, openErr.Error())) } defer file.Close() @@ -210,21 +187,13 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ } if err != nil { - toolErr := fmt.Errorf("failed to write chunk to file %s: %w", filePath, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to write chunk to file %s: %s", filePath, err.Error())) } // Get file info fileInfo, err := os.Stat(filePath) if err != nil { - toolErr := fmt.Errorf("failed to verify file %s: %w", filePath, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to verify file %s: %s", filePath, err.Error())) } // Create JSON response @@ -255,19 +224,11 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ // Convert to JSON jsonData, err := json.MarshalIndent(response, "", " ") if err != nil { - toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } output := string(jsonData) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil } @@ -283,10 +244,7 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ // Ensure directory exists if err := t.ensureDirectory(filePath); err != nil { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(err, fmt.Sprintf("Failed to create directory for file %s: %s", filePath, err.Error())) } var err error @@ -295,11 +253,7 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ switch mode { case "create": if _, err := os.Stat(filePath); err == nil { - toolErr := fmt.Errorf("file %s already exists (create mode)", filePath) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(fmt.Errorf("file %s already exists (create mode)", filePath), fmt.Sprintf("File %s already exists. 
Cannot create file in 'create' mode when file already exists", filePath)) } err = os.WriteFile(filePath, []byte(content), 0644) operation = "Created" @@ -307,11 +261,7 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ case "append": file, openErr := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if openErr != nil { - toolErr := fmt.Errorf("failed to open file for append %s: %w", filePath, openErr) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(openErr, fmt.Sprintf("Failed to open file for append %s: %s", filePath, openErr.Error())) } defer file.Close() _, err = file.WriteString(content) @@ -323,21 +273,13 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ } if err != nil { - toolErr := fmt.Errorf("failed to %s file %s: %w", strings.ToLower(operation), filePath, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to %s file %s: %s", strings.ToLower(operation), filePath, err.Error())) } // Get file size for verification fileInfo, err := os.Stat(filePath) if err != nil { - toolErr := fmt.Errorf("failed to verify file %s: %w", filePath, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to verify file %s: %s", filePath, err.Error())) } // Create JSON response @@ -358,19 +300,11 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ // Convert to JSON jsonData, err := json.MarshalIndent(response, "", " ") if err != nil { - toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } output := string(jsonData) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil } From bf7f39f28b00900c1dc36dee7bbbae29c02e3615 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Fri, 1 Aug 2025 15:05:33 -0700 Subject: [PATCH 027/116] Moved files around --- cli/azd/extensions/azd.ai.start/internal/agent/agent.go | 8 ++++---- .../azd.ai.start/internal/{ => agent}/logging/logger.go | 0 .../{ => agent}/tools/azd/azd_architecture_planning.go | 2 +- .../{ => agent}/tools/azd/azd_azure_yaml_generation.go | 2 +- .../{ => agent}/tools/azd/azd_discovery_analysis.go | 2 +- .../{ => agent}/tools/azd/azd_docker_generation.go | 2 +- .../{ => agent}/tools/azd/azd_iac_generation_rules.go | 2 +- .../tools/azd/azd_infrastructure_generation.go | 2 +- .../internal/{ => agent}/tools/azd/azd_plan_init.go | 2 +- .../{ => agent}/tools/azd/azd_project_validation.go | 0 .../internal/{ => agent}/tools/azd/azd_yaml_schema.go | 2 +- .../azd.ai.start/internal/{ => agent}/tools/azd/loader.go | 0 .../internal/{ => agent}/tools/azd/prompts/README.md | 0 .../tools/azd/prompts/azd_architecture_planning.md | 0 .../tools/azd/prompts/azd_azure_yaml_generation.md | 0 .../tools/azd/prompts/azd_discovery_analysis.md | 0 .../tools/azd/prompts/azd_docker_generation.md | 0 .../tools/azd/prompts/azd_iac_generation_rules.md | 0 .../tools/azd/prompts/azd_infrastructure_generation.md | 0 .../{ => agent}/tools/azd/prompts/azd_plan_init.md | 0 
.../tools/azd/prompts/azd_project_validation.md | 0 .../{ => agent}/tools/azd/prompts/azd_yaml_schema.md | 0 .../{ => agent}/tools/azd/prompts/azure.yaml.json | 0 .../internal/{ => agent}/tools/azd/prompts/prompts.go | 0 .../internal/{ => agent}/tools/common/types.go | 0 .../internal/{ => agent}/tools/dev/command_executor.go | 2 +- .../azd.ai.start/internal/{ => agent}/tools/dev/loader.go | 0 .../internal/{ => agent}/tools/http/http_fetcher.go | 0 .../internal/{ => agent}/tools/http/loader.go | 0 .../internal/{ => agent}/tools/io/change_directory.go | 0 .../internal/{ => agent}/tools/io/copy_file.go | 2 +- .../internal/{ => agent}/tools/io/create_directory.go | 0 .../internal/{ => agent}/tools/io/current_directory.go | 0 .../internal/{ => agent}/tools/io/delete_directory.go | 0 .../internal/{ => agent}/tools/io/delete_file.go | 0 .../internal/{ => agent}/tools/io/directory_list.go | 2 +- .../internal/{ => agent}/tools/io/file_info.go | 0 .../internal/{ => agent}/tools/io/file_search.go | 0 .../azd.ai.start/internal/{ => agent}/tools/io/loader.go | 0 .../internal/{ => agent}/tools/io/move_file.go | 0 .../internal/{ => agent}/tools/io/read_file.go | 0 .../internal/{ => agent}/tools/io/write_file.go | 2 +- .../azd.ai.start/internal/{ => agent}/tools/loader.go | 6 +++--- .../azd.ai.start/internal/{ => agent}/tools/mcp/loader.go | 0 .../azd.ai.start/internal/{ => agent}/tools/mcp/mcp.json | 0 .../internal/{ => agent}/tools/mcp/sampling_handler.go | 0 .../internal/{ => agent}/tools/weather/loader.go | 0 .../internal/{ => agent}/tools/weather/weather.go | 0 cli/azd/extensions/azd.ai.start/internal/cmd/root.go | 2 +- 49 files changed, 20 insertions(+), 20 deletions(-) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/logging/logger.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/azd_architecture_planning.go (94%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/azd_azure_yaml_generation.go (94%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/azd_discovery_analysis.go (94%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/azd_docker_generation.go (94%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/azd_iac_generation_rules.go (92%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/azd_infrastructure_generation.go (94%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/azd_plan_init.go (91%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/azd_project_validation.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/azd_yaml_schema.go (92%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/loader.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/README.md (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azd_architecture_planning.md (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azd_azure_yaml_generation.md (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azd_discovery_analysis.md (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azd_docker_generation.md (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azd_iac_generation_rules.md (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azd_infrastructure_generation.md (100%) 
rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azd_plan_init.md (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azd_project_validation.md (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azd_yaml_schema.md (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azure.yaml.json (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/prompts.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/common/types.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/dev/command_executor.go (99%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/dev/loader.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/http/http_fetcher.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/http/loader.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/change_directory.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/copy_file.go (99%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/create_directory.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/current_directory.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/delete_directory.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/delete_file.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/directory_list.go (99%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/file_info.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/file_search.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/loader.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/move_file.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/read_file.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/write_file.go (99%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/loader.go (89%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/mcp/loader.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/mcp/mcp.json (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/mcp/sampling_handler.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/weather/loader.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/weather/weather.go (100%) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go index fc1cd9e73cb..128de9af89a 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go @@ -13,10 +13,10 @@ import ( "github.com/tmc/langchaingo/memory" "github.com/tmc/langchaingo/tools" - "azd.ai.start/internal/logging" - localtools "azd.ai.start/internal/tools" - "azd.ai.start/internal/tools/mcp" - mcptools "azd.ai.start/internal/tools/mcp" + "azd.ai.start/internal/agent/logging" + localtools "azd.ai.start/internal/agent/tools" + "azd.ai.start/internal/agent/tools/mcp" + mcptools "azd.ai.start/internal/agent/tools/mcp" ) //go:embed prompts/default_agent_prefix.txt diff --git a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go 
b/cli/azd/extensions/azd.ai.start/internal/agent/logging/logger.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/logging/logger.go rename to cli/azd/extensions/azd.ai.start/internal/agent/logging/logger.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_architecture_planning.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_architecture_planning.go similarity index 94% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_architecture_planning.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_architecture_planning.go index 44894270ea0..1e29c2050f4 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_architecture_planning.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_architecture_planning.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/tools/azd/prompts" + "azd.ai.start/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_azure_yaml_generation.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_azure_yaml_generation.go similarity index 94% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_azure_yaml_generation.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_azure_yaml_generation.go index ea2ae2e26f2..d5012b63f10 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_azure_yaml_generation.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_azure_yaml_generation.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/tools/azd/prompts" + "azd.ai.start/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_discovery_analysis.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_discovery_analysis.go similarity index 94% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_discovery_analysis.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_discovery_analysis.go index db865d67398..7b7c39a5077 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_discovery_analysis.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_discovery_analysis.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/tools/azd/prompts" + "azd.ai.start/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_docker_generation.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_docker_generation.go similarity index 94% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_docker_generation.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_docker_generation.go index 89ddea37bae..67a76b4d9fa 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_docker_generation.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_docker_generation.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/tools/azd/prompts" + "azd.ai.start/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_iac_generation_rules.go similarity index 92% rename from 
cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_iac_generation_rules.go index 47e2c5a738e..40cf27facfa 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_iac_generation_rules.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/tools/azd/prompts" + "azd.ai.start/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_infrastructure_generation.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_infrastructure_generation.go similarity index 94% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_infrastructure_generation.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_infrastructure_generation.go index b93c5960369..44876b94300 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_infrastructure_generation.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_infrastructure_generation.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/tools/azd/prompts" + "azd.ai.start/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_plan_init.go similarity index 91% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_plan_init.go index 005ebafb441..c45c5d21d12 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_plan_init.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/tools/azd/prompts" + "azd.ai.start/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_project_validation.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_project_validation.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_project_validation.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_project_validation.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_yaml_schema.go similarity index 92% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_yaml_schema.go index 850091db4ea..9ed0d3806cc 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_yaml_schema.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/tools/azd/prompts" + "azd.ai.start/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/loader.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/loader.go diff --git 
a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/README.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/README.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/README.md rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/README.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_architecture_planning.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_architecture_planning.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_architecture_planning.md rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_architecture_planning.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_azure_yaml_generation.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_azure_yaml_generation.md rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_discovery_analysis.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_discovery_analysis.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_discovery_analysis.md rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_discovery_analysis.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_docker_generation.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_docker_generation.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_docker_generation.md rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_docker_generation.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_infrastructure_generation.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_infrastructure_generation.md rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_plan_init.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_plan_init.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_project_validation.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_project_validation.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_project_validation.md rename to 
cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_project_validation.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_yaml_schema.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_yaml_schema.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_yaml_schema.md rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_yaml_schema.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azure.yaml.json b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azure.yaml.json similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azure.yaml.json rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azure.yaml.json diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/prompts.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/prompts.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/common/types.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/common/types.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/common/types.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/common/types.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/command_executor.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/command_executor.go index e1fc36a05f3..483b4281719 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/command_executor.go @@ -9,7 +9,7 @@ import ( "runtime" "strings" - "azd.ai.start/internal/tools/common" + "azd.ai.start/internal/agent/tools/common" "github.com/tmc/langchaingo/callbacks" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/dev/loader.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/loader.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/dev/loader.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/loader.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/http/http_fetcher.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/http/http_fetcher.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/http/http_fetcher.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/http/http_fetcher.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/http/loader.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/http/loader.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/http/loader.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/http/loader.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/change_directory.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/change_directory.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/change_directory.go rename to 
cli/azd/extensions/azd.ai.start/internal/agent/tools/io/change_directory.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/copy_file.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/copy_file.go index ba48734dd51..7abd91de036 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/copy_file.go @@ -8,7 +8,7 @@ import ( "os" "strings" - "azd.ai.start/internal/tools/common" + "azd.ai.start/internal/agent/tools/common" "github.com/tmc/langchaingo/callbacks" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/create_directory.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/create_directory.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/create_directory.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/create_directory.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/current_directory.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/current_directory.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/current_directory.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/current_directory.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_directory.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/delete_directory.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/delete_directory.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/delete_directory.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_file.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/delete_file.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/delete_file.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/delete_file.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/directory_list.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/directory_list.go index c0b4e09ee2e..40bd8f80fb1 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/directory_list.go @@ -8,7 +8,7 @@ import ( "path/filepath" "strings" - "azd.ai.start/internal/tools/common" + "azd.ai.start/internal/agent/tools/common" "github.com/tmc/langchaingo/callbacks" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/file_info.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/file_info.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/file_search.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/file_search.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/file_search.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/file_search.go diff --git 
a/cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/loader.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/loader.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/move_file.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/move_file.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/move_file.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/move_file.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/read_file.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/read_file.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/read_file.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/read_file.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/write_file.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/write_file.go index bc1ac20d9b8..e367be33e00 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/write_file.go @@ -9,7 +9,7 @@ import ( "strings" "time" - "azd.ai.start/internal/tools/common" + "azd.ai.start/internal/agent/tools/common" ) // WriteFileTool implements a comprehensive file writing tool that handles all scenarios diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/loader.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/loader.go similarity index 89% rename from cli/azd/extensions/azd.ai.start/internal/tools/loader.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/loader.go index 3a2dab5c83a..8aee4593a0c 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/loader.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/loader.go @@ -4,9 +4,9 @@ import ( "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" - "azd.ai.start/internal/tools/azd" - "azd.ai.start/internal/tools/dev" - "azd.ai.start/internal/tools/io" + "azd.ai.start/internal/agent/tools/azd" + "azd.ai.start/internal/agent/tools/dev" + "azd.ai.start/internal/agent/tools/io" ) // ToolLoader provides an interface for loading tools from different categories diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/loader.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/loader.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json b/cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/mcp.json similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/mcp.json diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/sampling_handler.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/sampling_handler.go diff --git 
a/cli/azd/extensions/azd.ai.start/internal/tools/weather/loader.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/weather/loader.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/weather/loader.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/weather/loader.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/weather/weather.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/weather/weather.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/weather/weather.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/weather/weather.go diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go index 4031d40b7f5..81cc5cac8cd 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go @@ -10,7 +10,7 @@ import ( "os" "azd.ai.start/internal/agent" - "azd.ai.start/internal/logging" + "azd.ai.start/internal/agent/logging" "github.com/azure/azure-dev/cli/azd/pkg/azdext" "github.com/spf13/cobra" "github.com/tmc/langchaingo/llms/openai" From 972dcb25750f58f5e0f7f0cbf5fd31a2181239c1 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Fri, 1 Aug 2025 17:05:16 -0700 Subject: [PATCH 028/116] WIP: Initial integration of agent mode for init --- cli/azd/cmd/container.go | 6 + cli/azd/cmd/init.go | 45 +++ .../extensions/azd.ai.start/AZURE_AI_SETUP.md | 98 ----- cli/azd/extensions/azd.ai.start/build.ps1 | 71 ---- cli/azd/extensions/azd.ai.start/build.sh | 66 --- cli/azd/extensions/azd.ai.start/changelog.md | 3 - .../extensions/azd.ai.start/extension.yaml | 9 - cli/azd/extensions/azd.ai.start/go.mod | 61 --- cli/azd/extensions/azd.ai.start/go.sum | 379 ------------------ .../internal/cmd/enhanced_integration.go | 71 ---- .../azd.ai.start/internal/cmd/root.go | 140 ------- cli/azd/extensions/azd.ai.start/main.go | 30 -- .../azd.ai.start => }/internal/agent/agent.go | 70 +++- .../internal/agent/logging/logger.go | 0 .../default_agent_format_instructions.txt | 0 .../agent/prompts/default_agent_prefix.txt | 0 .../agent/prompts/default_agent_suffix.txt | 0 .../tools/azd/azd_architecture_planning.go | 2 +- .../tools/azd/azd_azure_yaml_generation.go | 2 +- .../agent/tools/azd/azd_discovery_analysis.go | 2 +- .../agent/tools/azd/azd_docker_generation.go | 2 +- .../tools/azd/azd_iac_generation_rules.go | 2 +- .../azd/azd_infrastructure_generation.go | 2 +- .../internal/agent/tools/azd/azd_plan_init.go | 2 +- .../agent/tools/azd/azd_project_validation.go | 0 .../agent/tools/azd/azd_yaml_schema.go | 2 +- .../internal/agent/tools/azd/loader.go | 0 .../agent/tools/azd/prompts/README.md | 0 .../azd/prompts/azd_architecture_planning.md | 0 .../azd/prompts/azd_azure_yaml_generation.md | 0 .../azd/prompts/azd_discovery_analysis.md | 0 .../azd/prompts/azd_docker_generation.md | 0 .../azd/prompts/azd_iac_generation_rules.md | 0 .../prompts/azd_infrastructure_generation.md | 0 .../agent/tools/azd/prompts/azd_plan_init.md | 0 .../azd/prompts/azd_project_validation.md | 0 .../tools/azd/prompts/azd_yaml_schema.md | 0 .../agent/tools/azd/prompts/azure.yaml.json | 0 .../agent/tools/azd/prompts/prompts.go | 0 .../internal/agent/tools/common/types.go | 0 .../agent/tools/dev/command_executor.go | 2 +- .../internal/agent/tools/dev/loader.go | 0 .../internal/agent/tools/http/http_fetcher.go | 0 .../internal/agent/tools/http/loader.go | 0 .../agent/tools/io/change_directory.go | 0 
.../internal/agent/tools/io/copy_file.go | 2 +- .../agent/tools/io/create_directory.go | 0 .../agent/tools/io/current_directory.go | 0 .../agent/tools/io/delete_directory.go | 0 .../internal/agent/tools/io/delete_file.go | 0 .../internal/agent/tools/io/directory_list.go | 2 +- .../internal/agent/tools/io/file_info.go | 0 .../internal/agent/tools/io/file_search.go | 0 .../internal/agent/tools/io/loader.go | 0 .../internal/agent/tools/io/move_file.go | 0 .../internal/agent/tools/io/read_file.go | 0 .../internal/agent/tools/io/write_file.go | 2 +- .../internal/agent/tools/loader.go | 6 +- .../internal/agent/tools/mcp/loader.go | 0 .../internal/agent/tools/mcp/mcp.json | 0 .../agent/tools/mcp/sampling_handler.go | 0 .../internal/agent/tools/weather/loader.go | 0 .../internal/agent/tools/weather/weather.go | 0 cli/azd/pkg/llm/azure_openai.go | 91 ++--- cli/azd/pkg/llm/client.go | 13 - cli/azd/pkg/llm/manager.go | 169 +++----- cli/azd/pkg/llm/manager_test.go | 121 ------ cli/azd/pkg/llm/model_factory.go | 28 ++ cli/azd/pkg/llm/ollama.go | 61 ++- go.mod | 28 ++ go.sum | 231 +++++++++++ 71 files changed, 554 insertions(+), 1267 deletions(-) delete mode 100644 cli/azd/extensions/azd.ai.start/AZURE_AI_SETUP.md delete mode 100644 cli/azd/extensions/azd.ai.start/build.ps1 delete mode 100644 cli/azd/extensions/azd.ai.start/build.sh delete mode 100644 cli/azd/extensions/azd.ai.start/changelog.md delete mode 100644 cli/azd/extensions/azd.ai.start/extension.yaml delete mode 100644 cli/azd/extensions/azd.ai.start/go.mod delete mode 100644 cli/azd/extensions/azd.ai.start/go.sum delete mode 100644 cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go delete mode 100644 cli/azd/extensions/azd.ai.start/internal/cmd/root.go delete mode 100644 cli/azd/extensions/azd.ai.start/main.go rename cli/azd/{extensions/azd.ai.start => }/internal/agent/agent.go (64%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/logging/logger.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/prompts/default_agent_format_instructions.txt (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/prompts/default_agent_prefix.txt (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/prompts/default_agent_suffix.txt (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/azd_architecture_planning.go (92%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/azd_azure_yaml_generation.go (91%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/azd_discovery_analysis.go (91%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/azd_docker_generation.go (91%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/azd_iac_generation_rules.go (90%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/azd_infrastructure_generation.go (91%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/azd_plan_init.go (88%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/azd_project_validation.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/azd_yaml_schema.go (88%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/loader.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/README.md (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/azd_architecture_planning.md (100%) rename cli/azd/{extensions/azd.ai.start => 
}/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/azd_discovery_analysis.md (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/azd_docker_generation.md (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/azd_plan_init.md (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/azd_project_validation.md (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/azd_yaml_schema.md (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/azure.yaml.json (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/prompts.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/common/types.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/dev/command_executor.go (99%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/dev/loader.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/http/http_fetcher.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/http/loader.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/change_directory.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/copy_file.go (98%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/create_directory.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/current_directory.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/delete_directory.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/delete_file.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/directory_list.go (98%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/file_info.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/file_search.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/loader.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/move_file.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/read_file.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/write_file.go (99%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/loader.go (84%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/mcp/loader.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/mcp/mcp.json (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/mcp/sampling_handler.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/weather/loader.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/weather/weather.go (100%) delete mode 100644 cli/azd/pkg/llm/client.go create mode 100644 cli/azd/pkg/llm/model_factory.go diff --git a/cli/azd/cmd/container.go b/cli/azd/cmd/container.go index be53dcf16fb..34b1f7df3b8 100644 --- a/cli/azd/cmd/container.go +++ b/cli/azd/cmd/container.go @@ -546,7 +546,13 @@ func registerCommonDependencies(container 
*ioc.NestedContainer) { return serviceManager, err }) }) + + // AI & LLM components container.MustRegisterSingleton(llm.NewManager) + container.MustRegisterSingleton(llm.NewModelFactory) + container.MustRegisterNamedSingleton("ollama", llm.NewOllamaModelProvider) + container.MustRegisterNamedSingleton("azure", llm.NewAzureOpenAiModelProvider) + container.MustRegisterSingleton(repository.NewInitializer) container.MustRegisterSingleton(alpha.NewFeaturesManager) container.MustRegisterSingleton(config.NewUserConfigManager) diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go index 9bd5832209f..3395047e7c1 100644 --- a/cli/azd/cmd/init.go +++ b/cli/azd/cmd/init.go @@ -14,6 +14,8 @@ import ( "github.com/MakeNowJust/heredoc/v2" "github.com/azure/azure-dev/cli/azd/cmd/actions" "github.com/azure/azure-dev/cli/azd/internal" + "github.com/azure/azure-dev/cli/azd/internal/agent" + "github.com/azure/azure-dev/cli/azd/internal/agent/logging" "github.com/azure/azure-dev/cli/azd/internal/repository" "github.com/azure/azure-dev/cli/azd/internal/tracing" "github.com/azure/azure-dev/cli/azd/internal/tracing/fields" @@ -24,6 +26,7 @@ import ( "github.com/azure/azure-dev/cli/azd/pkg/extensions" "github.com/azure/azure-dev/cli/azd/pkg/input" "github.com/azure/azure-dev/cli/azd/pkg/lazy" + "github.com/azure/azure-dev/cli/azd/pkg/llm" "github.com/azure/azure-dev/cli/azd/pkg/output" "github.com/azure/azure-dev/cli/azd/pkg/output/ux" "github.com/azure/azure-dev/cli/azd/pkg/project" @@ -131,6 +134,7 @@ type initAction struct { featuresManager *alpha.FeatureManager extensionsManager *extensions.Manager azd workflow.AzdCommandRunner + llmManager *llm.Manager } func newInitAction( @@ -145,6 +149,7 @@ func newInitAction( featuresManager *alpha.FeatureManager, extensionsManager *extensions.Manager, azd workflow.AzdCommandRunner, + llmManager *llm.Manager, ) actions.Action { return &initAction{ lazyAzdCtx: lazyAzdCtx, @@ -158,6 +163,7 @@ func newInitAction( featuresManager: featuresManager, extensionsManager: extensionsManager, azd: azd, + llmManager: llmManager, } } @@ -344,6 +350,10 @@ func (i *initAction) Run(ctx context.Context) (*actions.ActionResult, error) { header = fmt.Sprintf("Initialized environment %s.", env.Name()) followUp = "" + case initWithCopilot: + if err := i.initAppWithCopilot(ctx); err != nil { + return nil, err + } default: panic("unhandled init type") } @@ -360,6 +370,37 @@ func (i *initAction) Run(ctx context.Context) (*actions.ActionResult, error) { }, nil } +func (i *initAction) initAppWithCopilot(ctx context.Context) error { + defaultModelContainer, err := i.llmManager.GetDefaultModel() + if err != nil { + return err + } + + actionLogger := logging.NewActionLogger() + samplingModelContainer, err := i.llmManager.GetDefaultModel(llm.WithLogger(actionLogger)) + + azdAgent, err := agent.NewAzdAiAgent( + defaultModelContainer.Model, + agent.WithSamplingModel(samplingModelContainer.Model), + ) + if err != nil { + return err + } + + initPrompt := `Goal: Initialize or migrate the AZD project from the current working directory. 
+ +Read and review the 'azd-arch-plan.md' file if it exists to get current status +Run the 'azd_plan_init' tool and follow the steps +Finally - run the 'azd_project_validation' tool to ensure the process is fully completed + ` + + if err := azdAgent.RunConversationLoop(ctx, []string{initPrompt}); err != nil { + return err + } + + return nil +} + type initType int const ( @@ -367,6 +408,7 @@ const ( initFromApp initAppTemplate initEnvironment + initWithCopilot ) func promptInitType(console input.Console, ctx context.Context) (initType, error) { @@ -375,6 +417,7 @@ func promptInitType(console input.Console, ctx context.Context) (initType, error Options: []string{ "Scan current directory", // This now covers minimal project creation too "Select a template", + "AZD Copilot", }, }) if err != nil { @@ -386,6 +429,8 @@ func promptInitType(console input.Console, ctx context.Context) (initType, error return initFromApp, nil case 1: return initAppTemplate, nil + case 2: + return initWithCopilot, nil default: panic("unhandled selection") } diff --git a/cli/azd/extensions/azd.ai.start/AZURE_AI_SETUP.md b/cli/azd/extensions/azd.ai.start/AZURE_AI_SETUP.md deleted file mode 100644 index 9d985f2f36e..00000000000 --- a/cli/azd/extensions/azd.ai.start/AZURE_AI_SETUP.md +++ /dev/null @@ -1,98 +0,0 @@ -# Azure AI Integration Setup - -This AI agent can work with both OpenAI and Azure OpenAI Service. Here's how to configure each: - -## Option 1: Azure OpenAI Service (Recommended for Azure users) - -Azure OpenAI provides the same models as OpenAI but hosted on Azure infrastructure with enterprise security and compliance. - -### Prerequisites -1. Azure subscription -2. Azure OpenAI resource created in Azure portal -3. GPT model deployed (e.g., GPT-3.5-turbo or GPT-4) - -### Environment Variables -```bash -# Set these environment variables for Azure OpenAI -export AZURE_OPENAI_ENDPOINT="https://your-resource-name.openai.azure.com" -export AZURE_OPENAI_API_KEY="your-azure-openai-api-key" -export AZURE_OPENAI_DEPLOYMENT_NAME="your-gpt-deployment-name" -``` - -### PowerShell (Windows) -```powershell -$env:AZURE_OPENAI_ENDPOINT="https://your-resource-name.openai.azure.com" -$env:AZURE_OPENAI_API_KEY="your-azure-openai-api-key" -$env:AZURE_OPENAI_DEPLOYMENT_NAME="your-gpt-deployment-name" -``` - -## Option 2: OpenAI API (Direct) - -### Environment Variables -```bash -export OPENAI_API_KEY="your-openai-api-key" -``` - -### PowerShell (Windows) -```powershell -$env:OPENAI_API_KEY="your-openai-api-key" -``` - -## Usage Examples - -```bash -# Interactive mode -azd ai.chat - -# Direct query -azd ai.chat "How do I deploy a Node.js app to Azure Container Apps?" - -# Azure-specific queries -azd ai.chat "What's the best way to set up CI/CD with Azure DevOps for my web app?" -azd ai.chat "How do I configure Azure Key Vault for my application secrets?" -``` - -## Azure OpenAI Advantages - -- **Enterprise Security**: Your data stays within your Azure tenant -- **Compliance**: Meets enterprise compliance requirements -- **Integration**: Better integration with other Azure services -- **Cost Control**: Better cost management and billing integration -- **Regional Deployment**: Deploy closer to your users for lower latency - -## Setup Steps for Azure OpenAI - -1. **Create Azure OpenAI Resource**: - ```bash - az cognitiveservices account create \ - --name myopenai \ - --resource-group myresourcegroup \ - --location eastus \ - --kind OpenAI \ - --sku s0 - ``` - -2. 
**Deploy a Model**: - - Go to Azure OpenAI Studio - - Navigate to "Deployments" - - Create a new deployment with your chosen model (e.g., gpt-35-turbo) - - Note the deployment name for the environment variable - -3. **Get API Key**: - ```bash - az cognitiveservices account keys list \ - --name myopenai \ - --resource-group myresourcegroup - ``` - -4. **Set Environment Variables** as shown above - -## Model Compatibility - -The agent supports various GPT models available in Azure OpenAI: -- GPT-3.5-turbo -- GPT-4 -- GPT-4-turbo -- And newer models as they become available - -Just make sure your deployment name matches the model you want to use. diff --git a/cli/azd/extensions/azd.ai.start/build.ps1 b/cli/azd/extensions/azd.ai.start/build.ps1 deleted file mode 100644 index 8cdd4ae9281..00000000000 --- a/cli/azd/extensions/azd.ai.start/build.ps1 +++ /dev/null @@ -1,71 +0,0 @@ -# Get the directory of the script -$EXTENSION_DIR = Split-Path -Parent $MyInvocation.MyCommand.Path - -# Change to the script directory -Set-Location -Path $EXTENSION_DIR - -# Create a safe version of EXTENSION_ID replacing dots with dashes -$EXTENSION_ID_SAFE = $env:EXTENSION_ID -replace '\.', '-' - -# Define output directory -$OUTPUT_DIR = if ($env:OUTPUT_DIR) { $env:OUTPUT_DIR } else { Join-Path $EXTENSION_DIR "bin" } - -# Create output directory if it doesn't exist -if (-not (Test-Path -Path $OUTPUT_DIR)) { - New-Item -ItemType Directory -Path $OUTPUT_DIR | Out-Null -} - -# Get Git commit hash and build date -$COMMIT = git rev-parse HEAD -$BUILD_DATE = (Get-Date -Format "yyyy-MM-ddTHH:mm:ssZ") - -# List of OS and architecture combinations -if ($env:EXTENSION_PLATFORM) { - $PLATFORMS = @($env:EXTENSION_PLATFORM) -} -else { - $PLATFORMS = @( - "windows/amd64", - "windows/arm64", - "darwin/amd64", - "darwin/arm64", - "linux/amd64", - "linux/arm64" - ) -} - -$APP_PATH = "$env:EXTENSION_ID/internal/cmd" - -# Loop through platforms and build -foreach ($PLATFORM in $PLATFORMS) { - $OS, $ARCH = $PLATFORM -split '/' - - $OUTPUT_NAME = Join-Path $OUTPUT_DIR "$EXTENSION_ID_SAFE-$OS-$ARCH" - - if ($OS -eq "windows") { - $OUTPUT_NAME += ".exe" - } - - Write-Host "Building for $OS/$ARCH..." - - # Delete the output file if it already exists - if (Test-Path -Path $OUTPUT_NAME) { - Remove-Item -Path $OUTPUT_NAME -Force - } - - # Set environment variables for Go build - $env:GOOS = $OS - $env:GOARCH = $ARCH - - go build ` - -ldflags="-X '$APP_PATH.Version=$env:EXTENSION_VERSION' -X '$APP_PATH.Commit=$COMMIT' -X '$APP_PATH.BuildDate=$BUILD_DATE'" ` - -o $OUTPUT_NAME - - if ($LASTEXITCODE -ne 0) { - Write-Host "An error occurred while building for $OS/$ARCH" - exit 1 - } -} - -Write-Host "Build completed successfully!" -Write-Host "Binaries are located in the $OUTPUT_DIR directory." 
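The AZURE_AI_SETUP.md removed above documented the environment variables used to reach Azure OpenAI (AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY, AZURE_OPENAI_DEPLOYMENT_NAME). With that document deleted, the equivalent wiring moves into cli/azd/pkg/llm (azure_openai.go in this patch, not reproduced here). The following is a minimal sketch, assuming those variable names, of how a langchaingo client could be constructed against an Azure OpenAI deployment; the function name and error handling are illustrative only and are not the actual azure_openai.go implementation.

```go
// Hypothetical sketch only: the real provider lives in cli/azd/pkg/llm/azure_openai.go,
// which is not shown in this hunk. Environment variable names follow the deleted
// AZURE_AI_SETUP.md above.
package llm

import (
	"fmt"
	"os"

	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/llms/openai"
)

// newAzureOpenAiModelFromEnv builds a langchaingo model from the Azure OpenAI
// settings described in AZURE_AI_SETUP.md. All names here are illustrative.
func newAzureOpenAiModelFromEnv() (llms.Model, error) {
	endpoint := os.Getenv("AZURE_OPENAI_ENDPOINT")
	apiKey := os.Getenv("AZURE_OPENAI_API_KEY")
	deployment := os.Getenv("AZURE_OPENAI_DEPLOYMENT_NAME")

	if endpoint == "" || apiKey == "" || deployment == "" {
		return nil, fmt.Errorf(
			"AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY and AZURE_OPENAI_DEPLOYMENT_NAME must be set")
	}

	client, err := openai.New(
		openai.WithAPIType(openai.APITypeAzure),
		openai.WithBaseURL(endpoint),
		openai.WithToken(apiKey),
		// With the Azure API type, the deployment name is passed where a model
		// name would normally go.
		openai.WithModel(deployment),
	)
	if err != nil {
		return nil, fmt.Errorf("creating Azure OpenAI client: %w", err)
	}

	return client, nil
}
```

With langchaingo's Azure API type the deployment name stands in for the model name, which is why the deleted doc stressed matching the deployment to the model you intend to use.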
diff --git a/cli/azd/extensions/azd.ai.start/build.sh b/cli/azd/extensions/azd.ai.start/build.sh deleted file mode 100644 index f1a995ec5e9..00000000000 --- a/cli/azd/extensions/azd.ai.start/build.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/bin/bash - -# Get the directory of the script -EXTENSION_DIR="$(cd "$(dirname "$0")" && pwd)" - -# Change to the script directory -cd "$EXTENSION_DIR" || exit - -# Create a safe version of EXTENSION_ID replacing dots with dashes -EXTENSION_ID_SAFE="${EXTENSION_ID//./-}" - -# Define output directory -OUTPUT_DIR="${OUTPUT_DIR:-$EXTENSION_DIR/bin}" - -# Create output and target directories if they don't exist -mkdir -p "$OUTPUT_DIR" - -# Get Git commit hash and build date -COMMIT=$(git rev-parse HEAD) -BUILD_DATE=$(date -u +%Y-%m-%dT%H:%M:%SZ) - -# List of OS and architecture combinations -if [ -n "$EXTENSION_PLATFORM" ]; then - PLATFORMS=("$EXTENSION_PLATFORM") -else - PLATFORMS=( - "windows/amd64" - "windows/arm64" - "darwin/amd64" - "darwin/arm64" - "linux/amd64" - "linux/arm64" - ) -fi - -APP_PATH="$EXTENSION_ID/internal/cmd" - -# Loop through platforms and build -for PLATFORM in "${PLATFORMS[@]}"; do - OS=$(echo "$PLATFORM" | cut -d'/' -f1) - ARCH=$(echo "$PLATFORM" | cut -d'/' -f2) - - OUTPUT_NAME="$OUTPUT_DIR/$EXTENSION_ID_SAFE-$OS-$ARCH" - - if [ "$OS" = "windows" ]; then - OUTPUT_NAME+='.exe' - fi - - echo "Building for $OS/$ARCH..." - - # Delete the output file if it already exists - [ -f "$OUTPUT_NAME" ] && rm -f "$OUTPUT_NAME" - - # Set environment variables for Go build - GOOS=$OS GOARCH=$ARCH go build \ - -ldflags="-X '$APP_PATH.Version=$EXTENSION_VERSION' -X '$APP_PATH.Commit=$COMMIT' -X '$APP_PATH.BuildDate=$BUILD_DATE'" \ - -o "$OUTPUT_NAME" - - if [ $? -ne 0 ]; then - echo "An error occurred while building for $OS/$ARCH" - exit 1 - fi -done - -echo "Build completed successfully!" -echo "Binaries are located in the $OUTPUT_DIR directory." 
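Both deleted build scripts stamp version metadata into the extension binary with `-ldflags "-X ..."`, targeting Version, Commit, and BuildDate in the extension's internal/cmd package. A minimal sketch of the receiving side is shown below; the variable names come from the scripts, while the surrounding code is illustrative rather than the deleted package's actual contents.

```go
// Minimal sketch of the counterpart to the -ldflags "-X" stamping done by
// build.ps1/build.sh above. Variable names (Version, Commit, BuildDate) are taken
// from the scripts; everything else is illustrative.
package cmd

import "fmt"

// These must be package-level string variables for "go build -ldflags -X" to set them.
var (
	Version   = "dev"
	Commit    = "none"
	BuildDate = "unknown"
)

// versionString reports the values injected at build time.
func versionString() string {
	return fmt.Sprintf("azd.ai.start %s (commit %s, built %s)", Version, Commit, BuildDate)
}
```

The -X flag only overrides package-level string variables addressed by their full import path, which is the APP_PATH prefix ($EXTENSION_ID/internal/cmd) both scripts construct before invoking go build.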
diff --git a/cli/azd/extensions/azd.ai.start/changelog.md b/cli/azd/extensions/azd.ai.start/changelog.md deleted file mode 100644 index b88d613cce0..00000000000 --- a/cli/azd/extensions/azd.ai.start/changelog.md +++ /dev/null @@ -1,3 +0,0 @@ -# Release History - -## 0.0.1 - Initial Version \ No newline at end of file diff --git a/cli/azd/extensions/azd.ai.start/extension.yaml b/cli/azd/extensions/azd.ai.start/extension.yaml deleted file mode 100644 index 2c645db27b3..00000000000 --- a/cli/azd/extensions/azd.ai.start/extension.yaml +++ /dev/null @@ -1,9 +0,0 @@ -capabilities: - - custom-commands -description: Enables interactive AI agent through AZD -displayName: AZD AI Agent -id: azd.ai.start -language: go -namespace: ai.chat -usage: azd ai.chat [options] -version: 0.0.1 diff --git a/cli/azd/extensions/azd.ai.start/go.mod b/cli/azd/extensions/azd.ai.start/go.mod deleted file mode 100644 index 892e4868991..00000000000 --- a/cli/azd/extensions/azd.ai.start/go.mod +++ /dev/null @@ -1,61 +0,0 @@ -module azd.ai.start - -go 1.24.1 - -require ( - github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c - github.com/bmatcuk/doublestar/v4 v4.8.1 - github.com/fatih/color v1.18.0 - github.com/spf13/cobra v1.9.1 - github.com/tmc/langchaingo v0.1.13 -) - -require ( - github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.3.1 // indirect - github.com/Masterminds/sprig/v3 v3.2.3 // indirect - github.com/bahlo/generic-list-go v0.2.0 // indirect - github.com/buger/jsonparser v1.1.1 // indirect - github.com/dlclark/regexp2 v1.10.0 // indirect - github.com/dustin/go-humanize v1.0.1 // indirect - github.com/google/go-cmp v0.7.0 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/goph/emperror v0.17.2 // indirect - github.com/huandu/xstrings v1.3.3 // indirect - github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df // indirect - github.com/imdario/mergo v0.3.13 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/invopop/jsonschema v0.13.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/mailru/easyjson v0.7.7 // indirect - github.com/mark3labs/mcp-go v0.36.0 // indirect - github.com/mattn/go-colorable v0.1.14 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mitchellh/copystructure v1.0.0 // indirect - github.com/mitchellh/reflectwalk v1.0.0 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/nikolalohinski/gonja v1.5.3 // indirect - github.com/pelletier/go-toml/v2 v2.0.9 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/pkoukk/tiktoken-go v0.1.6 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect - github.com/shopspring/decimal v1.2.0 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/cast v1.7.1 // indirect - github.com/spf13/pflag v1.0.6 // indirect - github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect - github.com/yargevad/filepathx v1.0.0 // indirect - github.com/yosida95/uritemplate/v3 v3.0.2 // indirect - go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 // indirect - golang.org/x/crypto v0.37.0 // indirect - golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect - golang.org/x/net v0.39.0 // indirect - golang.org/x/sys v0.32.0 // indirect - golang.org/x/text v0.24.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250407143221-ac9807e6c755 // indirect - 
google.golang.org/grpc v1.71.1 // indirect - google.golang.org/protobuf v1.36.6 // indirect - gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/cli/azd/extensions/azd.ai.start/go.sum b/cli/azd/extensions/azd.ai.start/go.sum deleted file mode 100644 index ebd93aadd8e..00000000000 --- a/cli/azd/extensions/azd.ai.start/go.sum +++ /dev/null @@ -1,379 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.114.0 h1:OIPFAdfrFDFO2ve2U7r/H5SwSbBzEdrBdE7xkgwc+kY= -cloud.google.com/go v0.114.0/go.mod h1:ZV9La5YYxctro1HTPug5lXH/GefROyW8PPD4T8n9J8E= -cloud.google.com/go/ai v0.7.0 h1:P6+b5p4gXlza5E+u7uvcgYlzZ7103ACg70YdZeC6oGE= -cloud.google.com/go/ai v0.7.0/go.mod h1:7ozuEcraovh4ABsPbrec3o4LmFl9HigNI3D5haxYeQo= -cloud.google.com/go/aiplatform v1.68.0 h1:EPPqgHDJpBZKRvv+OsB3cr0jYz3EL2pZ+802rBPcG8U= -cloud.google.com/go/aiplatform v1.68.0/go.mod h1:105MFA3svHjC3Oazl7yjXAmIR89LKhRAeNdnDKJczME= -cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw= -cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s= -cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= -cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= -cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= -cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= -cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= -cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU= -cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng= -cloud.google.com/go/vertexai v0.12.0 h1:zTadEo/CtsoyRXNx3uGCncoWAP1H2HakGqwznt+iMo8= -cloud.google.com/go/vertexai v0.12.0/go.mod h1:8u+d0TsvBfAAd2x5R6GMgbYhsLgo3J7lmP4bR8g2ig8= -github.com/AssemblyAI/assemblyai-go-sdk v1.3.0 h1:AtOVgGxUycvK4P4ypP+1ZupecvFgnfH+Jsum0o5ILoU= -github.com/AssemblyAI/assemblyai-go-sdk v1.3.0/go.mod h1:H0naZbvpIW49cDA5ZZ/gggeXqi7ojSGB1mqshRk6kNE= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= -github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= -github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= -github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= -github.com/PuerkitoBio/goquery v1.8.1 h1:uQxhNlArOIdbrH1tr0UXwdVFgDcZDrZVdcpygAcwmWM= -github.com/PuerkitoBio/goquery v1.8.1/go.mod h1:Q8ICL1kNUJ2sXGoAhPGUdYDJvgQgHzJsnnd3H7Ho5jQ= -github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o= -github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss= -github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= -github.com/aymerick/douceur v0.2.0 
h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= -github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= -github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c h1:pi62a7GwfbxvZDXhV4DfhxeePzpVCoyr9/rZaWH5eow= -github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c/go.mod h1:mSTaPODklWyhruY0DZgPw1DI97K5cHXfU3afMqGf0IM= -github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= -github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= -github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= -github.com/bmatcuk/doublestar/v4 v4.8.1 h1:54Bopc5c2cAvhLRAzqOGCYHYyhcDHsFF4wWIR5wKP38= -github.com/bmatcuk/doublestar/v4 v4.8.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0= -github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= -github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 
-github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/getzep/zep-go v1.0.4 h1:09o26bPP2RAPKFjWuVWwUWLbtFDF/S8bfbilxzeZAAg= -github.com/getzep/zep-go v1.0.4/go.mod h1:HC1Gz7oiyrzOTvzeKC4dQKUiUy87zpIJl0ZFXXdHuss= -github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= -github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= -github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/generative-ai-go v0.15.1 h1:n8aQUpvhPOlGVuM2DRkJ2jvx04zpp42B778AROJa+pQ= -github.com/google/generative-ai-go v0.15.1/go.mod h1:AAucpWZjXsDKhQYWvCYuP6d0yB1kX998pJlOW1rAesw= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= -github.com/google/go-querystring v1.1.0/go.mod 
h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= -github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= -github.com/goph/emperror v0.17.2 h1:yLapQcmEsO0ipe9p5TaN22djm3OFV/TfM/fcYP0/J18= -github.com/goph/emperror v0.17.2/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic= -github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= -github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= -github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df h1:4lTJXCZw16BF0BCzrQ1LUzlMW4+2OwBkkYj1/bRybhY= -github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df/go.mod h1:oL2JAtsIp/1vnVy4UG4iDzL8SZwkOzqvRL3YR9PGPjs= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E= -github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= -github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= -github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80 h1:6Yzfa6GP0rIo/kULo2bwGEkFvCePZ3qHDDTC3/J9Swo= -github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mark3labs/mcp-go v0.32.0 h1:fgwmbfL2gbd67obg57OfV2Dnrhs1HtSdlY/i5fn7MU8= -github.com/mark3labs/mcp-go v0.32.0/go.mod h1:rXqOudj/djTORU/ThxYx8fqEVj/5pvTuuebQ2RC7uk4= -github.com/mark3labs/mcp-go v0.36.0 h1:rIZaijrRYPeSbJG8/qNDe0hWlGrCJ7FWHNMz2SQpTis= -github.com/mark3labs/mcp-go v0.36.0/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g= -github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= -github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= -github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58= -github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/nikolalohinski/gonja v1.5.3 h1:GsA+EEaZDZPGJ8JtpeGN78jidhOlxeJROpqMT9fTj9c= -github.com/nikolalohinski/gonja v1.5.3/go.mod h1:RmjwxNiXAEqcq1HeK5SSMmqFJvKOfTfXhkJv6YBtPa4= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0= -github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
-github.com/pkoukk/tiktoken-go v0.1.6 h1:JF0TlJzhTbrI30wCvFuiw6FzP2+/bR+FIxUdgEAcUsw= -github.com/pkoukk/tiktoken-go v0.1.6/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= -github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tmc/langchaingo v0.1.13 h1:rcpMWBIi2y3B90XxfE4Ao8dhCQPVDMaNPnN5cGB1CaA= -github.com/tmc/langchaingo v0.1.13/go.mod h1:vpQ5NOIhpzxDfTZK9B6tf2GM/MoaHewPWM5KXXGh7hg= -github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= -github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= -github.com/x-cray/logrus-prefixed-formatter v0.5.2 
h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= -github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= -github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc= -github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA= -github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= -github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181 h1:K+bMSIx9A7mLES1rtG+qKduLIXq40DAzYHtb0XuCukA= -gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181/go.mod h1:dzYhVIwWCtzPAa4QP98wfB9+mzt33MSmM8wsKiMi2ow= -gitlab.com/golang-commonmark/linkify v0.0.0-20191026162114-a0c2df6c8f82 h1:oYrL81N608MLZhma3ruL8qTM4xcpYECGut8KSxRY59g= -gitlab.com/golang-commonmark/linkify v0.0.0-20191026162114-a0c2df6c8f82/go.mod h1:Gn+LZmCrhPECMD3SOKlE+BOHwhOYD9j7WT9NUtkCrC8= -gitlab.com/golang-commonmark/markdown v0.0.0-20211110145824-bf3e522c626a h1:O85GKETcmnCNAfv4Aym9tepU8OE0NmcZNqPlXcsBKBs= -gitlab.com/golang-commonmark/markdown v0.0.0-20211110145824-bf3e522c626a/go.mod h1:LaSIs30YPGs1H5jwGgPhLzc8vkNc/k0rDX/fEZqiU/M= -gitlab.com/golang-commonmark/mdurl v0.0.0-20191124015652-932350d1cb84 h1:qqjvoVXdWIcZCLPMlzgA7P9FZWdPGPvP/l3ef8GzV6o= -gitlab.com/golang-commonmark/mdurl v0.0.0-20191124015652-932350d1cb84/go.mod h1:IJZ+fdMvbW2qW6htJx7sLJ04FEs4Ldl/MDsJtMKywfw= -gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f h1:Wku8eEdeJqIOFHtrfkYUByc4bCaTeA6fL0UJgfEiFMI= -gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f/go.mod h1:Tiuhl+njh/JIg0uS/sOJVYi0x2HEa5rc1OAaVsb5tAs= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 h1:A3SayB3rNyt+1S6qpI9mHPkeHTZbD7XILEqWnYZb2l0= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0/go.mod h1:27iA5uvhuRNmalO+iEUdVn5ZMj2qy10Mm+XRIpRmyuU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= -go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= -go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= -go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace 
v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= -go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 h1:Ss6D3hLXTM0KobyBYEAygXzFfGcjnmfEJOBgSbemCtg= -go.starlark.net v0.0.0-20230302034142-4b1e35fe2254/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= -golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= -golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= -golang.org/x/sync v0.13.0/go.mod 
h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= -golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE= -google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240528184218-531527333157 h1:u7WMYrIrVvs0TF5yaKwKNbcJyySYf+HAIFXxWltJOXE= -google.golang.org/genproto v0.0.0-20240528184218-531527333157/go.mod h1:ubQlAQnzejB8uZzszhrTCU2Fyp6Vi7ZE5nn0c3W8+qQ= -google.golang.org/genproto/googleapis/api v0.0.0-20250407143221-ac9807e6c755 h1:AMLTAunltONNuzWgVPZXrjLWtXpsG6A3yLLPEoJ/IjU= -google.golang.org/genproto/googleapis/api v0.0.0-20250407143221-ac9807e6c755/go.mod h1:2R6XrVC8Oc08GlNh8ujEpc7HkLiEZ16QeY7FxIs20ac= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250407143221-ac9807e6c755 h1:TwXJCGVREgQ/cl18iY0Z4wJCTL/GmW+Um2oSwZiZPnc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250407143221-ac9807e6c755/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI= -google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 
v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= -nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go deleted file mode 100644 index 3a27dc4643c..00000000000 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package cmd - -import ( - "bufio" - "context" - "fmt" - "os" - "strings" - - "github.com/fatih/color" - - "azd.ai.start/internal/agent" -) - -// RunEnhancedAgentLoop runs the enhanced AZD Copilot agent with full capabilities -func RunEnhancedAgentLoop(ctx context.Context, agent *agent.AzdAiAgent, args []string) error { - fmt.Println("🤖 AZD Copilot - Interactive Mode") - fmt.Println("═══════════════════════════════════════════════════════════") - - // Handle initial query if provided - var initialQuery string - if len(args) > 0 { - initialQuery = strings.Join(args, " ") - } - - scanner := bufio.NewScanner(os.Stdin) - - for { - var userInput string - - if initialQuery != "" { - userInput = initialQuery - initialQuery = "" // Clear after first use - color.Cyan("💬 You: %s\n", userInput) - } else { - fmt.Print(color.CyanString("\n💬 You: ")) - color.Set(color.FgCyan) // Set blue color for user input - if !scanner.Scan() { - color.Unset() // Reset color - break // EOF or error - } - userInput = strings.TrimSpace(scanner.Text()) - color.Unset() // Reset color after input - } - - // Check for exit commands - if userInput == "" { - continue - } - - if strings.ToLower(userInput) == "exit" || strings.ToLower(userInput) == "quit" { - fmt.Println("👋 Goodbye! Thanks for using AZD Copilot!") - break - } - - // Process the query with the enhanced agent - err := agent.ProcessQuery(ctx, userInput) - if err != nil { - continue - } - } - - if err := scanner.Err(); err != nil { - return fmt.Errorf("error reading input: %w", err) - } - - return nil -} diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go deleted file mode 100644 index 81cc5cac8cd..00000000000 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. 
-// Licensed under the MIT License. - -package cmd - -import ( - "context" - "encoding/json" - "fmt" - "os" - - "azd.ai.start/internal/agent" - "azd.ai.start/internal/agent/logging" - "github.com/azure/azure-dev/cli/azd/pkg/azdext" - "github.com/spf13/cobra" - "github.com/tmc/langchaingo/llms/openai" -) - -func NewRootCommand() *cobra.Command { - var debug bool - - rootCmd := &cobra.Command{ - Use: "azd ai.chat [options]", - Short: "Enables interactive AI agent through AZD", - SilenceUsage: true, - SilenceErrors: true, - CompletionOptions: cobra.CompletionOptions{ - DisableDefaultCmd: true, - }, - RunE: func(cmd *cobra.Command, args []string) error { - return runAIAgent(cmd.Context(), args, debug) - }, - } - - rootCmd.Flags().BoolVar(&debug, "debug", false, "Enable debug logging") - - return rootCmd -} - -type AiModelConfig struct { - Endpoint string `json:"endpoint"` - ApiKey string `json:"apiKey"` - DeploymentName string `json:"deploymentName"` -} - -// runAIAgent creates and runs the enhanced AI agent using LangChain Go -func runAIAgent(ctx context.Context, args []string, debug bool) error { - // Create a new context that includes the AZD access token - ctx = azdext.WithAccessToken(ctx) - - // Create a new AZD client - azdClient, err := azdext.NewAzdClient() - if err != nil { - return fmt.Errorf("failed to create azd client: %w", err) - } - - defer azdClient.Close() - - getSectionResponse, err := azdClient. - UserConfig(). - GetSection(ctx, &azdext.GetUserConfigSectionRequest{ - Path: "ai.chat.model", - }) - if err != nil { - return fmt.Errorf("AI model configuration not found, %w", err) - } - - var aiConfig *AiModelConfig - if err := json.Unmarshal(getSectionResponse.Section, &aiConfig); err != nil { - return fmt.Errorf("failed to unmarshal AI model configuration: %w", err) - } - - if debug { - defaultValue := true - - _, _ = azdClient.Prompt().Confirm(ctx, &azdext.ConfirmRequest{ - Options: &azdext.ConfirmOptions{ - Message: fmt.Sprintf("Ready? 
(PID: %d - You can attach a debugger now)", os.Getpid()), - DefaultValue: &defaultValue, - }, - }) - } - - // Common deployment names to try - azureAPIVersion := "2024-02-15-preview" - - var defaultModel *openai.LLM - var samplingModel *openai.LLM - - actionLogger := logging.NewActionLogger(logging.WithDebug(debug)) - - // Try different deployment names - if aiConfig.Endpoint != "" && aiConfig.ApiKey != "" { - // Use Azure OpenAI with proper configuration - fmt.Printf("🔵 Trying Azure OpenAI with deployment: %s\n", aiConfig.DeploymentName) - - defaultModel, err = openai.New( - openai.WithToken(aiConfig.ApiKey), - openai.WithBaseURL(aiConfig.Endpoint+"/"), - openai.WithAPIType(openai.APITypeAzure), - openai.WithAPIVersion(azureAPIVersion), - openai.WithModel(aiConfig.DeploymentName), - openai.WithCallback(actionLogger), - ) - - if err == nil { - fmt.Printf("✅ Successfully connected with deployment: %s\n", aiConfig.DeploymentName) - } else { - fmt.Printf("❌ Failed with deployment %s: %v\n", aiConfig.DeploymentName, err) - } - - samplingModel, err = openai.New( - openai.WithToken(aiConfig.ApiKey), - openai.WithBaseURL(aiConfig.Endpoint+"/"), - openai.WithAPIType(openai.APITypeAzure), - openai.WithAPIVersion(azureAPIVersion), - openai.WithModel(aiConfig.DeploymentName), - ) - - if err != nil { - return err - } - } - - // Create the enhanced agent - azdAgent, err := agent.NewAzdAiAgent(defaultModel, - agent.WithSamplingModel(samplingModel), - agent.WithDebug(debug), - ) - if err != nil { - return err - } - - if defaultModel == nil { - return fmt.Errorf("failed to connect to any Azure OpenAI deployment") - } - - // Use the enhanced AZD Copilot agent with full capabilities - return RunEnhancedAgentLoop(ctx, azdAgent, args) -} diff --git a/cli/azd/extensions/azd.ai.start/main.go b/cli/azd/extensions/azd.ai.start/main.go deleted file mode 100644 index 026e7c944e7..00000000000 --- a/cli/azd/extensions/azd.ai.start/main.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package main - -import ( - "context" - "os" - - "azd.ai.start/internal/cmd" - "github.com/fatih/color" -) - -func init() { - forceColorVal, has := os.LookupEnv("FORCE_COLOR") - if has && forceColorVal == "1" { - color.NoColor = false - } -} - -func main() { - // Execute the root command - ctx := context.Background() - rootCmd := cmd.NewRootCommand() - - if err := rootCmd.ExecuteContext(ctx); err != nil { - color.Red("Error: %v", err) - os.Exit(1) - } -} diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go b/cli/azd/internal/agent/agent.go similarity index 64% rename from cli/azd/extensions/azd.ai.start/internal/agent/agent.go rename to cli/azd/internal/agent/agent.go index 128de9af89a..4e318314718 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go +++ b/cli/azd/internal/agent/agent.go @@ -4,19 +4,24 @@ package agent import ( + "bufio" "context" _ "embed" + "fmt" + "os" + "strings" + "github.com/fatih/color" "github.com/tmc/langchaingo/agents" "github.com/tmc/langchaingo/chains" "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/memory" "github.com/tmc/langchaingo/tools" - "azd.ai.start/internal/agent/logging" - localtools "azd.ai.start/internal/agent/tools" - "azd.ai.start/internal/agent/tools/mcp" - mcptools "azd.ai.start/internal/agent/tools/mcp" + "github.com/azure/azure-dev/cli/azd/internal/agent/logging" + localtools "github.com/azure/azure-dev/cli/azd/internal/agent/tools" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" + mcptools "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" ) //go:embed prompts/default_agent_prefix.txt @@ -127,8 +132,63 @@ func NewAzdAiAgent(llm llms.Model, opts ...AgentOption) (*AzdAiAgent, error) { return azdAgent, nil } +// RunConversationLoop runs the enhanced AZD Copilot agent with full capabilities +func (aai *AzdAiAgent) RunConversationLoop(ctx context.Context, args []string) error { + fmt.Println("🤖 AZD Copilot - Interactive Mode") + fmt.Println("═══════════════════════════════════════════════════════════") + + // Handle initial query if provided + var initialQuery string + if len(args) > 0 { + initialQuery = strings.Join(args, " ") + } + + scanner := bufio.NewScanner(os.Stdin) + + for { + var userInput string + + if initialQuery != "" { + userInput = initialQuery + initialQuery = "" // Clear after first use + color.Cyan("💬 You: %s\n", userInput) + } else { + fmt.Print(color.CyanString("\n💬 You: ")) + color.Set(color.FgCyan) // Set blue color for user input + if !scanner.Scan() { + color.Unset() // Reset color + break // EOF or error + } + userInput = strings.TrimSpace(scanner.Text()) + color.Unset() // Reset color after input + } + + // Check for exit commands + if userInput == "" { + continue + } + + if strings.ToLower(userInput) == "exit" || strings.ToLower(userInput) == "quit" { + fmt.Println("👋 Goodbye! 
Thanks for using AZD Copilot!") + break + } + + // Process the query with the enhanced agent + err := aai.runChain(ctx, userInput) + if err != nil { + continue + } + } + + if err := scanner.Err(); err != nil { + return fmt.Errorf("error reading input: %w", err) + } + + return nil +} + // ProcessQuery processes a user query with full action tracking and validation -func (aai *AzdAiAgent) ProcessQuery(ctx context.Context, userInput string) error { +func (aai *AzdAiAgent) runChain(ctx context.Context, userInput string) error { // Execute with enhanced input - agent should automatically handle memory _, err := chains.Run(ctx, aai.executor, userInput, chains.WithMaxTokens(800), diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/logging/logger.go b/cli/azd/internal/agent/logging/logger.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/logging/logger.go rename to cli/azd/internal/agent/logging/logger.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt b/cli/azd/internal/agent/prompts/default_agent_format_instructions.txt similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt rename to cli/azd/internal/agent/prompts/default_agent_format_instructions.txt diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt b/cli/azd/internal/agent/prompts/default_agent_prefix.txt similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt rename to cli/azd/internal/agent/prompts/default_agent_prefix.txt diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_suffix.txt b/cli/azd/internal/agent/prompts/default_agent_suffix.txt similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_suffix.txt rename to cli/azd/internal/agent/prompts/default_agent_suffix.txt diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_architecture_planning.go b/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go similarity index 92% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_architecture_planning.go rename to cli/azd/internal/agent/tools/azd/azd_architecture_planning.go index 1e29c2050f4..d74111eaa53 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_architecture_planning.go +++ b/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/agent/tools/azd/prompts" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_azure_yaml_generation.go b/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go similarity index 91% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_azure_yaml_generation.go rename to cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go index d5012b63f10..9e5764563f3 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_azure_yaml_generation.go +++ b/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/agent/tools/azd/prompts" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git 
a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_discovery_analysis.go b/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go similarity index 91% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_discovery_analysis.go rename to cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go index 7b7c39a5077..1d1ae810d23 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_discovery_analysis.go +++ b/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/agent/tools/azd/prompts" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_docker_generation.go b/cli/azd/internal/agent/tools/azd/azd_docker_generation.go similarity index 91% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_docker_generation.go rename to cli/azd/internal/agent/tools/azd/azd_docker_generation.go index 67a76b4d9fa..18effd6c9e7 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_docker_generation.go +++ b/cli/azd/internal/agent/tools/azd/azd_docker_generation.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/agent/tools/azd/prompts" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_iac_generation_rules.go b/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go similarity index 90% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_iac_generation_rules.go rename to cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go index 40cf27facfa..38d797365c8 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_iac_generation_rules.go +++ b/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/agent/tools/azd/prompts" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_infrastructure_generation.go b/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go similarity index 91% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_infrastructure_generation.go rename to cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go index 44876b94300..0cc87372e87 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_infrastructure_generation.go +++ b/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/agent/tools/azd/prompts" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_plan_init.go b/cli/azd/internal/agent/tools/azd/azd_plan_init.go similarity index 88% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_plan_init.go rename to cli/azd/internal/agent/tools/azd/azd_plan_init.go index c45c5d21d12..3bddc9dbb31 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_plan_init.go +++ b/cli/azd/internal/agent/tools/azd/azd_plan_init.go @@ -3,7 +3,7 @@ package azd import ( "context" - 
"azd.ai.start/internal/agent/tools/azd/prompts" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_project_validation.go b/cli/azd/internal/agent/tools/azd/azd_project_validation.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_project_validation.go rename to cli/azd/internal/agent/tools/azd/azd_project_validation.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_yaml_schema.go b/cli/azd/internal/agent/tools/azd/azd_yaml_schema.go similarity index 88% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_yaml_schema.go rename to cli/azd/internal/agent/tools/azd/azd_yaml_schema.go index 9ed0d3806cc..d9577f92af8 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_yaml_schema.go +++ b/cli/azd/internal/agent/tools/azd/azd_yaml_schema.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/agent/tools/azd/prompts" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/loader.go b/cli/azd/internal/agent/tools/azd/loader.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/loader.go rename to cli/azd/internal/agent/tools/azd/loader.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/README.md b/cli/azd/internal/agent/tools/azd/prompts/README.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/README.md rename to cli/azd/internal/agent/tools/azd/prompts/README.md diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_architecture_planning.md b/cli/azd/internal/agent/tools/azd/prompts/azd_architecture_planning.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_architecture_planning.md rename to cli/azd/internal/agent/tools/azd/prompts/azd_architecture_planning.md diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md rename to cli/azd/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_discovery_analysis.md b/cli/azd/internal/agent/tools/azd/prompts/azd_discovery_analysis.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_discovery_analysis.md rename to cli/azd/internal/agent/tools/azd/prompts/azd_discovery_analysis.md diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_docker_generation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_docker_generation.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_docker_generation.md rename to cli/azd/internal/agent/tools/azd/prompts/azd_docker_generation.md diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md b/cli/azd/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md similarity index 100% rename from 
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md
rename to cli/azd/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md
rename to cli/azd/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_plan_init.md b/cli/azd/internal/agent/tools/azd/prompts/azd_plan_init.md
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_plan_init.md
rename to cli/azd/internal/agent/tools/azd/prompts/azd_plan_init.md
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_project_validation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_project_validation.md
rename to cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_yaml_schema.md b/cli/azd/internal/agent/tools/azd/prompts/azd_yaml_schema.md
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_yaml_schema.md
rename to cli/azd/internal/agent/tools/azd/prompts/azd_yaml_schema.md
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azure.yaml.json b/cli/azd/internal/agent/tools/azd/prompts/azure.yaml.json
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azure.yaml.json
rename to cli/azd/internal/agent/tools/azd/prompts/azure.yaml.json
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/prompts.go b/cli/azd/internal/agent/tools/azd/prompts/prompts.go
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/prompts.go
rename to cli/azd/internal/agent/tools/azd/prompts/prompts.go
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/common/types.go b/cli/azd/internal/agent/tools/common/types.go
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/common/types.go
rename to cli/azd/internal/agent/tools/common/types.go
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/command_executor.go b/cli/azd/internal/agent/tools/dev/command_executor.go
similarity index 99%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/command_executor.go
rename to cli/azd/internal/agent/tools/dev/command_executor.go
index 483b4281719..6f0fc33bdaa 100644
--- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/command_executor.go
+++ b/cli/azd/internal/agent/tools/dev/command_executor.go
@@ -9,7 +9,7 @@ import (
     "runtime"
     "strings"

-    "azd.ai.start/internal/agent/tools/common"
+    "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common"
     "github.com/tmc/langchaingo/callbacks"
 )
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/loader.go b/cli/azd/internal/agent/tools/dev/loader.go
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/loader.go
rename to cli/azd/internal/agent/tools/dev/loader.go
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/http/http_fetcher.go b/cli/azd/internal/agent/tools/http/http_fetcher.go
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/http/http_fetcher.go
rename to cli/azd/internal/agent/tools/http/http_fetcher.go
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/http/loader.go b/cli/azd/internal/agent/tools/http/loader.go
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/http/loader.go
rename to cli/azd/internal/agent/tools/http/loader.go
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/change_directory.go b/cli/azd/internal/agent/tools/io/change_directory.go
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/change_directory.go
rename to cli/azd/internal/agent/tools/io/change_directory.go
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/copy_file.go b/cli/azd/internal/agent/tools/io/copy_file.go
similarity index 98%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/copy_file.go
rename to cli/azd/internal/agent/tools/io/copy_file.go
index 7abd91de036..64f91d50b72 100644
--- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/copy_file.go
+++ b/cli/azd/internal/agent/tools/io/copy_file.go
@@ -8,7 +8,7 @@ import (
     "os"
     "strings"

-    "azd.ai.start/internal/agent/tools/common"
+    "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common"
     "github.com/tmc/langchaingo/callbacks"
 )
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/create_directory.go b/cli/azd/internal/agent/tools/io/create_directory.go
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/create_directory.go
rename to cli/azd/internal/agent/tools/io/create_directory.go
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/current_directory.go b/cli/azd/internal/agent/tools/io/current_directory.go
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/current_directory.go
rename to cli/azd/internal/agent/tools/io/current_directory.go
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/delete_directory.go b/cli/azd/internal/agent/tools/io/delete_directory.go
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/delete_directory.go
rename to cli/azd/internal/agent/tools/io/delete_directory.go
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/delete_file.go b/cli/azd/internal/agent/tools/io/delete_file.go
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/delete_file.go
rename to cli/azd/internal/agent/tools/io/delete_file.go
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/directory_list.go b/cli/azd/internal/agent/tools/io/directory_list.go
similarity index 98%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/directory_list.go
rename to cli/azd/internal/agent/tools/io/directory_list.go
index 40bd8f80fb1..581bd593da1 100644
--- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/directory_list.go
+++ b/cli/azd/internal/agent/tools/io/directory_list.go
@@ -8,7 +8,7 @@ import (
     "path/filepath"
     "strings"

-    "azd.ai.start/internal/agent/tools/common"
+    "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common"
     "github.com/tmc/langchaingo/callbacks"
 )
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/file_info.go b/cli/azd/internal/agent/tools/io/file_info.go
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/file_info.go
rename to cli/azd/internal/agent/tools/io/file_info.go
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/file_search.go b/cli/azd/internal/agent/tools/io/file_search.go
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/file_search.go
rename to cli/azd/internal/agent/tools/io/file_search.go
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/loader.go b/cli/azd/internal/agent/tools/io/loader.go
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/loader.go
rename to cli/azd/internal/agent/tools/io/loader.go
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/move_file.go b/cli/azd/internal/agent/tools/io/move_file.go
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/move_file.go
rename to cli/azd/internal/agent/tools/io/move_file.go
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/read_file.go b/cli/azd/internal/agent/tools/io/read_file.go
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/read_file.go
rename to cli/azd/internal/agent/tools/io/read_file.go
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/write_file.go b/cli/azd/internal/agent/tools/io/write_file.go
similarity index 99%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/write_file.go
rename to cli/azd/internal/agent/tools/io/write_file.go
index e367be33e00..8c20367af38 100644
--- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/write_file.go
+++ b/cli/azd/internal/agent/tools/io/write_file.go
@@ -9,7 +9,7 @@ import (
     "strings"
     "time"

-    "azd.ai.start/internal/agent/tools/common"
+    "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common"
 )

 // WriteFileTool implements a comprehensive file writing tool that handles all scenarios
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/loader.go b/cli/azd/internal/agent/tools/loader.go
similarity index 84%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/loader.go
rename to cli/azd/internal/agent/tools/loader.go
index 8aee4593a0c..e4a10ad1f53 100644
--- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/loader.go
+++ b/cli/azd/internal/agent/tools/loader.go
@@ -4,9 +4,9 @@ import (
     "github.com/tmc/langchaingo/callbacks"
     "github.com/tmc/langchaingo/tools"

-    "azd.ai.start/internal/agent/tools/azd"
-    "azd.ai.start/internal/agent/tools/dev"
-    "azd.ai.start/internal/agent/tools/io"
+    "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd"
+    "github.com/azure/azure-dev/cli/azd/internal/agent/tools/dev"
+    "github.com/azure/azure-dev/cli/azd/internal/agent/tools/io"
 )

 // ToolLoader provides an interface for loading tools from different categories
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/loader.go b/cli/azd/internal/agent/tools/mcp/loader.go
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/loader.go
rename to cli/azd/internal/agent/tools/mcp/loader.go
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/mcp.json b/cli/azd/internal/agent/tools/mcp/mcp.json
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/mcp.json
rename to cli/azd/internal/agent/tools/mcp/mcp.json
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/sampling_handler.go b/cli/azd/internal/agent/tools/mcp/sampling_handler.go
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/sampling_handler.go
rename to cli/azd/internal/agent/tools/mcp/sampling_handler.go
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/weather/loader.go b/cli/azd/internal/agent/tools/weather/loader.go
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/weather/loader.go
rename to cli/azd/internal/agent/tools/weather/loader.go
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/weather/weather.go b/cli/azd/internal/agent/tools/weather/weather.go
similarity index 100%
rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/weather/weather.go
rename to cli/azd/internal/agent/tools/weather/weather.go
diff --git a/cli/azd/pkg/llm/azure_openai.go b/cli/azd/pkg/llm/azure_openai.go
index 99b6a6e243b..ea02134c848 100644
--- a/cli/azd/pkg/llm/azure_openai.go
+++ b/cli/azd/pkg/llm/azure_openai.go
@@ -5,70 +5,67 @@ package llm
 import (
     "fmt"
-    "maps"
-    "os"

-    "github.com/azure/azure-dev/cli/azd/pkg/output/ux"
+    "github.com/azure/azure-dev/cli/azd/pkg/config"
     "github.com/tmc/langchaingo/llms/openai"
 )

-const (
-    modelEnvVar   = "AZD_AZURE_OPENAI_MODEL"
-    versionEnvVar = "AZD_AZURE_OPENAI_VERSION"
-    urlEnvVar     = "AZD_AZURE_OPENAI_URL"
-    keyEnvVar     = "OPENAI_API_KEY"
-)
+type AzureOpenAiModelConfig struct {
+    Model      string `json:"model"`
+    Version    string `json:"version"`
+    Endpoint   string `json:"endpoint"`
+    Token      string `json:"token"`
+    ApiVersion string `json:"apiVersion"`
+}

-type requiredEnvVar struct {
-    name      string
-    value     string
-    isDefined bool
+type AzureOpenAiModelProvider struct {
+    userConfigManager config.UserConfigManager
 }

-var requiredEnvVars = map[string]requiredEnvVar{
-    modelEnvVar:   {name: modelEnvVar},
-    versionEnvVar: {name: versionEnvVar},
-    urlEnvVar:     {name: urlEnvVar},
-    keyEnvVar:     {name: keyEnvVar},
+func NewAzureOpenAiModelProvider(userConfigManager config.UserConfigManager) ModelProvider {
+    return &AzureOpenAiModelProvider{
+        userConfigManager: userConfigManager,
+    }
 }

-func loadAzureOpenAi() (InfoResponse, error) {
+func (p *AzureOpenAiModelProvider) CreateModelContainer(opts ...ModelOption) (*ModelContainer, error) {
+    userConfig, err := p.userConfigManager.Load()
+    if err != nil {
+        return nil, err
+    }

-    envVars := maps.Clone(requiredEnvVars)
-    missingEnvVars := []string{}
-    for name, envVar := range envVars {
-        value, isDefined := os.LookupEnv(envVar.name)
-        if !isDefined {
-            missingEnvVars = append(missingEnvVars, envVar.name)
-            continue
-        }
+    var modelConfig AzureOpenAiModelConfig
+    if ok, err := userConfig.GetSection("ai.agent.model.azure", &modelConfig); !ok || err != nil {
+        return nil, err
+    }

-        envVar.value = value
-        envVar.isDefined = true
-        envVars[name] = envVar
+    modelContainer := &ModelContainer{
+        Type:    LlmTypeOpenAIAzure,
+        IsLocal: false,
+        Metadata: ModelMetadata{
+            Name:    modelConfig.Model,
+            Version: modelConfig.Version,
+        },
+        Url: modelConfig.Endpoint,
     }
-    if len(missingEnvVars) > 0 {
-        return InfoResponse{}, fmt.Errorf(
-            "missing required environment variable(s): %s", ux.ListAsText(missingEnvVars))
+
+    for _, opt := range opts {
+        opt(modelContainer)
     }

-    _, err := openai.New(
-        openai.WithModel(envVars[modelEnvVar].value),
+    model, err := openai.New(
+        openai.WithModel(modelConfig.Model),
         openai.WithAPIType(openai.APITypeAzure),
-        openai.WithAPIVersion(envVars[versionEnvVar].value),
-        openai.WithBaseURL(envVars[urlEnvVar].value),
+        openai.WithAPIVersion(modelConfig.ApiVersion),
+        openai.WithBaseURL(modelConfig.Endpoint),
+        openai.WithToken(modelConfig.Token),
     )
     if err != nil {
-        return InfoResponse{}, fmt.Errorf("failed to create LLM: %w", err)
+        return nil, fmt.Errorf("failed to create LLM: %w", err)
     }

-    return InfoResponse{
-        Type:    LlmTypeOpenAIAzure,
-        IsLocal: false,
-        Model: LlmModel{
-            Name:    envVars[modelEnvVar].value,
-            Version: envVars[versionEnvVar].value,
-        },
-        Url: envVars[urlEnvVar].value,
-    }, nil
+    model.CallbacksHandler = modelContainer.logger
+    modelContainer.Model = model
+
+    return modelContainer, nil
 }
diff --git a/cli/azd/pkg/llm/client.go b/cli/azd/pkg/llm/client.go
deleted file mode 100644
index b11f392f1c7..00000000000
--- a/cli/azd/pkg/llm/client.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package llm
-
-import (
-    "github.com/tmc/langchaingo/llms"
-)
-
-// Client is the AZD representation of a Language Model (LLM) client.
-type Client struct {
-    llms.Model
-}
diff --git a/cli/azd/pkg/llm/manager.go b/cli/azd/pkg/llm/manager.go
index c288aedc138..f8ec6c0ba49 100644
--- a/cli/azd/pkg/llm/manager.go
+++ b/cli/azd/pkg/llm/manager.go
@@ -5,30 +5,33 @@ package llm
 import (
     "fmt"
-    "io"
-    "log"
-    "os"
-    "strings"

     "github.com/azure/azure-dev/cli/azd/pkg/alpha"
-    "github.com/tmc/langchaingo/llms/ollama"
-    "github.com/tmc/langchaingo/llms/openai"
+    "github.com/azure/azure-dev/cli/azd/pkg/config"
+    "github.com/tmc/langchaingo/callbacks"
+    "github.com/tmc/langchaingo/llms"
 )

 var featureLlm = alpha.MustFeatureKey("llm")

 func NewManager(
     alphaManager *alpha.FeatureManager,
-) Manager {
-    return Manager{
-        alphaManager: alphaManager,
+    userConfigManager config.UserConfigManager,
+    modelFactory *ModelFactory,
+) *Manager {
+    return &Manager{
+        alphaManager:      alphaManager,
+        userConfigManager: userConfigManager,
+        ModelFactory:      modelFactory,
     }
 }

 // Manager provides functionality to manage Language Model (LLM) features and capabilities.
 // It encapsulates the alpha feature manager to control access to experimental LLM features.
 type Manager struct {
-    alphaManager *alpha.FeatureManager
+    alphaManager      *alpha.FeatureManager
+    userConfigManager config.UserConfigManager
+    ModelFactory      *ModelFactory
 }

 type LlmType string
@@ -49,22 +52,32 @@ const (
     LlmTypeOllama LlmType = "ollama"
 )

-// LlmModel represents a language model with its name and version information.
+// ModelMetadata represents a language model with its name and version information.
 // Name specifies the identifier of the language model.
 // Version indicates the specific version or release of the model.
-type LlmModel struct {
+type ModelMetadata struct {
     Name    string
     Version string
 }

-// InfoResponse represents the configuration information of a Language Learning Model (LLM).
+// ModelContainer represents the configuration information of a Language Learning Model (LLM).
 // It contains details about the model type, deployment location, model specification,
 // and endpoint URL for remote models.
-type InfoResponse struct {
-    Type    LlmType
-    IsLocal bool
-    Model   LlmModel
-    Url     string // For remote models, this is the API endpoint URL
+type ModelContainer struct {
+    Type     LlmType
+    IsLocal  bool
+    Metadata ModelMetadata
+    Model    llms.Model
+    Url      string // For remote models, this is the API endpoint URL
+    logger   callbacks.Handler
+}
+
+type ModelOption func(modelContainer *ModelContainer)
+
+func WithLogger(logger callbacks.Handler) ModelOption {
+    return func(modelContainer *ModelContainer) {
+        modelContainer.logger = logger
+    }
 }

 // NotEnabledError represents an error that occurs when LLM functionality is not enabled.
@@ -88,117 +101,27 @@ func (e InvalidLlmConfiguration) Error() string {
     return "Unable to determine LLM configuration. Please check your environment variables or configuration."
 }

-// Info obtains configuration information about the LLM (Large Language Model) feature.
-// If the LLM feature is not enabled through the alpha manager, it returns a NotEnabledError.
-// The function writes output to the provided stdout writer.
-// Returns an InfoResponse containing the LLM configuration and any error that occurred.
-func (m Manager) Info(stdout io.Writer) (InfoResponse, error) {
-    if !m.alphaManager.IsEnabled(featureLlm) {
-        return InfoResponse{}, NotEnabledError{}
+// GetDefaultModel returns the configured model from the global azd user configuration
+func (m Manager) GetDefaultModel(opts ...ModelOption) (*ModelContainer, error) {
+    userConfig, err := m.userConfigManager.Load()
+    if err != nil {
+        return nil, err
     }
-    return LlmConfig()
-}
-var availableLlmTypes = []LlmType{
-    LlmTypeOpenAIAzure,
-    LlmTypeOllama,
-}
-
-// LlmConfig attempts to load and validate LLM (Language Learning Model) configuration.
-// It first determines the default LLM type, which can be overridden by the AZD_LLM_TYPE
-// environment variable. It then tries to load configurations for available LLM types
-// in order, starting with the default type.
-//
-// The function supports two LLM types:
-// - LlmTypeOpenAIAzure (default)
-// - LlmTypeOllama
-//
-// Returns:
-// - InfoResponse: Contains the successfully loaded LLM configuration
-// - error: Returns an error if no valid LLM configuration could be loaded or if
-//   an unknown LLM type is specified in AZD_LLM_TYPE
-func LlmConfig() (InfoResponse, error) {
-    defaultLLm := LlmTypeOpenAIAzure
-    // Default LLM can be overridden by environment variable AZD_LLM_TYPE
-    if value, isDefined := os.LookupEnv("AZD_LLM_TYPE"); isDefined {
-        switch strings.ToLower(value) {
-        case string(LlmTypeOllama):
-            defaultLLm = LlmTypeOllama
-        case string(LlmTypeOpenAIAzure):
-            defaultLLm = LlmTypeOpenAIAzure
-        default:
-            return InfoResponse{}, fmt.Errorf("unknown LLM type: %s", value)
-        }
+    defaultModelType, ok := userConfig.GetString("ai.agent.model.type")
+    if !ok {
+        return nil, fmt.Errorf("Default model type has not been set")
     }
-    // keep default on the top and add the rest in the order they are defined
-    configOrder := []LlmType{defaultLLm}
-    for _, llmType := range availableLlmTypes {
-        if llmType != defaultLLm {
-            configOrder = append(configOrder, llmType)
-        }
-    }
-
-    for _, llmType := range configOrder {
-        log.Println("Checking LLM configuration for: ", llmType)
-        info, err := loadLlmConfig(llmType)
-        if err != nil {
-            log.Printf("Failed to load LLM configuration for %s: %v\n", llmType, err)
-            continue // Try the next LLM type
-        }
-        return info, nil
-    }
-
-    return InfoResponse{}, InvalidLlmConfiguration{}
+    return m.ModelFactory.CreateModelContainer(LlmType(defaultModelType), opts...)
 }

-// loadLlmConfig loads the configuration for the specified LLM type.
-// It returns an InfoResponse containing the LLM configuration details and any error encountered.
-//
-// Parameters:
-// - llmType: The type of LLM to load configuration for (LlmTypeOllama or LlmTypeOpenAIAzure)
-//
-// Returns:
-// - InfoResponse: Configuration details for the specified LLM
-// - error: InvalidLlmConfiguration error if an unsupported LLM type is provided
-func loadLlmConfig(llmType LlmType) (InfoResponse, error) {
-    switch llmType {
-    case LlmTypeOllama:
-        return loadOllama()
-    case LlmTypeOpenAIAzure:
-        return loadAzureOpenAi()
-    default:
-        return InfoResponse{}, InvalidLlmConfiguration{}
-    }
+// GetModel returns the configured model from the global azd user configuration
+func (m Manager) GetModel(modelType LlmType, opts ...ModelOption) (*ModelContainer, error) {
+    return m.ModelFactory.CreateModelContainer(modelType, opts...)
 }

-// LlmClient creates and returns a new LLM (Language Learning Model) client based on the provided InfoResponse.
-// It supports different types of LLM services including Ollama and Azure OpenAI.
-//
-// Parameters:
-// - info: InfoResponse containing the configuration details for the LLM service
-//
-// Returns:
-// - Client: A configured LLM client wrapper
-// - error: An error if the client creation fails or if the LLM type is unsupported
-func LlmClient(info InfoResponse) (Client, error) {
-    switch info.Type {
-    case LlmTypeOllama:
-        c, err := ollama.New(ollama.WithModel(info.Model.Name))
-        return Client{
-            Model: c,
-        }, err
-    case LlmTypeOpenAIAzure:
-        c, err := openai.New(
-            openai.WithModel(info.Model.Name),
-            openai.WithAPIType(openai.APITypeAzure),
-            openai.WithAPIVersion(info.Model.Version),
-            openai.WithBaseURL(info.Url),
-        )
-        return Client{
-            Model: c,
-        }, err
-    default:
-        return Client{}, fmt.Errorf("unsupported LLM type: %s", info.Type)
-    }
+var availableLlmTypes = []LlmType{
+    LlmTypeOpenAIAzure,
+    LlmTypeOllama,
 }
diff --git a/cli/azd/pkg/llm/manager_test.go b/cli/azd/pkg/llm/manager_test.go
index 46d0d6e3874..4c87ec516c9 100644
--- a/cli/azd/pkg/llm/manager_test.go
+++ b/cli/azd/pkg/llm/manager_test.go
@@ -5,128 +5,7 @@ package llm
 import (
     "testing"
-
-    "github.com/stretchr/testify/require"
 )

 func TestLlmConfig(t *testing.T) {
-    tests := []struct {
-        name         string
-        envVars      map[string]string
-        expectedType LlmType
-        expectErr    bool
-    }{
-        {
-            name:         "Default to local Ollama",
-            envVars:      map[string]string{},
-            expectedType: LlmTypeOllama,
-            expectErr:    false,
-        },
-        {
-            name: "Use Ollama when AZD_LLM_TYPE=ollama",
-            envVars: map[string]string{
-                "AZD_LLM_TYPE": "ollama",
-            },
-            expectedType: LlmTypeOllama,
-            expectErr:    false,
-        },
-        {
-            name: "Use Azure OpenAI when AZD_LLM_TYPE=azure",
-            envVars: map[string]string{
-                "AZD_LLM_TYPE": "azure",
-                keyEnvVar:      "test-key",
-                urlEnvVar:      "https://test.openai.azure.com/",
-                versionEnvVar:  "2023-05-15",
-                modelEnvVar:    "gpt-35-turbo",
-            },
-            expectedType: LlmTypeOpenAIAzure,
-            expectErr:    false,
-        },
-        {
-            name: "Error on invalid LLM type",
-            envVars: map[string]string{
-                "AZD_LLM_TYPE": "invalid",
-            },
-            expectErr: true,
-        },
-    }
-
-    for _, tt := range tests {
-        t.Run(tt.name, func(innerTest *testing.T) {
-
-            for key, value := range tt.envVars {
-                t.Setenv(key, value)
-            }
-
-            info, err := LlmConfig()
-            if tt.expectErr {
-                require.Error(innerTest, err)
-                return
-            }
-
-            require.NoError(innerTest, err)
-            require.Equal(innerTest, tt.expectedType, info.Type, "Expected LLM type does not match")
-        })
-    }
-}
-
-func TestLlmClient(t *testing.T) {
-    tests := []struct {
-        name      string
-        info      InfoResponse
-        expectErr bool
-        env       map[string]string
-    }{
-        {
-            name: "Create Ollama client",
-            info: InfoResponse{
-                Type: LlmTypeOllama,
-                Model: LlmModel{
-                    Name: "llama2",
-                },
-            },
-            expectErr: false,
-        },
-        {
-            name: "Create Azure OpenAI client",
-            info: InfoResponse{
-                Type: LlmTypeOpenAIAzure,
-                Model: LlmModel{
-                    Name:    "gpt-35-turbo",
-                    Version: "2023-05-15",
-                },
-                Url: "https://test.openai.azure.com/",
-            },
-            expectErr: false,
-            env: map[string]string{
-                keyEnvVar: "test-key",
-            },
-        },
-        {
-            name: "Error on invalid LLM type",
-            info: InfoResponse{
-                Type: "invalid",
-            },
-            expectErr: true,
-        },
-    }
-
-    for _, tt := range tests {
-        t.Run(tt.name, func(t *testing.T) {
-
-            for key, value := range tt.env {
-                t.Setenv(key, value)
-            }
-
-            client, err := LlmClient(tt.info)
-            if tt.expectErr {
-                require.Error(t, err)
-                require.Equal(t, Client{}, client, "Expected empty client on error")
-                require.Nil(t, client.Model, "Expected nil Model on error")
-                return
-            }
-            require.NoError(t, err)
-            require.NotNil(t, client)
-        })
-    }
 }
diff --git a/cli/azd/pkg/llm/model_factory.go b/cli/azd/pkg/llm/model_factory.go
new file mode 100644
index 00000000000..d228465bd52
--- /dev/null
+++ b/cli/azd/pkg/llm/model_factory.go
@@ -0,0 +1,28 @@
+package llm
+
+import (
+    "github.com/azure/azure-dev/cli/azd/pkg/ioc"
+)
+
+type ModelFactory struct {
+    serviceLocator ioc.ServiceLocator
+}
+
+func NewModelFactory(serviceLocator ioc.ServiceLocator) *ModelFactory {
+    return &ModelFactory{
+        serviceLocator: serviceLocator,
+    }
+}
+
+func (f *ModelFactory) CreateModelContainer(modelType LlmType, opts ...ModelOption) (*ModelContainer, error) {
+    var modelProvider ModelProvider
+    if err := f.serviceLocator.ResolveNamed(string(modelType), &modelProvider); err != nil {
+        return nil, err
+    }
+
+    return modelProvider.CreateModelContainer(opts...)
+}
+
+type ModelProvider interface {
+    CreateModelContainer(opts ...ModelOption) (*ModelContainer, error)
+}
diff --git a/cli/azd/pkg/llm/ollama.go b/cli/azd/pkg/llm/ollama.go
index 46f7187bd15..3c58cd970b8 100644
--- a/cli/azd/pkg/llm/ollama.go
+++ b/cli/azd/pkg/llm/ollama.go
@@ -4,33 +4,64 @@ package llm
 import (
-    "log"
-    "os"
-
+    "github.com/azure/azure-dev/cli/azd/pkg/config"
     "github.com/tmc/langchaingo/llms/ollama"
 )

-func loadOllama() (InfoResponse, error) {
-    defaultLlamaVersion := "llama3"
+type OllamaModelConfig struct {
+    Model string `json:"model"`
+}

-    if value, isDefined := os.LookupEnv("AZD_OLLAMA_MODEL"); isDefined {
-        log.Printf("Found AZD_OLLAMA_MODEL with %s. Using this model", value)
-        defaultLlamaVersion = value
+type OllamaModelProvider struct {
+    userConfigManager config.UserConfigManager
+}
+
+func NewOllamaModelProvider(userConfigManager config.UserConfigManager) ModelProvider {
+    return &OllamaModelProvider{
+        userConfigManager: userConfigManager,
     }
+}

-    _, err := ollama.New(
-        ollama.WithModel(defaultLlamaVersion),
-    )
+func (p *OllamaModelProvider) CreateModelContainer(opts ...ModelOption) (*ModelContainer, error) {
+    userConfig, err := p.userConfigManager.Load()
+    if err != nil {
+        return nil, err
+    }
+
+    defaultLlamaVersion := "llama3"
+
+    var modelConfig OllamaModelConfig
+    ok, err := userConfig.GetSection("ai.agent.model.ollama", &modelConfig)
     if err != nil {
-        return InfoResponse{}, err
+        return nil, err
     }

-    return InfoResponse{
+    if ok {
+        defaultLlamaVersion = modelConfig.Model
+    }
+
+    modelContainer := &ModelContainer{
         Type:    LlmTypeOllama,
         IsLocal: true,
-        Model: LlmModel{
+        Metadata: ModelMetadata{
             Name:    defaultLlamaVersion,
             Version: "latest",
         },
-    }, nil
+    }
+
+    for _, opt := range opts {
+        opt(modelContainer)
+    }
+
+    model, err := ollama.New(
+        ollama.WithModel(defaultLlamaVersion),
+    )
+    if err != nil {
+        return nil, err
+    }
+
+    model.CallbacksHandler = modelContainer.logger
+    modelContainer.Model = model
+
+    return modelContainer, nil
 }
diff --git a/go.mod b/go.mod
index 346a62d1230..ba009a74964 100644
--- a/go.mod
+++ b/go.mod
@@ -50,8 +50,10 @@ require (
     github.com/golobby/container/v3 v3.3.2
     github.com/google/uuid v1.6.0
     github.com/gorilla/websocket v1.5.3
+    github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df
    github.com/joho/godotenv v1.5.1
     github.com/magefile/mage v1.15.0
+    github.com/mark3labs/mcp-go v0.36.0
     github.com/mattn/go-colorable v0.1.14
     github.com/mattn/go-isatty v0.0.20
     github.com/microsoft/ApplicationInsights-Go v0.4.4
@@ -88,34 +90,60 @@ require (
     github.com/Azure/azure-pipeline-go v0.2.1 // indirect
     github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
     github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1 // indirect
+    github.com/Masterminds/goutils v1.1.1 // indirect
+    github.com/Masterminds/sprig/v3 v3.2.3 // indirect
+    github.com/bahlo/generic-list-go v0.2.0 // indirect
+    github.com/buger/jsonparser v1.1.1 // indirect
     github.com/cenkalti/backoff/v4 v4.3.0 // indirect
     github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964 // indirect
     github.com/davecgh/go-spew v1.1.1 // indirect
     github.com/dlclark/regexp2 v1.10.0 // indirect
+    github.com/dustin/go-humanize v1.0.1 // indirect
     github.com/go-logr/logr v1.4.2 // indirect
     github.com/go-logr/stdr v1.2.2 // indirect
+    github.com/goph/emperror v0.17.2 // indirect
     github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
+    github.com/huandu/xstrings v1.3.3 // indirect
+    github.com/imdario/mergo v0.3.13 // indirect
     github.com/inconshreveable/mousetrap v1.1.0 // indirect
+    github.com/invopop/jsonschema v0.13.0 // indirect
+    github.com/json-iterator/go v1.1.12 // indirect
     github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
     github.com/kylelemons/godebug v1.1.0 // indirect
+    github.com/mailru/easyjson v0.7.7 // indirect
     github.com/mattn/go-ieproxy v0.0.12 // indirect
     github.com/mattn/go-runewidth v0.0.16 // indirect
     github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
+    github.com/mitchellh/copystructure v1.0.0 // indirect
+    github.com/mitchellh/reflectwalk v1.0.0 // indirect
+    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+    github.com/modern-go/reflect2 v1.0.2 // indirect
+    github.com/nikolalohinski/gonja v1.5.3 // indirect
     github.com/otiai10/mint v1.6.3 // indirect
+    github.com/pelletier/go-toml/v2 v2.0.9 // indirect
     github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
+    github.com/pkg/errors v0.9.1 // indirect
     github.com/pkoukk/tiktoken-go v0.1.6 // indirect
     github.com/pmezard/go-difflib v1.0.0 // indirect
     github.com/rivo/uniseg v0.4.7 // indirect
     github.com/segmentio/asm v1.2.0 // indirect
     github.com/segmentio/encoding v0.4.1 // indirect
+    github.com/shopspring/decimal v1.2.0 // indirect
+    github.com/sirupsen/logrus v1.9.3 // indirect
+    github.com/spf13/cast v1.7.1 // indirect
     github.com/stretchr/objx v0.5.2 // indirect
     github.com/tidwall/match v1.1.1 // indirect
     github.com/tidwall/pretty v1.2.1 // indirect
+    github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
+    github.com/yargevad/filepathx v1.0.0 // indirect
+    github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
     go.opentelemetry.io/auto/sdk v1.1.0 // indirect
     go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect
     go.opentelemetry.io/otel/metric v1.35.0 // indirect
     go.opentelemetry.io/proto/otlp v1.5.0 // indirect
+    go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 // indirect
     golang.org/x/crypto v0.37.0 // indirect
+    golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect
     golang.org/x/net v0.39.0 // indirect
     golang.org/x/sync v0.13.0 // indirect
     golang.org/x/term v0.31.0 // indirect
diff --git a/go.sum b/go.sum
index 11a6a0287fb..81ab09de4a8 100644
--- a/go.sum
+++ b/go.sum
@@ -1,8 +1,29 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.114.0 h1:OIPFAdfrFDFO2ve2U7r/H5SwSbBzEdrBdE7xkgwc+kY=
+cloud.google.com/go v0.114.0/go.mod h1:ZV9La5YYxctro1HTPug5lXH/GefROyW8PPD4T8n9J8E=
+cloud.google.com/go/ai v0.7.0 h1:P6+b5p4gXlza5E+u7uvcgYlzZ7103ACg70YdZeC6oGE=
+cloud.google.com/go/ai v0.7.0/go.mod h1:7ozuEcraovh4ABsPbrec3o4LmFl9HigNI3D5haxYeQo=
+cloud.google.com/go/aiplatform v1.68.0
h1:EPPqgHDJpBZKRvv+OsB3cr0jYz3EL2pZ+802rBPcG8U= +cloud.google.com/go/aiplatform v1.68.0/go.mod h1:105MFA3svHjC3Oazl7yjXAmIR89LKhRAeNdnDKJczME= +cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw= +cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s= +cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= +cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= +cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU= +cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng= +cloud.google.com/go/vertexai v0.12.0 h1:zTadEo/CtsoyRXNx3uGCncoWAP1H2HakGqwznt+iMo8= +cloud.google.com/go/vertexai v0.12.0/go.mod h1:8u+d0TsvBfAAd2x5R6GMgbYhsLgo3J7lmP4bR8g2ig8= code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ= github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo= +github.com/AssemblyAI/assemblyai-go-sdk v1.3.0 h1:AtOVgGxUycvK4P4ypP+1ZupecvFgnfH+Jsum0o5ILoU= +github.com/AssemblyAI/assemblyai-go-sdk v1.3.0/go.mod h1:H0naZbvpIW49cDA5ZZ/gggeXqi7ojSGB1mqshRk6kNE= github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZOMdj5HYo= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ= @@ -71,32 +92,61 @@ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/MakeNowJust/heredoc/v2 v2.0.1 h1:rlCHh70XXXv7toz95ajQWOWQnN4WNLt0TdpZYIR/J6A= github.com/MakeNowJust/heredoc/v2 v2.0.1/go.mod h1:6/2Abh5s+hc3g9nbWLe9ObDIOhaRrqsyY9MWy+4JdRM= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= 
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= +github.com/PuerkitoBio/goquery v1.8.1 h1:uQxhNlArOIdbrH1tr0UXwdVFgDcZDrZVdcpygAcwmWM= +github.com/PuerkitoBio/goquery v1.8.1/go.mod h1:Q8ICL1kNUJ2sXGoAhPGUdYDJvgQgHzJsnnd3H7Ho5jQ= github.com/adam-lavrik/go-imath v0.0.0-20210910152346-265a42a96f0b h1:g9SuFmxM/WucQFKTMSP+irxyf5m0RiUJreBDhGI6jSA= github.com/adam-lavrik/go-imath v0.0.0-20210910152346-265a42a96f0b/go.mod h1:XjvqMUpGd3Xn9Jtzk/4GEBCSoBX0eB2RyriXgne0IdM= +github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o= +github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss= +github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= +github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= +github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= +github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= +github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmatcuk/doublestar/v4 v4.8.1 h1:54Bopc5c2cAvhLRAzqOGCYHYyhcDHsFF4wWIR5wKP38= github.com/bmatcuk/doublestar/v4 v4.8.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bradleyjkemp/cupaloy/v2 v2.8.0 h1:any4BmKE+jGIaMpnU8YgH/I2LPiLBufr6oMMlVBbn9M= github.com/bradleyjkemp/cupaloy/v2 v2.8.0/go.mod h1:bm7JXdkRd4BHJk9HpwqAI8BoAY1lps46Enkdqw6aRX0= github.com/braydonk/yaml v0.9.0 h1:ewGMrVmEVpsm3VwXQDR388sLg5+aQ8Yihp6/hc4m+h4= github.com/braydonk/yaml v0.9.0/go.mod h1:hcm3h581tudlirk8XEUPDBAimBPbmnL0Y45hCRl47N4= github.com/buger/goterm v1.0.4 h1:Z9YvGmOih81P0FbVtEYTFF6YsSgxSUKEhf/f9bTMXbY= github.com/buger/goterm v1.0.4/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE= +github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= 
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cli/browser v1.3.0 h1:LejqCrpWr+1pRqmEPDGnTZOjsMe7sehifLynZJuqJpo= github.com/cli/browser v1.3.0/go.mod h1:HH8s+fOAxjhQoBUAsKuPCbqUuxZDhQ2/aD+SzsEfBTk= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= @@ -113,35 +163,82 @@ github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/drone/envsubst v1.0.3 h1:PCIBwNDYjs50AsLZPYdfhSATKaRg/FJmDc2D6+C2x8g= github.com/drone/envsubst v1.0.3/go.mod h1:N2jZmlMufstn1KEqvbHjw40h1KyTmnVzHcSc9bFiJ2g= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eiannone/keyboard v0.0.0-20220611211555-0d226195f203 h1:XBBHcIb256gUJtLmY22n99HaZTz+r2Z51xUPi01m3wg= github.com/eiannone/keyboard v0.0.0-20220611211555-0d226195f203/go.mod h1:E1jcSv8FaEny+OP/5k9UxZVw9YFWGj7eI4KR/iOBqCg= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/getzep/zep-go v1.0.4 h1:09o26bPP2RAPKFjWuVWwUWLbtFDF/S8bfbilxzeZAAg= +github.com/getzep/zep-go v1.0.4/go.mod h1:HC1Gz7oiyrzOTvzeKC4dQKUiUy87zpIJl0ZFXXdHuss= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golobby/container/v3 v3.3.2 h1:7u+RgNnsdVlhGoS8gY4EXAG601vpMMzLZlYqSp77Quw= github.com/golobby/container/v3 v3.3.2/go.mod h1:RDdKpnKpV1Of11PFBe7Dxc2C1k2KaLE4FD47FflAmj0= +github.com/google/generative-ai-go v0.15.1 h1:n8aQUpvhPOlGVuM2DRkJ2jvx04zpp42B778AROJa+pQ= +github.com/google/generative-ai-go v0.15.1/go.mod h1:AAucpWZjXsDKhQYWvCYuP6d0yB1kX998pJlOW1rAesw= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= 
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= +github.com/goph/emperror v0.17.2 h1:yLapQcmEsO0ipe9p5TaN22djm3OFV/TfM/fcYP0/J18= +github.com/goph/emperror v0.17.2/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic= +github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= +github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= @@ -149,14 +246,30 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1ns github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df h1:4lTJXCZw16BF0BCzrQ1LUzlMW4+2OwBkkYj1/bRybhY= +github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df/go.mod h1:oL2JAtsIp/1vnVy4UG4iDzL8SZwkOzqvRL3YR9PGPjs= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E= +github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kardianos/osext 
v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= +github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= +github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -167,8 +280,14 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80 h1:6Yzfa6GP0rIo/kULo2bwGEkFvCePZ3qHDDTC3/J9Swo= +github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mark3labs/mcp-go v0.36.0 h1:rIZaijrRYPeSbJG8/qNDe0hWlGrCJ7FWHNMz2SQpTis= +github.com/mark3labs/mcp-go v0.36.0/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= @@ -183,16 +302,29 @@ github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58= +github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs= github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY= github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= github.com/microsoft/azure-devops-go-api/azuredevops/v7 v7.1.0 h1:mmJCWLe63QvybxhW1iBmQWEaCKdc4SKgALfTNZ+OphU= github.com/microsoft/azure-devops-go-api/azuredevops/v7 v7.1.0/go.mod h1:mDunUZ1IUJdJIRHvFb+LPBUtxe3AYB5MI6BMXNg8194= 
github.com/microsoft/go-deviceid v1.0.0 h1:i5AQ654Xk9kfvwJeKQm3w2+eT1+ImBDVEpAR0AjpP40= github.com/microsoft/go-deviceid v1.0.0/go.mod h1:KY13FeVdHkzD8gy+6T8+kVmD/7RMpTaWW75K+T4uZWg= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/nathan-fiscaletti/consolesize-go v0.0.0-20220204101620-317176b6684d h1:NqRhLdNVlozULwM1B3VaHhcXYSgrOAv8V5BE65om+1Q= github.com/nathan-fiscaletti/consolesize-go v0.0.0-20220204101620-317176b6684d/go.mod h1:cxIIfNMTwff8f/ZvRouvWYF6wOoO7nj99neWSx2q/Es= +github.com/nikolalohinski/gonja v1.5.3 h1:GsA+EEaZDZPGJ8JtpeGN78jidhOlxeJROpqMT9fTj9c= +github.com/nikolalohinski/gonja v1.5.3/go.mod h1:RmjwxNiXAEqcq1HeK5SSMmqFJvKOfTfXhkJv6YBtPa4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -200,14 +332,18 @@ github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= +github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0= +github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkoukk/tiktoken-go v0.1.6 h1:JF0TlJzhTbrI30wCvFuiw6FzP2+/bR+FIxUdgEAcUsw= github.com/pkoukk/tiktoken-go v0.1.6/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/psanford/memfs v0.0.0-20241019191636-4ef911798f9b 
h1:xzjEJAHum+mV5Dd5KyohRlCyP03o4yq6vNpEUtAJQzI= github.com/psanford/memfs v0.0.0-20241019191636-4ef911798f9b/go.mod h1:tcaRap0jS3eifrEEllL6ZMd9dg8IlDpi2S1oARrQ+NI= github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= @@ -217,6 +353,7 @@ github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= @@ -226,6 +363,14 @@ github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE= github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= @@ -236,8 +381,12 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= @@ -256,11 +405,35 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= 
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tmc/langchaingo v0.1.13 h1:rcpMWBIi2y3B90XxfE4Ao8dhCQPVDMaNPnN5cGB1CaA= github.com/tmc/langchaingo v0.1.13/go.mod h1:vpQ5NOIhpzxDfTZK9B6tf2GM/MoaHewPWM5KXXGh7hg= +github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= +github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= +github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= +github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc= +github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA= +github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= +github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181 h1:K+bMSIx9A7mLES1rtG+qKduLIXq40DAzYHtb0XuCukA= +gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181/go.mod h1:dzYhVIwWCtzPAa4QP98wfB9+mzt33MSmM8wsKiMi2ow= +gitlab.com/golang-commonmark/linkify v0.0.0-20191026162114-a0c2df6c8f82 h1:oYrL81N608MLZhma3ruL8qTM4xcpYECGut8KSxRY59g= +gitlab.com/golang-commonmark/linkify v0.0.0-20191026162114-a0c2df6c8f82/go.mod h1:Gn+LZmCrhPECMD3SOKlE+BOHwhOYD9j7WT9NUtkCrC8= +gitlab.com/golang-commonmark/markdown v0.0.0-20211110145824-bf3e522c626a h1:O85GKETcmnCNAfv4Aym9tepU8OE0NmcZNqPlXcsBKBs= +gitlab.com/golang-commonmark/markdown v0.0.0-20211110145824-bf3e522c626a/go.mod h1:LaSIs30YPGs1H5jwGgPhLzc8vkNc/k0rDX/fEZqiU/M= +gitlab.com/golang-commonmark/mdurl v0.0.0-20191124015652-932350d1cb84 h1:qqjvoVXdWIcZCLPMlzgA7P9FZWdPGPvP/l3ef8GzV6o= +gitlab.com/golang-commonmark/mdurl v0.0.0-20191124015652-932350d1cb84/go.mod h1:IJZ+fdMvbW2qW6htJx7sLJ04FEs4Ldl/MDsJtMKywfw= +gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f h1:Wku8eEdeJqIOFHtrfkYUByc4bCaTeA6fL0UJgfEiFMI= +gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f/go.mod h1:Tiuhl+njh/JIg0uS/sOJVYi0x2HEa5rc1OAaVsb5tAs= go.lsp.dev/jsonrpc2 v0.10.0 h1:Pr/YcXJoEOTMc/b6OTmcR1DPJ3mSWl/SWiU1Cct6VmI= go.lsp.dev/jsonrpc2 v0.10.0/go.mod h1:fmEzIdXPi/rf6d4uFcayi8HpFP1nBF99ERP1htC72Ac= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 h1:A3SayB3rNyt+1S6qpI9mHPkeHTZbD7XILEqWnYZb2l0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0/go.mod h1:27iA5uvhuRNmalO+iEUdVn5ZMj2qy10Mm+XRIpRmyuU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= 
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw= @@ -279,29 +452,50 @@ go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 h1:Ss6D3hLXTM0KobyBYEAygXzFfGcjnmfEJOBgSbemCtg= +go.starlark.net v0.0.0-20230302034142-4b1e35fe2254/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= +golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= 
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -310,14 +504,18 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -326,17 +524,44 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.24.0 
h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE= +google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20240528184218-531527333157 h1:u7WMYrIrVvs0TF5yaKwKNbcJyySYf+HAIFXxWltJOXE= +google.golang.org/genproto v0.0.0-20240528184218-531527333157/go.mod h1:ubQlAQnzejB8uZzszhrTCU2Fyp6Vi7ZE5nn0c3W8+qQ= google.golang.org/genproto/googleapis/api v0.0.0-20250407143221-ac9807e6c755 h1:AMLTAunltONNuzWgVPZXrjLWtXpsG6A3yLLPEoJ/IjU= google.golang.org/genproto/googleapis/api v0.0.0-20250407143221-ac9807e6c755/go.mod h1:2R6XrVC8Oc08GlNh8ujEpc7HkLiEZ16QeY7FxIs20ac= google.golang.org/genproto/googleapis/rpc v0.0.0-20250407143221-ac9807e6c755 h1:TwXJCGVREgQ/cl18iY0Z4wJCTL/GmW+Um2oSwZiZPnc= google.golang.org/genproto/googleapis/rpc v0.0.0-20250407143221-ac9807e6c755/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI= google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -350,10 +575,16 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= From bfd5d13865ddd7d094db82fdd594a813c703de30 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Fri, 1 Aug 2025 17:51:04 -0700 Subject: [PATCH 029/116] Updates io tools to remove callback handler --- cli/azd/cmd/init.go | 8 +- cli/azd/internal/agent/agent.go | 4 +- cli/azd/internal/agent/tools/azd/loader.go | 11 +- .../azd/prompts/azd_project_validation.md | 126 ++++--- cli/azd/internal/agent/tools/dev/loader.go | 13 +- .../agent/tools/io/change_directory.go | 88 +++-- cli/azd/internal/agent/tools/io/copy_file.go | 117 ++----- .../agent/tools/io/create_directory.go | 78 +++-- .../agent/tools/io/current_directory.go | 53 ++- .../agent/tools/io/delete_directory.go | 87 +++-- .../internal/agent/tools/io/delete_file.go | 82 +++-- .../internal/agent/tools/io/directory_list.go | 140 +++----- cli/azd/internal/agent/tools/io/file_info.go | 61 ++-- .../internal/agent/tools/io/file_search.go | 177 ++++------ cli/azd/internal/agent/tools/io/loader.go | 33 +- cli/azd/internal/agent/tools/io/move_file.go | 96 ++--- cli/azd/internal/agent/tools/io/read_file.go | 330 ++++++------------ cli/azd/internal/agent/tools/loader.go | 13 +- cli/azd/internal/agent/tools/mcp/loader.go | 5 +- .../internal/agent/tools/weather/loader.go | 13 +- 
cli/azd/pkg/llm/azure_openai.go | 6 +- 21 files changed, 686 insertions(+), 855 deletions(-) diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go index 3395047e7c1..f51d9d03da2 100644 --- a/cli/azd/cmd/init.go +++ b/cli/azd/cmd/init.go @@ -371,13 +371,13 @@ func (i *initAction) Run(ctx context.Context) (*actions.ActionResult, error) { } func (i *initAction) initAppWithCopilot(ctx context.Context) error { - defaultModelContainer, err := i.llmManager.GetDefaultModel() + actionLogger := logging.NewActionLogger() + defaultModelContainer, err := i.llmManager.GetDefaultModel(llm.WithLogger(actionLogger)) if err != nil { return err } - actionLogger := logging.NewActionLogger() - samplingModelContainer, err := i.llmManager.GetDefaultModel(llm.WithLogger(actionLogger)) + samplingModelContainer, err := i.llmManager.GetDefaultModel() azdAgent, err := agent.NewAzdAiAgent( defaultModelContainer.Model, @@ -392,6 +392,8 @@ func (i *initAction) initAppWithCopilot(ctx context.Context) error { Read and review the 'azd-arch-plan.md' file if it exists to get current status Run the 'azd_plan_init' tool and follow the steps Finally - run the 'azd_project_validation' tool to ensure the process is fully completed +Be very short, terse and to the point during planning and action execution. +Provide verbose output for the final summary when you are complete. ` if err := azdAgent.RunConversationLoop(ctx, []string{initPrompt}); err != nil { diff --git a/cli/azd/internal/agent/agent.go b/cli/azd/internal/agent/agent.go index 4e318314718..bcf30a609c6 100644 --- a/cli/azd/internal/agent/agent.go +++ b/cli/azd/internal/agent/agent.go @@ -83,8 +83,8 @@ func NewAzdAiAgent(llm llms.Model, opts ...AgentOption) (*AzdAiAgent, error) { ) toolLoaders := []localtools.ToolLoader{ - localtools.NewLocalToolsLoader(actionLogger), - mcptools.NewMcpToolsLoader(actionLogger, samplingHandler), + localtools.NewLocalToolsLoader(), + mcptools.NewMcpToolsLoader(samplingHandler), } allTools := []tools.Tool{} diff --git a/cli/azd/internal/agent/tools/azd/loader.go b/cli/azd/internal/agent/tools/azd/loader.go index b4ac9a4de31..6d81740a6ac 100644 --- a/cli/azd/internal/agent/tools/azd/loader.go +++ b/cli/azd/internal/agent/tools/azd/loader.go @@ -1,19 +1,14 @@ package azd import ( - "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" ) // AzdToolsLoader loads AZD-related tools -type AzdToolsLoader struct { - callbackHandler callbacks.Handler -} +type AzdToolsLoader struct{} -func NewAzdToolsLoader(callbackHandler callbacks.Handler) *AzdToolsLoader { - return &AzdToolsLoader{ - callbackHandler: callbackHandler, - } +func NewAzdToolsLoader() *AzdToolsLoader { + return &AzdToolsLoader{} } func (l *AzdToolsLoader) LoadTools() ([]tools.Tool, error) { diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md index e8e06172fa9..5197edcacbb 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md +++ b/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md @@ -1,35 +1,36 @@ # AZD Project Validation Tool -Validates an AZD project by running comprehensive checks on all components including azure.yaml schema validation, Bicep template validation, environment setup, packaging, and deployment preview. 
+This tool validates an AZD project by programmatically running comprehensive checks on all components including azure.yaml schema validation, Bicep template validation, environment setup, packaging, and deployment preview. ## Purpose -This tool performs end-to-end validation of an AZD project to ensure all components are properly configured and the project is ready for deployment. It centralizes all validation logic to provide a single source of truth for project readiness. +This tool performs automated end-to-end validation of an AZD project to ensure all components are properly configured and the project is ready for deployment. The LLM should execute all validation steps directly using available tools and terminal commands, not just provide instructions to the user. ## Validation Workflow +The LLM must execute these validation steps programmatically using terminal commands and available tools: + ### 1. Azure.yaml Schema Validation -**REQUIRED ACTION:** -Use the `azd_yaml_schema` tool to validate the azure.yaml file against the official schema. +**EXECUTE:** Use the `azd_yaml_schema` tool to validate the azure.yaml file against the official schema. -**Validation Steps:** +**Steps to Execute:** -- Check if `azure.yaml` exists in current directory -- Validate schema compliance using `azd_yaml_schema` tool -- Report any schema violations or missing required fields -- Verify service definitions and configurations +- Check if `azure.yaml` exists in current directory using file system tools +- Run `azd_yaml_schema` tool to validate schema compliance +- Parse and report any schema violations or missing required fields +- Verify service definitions and configurations are correct ### 2. Bicep Template Validation -**REQUIRED ACTIONS:** +**EXECUTE:** Run the following commands to validate Bicep templates: -1. **Find Bicep Files:** Scan `./infra` directory for `.bicep` files -2. **Compile Templates:** Run `az bicep build --file --stdout` for each template +1. **Find Bicep Files:** Use file search to scan `./infra` directory for `.bicep` files +2. **Compile Templates:** Execute `az bicep build --file --stdout` for each template 3. **Validate Syntax:** Ensure all templates compile without errors 4. **Check Dependencies:** Verify module references and parameter passing -**Commands to Run:** +**Commands to Execute:** ```powershell # Compile main template @@ -41,15 +42,15 @@ az deployment sub validate --template-file ./infra/main.bicep --parameters ./inf ### 3. AZD Environment Validation -**REQUIRED ACTIONS:** +**EXECUTE:** Run these commands to validate AZD environment setup: -1. **Check Environment Exists:** Run `azd env list` to see available environments +1. **Check Environment Exists:** Execute `azd env list` to see available environments 2. **Create Environment if Missing:** - - If no environments exist, run `azd env new ` + - If no environments exist, execute `azd env new ` - Use current directory name as environment name (sanitized) 3. **Verify Environment Selection:** Ensure an environment is currently selected -**Commands to Run:** +**Commands to Execute:** ```powershell # List existing environments @@ -64,17 +65,18 @@ azd env select ### 4. Package Validation -**REQUIRED ACTION:** -Run `azd package` to validate all services can be packaged successfully. +**EXECUTE:** Run `azd package` to validate all services can be packaged successfully. 
-**Validation Steps:** +**Steps to Execute:** +- Execute `azd package` command +- Monitor output for errors or warnings - Verify all service source paths are valid - Check Docker builds complete successfully (for containerized services) - Ensure all build artifacts are created - Validate package manifests -**Command to Run:** +**Command to Execute:** ```powershell azd package @@ -82,18 +84,19 @@ azd package ### 5. Deployment Preview Validation -**REQUIRED ACTION:** -Run `azd provision --preview` to validate infrastructure deployment without actually creating resources. +**EXECUTE:** Run `azd provision --preview` to validate infrastructure deployment without actually creating resources. -**Validation Steps:** +**Steps to Execute:** +- Execute `azd provision --preview` command +- Monitor output for errors or warnings - Verify Azure authentication is working - Check resource group creation plan - Validate all Bicep modules deploy correctly - Ensure parameter values are properly resolved - Confirm no deployment conflicts -**Command to Run:** +**Command to Execute:** ```powershell azd provision --preview @@ -101,81 +104,88 @@ azd provision --preview ## Success Criteria -The project validation is successful when: +The LLM must verify that project validation is successful when all of the following are true: -- [ ] `azure.yaml` passes schema validation -- [ ] All Bicep templates compile without errors or warnings -- [ ] AZD environment exists and is properly configured -- [ ] `azd package` completes without errors or warnings +- [ ] `azure.yaml` passes schema validation (executed via `azd_yaml_schema` tool) +- [ ] All Bicep templates compile without errors or warnings (verified via `az bicep build`) +- [ ] AZD environment exists and is properly configured (verified via `azd env list`) +- [ ] `azd package` completes without errors or warnings - [ ] `azd provision --preview` completes without errors or warnings - [ ] All service configurations are valid - [ ] No missing dependencies or configuration issues +The LLM should report the status of each validation step and provide a summary of the overall validation results. 
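As an illustrative aid only (not part of the original prompt file), the command sequence described above could be driven programmatically along these lines; the `./infra/main.bicep` path is taken from the earlier steps, while the helper function and ordering are assumptions:

```go
// Hedged sketch (not part of the prompt file): running the validation commands
// listed above in sequence and printing a pass/fail summary. The command names
// come from this document; the helper and ordering are assumptions.
package main

import (
	"fmt"
	"os/exec"
)

// run executes a single validation command and echoes its combined output.
func run(args []string) error {
	cmd := exec.Command(args[0], args[1:]...)
	out, err := cmd.CombinedOutput()
	fmt.Printf("== %v ==\n%s\n", args, out)
	return err
}

func main() {
	checks := [][]string{
		{"az", "bicep", "build", "--file", "./infra/main.bicep", "--stdout"},
		{"azd", "env", "list"},
		{"azd", "package"},
		{"azd", "provision", "--preview"},
	}

	passed := 0
	for _, check := range checks {
		if err := run(check); err != nil {
			fmt.Printf("FAILED: %v (%v)\n", check, err)
			continue
		}
		passed++
	}
	fmt.Printf("Validation summary: %d/%d checks passed\n", passed, len(checks))
}
```

The `azure.yaml` schema check is omitted from this sketch because `azd_yaml_schema` is an agent tool rather than a CLI command.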
+ ## Error Handling -### Common Issues and Solutions +The LLM must handle common validation errors by executing appropriate remediation steps: + +### Common Issues and Automated Solutions **Azure.yaml Schema Errors:** -- Use `azd_yaml_schema` tool to get correct schema format -- Check service names match directory structure -- Verify all required fields are present +- Execute `azd_yaml_schema` tool to get correct schema format +- Check service names match directory structure using file system tools +- Verify all required fields are present and report missing fields **Bicep Compilation Errors:** -- Check module paths and parameter names +- Parse compilation error output and identify specific issues +- Check module paths and parameter names programmatically - Verify resource naming conventions follow Azure requirements - Ensure all required parameters have values **Environment Issues:** -- Run `azd auth login` if authentication fails -- Check Azure subscription access and permissions +- Execute `azd auth login` if authentication fails +- Check Azure subscription access and permissions via Azure CLI - Verify location parameter is valid Azure region **Package Errors:** -- Check service source paths in azure.yaml -- Verify Docker builds work locally for containerized services +- Check service source paths in azure.yaml programmatically +- Verify Docker builds work locally for containerized services by executing build commands - Ensure all build dependencies are available **Provision Preview Errors:** -- Verify Azure subscription has sufficient permissions +- Verify Azure subscription has sufficient permissions via Azure CLI - Check resource quotas and limits - Ensure resource names are globally unique where required +The LLM should attempt to resolve issues automatically where possible and provide clear error reporting for issues that require manual intervention. + ## Update Documentation -**REQUIRED ACTIONS:** +**EXECUTE:** The LLM must update `azd-arch-plan.md` with validation results by: -Update `azd-arch-plan.md` with: +- Writing validation results for each component to the documentation +- Recording any issues found and resolutions applied +- Documenting environment configuration details +- Including deployment preview summary +- Updating project readiness status -- Validation results for each component -- Any issues found and resolutions applied -- Environment configuration details -- Deployment preview summary -- Project readiness status +Use file editing tools to update the documentation with the validation results. ## Next Steps -After successful validation: +The LLM should inform the user that after successful validation, they can proceed with: -1. **Deploy Infrastructure:** Run `azd provision` to create Azure resources -2. **Deploy Applications:** Run `azd deploy` to deploy services -3. **Complete Deployment:** Run `azd up` to provision and deploy in one step +1. **Deploy Infrastructure:** Execute `azd provision` to create Azure resources +2. **Deploy Applications:** Execute `azd deploy` to deploy services +3. **Complete Deployment:** Execute `azd up` to provision and deploy in one step 4. **Monitor Deployment:** Use `azd monitor` to check application health 5. 
**View Logs:** Use `azd logs` to view deployment and runtime logs ### Production Preparation -For production deployment: +For production deployment, the LLM should guide the user through: -- Create production environment: `azd env new -prod` -- Configure production-specific settings and secrets -- Set up monitoring, alerting, and backup procedures -- Document operational procedures and runbooks +- Creating production environment: `azd env new -prod` +- Configuring production-specific settings and secrets +- Setting up monitoring, alerting, and backup procedures +- Documenting operational procedures and runbooks -**DEPLOYMENT READY:** Your AZD migration is complete and ready for deployment with `azd up`. +**VALIDATION COMPLETE:** Once all validation steps pass, the LLM should confirm that the AZD migration is complete and ready for deployment with `azd up`. -**IMPORTANT:** This tool centralizes all validation logic. Other tools should reference this tool for validation rather than duplicating validation steps. +**IMPORTANT:** This tool centralizes all validation logic. The LLM should execute all validation steps programmatically rather than delegating to other tools or providing user instructions. diff --git a/cli/azd/internal/agent/tools/dev/loader.go b/cli/azd/internal/agent/tools/dev/loader.go index 655d52a42a8..3b938213ed0 100644 --- a/cli/azd/internal/agent/tools/dev/loader.go +++ b/cli/azd/internal/agent/tools/dev/loader.go @@ -1,23 +1,18 @@ package dev import ( - "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" ) // DevToolLoader loads development-related tools -type DevToolsLoader struct { - callbacksHandler callbacks.Handler -} +type DevToolsLoader struct{} -func NewDevToolsLoader(callbacksHandler callbacks.Handler) *DevToolsLoader { - return &DevToolsLoader{ - callbacksHandler: callbacksHandler, - } +func NewDevToolsLoader() *DevToolsLoader { + return &DevToolsLoader{} } func (l *DevToolsLoader) LoadTools() ([]tools.Tool, error) { return []tools.Tool{ - &CommandExecutorTool{CallbacksHandler: l.callbacksHandler}, + &CommandExecutorTool{}, }, nil } diff --git a/cli/azd/internal/agent/tools/io/change_directory.go b/cli/azd/internal/agent/tools/io/change_directory.go index 48094b919ff..889d07d8041 100644 --- a/cli/azd/internal/agent/tools/io/change_directory.go +++ b/cli/azd/internal/agent/tools/io/change_directory.go @@ -2,17 +2,17 @@ package io import ( "context" + "encoding/json" "fmt" "os" "path/filepath" + "strings" - "github.com/tmc/langchaingo/callbacks" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" ) // ChangeDirectoryTool implements the Tool interface for changing the current working directory -type ChangeDirectoryTool struct { - CallbacksHandler callbacks.Handler -} +type ChangeDirectoryTool struct{} func (t ChangeDirectoryTool) Name() string { return "change_directory" @@ -22,63 +22,75 @@ func (t ChangeDirectoryTool) Description() string { return "Change the current working directory. 
Input: directory path (e.g., '../parent' or './subfolder' or absolute path)" } -func (t ChangeDirectoryTool) Call(ctx context.Context, input string) (string, error) { - // Invoke callback for tool start - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("change_directory: %s", input)) +// createErrorResponse creates a JSON error response +func (t ChangeDirectoryTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() + } + + errorResp := common.ErrorResponse{ + Error: true, + Message: message, } + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + +func (t ChangeDirectoryTool) Call(ctx context.Context, input string) (string, error) { + input = strings.TrimSpace(input) + input = strings.Trim(input, `"`) + if input == "" { - err := fmt.Errorf("directory path is required") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("directory path is required"), "Directory path is required") } // Convert to absolute path absPath, err := filepath.Abs(input) if err != nil { - toolErr := fmt.Errorf("failed to resolve path %s: %w", input, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to resolve path %s: %s", input, err.Error())) } // Check if directory exists info, err := os.Stat(absPath) if err != nil { - toolErr := fmt.Errorf("directory %s does not exist: %w", absPath, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Directory %s does not exist: %s", absPath, err.Error())) } if !info.IsDir() { - toolErr := fmt.Errorf("%s is not a directory", absPath) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(fmt.Errorf("%s is not a directory", absPath), fmt.Sprintf("%s is not a directory", absPath)) } // Change directory err = os.Chdir(absPath) if err != nil { - toolErr := fmt.Errorf("failed to change directory to %s: %w", absPath, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to change directory to %s: %s", absPath, err.Error())) } - output := fmt.Sprintf("Changed directory to %s\n", absPath) + // Create success response + type ChangeDirectoryResponse struct { + Success bool `json:"success"` + OldPath string `json:"oldPath,omitempty"` + NewPath string `json:"newPath"` + Message string `json:"message"` + } - // Invoke callback for tool end - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + response := ChangeDirectoryResponse{ + Success: true, + NewPath: absPath, + Message: fmt.Sprintf("Successfully changed directory to %s", absPath), + } + + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - return output, nil + return string(jsonData), nil } 
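Purely as an illustration of the new response contract (not part of the patch): each io tool now returns a JSON document instead of plain text plus callback notifications, so a caller detects failures by unmarshalling the output and checking the `error` flag rather than relying on a returned Go error. A minimal, self-contained sketch of that consumption pattern, with the sample payload mirroring the `ChangeDirectoryResponse` shape above and the concrete path value assumed:

```go
// Hedged sketch: consuming the JSON payload returned by the reworked io tools.
// The sample string mirrors the ChangeDirectoryResponse shape defined in this
// patch; the concrete path value is an assumption used only for illustration.
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// toolResult covers the fields shared by the success and error payloads.
type toolResult struct {
	Error   bool   `json:"error"`
	Success bool   `json:"success"`
	Message string `json:"message"`
	NewPath string `json:"newPath,omitempty"`
}

func main() {
	// In the agent this string would come from ChangeDirectoryTool.Call(ctx, input).
	raw := `{"success": true, "newPath": "/workspaces/app", "message": "Successfully changed directory to /workspaces/app"}`

	var res toolResult
	if err := json.Unmarshal([]byte(raw), &res); err != nil {
		log.Fatalf("unexpected tool output: %v", err)
	}
	if res.Error {
		log.Fatalf("change_directory failed: %s", res.Message)
	}
	fmt.Printf("Now in %s\n", res.NewPath)
}
```

The same check applies to every tool reworked in this patch, since tool-level failures are reported through `common.ErrorResponse` with a nil Go error.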
diff --git a/cli/azd/internal/agent/tools/io/copy_file.go b/cli/azd/internal/agent/tools/io/copy_file.go index 64f91d50b72..2d0d573295f 100644 --- a/cli/azd/internal/agent/tools/io/copy_file.go +++ b/cli/azd/internal/agent/tools/io/copy_file.go @@ -9,13 +9,10 @@ import ( "strings" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" - "github.com/tmc/langchaingo/callbacks" ) // CopyFileTool implements the Tool interface for copying files -type CopyFileTool struct { - CallbacksHandler callbacks.Handler -} +type CopyFileTool struct{} func (t CopyFileTool) Name() string { return "copy_file" @@ -28,6 +25,27 @@ Returns: JSON with copy operation details or error information. The input must be formatted as a single line valid JSON string.` } +// createErrorResponse creates a JSON error response +func (t CopyFileTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() + } + + errorResp := common.ErrorResponse{ + Error: true, + Message: message, + } + + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { // Parse JSON input type InputParams struct { @@ -40,106 +58,46 @@ func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { // Clean the input first cleanInput := strings.TrimSpace(input) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("copy_file: %s", cleanInput)) - } - // Parse as JSON - this is now required if err := json.Unmarshal([]byte(cleanInput), ¶ms); err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"source\": \"file.txt\", \"destination\": \"backup.txt\"}", err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to parse JSON input: %w", err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input: %s. 
Expected format: {\"source\": \"file.txt\", \"destination\": \"backup.txt\"}", err.Error())) } source := strings.TrimSpace(params.Source) destination := strings.TrimSpace(params.Destination) if source == "" || destination == "" { - errorResponse := common.ErrorResponse{ - Error: true, - Message: "Both source and destination paths are required", - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("both source and destination paths are required")) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(fmt.Errorf("both source and destination paths are required"), "Both source and destination paths are required") } // Check if source file exists sourceInfo, err := os.Stat(source) if err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Source file %s does not exist: %s", source, err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("source file %s does not exist: %w", source, err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Source file %s does not exist: %s", source, err.Error())) } if sourceInfo.IsDir() { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Source %s is a directory. Use copy_directory for directories", source), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("source %s is a directory. Use copy_directory for directories", source)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(fmt.Errorf("source %s is a directory", source), fmt.Sprintf("Source %s is a directory. 
Use copy_directory for directories", source)) } // Open source file sourceFile, err := os.Open(source) if err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Failed to open source file %s: %s", source, err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to open source file %s: %w", source, err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Failed to open source file %s: %s", source, err.Error())) } defer sourceFile.Close() // Create destination file destFile, err := os.Create(destination) if err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Failed to create destination file %s: %s", destination, err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to create destination file %s: %w", destination, err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Failed to create destination file %s: %s", destination, err.Error())) } defer destFile.Close() // Copy contents bytesWritten, err := io.Copy(destFile, sourceFile) if err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Failed to copy file: %s", err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to copy file: %w", err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Failed to copy file: %s", err.Error())) } // Prepare JSON response structure @@ -162,21 +120,8 @@ func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { // Convert to JSON jsonData, err := json.MarshalIndent(response, "", " ") if err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Failed to marshal JSON response: %s", err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to marshal JSON response: %w", err)) - } - errorJsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(errorJsonData), nil - } - - output := string(jsonData) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, fmt.Sprintf("Copied %s to %s (%d bytes)", source, destination, bytesWritten)) + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - return output, nil + return string(jsonData), nil } diff --git a/cli/azd/internal/agent/tools/io/create_directory.go b/cli/azd/internal/agent/tools/io/create_directory.go index c79ac8d46c6..79db58865cb 100644 --- a/cli/azd/internal/agent/tools/io/create_directory.go +++ b/cli/azd/internal/agent/tools/io/create_directory.go @@ -2,17 +2,16 @@ package io import ( "context" + "encoding/json" "fmt" "os" "strings" - "github.com/tmc/langchaingo/callbacks" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" ) // CreateDirectoryTool implements the Tool interface for creating directories -type CreateDirectoryTool struct { - CallbacksHandler callbacks.Handler -} +type CreateDirectoryTool struct{} func (t CreateDirectoryTool) Name() string { return "create_directory" @@ -22,56 +21,69 @@ func (t CreateDirectoryTool) Description() string { return "Create a directory (and any necessary 
parent directories). Input: directory path (e.g., 'docs' or './src/components')" } -func (t CreateDirectoryTool) Call(ctx context.Context, input string) (string, error) { - // Invoke callback for tool start - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("create_directory: %s", input)) +// createErrorResponse creates a JSON error response +func (t CreateDirectoryTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() + } + + errorResp := common.ErrorResponse{ + Error: true, + Message: message, } + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + +func (t CreateDirectoryTool) Call(ctx context.Context, input string) (string, error) { input = strings.TrimPrefix(input, `"`) input = strings.TrimSuffix(input, `"`) + input = strings.TrimSpace(input) if input == "" { - err := fmt.Errorf("directory path is required") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("directory path is required"), "Directory path is required") } err := os.MkdirAll(input, 0755) if err != nil { - toolErr := fmt.Errorf("failed to create directory %s: %w", input, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to create directory %s: %s", input, err.Error())) } // Check if directory already existed or was newly created info, err := os.Stat(input) if err != nil { - toolErr := fmt.Errorf("failed to verify directory creation: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to verify directory creation: %s", err.Error())) } if !info.IsDir() { - toolErr := fmt.Errorf("%s exists but is not a directory", input) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(fmt.Errorf("%s exists but is not a directory", input), fmt.Sprintf("%s exists but is not a directory", input)) + } + + // Create success response + type CreateDirectoryResponse struct { + Success bool `json:"success"` + Path string `json:"path"` + Message string `json:"message"` } - output := fmt.Sprintf("Created directory: %s\n", input) + response := CreateDirectoryResponse{ + Success: true, + Path: input, + Message: fmt.Sprintf("Successfully created directory: %s", input), + } - // Invoke callback for tool end - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - return output, nil + return string(jsonData), nil } diff --git a/cli/azd/internal/agent/tools/io/current_directory.go b/cli/azd/internal/agent/tools/io/current_directory.go index 59169eb24e9..56256b3ea56 100644 --- a/cli/azd/internal/agent/tools/io/current_directory.go +++ b/cli/azd/internal/agent/tools/io/current_directory.go @@ -2,16 +2,15 @@ package io import ( "context" + "encoding/json" "fmt" 
"os" - "github.com/tmc/langchaingo/callbacks" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" ) // CurrentDirectoryTool implements the Tool interface for getting current directory -type CurrentDirectoryTool struct { - CallbacksHandler callbacks.Handler -} +type CurrentDirectoryTool struct{} func (t CurrentDirectoryTool) Name() string { return "cwd" @@ -21,21 +20,51 @@ func (t CurrentDirectoryTool) Description() string { return "Get the current working directory to understand the project context. Input: use 'current' or '.' (any input works)" } -func (t CurrentDirectoryTool) Call(ctx context.Context, input string) (string, error) { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, input) +// createErrorResponse creates a JSON error response +func (t CurrentDirectoryTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() + } + + errorResp := common.ErrorResponse{ + Error: true, + Message: message, } + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + +func (t CurrentDirectoryTool) Call(ctx context.Context, input string) (string, error) { dir, err := os.Getwd() if err != nil { - return "", fmt.Errorf("failed to get current directory: %w", err) + return t.createErrorResponse(err, fmt.Sprintf("Failed to get current directory: %s", err.Error())) + } + + // Create success response + type CurrentDirectoryResponse struct { + Success bool `json:"success"` + CurrentDirectory string `json:"currentDirectory"` + Message string `json:"message"` } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, dir) + response := CurrentDirectoryResponse{ + Success: true, + CurrentDirectory: dir, + Message: fmt.Sprintf("Current directory is %s", dir), } - output := fmt.Sprintf("Current directory is %s\n", dir) + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) + } - return output, nil + return string(jsonData), nil } diff --git a/cli/azd/internal/agent/tools/io/delete_directory.go b/cli/azd/internal/agent/tools/io/delete_directory.go index 7afb090f868..3066cd2d403 100644 --- a/cli/azd/internal/agent/tools/io/delete_directory.go +++ b/cli/azd/internal/agent/tools/io/delete_directory.go @@ -2,17 +2,16 @@ package io import ( "context" + "encoding/json" "fmt" "os" "strings" - "github.com/tmc/langchaingo/callbacks" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" ) // DeleteDirectoryTool implements the Tool interface for deleting directories -type DeleteDirectoryTool struct { - CallbacksHandler callbacks.Handler -} +type DeleteDirectoryTool struct{} func (t DeleteDirectoryTool) Name() string { return "delete_directory" @@ -22,40 +21,48 @@ func (t DeleteDirectoryTool) Description() string { return "Delete a directory and all its contents. 
Input: directory path (e.g., 'temp-folder' or './old-docs')" } -func (t DeleteDirectoryTool) Call(ctx context.Context, input string) (string, error) { - // Invoke callback for tool start - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("delete_directory: %s", input)) +// createErrorResponse creates a JSON error response +func (t DeleteDirectoryTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() } + errorResp := common.ErrorResponse{ + Error: true, + Message: message, + } + + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + +func (t DeleteDirectoryTool) Call(ctx context.Context, input string) (string, error) { input = strings.TrimPrefix(input, `"`) input = strings.TrimSuffix(input, `"`) + input = strings.TrimSpace(input) if input == "" { - err := fmt.Errorf("directory path is required") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("directory path is required"), "Directory path is required") } // Check if directory exists info, err := os.Stat(input) if err != nil { - toolErr := fmt.Errorf("directory %s does not exist: %w", input, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + if os.IsNotExist(err) { + return t.createErrorResponse(err, fmt.Sprintf("Directory %s does not exist", input)) } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Cannot access directory %s: %s", input, err.Error())) } // Make sure it's a directory, not a file if !info.IsDir() { - toolErr := fmt.Errorf("%s is a file, not a directory. Use delete_file to remove files", input) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(fmt.Errorf("%s is a file, not a directory", input), fmt.Sprintf("%s is a file, not a directory. 
Use delete_file to remove files", input)) } // Count contents before deletion for reporting @@ -68,24 +75,36 @@ func (t DeleteDirectoryTool) Call(ctx context.Context, input string) (string, er // Delete the directory and all contents err = os.RemoveAll(input) if err != nil { - toolErr := fmt.Errorf("failed to delete directory %s: %w", input, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to delete directory %s: %s", input, err.Error())) + } + + // Create success response + type DeleteDirectoryResponse struct { + Success bool `json:"success"` + Path string `json:"path"` + ItemsDeleted int `json:"itemsDeleted"` + Message string `json:"message"` } - var output string + var message string if fileCount > 0 { - output = fmt.Sprintf("Deleted directory: %s (contained %d items)", input, fileCount) + message = fmt.Sprintf("Successfully deleted directory %s (contained %d items)", input, fileCount) } else { - output = fmt.Sprintf("Deleted empty directory: %s", input) + message = fmt.Sprintf("Successfully deleted empty directory %s", input) + } + + response := DeleteDirectoryResponse{ + Success: true, + Path: input, + ItemsDeleted: fileCount, + Message: message, } - // Invoke callback for tool end - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - return output, nil + return string(jsonData), nil } diff --git a/cli/azd/internal/agent/tools/io/delete_file.go b/cli/azd/internal/agent/tools/io/delete_file.go index 57c51b415de..e5333526286 100644 --- a/cli/azd/internal/agent/tools/io/delete_file.go +++ b/cli/azd/internal/agent/tools/io/delete_file.go @@ -2,17 +2,16 @@ package io import ( "context" + "encoding/json" "fmt" "os" "strings" - "github.com/tmc/langchaingo/callbacks" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" ) // DeleteFileTool implements the Tool interface for deleting files -type DeleteFileTool struct { - CallbacksHandler callbacks.Handler -} +type DeleteFileTool struct{} func (t DeleteFileTool) Name() string { return "delete_file" @@ -22,55 +21,78 @@ func (t DeleteFileTool) Description() string { return "Delete a file. 
Input: file path (e.g., 'temp.txt' or './docs/old-file.md')" } -func (t DeleteFileTool) Call(ctx context.Context, input string) (string, error) { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("delete_file: %s", input)) +// createErrorResponse creates a JSON error response +func (t DeleteFileTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() } + errorResp := common.ErrorResponse{ + Error: true, + Message: message, + } + + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + +func (t DeleteFileTool) Call(ctx context.Context, input string) (string, error) { input = strings.TrimPrefix(input, `"`) input = strings.TrimSuffix(input, `"`) + input = strings.TrimSpace(input) if input == "" { - err := fmt.Errorf("file path is required") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("file path is required"), "File path is required") } // Check if file exists and get info info, err := os.Stat(input) if err != nil { - toolErr := fmt.Errorf("file %s does not exist: %w", input, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + if os.IsNotExist(err) { + return t.createErrorResponse(err, fmt.Sprintf("File %s does not exist", input)) } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Cannot access file %s: %s", input, err.Error())) } // Make sure it's a file, not a directory if info.IsDir() { - err := fmt.Errorf("%s is a directory, not a file. Use delete_directory to remove directories", input) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("%s is a directory, not a file", input), fmt.Sprintf("%s is a directory, not a file. 
Use delete_directory to remove directories", input)) } + fileSize := info.Size() + // Delete the file err = os.Remove(input) if err != nil { - toolErr := fmt.Errorf("failed to delete file %s: %w", input, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to delete file %s: %s", input, err.Error())) } - output := fmt.Sprintf("Deleted file %s (%d bytes)", input, info.Size()) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + // Create success response + type DeleteFileResponse struct { + Success bool `json:"success"` + FilePath string `json:"filePath"` + SizeDeleted int64 `json:"sizeDeleted"` + Message string `json:"message"` + } + + response := DeleteFileResponse{ + Success: true, + FilePath: input, + SizeDeleted: fileSize, + Message: fmt.Sprintf("Successfully deleted file %s (%d bytes)", input, fileSize), + } + + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - return output, nil + return string(jsonData), nil } diff --git a/cli/azd/internal/agent/tools/io/directory_list.go b/cli/azd/internal/agent/tools/io/directory_list.go index 581bd593da1..a5f6b92d089 100644 --- a/cli/azd/internal/agent/tools/io/directory_list.go +++ b/cli/azd/internal/agent/tools/io/directory_list.go @@ -9,13 +9,10 @@ import ( "strings" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" - "github.com/tmc/langchaingo/callbacks" ) // DirectoryListTool implements the Tool interface for listing directory contents -type DirectoryListTool struct { - CallbacksHandler callbacks.Handler -} +type DirectoryListTool struct{} func (t DirectoryListTool) Name() string { return "list_directory" @@ -28,6 +25,27 @@ Returns: JSON with directory contents including file names, types, and sizes. The input must be formatted as a single line valid JSON string.` } +// createErrorResponse creates a JSON error response +func (t DirectoryListTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() + } + + errorResp := common.ErrorResponse{ + Error: true, + Message: message, + } + + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + func (t DirectoryListTool) Call(ctx context.Context, input string) (string, error) { // Parse JSON input type InputParams struct { @@ -42,15 +60,7 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro // Parse as JSON - this is now required if err := json.Unmarshal([]byte(cleanInput), ¶ms); err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"path\": \".\", \"include_hidden\": false}", err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to parse JSON input: %w", err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input: %s. 
Expected format: {\"path\": \".\", \"includeHidden\": false}", err.Error())) } // Validate required path field @@ -60,11 +70,6 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro path := strings.TrimSpace(params.Path) - // Add debug logging - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("Processing JSON input: path='%s', include_hidden=%v", path, params.IncludeHidden)) - } - // Get absolute path for clarity - handle "." explicitly to avoid potential issues var absPath string var err error @@ -73,85 +78,32 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro // Explicitly get current working directory instead of relying on filepath.Abs(".") absPath, err = os.Getwd() if err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Failed to get current working directory: %s", err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to get current working directory: %w", err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Failed to get current working directory: %s", err.Error())) } } else { absPath, err = filepath.Abs(path) if err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Failed to get absolute path for %s: %s", path, err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to get absolute path for %s: %w", path, err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Failed to get absolute path for %s: %s", path, err.Error())) } } - // Invoke callback for tool execution start - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("Reading directory %s (absolute: %s)", path, absPath)) - } - - // Check if directory exists - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("Checking if directory exists: '%s'", absPath)) - } - + // Check if directory exists and is accessible info, err := os.Stat(absPath) if err != nil { - var message string if os.IsNotExist(err) { - message = fmt.Sprintf("Directory does not exist: %s", absPath) - } else { - message = fmt.Sprintf("Failed to access %s: %s (original input: '%s', cleaned path: '%s')", absPath, err.Error(), input, path) + return t.createErrorResponse(err, fmt.Sprintf("Directory %s does not exist", absPath)) } - - errorResponse := common.ErrorResponse{ - Error: true, - Message: message, - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to access %s: %w", absPath, err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Failed to access %s: %s", absPath, err.Error())) } if !info.IsDir() { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Path is not a directory: %s", absPath), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("%s is not a directory", absPath)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(fmt.Errorf("%s is not a directory", absPath), fmt.Sprintf("%s is not a directory", absPath)) } - // List directory 
contents + // Read directory contents files, err := os.ReadDir(absPath) if err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Failed to read directory %s: %s", absPath, err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to read directory %s: %w", absPath, err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Failed to read directory %s: %s", absPath, err.Error())) } // Prepare JSON response structure @@ -163,14 +115,21 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro } type DirectoryResponse struct { + Success bool `json:"success"` Path string `json:"path"` TotalItems int `json:"totalItems"` Items []FileInfo `json:"items"` + Message string `json:"message"` } var items []FileInfo for _, file := range files { + // Skip hidden files if not requested + if !params.IncludeHidden && strings.HasPrefix(file.Name(), ".") { + continue + } + fileInfo := FileInfo{ Name: file.Name(), IsDir: file.IsDir(), @@ -189,31 +148,18 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro } response := DirectoryResponse{ + Success: true, Path: absPath, - TotalItems: len(files), + TotalItems: len(items), Items: items, + Message: fmt.Sprintf("Successfully listed %d items in directory %s", len(items), absPath), } // Convert to JSON jsonData, err := json.MarshalIndent(response, "", " ") if err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Failed to marshal JSON response: %s", err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to marshal JSON response: %w", err)) - } - errorJsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(errorJsonData), nil - } - - output := string(jsonData) - - // Invoke callback for tool end - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, "") + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - return output, nil + return string(jsonData), nil } diff --git a/cli/azd/internal/agent/tools/io/file_info.go b/cli/azd/internal/agent/tools/io/file_info.go index afc5a0aca15..57d53ddb906 100644 --- a/cli/azd/internal/agent/tools/io/file_info.go +++ b/cli/azd/internal/agent/tools/io/file_info.go @@ -8,13 +8,11 @@ import ( "strings" "time" - "github.com/tmc/langchaingo/callbacks" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" ) // FileInfoTool implements the Tool interface for getting file information -type FileInfoTool struct { - CallbacksHandler callbacks.Handler -} +type FileInfoTool struct{} func (t FileInfoTool) Name() string { return "file_info" @@ -24,33 +22,47 @@ func (t FileInfoTool) Description() string { return "Get information about a file (size, modification time, permissions). Input: file path (e.g., 'data.txt' or './docs/readme.md'). Returns JSON with file information." 
} -func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("file_info: %s", input)) +// createErrorResponse creates a JSON error response +func (t FileInfoTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() + } + + errorResp := common.ErrorResponse{ + Error: true, + Message: message, } + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + +func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { input = strings.TrimPrefix(input, `"`) input = strings.TrimSuffix(input, `"`) + input = strings.TrimSpace(input) if input == "" { - err := fmt.Errorf("file path is required") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("file path is required"), "File path is required") } info, err := os.Stat(input) if err != nil { - toolErr := fmt.Errorf("failed to get info for %s: %w", input, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + if os.IsNotExist(err) { + return t.createErrorResponse(err, fmt.Sprintf("File or directory %s does not exist", input)) } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to get info for %s: %s", input, err.Error())) } // Prepare JSON response structure type FileInfoResponse struct { + Success bool `json:"success"` Path string `json:"path"` Name string `json:"name"` Type string `json:"type"` @@ -58,6 +70,7 @@ func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { Size int64 `json:"size"` ModifiedTime time.Time `json:"modifiedTime"` Permissions string `json:"permissions"` + Message string `json:"message"` } var fileType string @@ -68,6 +81,7 @@ func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { } response := FileInfoResponse{ + Success: true, Path: input, Name: info.Name(), Type: fileType, @@ -75,23 +89,14 @@ func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { Size: info.Size(), ModifiedTime: info.ModTime(), Permissions: info.Mode().String(), + Message: fmt.Sprintf("Successfully retrieved information for %s", input), } // Convert to JSON jsonData, err := json.MarshalIndent(response, "", " ") if err != nil { - toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr - } - - output := string(jsonData) - - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - return output, nil + return string(jsonData), nil } diff --git a/cli/azd/internal/agent/tools/io/file_search.go b/cli/azd/internal/agent/tools/io/file_search.go index dd7a7a0de9c..84ea580c93e 100644 --- a/cli/azd/internal/agent/tools/io/file_search.go +++ b/cli/azd/internal/agent/tools/io/file_search.go @@ -4,18 +4,14 @@ import ( "context" "encoding/json" "fmt" - "os" - "path/filepath" "sort" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" 
"github.com/bmatcuk/doublestar/v4" - "github.com/tmc/langchaingo/callbacks" ) // FileSearchTool implements a tool for searching files using glob patterns -type FileSearchTool struct { - CallbacksHandler callbacks.Handler -} +type FileSearchTool struct{} // FileSearchRequest represents the JSON payload for file search requests type FileSearchRequest struct { @@ -77,147 +73,100 @@ Returns a sorted list of matching file paths relative to the current working dir The input must be formatted as a single line valid JSON string.` } -func (t FileSearchTool) Call(ctx context.Context, input string) (string, error) { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("file_search: %s", input)) +// createErrorResponse creates a JSON error response +func (t FileSearchTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() + } + + errorResp := common.ErrorResponse{ + Error: true, + Message: message, } + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + +func (t FileSearchTool) Call(ctx context.Context, input string) (string, error) { if input == "" { - err := fmt.Errorf("input is required") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("input is required"), "Input is required. Expected JSON format: {\"pattern\": \"*.go\"}") } // Parse JSON input var req FileSearchRequest if err := json.Unmarshal([]byte(input), &req); err != nil { - toolErr := fmt.Errorf("invalid JSON input: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input: %s. 
Expected format: {\"pattern\": \"*.go\", \"maxResults\": 50}", err.Error())) } // Validate required fields if req.Pattern == "" { - err := fmt.Errorf("pattern is required") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err - } - - // Set defaults - if req.MaxResults == 0 { - req.MaxResults = 100 - } - - // Get current working directory - searchPath, err := os.Getwd() - if err != nil { - toolErr := fmt.Errorf("failed to get current working directory: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(fmt.Errorf("pattern is required"), "Pattern is required in the JSON input") } - // Perform the search - matches, err := t.searchFiles(searchPath, req.Pattern, req.MaxResults) - if err != nil { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + // Set default max results + maxResults := req.MaxResults + if maxResults <= 0 { + maxResults = 100 } - // Format output as JSON - output, err := t.formatResults(searchPath, req.Pattern, matches, req.MaxResults) + // Use doublestar to find matching files + matches, err := doublestar.FilepathGlob(req.Pattern) if err != nil { - toolErr := fmt.Errorf("failed to format results: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr - } - - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + return t.createErrorResponse(err, fmt.Sprintf("Invalid glob pattern '%s': %s", req.Pattern, err.Error())) } - return output, nil -} - -// searchFiles performs the actual file search using doublestar for comprehensive glob matching -func (t FileSearchTool) searchFiles(searchPath, pattern string, maxResults int) ([]string, error) { - var matches []string - searchPath = filepath.Clean(searchPath) - - // Use doublestar.Glob which handles all advanced patterns including recursion via ** - globPattern := filepath.Join(searchPath, pattern) - // Convert to forward slashes for cross-platform compatibility - globPattern = filepath.ToSlash(globPattern) + // Sort results for consistent output + sort.Strings(matches) - globMatches, err := doublestar.FilepathGlob(globPattern) - if err != nil { - return nil, fmt.Errorf("error in glob pattern matching: %w", err) + // Limit results if needed + if len(matches) > maxResults { + matches = matches[:maxResults] } - // Convert to relative paths and limit results - for _, match := range globMatches { - if len(matches) >= maxResults { - break - } - - // Check if it's a file (not directory) - info, err := os.Stat(match) - if err != nil || info.IsDir() { - continue - } - - relPath, err := filepath.Rel(searchPath, match) - if err != nil { - continue // Skip files we can't get relative path for - } - - // Convert to forward slashes for consistent output - relPath = filepath.ToSlash(relPath) - matches = append(matches, relPath) + // Create response structure + type FileSearchResponse struct { + Success bool `json:"success"` + Pattern string `json:"pattern"` + TotalFound int `json:"totalFound"` + Returned int `json:"returned"` + MaxResults int `json:"maxResults"` + Files []string `json:"files"` + Message string `json:"message"` } - // Sort the results for consistent output - sort.Strings(matches) + totalFound := len(matches) + returned := len(matches) - return matches, nil -} - -// formatResults formats the search results into a JSON response -func (t 
FileSearchTool) formatResults(searchPath, pattern string, matches []string, maxResults int) (string, error) { - // Prepare JSON response structure - type FileSearchResponse struct { - CurrentDirectory string `json:"currentDirectory"` - Pattern string `json:"pattern"` - TotalFound int `json:"totalFound"` - MaxResults int `json:"maxResults"` - ResultsLimited bool `json:"resultsLimited"` - Matches []string `json:"matches"` + var message string + if totalFound == 0 { + message = fmt.Sprintf("No files found matching pattern '%s'", req.Pattern) + } else if totalFound == returned { + message = fmt.Sprintf("Found %d files matching pattern '%s'", totalFound, req.Pattern) + } else { + message = fmt.Sprintf("Found %d files matching pattern '%s', returning first %d", totalFound, req.Pattern, returned) } response := FileSearchResponse{ - CurrentDirectory: searchPath, - Pattern: pattern, - TotalFound: len(matches), - MaxResults: maxResults, - ResultsLimited: len(matches) >= maxResults, - Matches: matches, + Success: true, + Pattern: req.Pattern, + TotalFound: totalFound, + Returned: returned, + MaxResults: maxResults, + Files: matches, + Message: message, } // Convert to JSON jsonData, err := json.MarshalIndent(response, "", " ") if err != nil { - return "", fmt.Errorf("failed to marshal JSON response: %w", err) + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } return string(jsonData), nil diff --git a/cli/azd/internal/agent/tools/io/loader.go b/cli/azd/internal/agent/tools/io/loader.go index 5ddc49c749c..bf5e95a9f3f 100644 --- a/cli/azd/internal/agent/tools/io/loader.go +++ b/cli/azd/internal/agent/tools/io/loader.go @@ -1,34 +1,29 @@ package io import ( - "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" ) // IoToolsLoader loads IO-related tools -type IoToolsLoader struct { - callbackHandler callbacks.Handler -} +type IoToolsLoader struct{} -func NewIoToolsLoader(callbackHandler callbacks.Handler) *IoToolsLoader { - return &IoToolsLoader{ - callbackHandler: callbackHandler, - } +func NewIoToolsLoader() *IoToolsLoader { + return &IoToolsLoader{} } func (l *IoToolsLoader) LoadTools() ([]tools.Tool, error) { return []tools.Tool{ - &CurrentDirectoryTool{CallbacksHandler: l.callbackHandler}, - &ChangeDirectoryTool{CallbacksHandler: l.callbackHandler}, - &DirectoryListTool{CallbacksHandler: l.callbackHandler}, - &CreateDirectoryTool{CallbacksHandler: l.callbackHandler}, - &DeleteDirectoryTool{CallbacksHandler: l.callbackHandler}, - &ReadFileTool{CallbacksHandler: l.callbackHandler}, + &CurrentDirectoryTool{}, + &ChangeDirectoryTool{}, + &DirectoryListTool{}, + &CreateDirectoryTool{}, + &DeleteDirectoryTool{}, + &ReadFileTool{}, &WriteFileTool{}, - &CopyFileTool{CallbacksHandler: l.callbackHandler}, - &MoveFileTool{CallbacksHandler: l.callbackHandler}, - &DeleteFileTool{CallbacksHandler: l.callbackHandler}, - &FileInfoTool{CallbacksHandler: l.callbackHandler}, - &FileSearchTool{CallbacksHandler: l.callbackHandler}, + &CopyFileTool{}, + &MoveFileTool{}, + &DeleteFileTool{}, + &FileInfoTool{}, + &FileSearchTool{}, }, nil } diff --git a/cli/azd/internal/agent/tools/io/move_file.go b/cli/azd/internal/agent/tools/io/move_file.go index 51c12488774..98d77ac6cc3 100644 --- a/cli/azd/internal/agent/tools/io/move_file.go +++ b/cli/azd/internal/agent/tools/io/move_file.go @@ -2,17 +2,16 @@ package io import ( "context" + "encoding/json" "fmt" "os" "strings" - "github.com/tmc/langchaingo/callbacks" + 
"github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" ) // MoveFileTool implements the Tool interface for moving/renaming files -type MoveFileTool struct { - CallbacksHandler callbacks.Handler -} +type MoveFileTool struct{} func (t MoveFileTool) Name() string { return "move_file" @@ -22,70 +21,77 @@ func (t MoveFileTool) Description() string { return "Move or rename a file. Input format: 'source|destination' (e.g., 'old.txt|new.txt' or './file.txt|./folder/file.txt')" } -func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("move_file: %s", input)) +// createErrorResponse creates a JSON error response +func (t MoveFileTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() + } + + errorResp := common.ErrorResponse{ + Error: true, + Message: message, + } + + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil } + return string(jsonData), nil +} + +func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { input = strings.TrimPrefix(input, `"`) input = strings.TrimSuffix(input, `"`) + input = strings.TrimSpace(input) if input == "" { - err := fmt.Errorf("input is required in format 'source|destination'") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("input is required in format 'source|destination'"), "Input is required in format 'source|destination'") } // Split on first occurrence of '|' to separate source from destination parts := strings.SplitN(input, "|", 2) if len(parts) != 2 { - err := fmt.Errorf("invalid input format. Use 'source|destination'") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("invalid input format"), "Invalid input format. 
Use 'source|destination'") } source := strings.TrimSpace(parts[0]) destination := strings.TrimSpace(parts[1]) if source == "" || destination == "" { - err := fmt.Errorf("both source and destination paths are required") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("both source and destination paths are required"), "Both source and destination paths are required") } // Check if source exists sourceInfo, err := os.Stat(source) if err != nil { - toolErr := fmt.Errorf("source %s does not exist: %w", source, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + if os.IsNotExist(err) { + return t.createErrorResponse(err, fmt.Sprintf("Source %s does not exist", source)) } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Cannot access source %s: %s", source, err.Error())) } // Check if destination already exists if _, err := os.Stat(destination); err == nil { - err := fmt.Errorf("destination %s already exists", destination) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("destination %s already exists", destination), fmt.Sprintf("Destination %s already exists", destination)) } // Move/rename the file err = os.Rename(source, destination) if err != nil { - toolErr := fmt.Errorf("failed to move %s to %s: %w", source, destination, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to move %s to %s: %s", source, destination, err.Error())) + } + + // Create success response + type MoveFileResponse struct { + Success bool `json:"success"` + Source string `json:"source"` + Destination string `json:"destination"` + Type string `json:"type"` + Size int64 `json:"size"` + Message string `json:"message"` } fileType := "file" @@ -93,10 +99,20 @@ func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { fileType = "directory" } - output := fmt.Sprintf("Successfully moved %s from %s to %s (%d bytes)\n", fileType, source, destination, sourceInfo.Size()) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + response := MoveFileResponse{ + Success: true, + Source: source, + Destination: destination, + Type: fileType, + Size: sourceInfo.Size(), + Message: fmt.Sprintf("Successfully moved %s from %s to %s (%d bytes)", fileType, source, destination, sourceInfo.Size()), + } + + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - return output, nil + return string(jsonData), nil } diff --git a/cli/azd/internal/agent/tools/io/read_file.go b/cli/azd/internal/agent/tools/io/read_file.go index 0890e127e76..9547d62054f 100644 --- a/cli/azd/internal/agent/tools/io/read_file.go +++ b/cli/azd/internal/agent/tools/io/read_file.go @@ -1,6 +1,7 @@ package io import ( + "bufio" "context" "encoding/json" "fmt" @@ -8,13 +9,11 @@ import ( "strings" "time" - "github.com/tmc/langchaingo/callbacks" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" ) // ReadFileTool implements the Tool interface for reading file contents -type ReadFileTool struct { - CallbacksHandler callbacks.Handler -} +type ReadFileTool struct{} // ReadFileRequest represents the JSON payload for 
file read requests type ReadFileRequest struct { @@ -80,279 +79,170 @@ Examples: 5. Read single line: {"filePath": "package.json", "startLine": 42, "endLine": 42} -Files larger than 10KB are automatically truncated. Files over 1MB show size info only unless specific line range is requested. +Files larger than 100KB are automatically truncated. Files over 1MB show size info only unless specific line range is requested. The input must be formatted as a single line valid JSON string.` } -func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("read_file: %s", input)) +// createErrorResponse creates a JSON error response +func (t ReadFileTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() } - if input == "" { - output := "❌ No input provided\n\n" - output += "📝 Expected JSON format:\n" - output += `{"filePath": "path/to/file.txt"}` + errorResp := common.ErrorResponse{ + Error: true, + Message: message, + } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("empty input")) - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + +func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { + if input == "" { + return t.createErrorResponse(fmt.Errorf("empty input"), "No input provided. Expected JSON format: {\"filePath\": \"path/to/file.txt\"}") } // Parse JSON input var req ReadFileRequest if err := json.Unmarshal([]byte(input), &req); err != nil { - output := fmt.Sprintf("❌ Invalid JSON input: %s\n\n", err.Error()) - output += "📝 Expected format:\n" - output += `{"filePath": "path/to/file.txt", "startLine": 1, "endLine": 50}` - output += "\n\n💡 Tips:\n" - output += "- Use double quotes for strings\n" - output += "- Remove any trailing commas\n" - output += "- Escape backslashes: use \\\\ instead of \\" - - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil + return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"filePath\": \"path/to/file.txt\", \"startLine\": 1, \"endLine\": 50}", err.Error())) } // Validate required fields if req.FilePath == "" { - output := "❌ Missing required field: filePath cannot be empty\n\n" - output += "📝 Example: " + `{"filePath": "README.md"}` - - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("missing filePath")) - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil + return t.createErrorResponse(fmt.Errorf("missing filePath"), "Missing required field: filePath cannot be empty") } // Get file info first to check size fileInfo, err := os.Stat(req.FilePath) if err != nil { - output := fmt.Sprintf("❌ Cannot access file: %s\n\n", req.FilePath) if os.IsNotExist(err) { - output += "📁 File does not exist. 
Please check:\n" - output += "- File path spelling and case sensitivity\n" - output += "- File location relative to current directory\n" - output += "- File permissions\n" - } else { - output += fmt.Sprintf("Error details: %s\n", err.Error()) + return t.createErrorResponse(err, fmt.Sprintf("File does not exist: %s. Please check file path spelling and location", req.FilePath)) } - - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil + return t.createErrorResponse(err, fmt.Sprintf("Cannot access file %s: %s", req.FilePath, err.Error())) } - fileSize := fileInfo.Size() - - // Handle very large files differently (unless specific line range requested) - if fileSize > 1024*1024 && req.StartLine == 0 && req.EndLine == 0 { // 1MB+ - response := ReadFileResponse{ - Success: false, - FilePath: req.FilePath, - Content: "", - IsTruncated: false, - IsPartial: false, - FileInfo: ReadFileInfo{ - Size: fileSize, - ModifiedTime: fileInfo.ModTime(), - Permissions: fileInfo.Mode().String(), - }, - Message: fmt.Sprintf("File is very large (%.2f MB). Use startLine and endLine parameters for specific sections.", float64(fileSize)/(1024*1024)), - } - - jsonData, err := json.MarshalIndent(response, "", " ") - if err != nil { - toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr - } + if fileInfo.IsDir() { + return t.createErrorResponse(fmt.Errorf("path is a directory"), fmt.Sprintf("%s is a directory, not a file. Use directory_list tool for directories", req.FilePath)) + } - output := string(jsonData) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil + // Handle very large files (>1MB) - require line range + const maxFileSize = 1024 * 1024 // 1MB + if fileInfo.Size() > maxFileSize && req.StartLine == 0 && req.EndLine == 0 { + return t.createErrorResponse(fmt.Errorf("file too large"), fmt.Sprintf("File %s is too large (%d bytes). 
Please specify startLine and endLine to read specific sections", req.FilePath, fileInfo.Size())) } - content, err := os.ReadFile(req.FilePath) + // Read file content + file, err := os.Open(req.FilePath) if err != nil { - output := fmt.Sprintf("❌ Cannot read file: %s\n", req.FilePath) - output += fmt.Sprintf("Error: %s\n\n", err.Error()) - output += "💡 This might be due to:\n" - output += "- Insufficient permissions\n" - output += "- File is locked by another process\n" - output += "- File is binary or corrupted\n" - - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil + return t.createErrorResponse(err, fmt.Sprintf("Failed to open file %s: %s", req.FilePath, err.Error())) } + defer file.Close() - lines := strings.Split(string(content), "\n") - totalLines := len(lines) + // Read lines + var lines []string + scanner := bufio.NewScanner(file) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } - // Handle partial reads based on line range - if req.StartLine > 0 || req.EndLine > 0 { - return t.handlePartialRead(ctx, req.FilePath, lines, req.StartLine, req.EndLine, totalLines, fileInfo) + if err := scanner.Err(); err != nil { + return t.createErrorResponse(err, fmt.Sprintf("Error reading file %s: %s", req.FilePath, err.Error())) } - var finalContent string + totalLines := len(lines) + var content string + var isPartial bool var isTruncated bool - var message string - - // Improved truncation with better limits for full file reads - if len(content) > 10000 { // 10KB limit - // Show first 50 lines and last 10 lines - preview := strings.Join(lines[:50], "\n") - if totalLines > 60 { - preview += fmt.Sprintf("\n\n... [%d lines omitted] ...\n\n", totalLines-60) - preview += strings.Join(lines[totalLines-10:], "\n") - } - finalContent = preview - isTruncated = true - message = "Large file truncated - showing first 50 and last 10 lines" - } else { - finalContent = string(content) - isTruncated = false - message = "File read successfully" - } + var lineRange *LineRange - response := ReadFileResponse{ - Success: true, - FilePath: req.FilePath, - Content: finalContent, - IsTruncated: isTruncated, - IsPartial: false, - FileInfo: ReadFileInfo{ - Size: fileSize, - ModifiedTime: fileInfo.ModTime(), - Permissions: fileInfo.Mode().String(), - }, - Message: message, - } + // Determine what to read + if req.StartLine > 0 || req.EndLine > 0 { + // Reading specific line range + startLine := req.StartLine + endLine := req.EndLine - jsonData, err := json.MarshalIndent(response, "", " ") - if err != nil { - toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + if startLine == 0 { + startLine = 1 } - return "", toolErr - } - - output := string(jsonData) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - - return output, nil -} - -// handlePartialRead handles reading specific line ranges from a file -func (t ReadFileTool) handlePartialRead(ctx context.Context, filePath string, lines []string, startLine, endLine, totalLines int, fileInfo os.FileInfo) (string, error) { - // Validate and adjust line numbers (1-based to 0-based) - if startLine == 0 { - startLine = 1 // Default to start of file - } - if endLine == 0 { - endLine = totalLines // Default to end of file - } - - // Validate line numbers - if startLine < 1 { - startLine = 1 - } - if endLine > totalLines { - endLine = 
totalLines - } - if startLine > endLine { - response := ReadFileResponse{ - Success: false, - FilePath: filePath, - Content: "", - IsTruncated: false, - IsPartial: false, - FileInfo: ReadFileInfo{ - Size: fileInfo.Size(), - ModifiedTime: fileInfo.ModTime(), - Permissions: fileInfo.Mode().String(), - }, - Message: fmt.Sprintf("Invalid line range: start line (%d) cannot be greater than end line (%d)", startLine, endLine), + if endLine == 0 { + endLine = totalLines } - jsonData, err := json.MarshalIndent(response, "", " ") - if err != nil { - toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + // Validate line range + if startLine > totalLines { + return t.createErrorResponse(fmt.Errorf("start line out of range"), fmt.Sprintf("Start line %d is greater than total lines %d in file", startLine, totalLines)) } - - output := string(jsonData) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("invalid line range: start %d > end %d", startLine, endLine)) - t.CallbacksHandler.HandleToolEnd(ctx, output) + if startLine > endLine { + return t.createErrorResponse(fmt.Errorf("invalid line range"), fmt.Sprintf("Start line %d is greater than end line %d", startLine, endLine)) } - return output, nil - } - // Convert to 0-based indexing - startIdx := startLine - 1 - endIdx := endLine + // Adjust endLine if it exceeds total lines + if endLine > totalLines { + endLine = totalLines + } - // Extract the requested lines - selectedLines := lines[startIdx:endIdx] - content := strings.Join(selectedLines, "\n") + // Convert to 0-based indexing and extract lines + startIdx := startLine - 1 + endIdx := endLine + selectedLines := lines[startIdx:endIdx] + content = strings.Join(selectedLines, "\n") + isPartial = true - linesRead := endLine - startLine + 1 + lineRange = &LineRange{ + StartLine: startLine, + EndLine: endLine, + TotalLines: totalLines, + LinesRead: endLine - startLine + 1, + } + } else { + // Reading entire file + content = strings.Join(lines, "\n") + + // Truncate if content is too large (>100KB) + const maxContentSize = 100 * 1024 // 100KB + if len(content) > maxContentSize { + content = content[:maxContentSize] + "\n... 
[content truncated]" + isTruncated = true + } + } + // Create success response response := ReadFileResponse{ Success: true, - FilePath: filePath, + FilePath: req.FilePath, Content: content, - IsTruncated: false, - IsPartial: true, - LineRange: &LineRange{ - StartLine: startLine, - EndLine: endLine, - TotalLines: totalLines, - LinesRead: linesRead, - }, + IsTruncated: isTruncated, + IsPartial: isPartial, + LineRange: lineRange, FileInfo: ReadFileInfo{ Size: fileInfo.Size(), ModifiedTime: fileInfo.ModTime(), Permissions: fileInfo.Mode().String(), }, - Message: fmt.Sprintf("Successfully read %d lines (%d-%d) from file", linesRead, startLine, endLine), } - jsonData, err := json.MarshalIndent(response, "", " ") - if err != nil { - toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + // Set appropriate message + if isPartial && lineRange != nil { + response.Message = fmt.Sprintf("Successfully read %d lines (%d-%d) from file", lineRange.LinesRead, lineRange.StartLine, lineRange.EndLine) + } else if isTruncated { + response.Message = fmt.Sprintf("Successfully read file (content truncated due to size)") + } else { + response.Message = fmt.Sprintf("Successfully read entire file (%d lines)", totalLines) } - output := string(jsonData) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - return output, nil + return string(jsonData), nil } diff --git a/cli/azd/internal/agent/tools/loader.go b/cli/azd/internal/agent/tools/loader.go index e4a10ad1f53..75be6baefe6 100644 --- a/cli/azd/internal/agent/tools/loader.go +++ b/cli/azd/internal/agent/tools/loader.go @@ -1,7 +1,6 @@ package tools import ( - "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd" @@ -15,18 +14,16 @@ type ToolLoader interface { } type LocalToolsLoader struct { - loaders []ToolLoader - callbackHandler callbacks.Handler + loaders []ToolLoader } -func NewLocalToolsLoader(callbackHandler callbacks.Handler) *LocalToolsLoader { +func NewLocalToolsLoader() *LocalToolsLoader { return &LocalToolsLoader{ loaders: []ToolLoader{ - azd.NewAzdToolsLoader(callbackHandler), - dev.NewDevToolsLoader(callbackHandler), - io.NewIoToolsLoader(callbackHandler), + azd.NewAzdToolsLoader(), + dev.NewDevToolsLoader(), + io.NewIoToolsLoader(), }, - callbackHandler: callbackHandler, } } diff --git a/cli/azd/internal/agent/tools/mcp/loader.go b/cli/azd/internal/agent/tools/mcp/loader.go index e30c3fb5e0e..7ad5fdc1bd1 100644 --- a/cli/azd/internal/agent/tools/mcp/loader.go +++ b/cli/azd/internal/agent/tools/mcp/loader.go @@ -10,7 +10,6 @@ import ( langchaingo_mcp_adapter "github.com/i2y/langchaingo-mcp-adapter" "github.com/mark3labs/mcp-go/client" "github.com/mark3labs/mcp-go/client/transport" - "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" ) @@ -31,13 +30,11 @@ type ServerConfig struct { } type McpToolsLoader struct { - callbackHandler callbacks.Handler samplingHandler client.SamplingHandler } -func NewMcpToolsLoader(callbackHandler callbacks.Handler, samplingHandler client.SamplingHandler) *McpToolsLoader { +func NewMcpToolsLoader(samplingHandler client.SamplingHandler) *McpToolsLoader { return 
&McpToolsLoader{
-		callbackHandler: callbackHandler,
 		samplingHandler: samplingHandler,
 	}
 }
diff --git a/cli/azd/internal/agent/tools/weather/loader.go b/cli/azd/internal/agent/tools/weather/loader.go
index ce283e18fb3..afdf7894f68 100644
--- a/cli/azd/internal/agent/tools/weather/loader.go
+++ b/cli/azd/internal/agent/tools/weather/loader.go
@@ -1,23 +1,18 @@
 package weather
 
 import (
-	"github.com/tmc/langchaingo/callbacks"
 	"github.com/tmc/langchaingo/tools"
 )
 
 // WeatherToolsLoader loads weather-related tools
-type WeatherToolsLoader struct {
-	callbackHandler callbacks.Handler
-}
+type WeatherToolsLoader struct{}
 
-func NewWeatherToolsLoader(callbackHandler callbacks.Handler) *WeatherToolsLoader {
-	return &WeatherToolsLoader{
-		callbackHandler: callbackHandler,
-	}
+func NewWeatherToolsLoader() *WeatherToolsLoader {
+	return &WeatherToolsLoader{}
 }
 
 func (l *WeatherToolsLoader) LoadTools() ([]tools.Tool, error) {
 	return []tools.Tool{
-		&WeatherTool{CallbacksHandler: l.callbackHandler},
+		&WeatherTool{},
 	}, nil
 }
diff --git a/cli/azd/pkg/llm/azure_openai.go b/cli/azd/pkg/llm/azure_openai.go
index ea02134c848..dc05b78ec86 100644
--- a/cli/azd/pkg/llm/azure_openai.go
+++ b/cli/azd/pkg/llm/azure_openai.go
@@ -54,11 +54,11 @@ func (p *AzureOpenAiModelProvider) CreateModelContainer(opts ...ModelOption) (*M
 	}
 
 	model, err := openai.New(
-		openai.WithModel(modelConfig.Model),
+		openai.WithToken(modelConfig.Token),
+		openai.WithBaseURL(modelConfig.Endpoint),
 		openai.WithAPIType(openai.APITypeAzure),
 		openai.WithAPIVersion(modelConfig.ApiVersion),
-		openai.WithBaseURL(modelConfig.Endpoint),
-		openai.WithToken(modelConfig.Token),
+		openai.WithModel(modelConfig.Model),
 	)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create LLM: %w", err)

From a001e39a57812298e388cf535fddcae2edd667e8 Mon Sep 17 00:00:00 2001
From: Wallace Breza
Date: Wed, 6 Aug 2025 18:19:54 -0700
Subject: [PATCH 030/116] Adds feature flag usage

---
 cli/azd/cmd/init.go | 187 ++++-
 cli/azd/internal/agent/agent.go | 201 ++-----
 .../internal/agent/conversational_agent.go | 182 +++++++
 cli/azd/internal/agent/logging/file_logger.go | 201 +++++++
 cli/azd/internal/agent/logging/logger.go | 6 +
 cli/azd/internal/agent/one_shot_agent.go | 117 +++++
 .../internal/agent/prompts/conversational.txt | 88 ++++
 .../default_agent_format_instructions.txt | 40 --
 .../agent/prompts/default_agent_prefix.txt | 17 -
 .../agent/prompts/default_agent_suffix.txt | 8 -
 cli/azd/internal/agent/prompts/one_shot.txt | 78 +++
 .../tools/azd/azd_architecture_planning.go | 14 +-
 .../tools/azd/azd_azure_yaml_generation.go | 14 +-
 .../agent/tools/azd/azd_discovery_analysis.go | 14 +-
 .../agent/tools/azd/azd_docker_generation.go | 14 +-
 .../tools/azd/azd_iac_generation_rules.go | 12 +-
 .../azd/azd_infrastructure_generation.go | 13 +-
 .../internal/agent/tools/azd/azd_plan_init.go | 12 +-
 .../agent/tools/azd/azd_project_validation.go | 17 +-
 .../agent/tools/azd/prompts/README.md | 199 -------
 .../azd/prompts/azd_architecture_planning.md | 222 ++++----
 .../azd/prompts/azd_azure_yaml_generation.md | 185 ++-----
 .../azd/prompts/azd_discovery_analysis.md | 238 ++-------
 .../azd/prompts/azd_docker_generation.md | 245 ++++-----
 .../azd/prompts/azd_iac_generation_rules.md | 224 +++-----
 .../prompts/azd_infrastructure_generation.md | 268 +++++-----
 .../agent/tools/azd/prompts/azd_plan_init.md | 153 +++---
 .../azd/prompts/azd_project_validation.md | 233 +++------
 .../tools/azd/prompts/azd_yaml_schema.md | 18 -
 .../agent/tools/azd/prompts/prompts.go | 3 +
 cli/azd/internal/agent/tools/io/write_file.go | 237 ++++++---
 .../agent/tools/io/write_file_test.go | 495 ++++++++++++++++++
 cli/azd/pkg/llm/azure_openai.go | 28 +-
 cli/azd/pkg/llm/manager.go | 4 +-
 cli/azd/pkg/llm/model.go | 35 ++
 cli/azd/pkg/llm/model_factory.go | 8 +-
 cli/azd/pkg/output/colors.go | 56 +-
 go.mod | 20 +-
 go.sum | 39 ++
 39 files changed, 2414 insertions(+), 1731 deletions(-)
 create mode 100644 cli/azd/internal/agent/conversational_agent.go
 create mode 100644 cli/azd/internal/agent/logging/file_logger.go
 create mode 100644 cli/azd/internal/agent/one_shot_agent.go
 create mode 100644 cli/azd/internal/agent/prompts/conversational.txt
 delete mode 100644 cli/azd/internal/agent/prompts/default_agent_format_instructions.txt
 delete mode 100644 cli/azd/internal/agent/prompts/default_agent_prefix.txt
 delete mode 100644 cli/azd/internal/agent/prompts/default_agent_suffix.txt
 create mode 100644 cli/azd/internal/agent/prompts/one_shot.txt
 delete mode 100644 cli/azd/internal/agent/tools/azd/prompts/README.md
 delete mode 100644 cli/azd/internal/agent/tools/azd/prompts/azd_yaml_schema.md
 create mode 100644 cli/azd/internal/agent/tools/io/write_file_test.go
 create mode 100644 cli/azd/pkg/llm/model.go

diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go
index f51d9d03da2..3c128cb5c53 100644
--- a/cli/azd/cmd/init.go
+++ b/cli/azd/cmd/init.go
@@ -34,6 +34,7 @@ import (
 	"github.com/azure/azure-dev/cli/azd/pkg/tools"
 	"github.com/azure/azure-dev/cli/azd/pkg/tools/git"
 	"github.com/azure/azure-dev/cli/azd/pkg/workflow"
+	"github.com/fatih/color"
 	"github.com/joho/godotenv"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
@@ -244,7 +245,7 @@ func (i *initAction) Run(ctx context.Context) (*actions.ActionResult, error) {
 			initTypeSelect = initEnvironment
 		} else {
 			// Prompt for init type for new projects
-			initTypeSelect, err = promptInitType(i.console, ctx)
+			initTypeSelect, err = promptInitType(i.console, ctx, i.featuresManager)
 			if err != nil {
 				return nil, err
 			}
@@ -371,38 +372,176 @@ func (i *initAction) Run(ctx context.Context) (*actions.ActionResult, error) {
 }
 
 func (i *initAction) initAppWithCopilot(ctx context.Context) error {
-	actionLogger := logging.NewActionLogger()
-	defaultModelContainer, err := i.llmManager.GetDefaultModel(llm.WithLogger(actionLogger))
+	// Warn user that this is an alpha feature
+	i.console.WarnForFeature(ctx, llm.FeatureLlm)
+
+	fileLogger, cleanup, err := logging.NewFileLoggerDefault()
+	if err != nil {
+		return err
+	}
+	defer cleanup()
+
+	defaultModelContainer, err := i.llmManager.GetDefaultModel(llm.WithLogger(fileLogger))
 	if err != nil {
 		return err
 	}
 
 	samplingModelContainer, err := i.llmManager.GetDefaultModel()
-	azdAgent, err := agent.NewAzdAiAgent(
+	azdAgent, err := agent.NewConversationalAzdAiAgent(
 		defaultModelContainer.Model,
 		agent.WithSamplingModel(samplingModelContainer.Model),
+		agent.WithDebug(i.flags.global.EnableDebugLogging),
 	)
 	if err != nil {
 		return err
 	}
 
-	initPrompt := `Goal: Initialize or migrate the AZD project from the current working directory.
+	type initStep struct {
+		Name        string
+		Description string
+	}
+
+	taskInput := `Your task: %s
+
+Break this task down into smaller steps if needed.
+If new information reveals more work to be done, pursue it.
+Do not stop until all tasks are complete and fully resolved.
+`
+
+	initSteps := []initStep{
+		{
+			Name:        "Running Discovery & Analysis",
+			Description: "Run a deep discovery and analysis on the current working directory. 
Provide a detailed summary of work performed.", + }, + { + Name: "Generating Architecture Plan", + Description: "Create a high-level architecture plan for the application. Provide a detailed summary of work performed.", + }, + { + Name: "Generating Dockerfile(s)", + Description: "Generate a Dockerfile for the application components as needed. Provide a detailed summary of work performed.", + }, + { + Name: "Generating infrastructure", + Description: "Generate infrastructure as code (IaC) for the application. Provide a detailed summary of work performed.", + }, + { + Name: "Generating azure.yaml file", + Description: "Generate an azure.yaml file for the application. Provide a detailed summary of work performed.", + }, + { + Name: "Validating project", + Description: "Validate the project structure and configuration. Provide a detailed summary of work performed.", + }, + } + + for idx, step := range initSteps { + // Collect and apply feedback for next steps + if idx > 0 { + if err := i.collectAndApplyFeedback(ctx, azdAgent, "Any feedback before continuing to the next step?"); err != nil { + return err + } + } + + // Run Step + i.console.ShowSpinner(ctx, step.Name, input.Step) + fullTaskInput := fmt.Sprintf(taskInput, step.Description) + agentOutput, err := azdAgent.SendMessage(ctx, fullTaskInput) + if err != nil { + i.console.StopSpinner(ctx, fmt.Sprintf("%s (With errors)", step.Name), input.StepWarning) + if agentOutput != "" { + i.console.Message(ctx, output.WithMarkdown(agentOutput)) + } + + return err + } + + i.console.StopSpinner(ctx, step.Name, input.StepDone) + i.console.Message(ctx, "") + finalOutput := fmt.Sprintf("%s %s", color.MagentaString("🤖 AZD Copilot:"), output.WithMarkdown(agentOutput)) + i.console.Message(ctx, finalOutput) + i.console.Message(ctx, "") + } + + // Post-completion feedback loop + if err := i.postCompletionFeedbackLoop(ctx, azdAgent); err != nil { + return err + } -Read and review the 'azd-arch-plan.md' file if it exists to get current status -Run the 'azd_plan_init' tool and follow the steps -Finally - run the 'azd_project_validation' tool to ensure the process is fully completed -Be very short, terse and to the point during planning and action execution. -Provide verbose output for the final summary when you are complete. 
- ` + return nil +} - if err := azdAgent.RunConversationLoop(ctx, []string{initPrompt}); err != nil { +// collectAndApplyFeedback prompts for user feedback and applies it using the agent in a loop +func (i *initAction) collectAndApplyFeedback(ctx context.Context, azdAgent *agent.ConversationalAzdAiAgent, promptMessage string) error { + hasFeedback, err := i.console.Confirm(ctx, input.ConsoleOptions{ + Message: promptMessage, + DefaultValue: false, + }) + if err != nil { return err } + if !hasFeedback { + i.console.Message(ctx, "") + return nil + } + + // Loop to allow multiple rounds of feedback + for { + userInput, err := i.console.Prompt(ctx, input.ConsoleOptions{ + Message: "💭 You:", + DefaultValue: "", + Help: "Additional context will be provided to AZD Copilot", + }) + if err != nil { + return fmt.Errorf("error collecting feedback during azd init, %w", err) + } + + if userInput != "" { + i.console.ShowSpinner(ctx, "Submitting feedback", input.Step) + feedbackOutput, err := azdAgent.SendMessage(ctx, userInput) + if err != nil { + i.console.StopSpinner(ctx, "Submitting feedback (With errors)", input.StepWarning) + if feedbackOutput != "" { + i.console.Message(ctx, output.WithMarkdown(feedbackOutput)) + } + return err + } + + i.console.StopSpinner(ctx, "Submitting feedback", input.StepDone) + i.console.Message(ctx, "") + agentOutput := fmt.Sprintf("%s %s", color.MagentaString("🤖 AZD Copilot:"), output.WithMarkdown(feedbackOutput)) + i.console.Message(ctx, agentOutput) + i.console.Message(ctx, "") + } + + // Check if user wants to provide more feedback + moreFeedback, err := i.console.Confirm(ctx, input.ConsoleOptions{ + Message: "Do you have any more feedback or changes?", + DefaultValue: false, + }) + if err != nil { + return err + } + + if !moreFeedback { + break + } + } + return nil } +// postCompletionFeedbackLoop provides a final opportunity for feedback after all steps complete +func (i *initAction) postCompletionFeedbackLoop(ctx context.Context, azdAgent *agent.ConversationalAzdAiAgent) error { + i.console.Message(ctx, "") + i.console.Message(ctx, "🎉 All initialization steps completed!") + i.console.Message(ctx, "") + + return i.collectAndApplyFeedback(ctx, azdAgent, "Any additional feedback or changes you'd like to make?") +} + type initType int const ( @@ -413,14 +552,20 @@ const ( initWithCopilot ) -func promptInitType(console input.Console, ctx context.Context) (initType, error) { +func promptInitType(console input.Console, ctx context.Context, featuresManager *alpha.FeatureManager) (initType, error) { + options := []string{ + "Scan current directory", // This now covers minimal project creation too + "Select a template", + } + + // Only include AZD Copilot option if the LLM feature is enabled + if featuresManager.IsEnabled(llm.FeatureLlm) { + options = append(options, fmt.Sprintf("AZD Copilot %s", color.YellowString("(Alpha)"))) + } + selection, err := console.Select(ctx, input.ConsoleOptions{ Message: "How do you want to initialize your app?", - Options: []string{ - "Scan current directory", // This now covers minimal project creation too - "Select a template", - "AZD Copilot", - }, + Options: options, }) if err != nil { return initUnknown, err @@ -432,7 +577,11 @@ func promptInitType(console input.Console, ctx context.Context) (initType, error case 1: return initAppTemplate, nil case 2: - return initWithCopilot, nil + // Only return initWithCopilot if the LLM feature is enabled and we have 3 options + if featuresManager.IsEnabled(llm.FeatureLlm) { + return initWithCopilot, 
nil + } + fallthrough default: panic("unhandled selection") } diff --git a/cli/azd/internal/agent/agent.go b/cli/azd/internal/agent/agent.go index bcf30a609c6..dc3f434e3d3 100644 --- a/cli/azd/internal/agent/agent.go +++ b/cli/azd/internal/agent/agent.go @@ -1,202 +1,73 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - package agent import ( - "bufio" - "context" - _ "embed" "fmt" - "os" "strings" - "github.com/fatih/color" "github.com/tmc/langchaingo/agents" - "github.com/tmc/langchaingo/chains" + "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/llms" - "github.com/tmc/langchaingo/memory" "github.com/tmc/langchaingo/tools" - - "github.com/azure/azure-dev/cli/azd/internal/agent/logging" - localtools "github.com/azure/azure-dev/cli/azd/internal/agent/tools" - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" - mcptools "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" ) -//go:embed prompts/default_agent_prefix.txt -var _defaultAgentPrefix string - -//go:embed prompts/default_agent_format_instructions.txt -var _defaultAgentFormatInstructions string - -//go:embed prompts/default_agent_suffix.txt -var _defaultAgentSuffix string - -// AzdAiAgent represents an enhanced AZD Copilot agent with action tracking, intent validation, and conversation memory -type AzdAiAgent struct { - debug bool - defaultModel llms.Model - samplingModel llms.Model - executor *agents.Executor +type Agent struct { + debug bool + defaultModel llms.Model + samplingModel llms.Model + executor *agents.Executor + tools []tools.Tool + callbacksHandler callbacks.Handler } -type AgentOption func(*AzdAiAgent) +type AgentOption func(*Agent) func WithDebug(debug bool) AgentOption { - return func(agent *AzdAiAgent) { + return func(agent *Agent) { agent.debug = debug } } -func WithSamplingModel(model llms.Model) AgentOption { - return func(agent *AzdAiAgent) { - agent.samplingModel = model +func WithDefaultModel(model llms.Model) AgentOption { + return func(agent *Agent) { + agent.defaultModel = model } } -func NewAzdAiAgent(llm llms.Model, opts ...AgentOption) (*AzdAiAgent, error) { - azdAgent := &AzdAiAgent{ - defaultModel: llm, - samplingModel: llm, - } - - for _, opt := range opts { - opt(azdAgent) - } - - actionLogger := logging.NewActionLogger( - logging.WithDebug(azdAgent.debug), - ) - - smartMemory := memory.NewConversationBuffer( - memory.WithInputKey("input"), - memory.WithOutputKey("output"), - memory.WithHumanPrefix("Human"), - memory.WithAIPrefix("AI"), - ) - - // Create sampling handler for MCP - samplingHandler := mcptools.NewMcpSamplingHandler( - azdAgent.samplingModel, - mcp.WithDebug(azdAgent.debug), - ) - - toolLoaders := []localtools.ToolLoader{ - localtools.NewLocalToolsLoader(), - mcptools.NewMcpToolsLoader(samplingHandler), - } - - allTools := []tools.Tool{} - - // Define block list of excluded tools - excludedTools := map[string]bool{ - "extension_az": true, - "extension_azd": true, - // Add more excluded tools here as needed +func WithSamplingModel(model llms.Model) AgentOption { + return func(agent *Agent) { + agent.samplingModel = model } +} - for _, toolLoader := range toolLoaders { - categoryTools, err := toolLoader.LoadTools() - if err != nil { - return nil, err - } - - // Filter out excluded tools - for _, tool := range categoryTools { - if !excludedTools[tool.Name()] { - allTools = append(allTools, tool) - } - } +func WithTools(tools ...tools.Tool) AgentOption { + return func(agent *Agent) { + agent.tools = 
tools } - - // 4. Create agent with memory directly integrated - conversationAgent := agents.NewConversationalAgent(llm, allTools, - agents.WithPromptPrefix(_defaultAgentPrefix), - agents.WithPromptSuffix(_defaultAgentSuffix), - agents.WithPromptFormatInstructions(_defaultAgentFormatInstructions), - agents.WithMemory(smartMemory), - agents.WithCallbacksHandler(actionLogger), - agents.WithReturnIntermediateSteps(), - ) - - // 5. Create executor without separate memory configuration since agent already has it - executor := agents.NewExecutor(conversationAgent, - agents.WithMaxIterations(500), // Much higher limit for complex multi-step processes - agents.WithMemory(smartMemory), - agents.WithCallbacksHandler(actionLogger), - agents.WithReturnIntermediateSteps(), - ) - - azdAgent.executor = executor - return azdAgent, nil } -// RunConversationLoop runs the enhanced AZD Copilot agent with full capabilities -func (aai *AzdAiAgent) RunConversationLoop(ctx context.Context, args []string) error { - fmt.Println("🤖 AZD Copilot - Interactive Mode") - fmt.Println("═══════════════════════════════════════════════════════════") - - // Handle initial query if provided - var initialQuery string - if len(args) > 0 { - initialQuery = strings.Join(args, " ") +func WithCallbacksHandler(handler callbacks.Handler) AgentOption { + return func(agent *Agent) { + agent.callbacksHandler = handler } +} - scanner := bufio.NewScanner(os.Stdin) - - for { - var userInput string - - if initialQuery != "" { - userInput = initialQuery - initialQuery = "" // Clear after first use - color.Cyan("💬 You: %s\n", userInput) - } else { - fmt.Print(color.CyanString("\n💬 You: ")) - color.Set(color.FgCyan) // Set blue color for user input - if !scanner.Scan() { - color.Unset() // Reset color - break // EOF or error - } - userInput = strings.TrimSpace(scanner.Text()) - color.Unset() // Reset color after input - } - - // Check for exit commands - if userInput == "" { - continue - } - - if strings.ToLower(userInput) == "exit" || strings.ToLower(userInput) == "quit" { - fmt.Println("👋 Goodbye! Thanks for using AZD Copilot!") - break - } - - // Process the query with the enhanced agent - err := aai.runChain(ctx, userInput) - if err != nil { - continue +func toolNames(tools []tools.Tool) string { + var tn strings.Builder + for i, tool := range tools { + if i > 0 { + tn.WriteString(", ") } + tn.WriteString(tool.Name()) } - if err := scanner.Err(); err != nil { - return fmt.Errorf("error reading input: %w", err) - } - - return nil + return tn.String() } -// ProcessQuery processes a user query with full action tracking and validation -func (aai *AzdAiAgent) runChain(ctx context.Context, userInput string) error { - // Execute with enhanced input - agent should automatically handle memory - _, err := chains.Run(ctx, aai.executor, userInput, - chains.WithMaxTokens(800), - chains.WithTemperature(0.3), - ) - if err != nil { - return err +func toolDescriptions(tools []tools.Tool) string { + var ts strings.Builder + for _, tool := range tools { + ts.WriteString(fmt.Sprintf("- %s: %s\n", tool.Name(), tool.Description())) } - return nil + return ts.String() } diff --git a/cli/azd/internal/agent/conversational_agent.go b/cli/azd/internal/agent/conversational_agent.go new file mode 100644 index 00000000000..49b6c20e850 --- /dev/null +++ b/cli/azd/internal/agent/conversational_agent.go @@ -0,0 +1,182 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package agent + +import ( + "bufio" + "context" + _ "embed" + "fmt" + "os" + "strings" + + "github.com/fatih/color" + "github.com/tmc/langchaingo/agents" + "github.com/tmc/langchaingo/chains" + "github.com/tmc/langchaingo/llms" + "github.com/tmc/langchaingo/memory" + "github.com/tmc/langchaingo/prompts" + "github.com/tmc/langchaingo/tools" + + localtools "github.com/azure/azure-dev/cli/azd/internal/agent/tools" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" + mcptools "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" +) + +//go:embed prompts/conversational.txt +var conversational_prompt_template string + +// ConversationalAzdAiAgent represents an enhanced AZD Copilot agent with action tracking, intent validation, and conversation memory +type ConversationalAzdAiAgent struct { + *Agent +} + +func NewConversationalAzdAiAgent(llm llms.Model, opts ...AgentOption) (*ConversationalAzdAiAgent, error) { + azdAgent := &ConversationalAzdAiAgent{ + Agent: &Agent{ + defaultModel: llm, + samplingModel: llm, + tools: []tools.Tool{}, + }, + } + + for _, opt := range opts { + opt(azdAgent.Agent) + } + + smartMemory := memory.NewConversationBuffer( + memory.WithInputKey("input"), + memory.WithOutputKey("output"), + memory.WithHumanPrefix("Human"), + memory.WithAIPrefix("AI"), + ) + + // Create sampling handler for MCP + samplingHandler := mcptools.NewMcpSamplingHandler( + azdAgent.samplingModel, + mcp.WithDebug(azdAgent.debug), + ) + + toolLoaders := []localtools.ToolLoader{ + localtools.NewLocalToolsLoader(), + mcptools.NewMcpToolsLoader(samplingHandler), + } + + // Define block list of excluded tools + excludedTools := map[string]bool{ + "extension_az": true, + "extension_azd": true, + // Add more excluded tools here as needed + } + + for _, toolLoader := range toolLoaders { + categoryTools, err := toolLoader.LoadTools() + if err != nil { + return nil, err + } + + // Filter out excluded tools + for _, tool := range categoryTools { + if !excludedTools[tool.Name()] { + azdAgent.tools = append(azdAgent.tools, tool) + } + } + } + + promptTemplate := prompts.PromptTemplate{ + Template: conversational_prompt_template, + TemplateFormat: prompts.TemplateFormatGoTemplate, + InputVariables: []string{"input", "agent_scratchpad"}, + PartialVariables: map[string]any{ + "tool_names": toolNames(azdAgent.tools), + "tool_descriptions": toolDescriptions(azdAgent.tools), + "history": "", + }, + } + + // 4. Create agent with memory directly integrated + conversationAgent := agents.NewConversationalAgent(llm, azdAgent.tools, + agents.WithPrompt(promptTemplate), + agents.WithMemory(smartMemory), + agents.WithCallbacksHandler(azdAgent.callbacksHandler), + agents.WithReturnIntermediateSteps(), + ) + + // 5. 
Create executor without separate memory configuration since agent already has it + executor := agents.NewExecutor(conversationAgent, + agents.WithMaxIterations(500), // Much higher limit for complex multi-step processes + agents.WithMemory(smartMemory), + agents.WithCallbacksHandler(azdAgent.callbacksHandler), + agents.WithReturnIntermediateSteps(), + ) + + azdAgent.executor = executor + return azdAgent, nil +} + +func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, args ...string) (string, error) { + return aai.runChain(ctx, strings.Join(args, "\n")) +} + +// RunConversationLoop runs the enhanced AZD Copilot agent with full capabilities +func (aai *ConversationalAzdAiAgent) StartConversation(ctx context.Context, args ...string) (string, error) { + fmt.Println("🤖 AZD Copilot - Interactive Mode") + fmt.Println("═══════════════════════════════════════════════════════════") + + // Handle initial query if provided + var initialQuery string + if len(args) > 0 { + initialQuery = strings.Join(args, " ") + } + + scanner := bufio.NewScanner(os.Stdin) + + for { + var userInput string + + if initialQuery != "" { + userInput = initialQuery + initialQuery = "" // Clear after first use + color.Cyan("💬 You: %s\n", userInput) + } else { + fmt.Print(color.CyanString("\n💬 You: ")) + color.Set(color.FgCyan) // Set blue color for user input + if !scanner.Scan() { + color.Unset() // Reset color + break // EOF or error + } + userInput = strings.TrimSpace(scanner.Text()) + color.Unset() // Reset color after input + } + + // Check for exit commands + if userInput == "" { + continue + } + + if strings.ToLower(userInput) == "exit" || strings.ToLower(userInput) == "quit" { + fmt.Println("👋 Goodbye! Thanks for using AZD Copilot!") + break + } + + // Process the query with the enhanced agent + return aai.runChain(ctx, userInput) + } + + if err := scanner.Err(); err != nil { + return "", fmt.Errorf("error reading input: %w", err) + } + + return "", nil +} + +// ProcessQuery processes a user query with full action tracking and validation +func (aai *ConversationalAzdAiAgent) runChain(ctx context.Context, userInput string) (string, error) { + // Execute with enhanced input - agent should automatically handle memory + output, err := chains.Run(ctx, aai.executor, userInput) + if err != nil { + return "", err + } + return output, nil +} diff --git a/cli/azd/internal/agent/logging/file_logger.go b/cli/azd/internal/agent/logging/file_logger.go new file mode 100644 index 00000000000..07222bbe781 --- /dev/null +++ b/cli/azd/internal/agent/logging/file_logger.go @@ -0,0 +1,201 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package logging + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "io" + "os" + "time" + + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/llms" + "github.com/tmc/langchaingo/schema" +) + +// Compile-time check to ensure FileLogger implements callbacks.Handler +var _ callbacks.Handler = &FileLogger{} + +// FlushWriter is an interface for writers that support flushing +type FlushWriter interface { + io.Writer + Flush() error +} + +// FileLogger logs all agent actions to a file with automatic flushing +type FileLogger struct { + writer FlushWriter + file *os.File // Keep reference to close file when needed +} + +// FileLoggerOption represents an option for configuring FileLogger +type FileLoggerOption func(*FileLogger) + +// NewFileLogger creates a new file logger that writes to the provided FlushWriter +func NewFileLogger(writer FlushWriter, opts ...FileLoggerOption) *FileLogger { + fl := &FileLogger{ + writer: writer, + } + + for _, opt := range opts { + opt(fl) + } + + return fl +} + +// NewFileLoggerDefault creates a new file logger with default settings. +// Opens or creates "azd-agent-{date}.log" in the current working directory. +// Returns the logger and a cleanup function that should be called to close the file. +func NewFileLoggerDefault(opts ...FileLoggerOption) (*FileLogger, func() error, error) { + // Create dated filename: azd-agent-2025-08-05.log + dateStr := time.Now().Format("2006-01-02") + filename := fmt.Sprintf("azd-agent-%s.log", dateStr) + + file, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644) + if err != nil { + return nil, nil, fmt.Errorf("failed to open log file: %w", err) + } + + bufferedWriter := bufio.NewWriter(file) + + // Create a flush writer that flushes both the buffer and the file + flushWriter := &fileFlushWriter{ + writer: bufferedWriter, + file: file, + } + + fl := &FileLogger{ + writer: flushWriter, + file: file, + } + + for _, opt := range opts { + opt(fl) + } + + cleanup := func() error { + if err := bufferedWriter.Flush(); err != nil { + file.Close() + return err + } + return file.Close() + } + + return fl, cleanup, nil +} + +// fileFlushWriter wraps a buffered writer and ensures both buffer and file are flushed +type fileFlushWriter struct { + writer *bufio.Writer + file *os.File +} + +func (fw *fileFlushWriter) Write(p []byte) (int, error) { + return fw.writer.Write(p) +} + +func (fw *fileFlushWriter) Flush() error { + if err := fw.writer.Flush(); err != nil { + return err + } + return fw.file.Sync() +} + +// writeAndFlush writes a message to the file and flushes immediately +func (fl *FileLogger) writeAndFlush(format string, args ...interface{}) { + timestamp := time.Now().UTC().Format(time.RFC3339) + message := fmt.Sprintf("[%s] %s\n", timestamp, fmt.Sprintf(format, args...)) + + if _, err := fl.writer.Write([]byte(message)); err == nil { + fl.writer.Flush() + } +} + +// HandleText is called when text is processed +func (fl *FileLogger) HandleText(ctx context.Context, text string) { + fl.writeAndFlush("TEXT: %s", text) +} + +// HandleLLMGenerateContentStart is called when LLM content generation starts +func (fl *FileLogger) HandleLLMGenerateContentStart(ctx context.Context, ms []llms.MessageContent) { + fl.writeAndFlush("LLM_GENERATE_START: %d messages", len(ms)) +} + +// HandleLLMGenerateContentEnd is called when LLM content generation ends +func (fl *FileLogger) HandleLLMGenerateContentEnd(ctx context.Context, res *llms.ContentResponse) { + for i, choice := range res.Choices { + 
fl.writeAndFlush("LLM_GENERATE_END[%d]: %s", i, choice.Content) + } +} + +// HandleRetrieverStart is called when retrieval starts +func (fl *FileLogger) HandleRetrieverStart(ctx context.Context, query string) { + fl.writeAndFlush("RETRIEVER_START: %s", query) +} + +// HandleRetrieverEnd is called when retrieval ends +func (fl *FileLogger) HandleRetrieverEnd(ctx context.Context, query string, documents []schema.Document) { + fl.writeAndFlush("RETRIEVER_END: query=%s, documents=%d", query, len(documents)) +} + +// HandleToolStart is called when a tool execution starts +func (fl *FileLogger) HandleToolStart(ctx context.Context, input string) { + fl.writeAndFlush("TOOL_START: %s", input) +} + +// HandleToolEnd is called when a tool execution ends +func (fl *FileLogger) HandleToolEnd(ctx context.Context, output string) { + fl.writeAndFlush("TOOL_END: %s", output) +} + +// HandleToolError is called when a tool execution fails +func (fl *FileLogger) HandleToolError(ctx context.Context, err error) { + fl.writeAndFlush("TOOL_ERROR: %s", err.Error()) +} + +// HandleLLMStart is called when LLM call starts +func (fl *FileLogger) HandleLLMStart(ctx context.Context, prompts []string) { + fl.writeAndFlush("LLM_START: %d prompts", len(prompts)) +} + +// HandleChainStart is called when chain execution starts +func (fl *FileLogger) HandleChainStart(ctx context.Context, inputs map[string]any) { + inputsJson, _ := json.Marshal(inputs) + fl.writeAndFlush("CHAIN_START: %s", string(inputsJson)) +} + +// HandleChainEnd is called when chain execution ends +func (fl *FileLogger) HandleChainEnd(ctx context.Context, outputs map[string]any) { + outputsJson, _ := json.Marshal(outputs) + fl.writeAndFlush("CHAIN_END: %s", string(outputsJson)) +} + +// HandleChainError is called when chain execution fails +func (fl *FileLogger) HandleChainError(ctx context.Context, err error) { + fl.writeAndFlush("CHAIN_ERROR: %s", err.Error()) +} + +// HandleAgentAction is called when an agent action is planned +func (fl *FileLogger) HandleAgentAction(ctx context.Context, action schema.AgentAction) { + fl.writeAndFlush("AGENT_ACTION: tool=%s, input=%s", action.Tool, action.ToolInput) +} + +// HandleAgentFinish is called when the agent finishes +func (fl *FileLogger) HandleAgentFinish(ctx context.Context, finish schema.AgentFinish) { + fl.writeAndFlush("AGENT_FINISH: %s", finish.Log) +} + +// HandleLLMError is called when LLM call fails +func (fl *FileLogger) HandleLLMError(ctx context.Context, err error) { + fl.writeAndFlush("LLM_ERROR: %s", err.Error()) +} + +// HandleStreamingFunc handles streaming responses +func (fl *FileLogger) HandleStreamingFunc(ctx context.Context, chunk []byte) { + fl.writeAndFlush("STREAMING: %s", string(chunk)) +} diff --git a/cli/azd/internal/agent/logging/logger.go b/cli/azd/internal/agent/logging/logger.go index e3f9b64e0e4..94a36192ffa 100644 --- a/cli/azd/internal/agent/logging/logger.go +++ b/cli/azd/internal/agent/logging/logger.go @@ -95,10 +95,16 @@ func (al *ActionLogger) HandleRetrieverEnd(ctx context.Context, query string, do // HandleToolStart is called when a tool execution starts func (al *ActionLogger) HandleToolStart(ctx context.Context, input string) { + if al.debugEnabled { + color.HiBlack("\nHandleToolStart\n%s\n", input) + } } // HandleToolEnd is called when a tool execution ends func (al *ActionLogger) HandleToolEnd(ctx context.Context, output string) { + if al.debugEnabled { + color.HiBlack("\nHandleToolEnd\n%s\n", output) + } } // HandleToolError is called when a tool execution fails 
diff --git a/cli/azd/internal/agent/one_shot_agent.go b/cli/azd/internal/agent/one_shot_agent.go new file mode 100644 index 00000000000..e6b5adf427f --- /dev/null +++ b/cli/azd/internal/agent/one_shot_agent.go @@ -0,0 +1,117 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package agent + +import ( + "context" + _ "embed" + "strings" + + "github.com/tmc/langchaingo/agents" + "github.com/tmc/langchaingo/chains" + "github.com/tmc/langchaingo/llms" + "github.com/tmc/langchaingo/prompts" + "github.com/tmc/langchaingo/tools" + + localtools "github.com/azure/azure-dev/cli/azd/internal/agent/tools" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" + mcptools "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" +) + +// OneShotAzdAiAgent represents an enhanced AZD Copilot agent with action tracking, intent validation, and conversation memory +type OneShotAzdAiAgent struct { + *Agent +} + +//go:embed prompts/one_shot.txt +var one_shot_prompt_template string + +func NewOneShotAzdAiAgent(llm llms.Model, opts ...AgentOption) (*OneShotAzdAiAgent, error) { + azdAgent := &OneShotAzdAiAgent{ + Agent: &Agent{ + defaultModel: llm, + samplingModel: llm, + tools: []tools.Tool{}, + }, + } + + for _, opt := range opts { + opt(azdAgent.Agent) + } + + // Create sampling handler for MCP + samplingHandler := mcptools.NewMcpSamplingHandler( + azdAgent.samplingModel, + mcp.WithDebug(azdAgent.debug), + ) + + toolLoaders := []localtools.ToolLoader{ + localtools.NewLocalToolsLoader(), + mcptools.NewMcpToolsLoader(samplingHandler), + } + + // Define block list of excluded tools + excludedTools := map[string]bool{ + "extension_az": true, + "extension_azd": true, + // Add more excluded tools here as needed + } + + for _, toolLoader := range toolLoaders { + categoryTools, err := toolLoader.LoadTools() + if err != nil { + return nil, err + } + + // Filter out excluded tools + for _, tool := range categoryTools { + if !excludedTools[tool.Name()] { + azdAgent.tools = append(azdAgent.tools, tool) + } + } + } + + promptTemplate := prompts.PromptTemplate{ + Template: one_shot_prompt_template, + InputVariables: []string{"input", "agent_scratchpad"}, + TemplateFormat: prompts.TemplateFormatGoTemplate, + PartialVariables: map[string]any{ + "tool_names": toolNames(azdAgent.tools), + "tool_descriptions": toolDescriptions(azdAgent.tools), + }, + } + + // 4. Create agent with memory directly integrated + oneShotAgent := agents.NewOneShotAgent(llm, azdAgent.tools, + agents.WithPrompt(promptTemplate), + agents.WithCallbacksHandler(azdAgent.callbacksHandler), + agents.WithReturnIntermediateSteps(), + ) + + // 5. 
Create executor without separate memory configuration since agent already has it + executor := agents.NewExecutor(oneShotAgent, + agents.WithMaxIterations(500), // Much higher limit for complex multi-step processes + agents.WithCallbacksHandler(azdAgent.callbacksHandler), + agents.WithReturnIntermediateSteps(), + ) + + azdAgent.executor = executor + return azdAgent, nil +} + +// RunConversationLoop runs the enhanced AZD Copilot agent with full capabilities +func (aai *OneShotAzdAiAgent) SendMessage(ctx context.Context, args ...string) (string, error) { + return aai.runChain(ctx, strings.Join(args, "\n")) +} + +// ProcessQuery processes a user query with full action tracking and validation +func (aai *OneShotAzdAiAgent) runChain(ctx context.Context, userInput string) (string, error) { + // Execute with enhanced input - agent should automatically handle memory + output, err := chains.Run(ctx, aai.executor, userInput) + if err != nil { + return "", err + } + + return output, nil +} diff --git a/cli/azd/internal/agent/prompts/conversational.txt b/cli/azd/internal/agent/prompts/conversational.txt new file mode 100644 index 00000000000..66878fc4eca --- /dev/null +++ b/cli/azd/internal/agent/prompts/conversational.txt @@ -0,0 +1,88 @@ +You are an Azure Developer CLI (AZD) agent. +You are an expert in building, provisioning, and deploying Azure applications. +Always use Azure best practices and automation wherever possible. + +--- + +## Pre-Task Expectations + +Before beginning your work: + +* Review all available tools. +* If a tool exists for best practices or required inputs, you MUST invoke it before taking further steps. +* Integrate any learned knowledge from tools into your workflow. + +When generating code, infrastructure, or configurations: + +* You MUST ALWAYS save the content to files using the `write_file` tool. +* If no filename is provided, generate a meaningful and descriptive name. + +--- + +## Efficiency and Token Usage Guidelines + +To minimize cost and maximize speed: + +* DO NOT list or read full directories unless absolutely necessary. +* Prefer targeted exploration: + * Top-level file listings (1–2 levels deep) + * Common files: `README.md`, `package.json`, `*.csproj`, etc. + * Specific file extensions or known filenames +* Read files incrementally and only go deeper if prior steps justify it. +* **Favor breadth over depth**, and always limit the number and size of file reads per action. + +--- + +You have access to the following tools: +{{.tool_descriptions}} + +--- + +## REQUIRED RESPONSE FORMAT — DO NOT DEVIATE + +You MUST follow the ReAct pattern below for every task, without exception. + +This pattern consists of repeating the following sequence: + +``` +Thought: [Analyze the current situation and what needs to be done] +Thought: Do I need to use a tool? [Yes/No] +Action: [the action to take, should be one of [{{.tool_names}}]] +Action Input: [the input to the action] +Observation: [the result of the action] +``` + +After each Observation, you MUST continue the ReAct loop: + +* Reflect on the outcome. +* Determine if further actions are required. +* If yes, perform the next tool call using the same format. +* If an error occurred, debug and retry using alternative tool inputs (up to 3 retries). + +Only when ALL subtasks are completed and no further tool use is needed, you may finish with: + +``` +Thought: Do I need to use a tool? No +AI: [your full, final answer] +``` + +--- + +## Additional Behavior Requirements + +* Never skip the ReAct format. 
No direct answers, summaries, or conclusions are allowed outside of the full ReAct loop. +* Every Observation must trigger another Thought. +* You must NEVER exit early unless all actions are truly completed. +* If tool output reveals new required work, continue acting until all related tasks are complete. +* Be exhaustive and explicit in your reasoning. + +--- + +Previous conversation history: +{{.history}} + +User Question: +{{.input}} + +Thought: +{{.agent_scratchpad}} diff --git a/cli/azd/internal/agent/prompts/default_agent_format_instructions.txt b/cli/azd/internal/agent/prompts/default_agent_format_instructions.txt deleted file mode 100644 index 4ff35663ba8..00000000000 --- a/cli/azd/internal/agent/prompts/default_agent_format_instructions.txt +++ /dev/null @@ -1,40 +0,0 @@ -Answer the following questions or perform tasks as best you can. You have access to the following tools: - -IMPORTANT: Continue taking actions recursively until the task is completely finished. Do not stop after a single action if more work is needed to accomplish the user's goal. - -Follow this format exactly: - -Thought: [Analyze the current situation and what needs to be done] - -Thought: Do I need to use a tool? [Yes/No] -Action: [the action to take, should be one of [{{.tool_names}}]] -Action Input: [the input to the action] -Observation: [the result of the action] - -After each Observation, you MUST continue the cycle: - -Thought: [Evaluate the result and determine if the task is complete or if more actions are needed] - -If the task is NOT complete: - -Thought: Do I need to use a tool? Yes -Action: [next action to take] -Action Input: [input for the next action] -Observation: [result of the next action] -... (continue this cycle until the task is fully complete) - -If there are errors: - -Thought: [Analyze the error and determine how to fix it] -Thought: Do I need to use a tool? Yes -Action: [corrective action] -Action Input: [corrected input] -Observation: [result] -... (retry up to 3 times with different approaches if needed) - -Remember: Always continue taking actions until the user's request is fully satisfied. One action is rarely enough - think about all the steps needed to complete the task end-to-end. - -When you are done or handing control back to the user you MUST ALWAYS use the following format: - -Thought: Do I need to use a tool? No -AI: [briefly summarize your response without all the details from your observations] \ No newline at end of file diff --git a/cli/azd/internal/agent/prompts/default_agent_prefix.txt b/cli/azd/internal/agent/prompts/default_agent_prefix.txt deleted file mode 100644 index 0102a07432f..00000000000 --- a/cli/azd/internal/agent/prompts/default_agent_prefix.txt +++ /dev/null @@ -1,17 +0,0 @@ -You are an Azure Developer CLI (AZD) agent. -You are an expert is building, provisioning and deploying Azure applications. -Always use Azure best patterns and practices. - -Before starting your task initial task review available tools. -If any tools exist for best practices invoke the tool to learn more. -Incorporate learned best practices in your work. - -When any code generation is performed ALWAYS save content to files. -When filenames are not explicitly specified generate new files with meaningful names. 
- -TOOLS: ------- - -Agent has access to the following tools: - -{{.tool_descriptions}} diff --git a/cli/azd/internal/agent/prompts/default_agent_suffix.txt b/cli/azd/internal/agent/prompts/default_agent_suffix.txt deleted file mode 100644 index c469d53ce8e..00000000000 --- a/cli/azd/internal/agent/prompts/default_agent_suffix.txt +++ /dev/null @@ -1,8 +0,0 @@ -Begin! - -Previous conversation history: -{{.history}} - -Question: {{.input}} - -Thought:{{.agent_scratchpad}} \ No newline at end of file diff --git a/cli/azd/internal/agent/prompts/one_shot.txt b/cli/azd/internal/agent/prompts/one_shot.txt new file mode 100644 index 00000000000..890569e2bc5 --- /dev/null +++ b/cli/azd/internal/agent/prompts/one_shot.txt @@ -0,0 +1,78 @@ +You are an Azure Developer CLI (AZD) agent. +You are an expert in generating, building, provisioning, and deploying Azure applications. +Always follow Azure best patterns and practices. +Always automate as many tasks as possible. + +Before starting your initial task, review the available tools. +If any tools exist for best practices, invoke those tools to gather information. +Incorporate any learned best practices into your work. + +When generating code or configuration, ALWAYS save the output to a file. +If a filename is not explicitly provided, generate a meaningful and appropriate name automatically. + +--- + +**Efficiency and Token Usage Guidelines:** + +- Always minimize token usage when interacting with file-related tools. +- Do **not** request large directory globs like `**/*` or attempt to read full directories unless absolutely required. +- Instead, start with: + - High-level file listings (e.g., 1–2 levels deep) + - Only common project root files or config files + - Specific files by name or extension (.csproj, package.json, README.md) +- When reading files, limit the number of files and prefer smaller ones. +- Never request entire folders to be read in a single call. +- If you need to scan deeper, do so **incrementally** and **only if earlier reads indicate it's necessary.** +- When in doubt, prioritize **breadth first, then depth**. + +Failing to follow these heuristics may result in tool failures, token overuse, or excessive latency. + +--- + +You have access to the following tools: + +{{.tool_descriptions}} + +When responding, always use the following format: + +Question: [the input question you must answer] +Thought: [you should always think about what to do] +Action: [the action to take, must be one of [ {{.tool_names}} ]] +Action Input: [the input to the action] +Observation: [the result of the action] +... (this Thought → Action → Action Input → Observation sequence can repeat N times) +Thought: [I now know the final answer] +Final Answer: [the final answer to the original input question] + +--- + +**Important Behavioral Guidelines:** + +- After every Observation, reflect on whether it reveals additional work that must be done. New tasks may emerge from tool outputs — you must identify and complete them before finishing. +- Do **not** assume a task is complete after a single tool call unless you have verified that **all necessary work is complete**. +- Never skip steps or return a Final Answer prematurely. +- Always continue until all identified and implied tasks have been completed using the tools available. +- If the Observation hints at other subtasks, pursue them fully before concluding. 
+ +**Strict Output Format Rules (Do Not Violate):** + +You MUST follow this exact output structure for each tool invocation: + +Thought: [your thought] +Action: [tool name] +Action Input: [input to the tool] +Observation: [result from the tool] + +**Every** Action MUST be followed by an Observation — even if the result is empty, obvious, or a no-op. +Do NOT omit, reorder, or skip any part of this pattern. +Do NOT substitute summaries or explanations for an Observation. + +Only after completing all actions and observations may you finish with: + +Thought: I now know the final answer +Final Answer: [your full, final answer] + +Begin! + +Question: {{.input}} +{{.agent_scratchpad}} diff --git a/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go b/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go index d74111eaa53..0368400fabd 100644 --- a/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go +++ b/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go @@ -17,13 +17,15 @@ func (t *AzdArchitecturePlanningTool) Name() string { } func (t *AzdArchitecturePlanningTool) Description() string { - return ` - Performs Azure service selection and architecture planning for applications preparing for Azure Developer CLI (AZD) initialization. - This is Phase 2 of the AZD migration process that maps components to Azure services, plans hosting strategies, - and designs infrastructure architecture based on discovery results. + return `Returns instructions for selecting appropriate Azure services for discovered application components and designing infrastructure architecture. The LLM agent should execute these instructions using available tools. - Input: "./azd-arch-plan.md" - ` +Use this tool when: +- Discovery analysis has been completed and azd-arch-plan.md exists +- Application components have been identified and classified +- Need to map components to Azure hosting services +- Ready to plan containerization and database strategies + +Input: "./azd-arch-plan.md"` } func (t *AzdArchitecturePlanningTool) Call(ctx context.Context, input string) (string, error) { diff --git a/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go b/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go index 9e5764563f3..5b1f5adb84e 100644 --- a/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go +++ b/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go @@ -17,13 +17,15 @@ func (t *AzdAzureYamlGenerationTool) Name() string { } func (t *AzdAzureYamlGenerationTool) Description() string { - return ` - Generates the azure.yaml configuration file for Azure Developer CLI (AZD) projects. - This specialized tool focuses on creating service definitions, hosting configurations, - and deployment instructions. Can be used independently for service configuration updates. + return `Returns instructions for generating the azure.yaml configuration file with proper service hosting, build, and deployment settings for AZD projects. The LLM agent should execute these instructions using available tools. 
- Input: "./azd-arch-plan.md" - ` +Use this tool when: +- Architecture planning has been completed and Azure services selected +- Need to create or update azure.yaml configuration file +- Services have been mapped to Azure hosting platforms +- Ready to define build and deployment configurations + +Input: "./azd-arch-plan.md"` } func (t *AzdAzureYamlGenerationTool) Call(ctx context.Context, input string) (string, error) { diff --git a/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go b/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go index 1d1ae810d23..5e24f5727fc 100644 --- a/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go +++ b/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go @@ -17,13 +17,15 @@ func (t *AzdDiscoveryAnalysisTool) Name() string { } func (t *AzdDiscoveryAnalysisTool) Description() string { - return ` - Performs comprehensive discovery and analysis of applications to prepare them for Azure Developer CLI (AZD) initialization. - This is Phase 1 of the AZD migration process that analyzes codebase, identifies components and dependencies, - and creates a foundation for architecture planning. + return `Returns instructions for performing comprehensive discovery and analysis of application components to prepare for Azure Developer CLI (AZD) initialization. The LLM agent should execute these instructions using available tools. - Input: "./azd-arch-plan.md" - ` +Use this tool when: +- Starting Phase 1 of AZD migration process +- Need to identify all application components and dependencies +- Codebase analysis required before architecture planning +- azd-arch-plan.md does not exist or needs updating + +Input: "./azd-arch-plan.md"` } func (t *AzdDiscoveryAnalysisTool) Call(ctx context.Context, input string) (string, error) { diff --git a/cli/azd/internal/agent/tools/azd/azd_docker_generation.go b/cli/azd/internal/agent/tools/azd/azd_docker_generation.go index 18effd6c9e7..c22b590c779 100644 --- a/cli/azd/internal/agent/tools/azd/azd_docker_generation.go +++ b/cli/azd/internal/agent/tools/azd/azd_docker_generation.go @@ -17,13 +17,15 @@ func (t *AzdDockerGenerationTool) Name() string { } func (t *AzdDockerGenerationTool) Description() string { - return ` - Generates Dockerfiles and container configurations for Azure Developer CLI (AZD) projects. - This specialized tool focuses on containerization requirements, creating optimized Dockerfiles - for different programming languages, and configuring container-specific settings for Azure hosting. + return `Returns instructions for generating optimized Dockerfiles and container configurations for containerizable services in AZD projects. The LLM agent should execute these instructions using available tools. 
- Input: "./azd-arch-plan.md" - ` +Use this tool when: +- Architecture planning identified services requiring containerization +- azd-arch-plan.md shows Container Apps or AKS as selected hosting platform +- Need Dockerfiles for microservices, APIs, or containerized web applications +- Ready to implement containerization strategy + +Input: "./azd-arch-plan.md"` } func (t *AzdDockerGenerationTool) Call(ctx context.Context, input string) (string, error) { diff --git a/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go b/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go index 38d797365c8..2fe68dbeaeb 100644 --- a/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go +++ b/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go @@ -17,11 +17,15 @@ func (t *AzdIacGenerationRulesTool) Name() string { } func (t *AzdIacGenerationRulesTool) Description() string { - return ` - Gets the infrastructure as code (IaC) rules and best practices and patterns to use when generating bicep files and modules for use within AZD. + return `Returns comprehensive rules and guidelines for generating Bicep Infrastructure as Code files and modules for AZD projects. The LLM agent should reference these rules when generating infrastructure code. - Input: "./azd-arch-plan.md" - ` +Use this tool when: +- Generating any Bicep infrastructure templates for AZD projects +- Need compliance rules and naming conventions for Azure resources +- Creating modular, reusable Bicep files +- Ensuring security and operational best practices + +Input: "./azd-arch-plan.md"` } func (t *AzdIacGenerationRulesTool) Call(ctx context.Context, input string) (string, error) { diff --git a/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go b/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go index 0cc87372e87..b147a99b3aa 100644 --- a/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go +++ b/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go @@ -17,13 +17,14 @@ func (t *AzdInfrastructureGenerationTool) Name() string { } func (t *AzdInfrastructureGenerationTool) Description() string { - return ` - Generates Bicep infrastructure templates for Azure Developer CLI (AZD) projects. - This specialized tool focuses on creating modular Bicep templates, parameter files, - and implementing Azure security and operational best practices for infrastructure as code. + return `Returns instructions for generating modular Bicep infrastructure templates following Azure security and operational best practices for AZD projects. The LLM agent should execute these instructions using available tools. - Input: "./azd-arch-plan.md" - ` +Use this tool when: +- Architecture planning completed with Azure services selected +- Need to create Bicep infrastructure templates +- Ready to implement infrastructure as code for deployment + +Input: "./azd-arch-plan.md"` } func (t *AzdInfrastructureGenerationTool) Call(ctx context.Context, input string) (string, error) { diff --git a/cli/azd/internal/agent/tools/azd/azd_plan_init.go b/cli/azd/internal/agent/tools/azd/azd_plan_init.go index 3bddc9dbb31..0c246d46b92 100644 --- a/cli/azd/internal/agent/tools/azd/azd_plan_init.go +++ b/cli/azd/internal/agent/tools/azd/azd_plan_init.go @@ -17,11 +17,15 @@ func (t *AzdPlanInitTool) Name() string { } func (t *AzdPlanInitTool) Description() string { - return ` - Gets the required workflow steps and best practices and patterns for initializing or migrating an application to use AZD. 
+ return `Returns instructions for orchestrating complete AZD application initialization using structured phases with specialized tools. The LLM agent should execute these instructions using available tools. - Input: "./azd-arch-plan.md" - ` +Use this tool when: +- Starting new AZD project initialization or migration +- Need structured approach to transform application into AZD-compatible project +- Want to ensure proper sequencing of discovery, planning, and file generation +- Require complete project orchestration guidance + +Input: "./azd-arch-plan.md"` } func (t *AzdPlanInitTool) Call(ctx context.Context, input string) (string, error) { diff --git a/cli/azd/internal/agent/tools/azd/azd_project_validation.go b/cli/azd/internal/agent/tools/azd/azd_project_validation.go index de7639839a5..2a856a5596a 100644 --- a/cli/azd/internal/agent/tools/azd/azd_project_validation.go +++ b/cli/azd/internal/agent/tools/azd/azd_project_validation.go @@ -4,12 +4,10 @@ import ( "context" _ "embed" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) -//go:embed prompts/azd_project_validation.md -var azdProjectValidationPrompt string - // AzdProjectValidationTool validates an AZD project by running comprehensive checks on all components // including azure.yaml schema validation, Bicep template validation, environment setup, packaging, // and deployment preview. @@ -22,15 +20,20 @@ func (t *AzdProjectValidationTool) Name() string { // Description returns the description of the tool. func (t *AzdProjectValidationTool) Description() string { - return ` - Validates an AZD project by running comprehensive checks on all components including azure.yaml schema validation, Bicep template validation, environment setup, packaging, and deployment preview. + return `Returns instructions for validating AZD project by running comprehensive checks on azure.yaml schema, Bicep templates, environment setup, packaging, and deployment preview. The LLM agent should execute these instructions using available tools. + +Use this tool when: +- All AZD configuration files have been generated +- Ready to validate complete project before deployment +- Need to ensure azure.yaml, Bicep templates, and environment are properly configured +- Final validation step before running azd up - Input: "./azd-arch-plan.md"` +Input: "./azd-arch-plan.md"` } // Call executes the tool with the given arguments. func (t *AzdProjectValidationTool) Call(ctx context.Context, args string) (string, error) { - return azdProjectValidationPrompt, nil + return prompts.AzdProjectValidationPrompt, nil } // Ensure AzdProjectValidationTool implements the Tool interface. diff --git a/cli/azd/internal/agent/tools/azd/prompts/README.md b/cli/azd/internal/agent/tools/azd/prompts/README.md deleted file mode 100644 index 01c5a3ab0dd..00000000000 --- a/cli/azd/internal/agent/tools/azd/prompts/README.md +++ /dev/null @@ -1,199 +0,0 @@ -# AZD Modular Tools Overview - -This document provides an overview of the modular AZD initialization tools that replace the monolithic `azd_plan_init` tool. Each tool is designed to be used independently or as part of a complete AZD migration workflow. - -## Tool Structure - -The AZD initialization process has been broken down into focused, modular tools: - -### 1. 
Discovery and Analysis Tool (`azd_discovery_analysis`) - -**Purpose:** Analyze applications and identify components and dependencies -**Use When:** Starting a new AZD migration or need to understand an existing codebase -**Output:** Component inventory and dependency mapping in `azd-arch-plan.md` - -### 2. Architecture Planning Tool (`azd_architecture_planning`) - -**Purpose:** Select Azure services and plan hosting strategies -**Use When:** You have discovered components and need to plan Azure service mapping -**Prerequisites:** Completed discovery and analysis -**Output:** Architecture decisions and service selections in `azd-arch-plan.md` - -### 3. Azure.yaml Generation Tool (`azd_azure_yaml_generation`) - -**Purpose:** Generate azure.yaml service configuration file -**Use When:** You need to create or update just the service definitions -**Prerequisites:** Understanding of application services and hosting requirements -**Output:** Valid `azure.yaml` file - -### 4. Infrastructure Generation Tool (`azd_infrastructure_generation`) - -**Purpose:** Generate Bicep infrastructure templates -**Use When:** You need to create or update just the infrastructure components -**Prerequisites:** Architecture decisions about Azure services -**Output:** Complete Bicep template structure - -### 5. Docker Generation Tool (`azd_docker_generation`) - -**Purpose:** Generate Dockerfiles and container configurations -**Use When:** You need containerization for your services -**Prerequisites:** Understanding of application services and containerization needs -**Output:** Optimized Dockerfiles and .dockerignore files - -### 6. Project Validation Tool (`azd_project_validation`) - -**Purpose:** Validate the complete AZD project setup and configuration -**Use When:** All files are generated and you need to validate the setup -**Prerequisites:** All configuration files generated -**Output:** Validation report and ready-to-deploy confirmation - -## Complete Workflow - -For a full AZD migration, use the tools in this sequence: - -``` -1. azd_discovery_analysis - ↓ -2. azd_architecture_planning - ↓ -3a. azd_azure_yaml_generation -3b. azd_infrastructure_generation -3c. azd_docker_generation (if containerization needed) - ↓ -4. 
azd_project_validation -``` - -## Selective Usage - -You can also use individual tools for specific tasks: - -**Generate only azure.yaml:** -``` -azd_discovery_analysis → azd_azure_yaml_generation -``` - -**Generate only infrastructure:** -``` -azd_architecture_planning → azd_infrastructure_generation -``` - -**Add containerization:** -``` -azd_docker_generation (based on existing analysis) -``` - -**Validate existing project:** -``` -azd_project_validation (for validation and testing) -``` - -## Central Planning Document - -All tools use `azd-arch-plan.md` as the central planning document: - -- **Created by:** Discovery and Analysis tool -- **Updated by:** All subsequent tools -- **Purpose:** Track progress, document decisions, and maintain project state -- **Location:** Current working directory - -## Key Features - -### Modular Design -- Each tool has a specific responsibility -- Tools can be used independently or together -- Clear prerequisites and outputs -- Consistent documentation patterns - -### Azure Best Practices -- All tools implement Azure best practices -- Security-first approach -- Cost optimization considerations -- Operational excellence patterns - -### LLM Optimized -- Clear, actionable instructions -- Structured output formats -- Comprehensive validation steps -- Troubleshooting guidance - -### Progress Tracking -- Checkboxes for completed actions -- Clear success criteria -- Validation requirements -- Next step guidance - -## Tool Selection Guide - -**Use the Discovery Tool when:** -- Starting a new AZD migration -- Don't understand the application structure -- Need to document existing architecture -- Want to identify all components and dependencies - -**Use the Architecture Planning Tool when:** -- Have component inventory -- Need to select Azure services -- Planning hosting strategies -- Designing infrastructure architecture - -**Use the File Generation Tool when:** -- Have architecture decisions -- Need to create all AZD files -- Want complete project setup -- Ready to implement infrastructure - -**Use the Environment Initialization Tool when:** -- All files are generated -- Ready to create AZD environment -- Need to validate complete setup -- Preparing for deployment - -**Use the Azure.yaml Generation Tool when:** -- Only need service configuration -- Updating existing azure.yaml -- Working with known service requirements -- Quick service definition setup - -**Use the Infrastructure Generation Tool when:** -- Only need Bicep templates -- Updating existing infrastructure -- Working with specific Azure service requirements -- Advanced infrastructure customization - -## Benefits of Modular Approach - -### For Users -- **Faster iterations:** Update only what you need -- **Better understanding:** Focus on one aspect at a time -- **Reduced complexity:** Smaller, focused tasks -- **Flexible workflow:** Use tools in different orders based on needs - -### For LLMs -- **Clearer context:** Each tool has specific scope -- **Better accuracy:** Focused instructions reduce errors -- **Improved validation:** Tool-specific validation steps -- **Enhanced troubleshooting:** Targeted problem resolution - -### For Maintenance -- **Easier updates:** Modify individual tools without affecting others -- **Better testing:** Test each tool independently -- **Clearer documentation:** Each tool is self-contained -- **Improved reusability:** Tools can be repurposed for different scenarios - -## Migration from Original Tool - -If you were using the original `azd_plan_init` tool, here's how to 
migrate: - -**Original Phase 1 (Discovery and Analysis):** -Use `azd_discovery_analysis` tool - -**Original Phase 2 (Architecture Planning):** -Use `azd_architecture_planning` tool - -**Original Phase 3 (File Generation):** -Use `azd_azure_yaml_generation` + `azd_infrastructure_generation` + `azd_docker_generation` for focused file generation - -**Original Phase 4 (Project Validation):** -Use `azd_project_validation` tool for final validation and setup verification - -The modular tools provide the same functionality with improved focus and flexibility. diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_architecture_planning.md b/cli/azd/internal/agent/tools/azd/prompts/azd_architecture_planning.md index b85778ecb24..904f75ffa95 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_architecture_planning.md +++ b/cli/azd/internal/agent/tools/azd/prompts/azd_architecture_planning.md @@ -1,165 +1,133 @@ -# AZD Architecture Planning Tool +# AZD Architecture Planning Instructions -This tool performs Azure service selection and architecture planning for Azure Developer CLI (AZD) initialization. This is Phase 2 of the AZD migration process. +✅ **Agent Task List** -## Overview +1. Read `azd-arch-plan.md` to understand discovered components +2. For each component, select optimal Azure service using selection criteria below +3. Plan containerization strategy for applicable services +4. Select appropriate database and messaging services +5. Design resource group organization and networking approach +6. Generate IaC file checklist based on selected Azure services +7. Generate Docker file checklist based on containerization strategy +8. Create `azd-arch-plan.md` if it doesn't exist, or update existing file with service mapping table, architecture decisions, IaC checklist, and Docker checklist while preserving existing content -Use discovery results to select appropriate Azure services, plan hosting strategies, and design infrastructure architecture. +📄 **Required Outputs** -**IMPORTANT:** Before starting, review the `azd-arch-plan.md` file in your current working directory to understand discovered components and dependencies from the discovery phase. 
+- Create `azd-arch-plan.md` if missing, or update existing file with Azure Service Mapping Table showing Component | Current Tech | Azure Service | Rationale +- Hosting strategy summary documenting decisions for each component (preserve existing content) +- Containerization plans for applicable services (preserve existing content) +- Infrastructure architecture design including resource organization and networking (preserve existing content) +- **IaC File Generation Checklist** listing all Bicep files that need to be created based on selected services (add to existing file) +- **Docker File Generation Checklist** listing all Docker files needed for containerized services (add to existing file) -## Success Criteria +🧠 **Execution Guidelines** -- [ ] Azure service selections made for all components -- [ ] Hosting strategies defined for each service -- [ ] Containerization plans documented -- [ ] Infrastructure architecture designed -- [ ] Ready to proceed to file generation phase +**Azure Service Selection Criteria:** -## Azure Service Selection +**Azure Container Apps (PREFERRED)** - Use for microservices, containerized applications, event-driven workloads with auto-scaling needs -**REQUIRED ANALYSIS:** +**Azure Kubernetes Service (AKS)** - Use for complex containerized applications requiring full Kubernetes control, advanced networking, custom operators -For each discovered application component, select the most appropriate Azure hosting platform: +**Azure App Service** - Use for web applications, REST APIs needing specific runtime versions or Windows-specific features -### Azure Container Apps (PREFERRED) +**Azure Functions** - Use for event processing, scheduled tasks, lightweight APIs with pay-per-execution model -**Use for:** Microservices, containerized applications, event-driven workloads -**Benefits:** Auto-scaling, managed Kubernetes, simplified deployment -**Consider when:** Component can be containerized, needs elastic scaling +**Azure Static Web Apps** - Use for frontend SPAs, static sites, JAMstack applications with minimal backend needs -### Azure App Service +**Database Service Selection:** -**Use for:** Web applications, REST APIs with specific runtime needs -**Benefits:** Managed platform, built-in CI/CD, easy SSL/custom domains -**Consider when:** Need specific runtime versions, Windows-specific features +- Azure SQL Database: SQL Server compatibility, complex queries, ACID compliance +- Azure Database for PostgreSQL/MySQL: Specific engine compatibility required +- Azure Cosmos DB: NoSQL requirements, global scale, flexible schemas +- Azure Cache for Redis: Application caching, session storage, real-time analytics -### Azure Functions +**Messaging Service Selection:** -**Use for:** Event processing, scheduled tasks, lightweight APIs -**Benefits:** Serverless, automatic scaling, pay-per-execution -**Consider when:** Event-driven processing, stateless operations +- Azure Service Bus: Enterprise messaging, guaranteed delivery, complex routing +- Azure Event Hubs: High-throughput event streaming, telemetry ingestion +- Azure Event Grid: Event-driven architectures, reactive programming -### Azure Static Web Apps +**IaC File Checklist Generation:** -**Use for:** Frontend SPAs, static sites, JAMstack applications -**Benefits:** Global CDN, built-in authentication, API integration -**Consider when:** Static content, minimal backend requirements +Based on selected Azure services, generate a checklist of required Bicep files to be created: -## Selection Criteria +**Always 
Required:** -**REQUIRED ANALYSIS:** +- [ ] `./infra/main.bicep` - Primary deployment template (subscription scope) +- [ ] `./infra/main.parameters.json` - Parameter defaults +- [ ] `./infra/modules/monitoring.bicep` - Log Analytics and Application Insights -For each discovered component, consider: +**Service-Specific Modules (include based on service selection):** -- Scalability requirements and traffic patterns -- Runtime and platform needs -- Operational complexity preferences -- Cost considerations -- Team expertise and preferences +- [ ] `./infra/modules/container-apps.bicep` - If Container Apps selected +- [ ] `./infra/modules/app-service.bicep` - If App Service selected +- [ ] `./infra/modules/functions.bicep` - If Azure Functions selected +- [ ] `./infra/modules/static-web-app.bicep` - If Static Web Apps selected +- [ ] `./infra/modules/aks.bicep` - If AKS selected +- [ ] `./infra/modules/database.bicep` - If SQL/PostgreSQL/MySQL selected +- [ ] `./infra/modules/cosmosdb.bicep` - If Cosmos DB selected +- [ ] `./infra/modules/storage.bicep` - If Storage Account needed +- [ ] `./infra/modules/keyvault.bicep` - If Key Vault needed (recommended) +- [ ] `./infra/modules/servicebus.bicep` - If Service Bus selected +- [ ] `./infra/modules/eventhub.bicep` - If Event Hubs selected +- [ ] `./infra/modules/redis.bicep` - If Redis Cache selected +- [ ] `./infra/modules/container-registry.bicep` - If container services selected -## Containerization Planning +**Example IaC Checklist Output:** -**REQUIRED ASSESSMENT:** - -For each component, determine: - -- **Containerization Feasibility:** Can it run in Docker? Windows-specific dependencies? -- **Docker Strategy:** Base image selection, port mappings, environment variables -- **Resource Requirements:** CPU, memory, storage needs -- **Health Check Strategy:** Endpoint patterns for monitoring - -## Data Storage Planning - -**REQUIRED ANALYSIS:** - -Select appropriate Azure database services: - -### Azure SQL Database - -**Use for:** SQL Server compatibility, complex queries, ACID compliance -**Consider when:** Relational data model, existing SQL Server applications - -### Azure Database for PostgreSQL/MySQL - -**Use for:** PostgreSQL/MySQL workloads, web applications -**Consider when:** Specific database engine compatibility required - -### Azure Cosmos DB - -**Use for:** NoSQL requirements, global scale, flexible schemas -**Consider when:** Multiple data models, global distribution needed - -### Azure Cache for Redis - -**Use for:** Application caching, session storage, real-time analytics -**Consider when:** Performance optimization, session management - -## Messaging and Integration Planning - -**REQUIRED ANALYSIS:** - -Select messaging services based on patterns: - -### Azure Service Bus - -**Use for:** Enterprise messaging, guaranteed delivery, complex routing -**Consider when:** Reliable messaging, enterprise scenarios - -### Azure Event Hubs - -**Use for:** High-throughput event streaming, telemetry ingestion -**Consider when:** Big data scenarios, real-time analytics - -### Azure Event Grid - -**Use for:** Event-driven architectures, reactive programming -**Consider when:** Decoupled systems, serverless architectures - -## Update Architecture Documentation +```markdown +## Infrastructure as Code File Checklist -**REQUIRED ACTIONS:** +Based on the selected Azure services, the following Bicep files need to be generated: -Update `azd-arch-plan.md` with: +### Core Files (Always Required) +- [ ] `./infra/main.bicep` - Primary deployment template 
+- [ ] `./infra/main.parameters.json` - Parameter defaults +- [ ] `./infra/modules/monitoring.bicep` - Observability stack -### Azure Service Mapping Table +### Service-Specific Modules +- [ ] `./infra/modules/container-apps.bicep` - For web API hosting +- [ ] `./infra/modules/database.bicep` - For PostgreSQL database +- [ ] `./infra/modules/keyvault.bicep` - For secrets management +- [ ] `./infra/modules/container-registry.bicep` - For container image storage -```markdown -| Component | Current Tech | Azure Service | Rationale | -|-----------|-------------|---------------|-----------| -| Web App | React | Static Web Apps | Frontend SPA | -| API Service | Node.js | Container Apps | Microservice architecture | -| Database | PostgreSQL | Azure Database for PostgreSQL | Existing dependency | +Total files to generate: 7 ``` -### Hosting Strategy Summary +**Docker File Checklist Generation:** -- Document hosting decisions for each component -- Include containerization plans where applicable -- Note resource requirements and scaling strategies +Based on selected Azure services and containerization strategy, generate a checklist of required Docker files: -### Infrastructure Architecture +**Container-Based Services (include based on service selection):** -- Resource group organization strategy -- Networking and security design approach -- Monitoring and logging strategy -- Integration patterns between services +- [ ] `{service-path}/Dockerfile` - If Container Apps, AKS, or containerized App Service selected +- [ ] `{service-path}/.dockerignore` - For each containerized service -### Next Steps Checklist +**Example Docker Checklist Output:** -- [ ] Azure service selected for each component with rationale -- [ ] Hosting strategies defined -- [ ] Containerization plans documented -- [ ] Data storage strategies planned -- [ ] Ready to proceed to file generation phase +```markdown +## Docker File Generation Checklist -## Next Phase +Based on the containerization strategy, the following Docker files need to be generated: -After completing architecture planning, proceed to the appropriate file generation tool: +### Service Dockerfiles +- [ ] `./api/Dockerfile` - For Node.js API service (Container Apps) +- [ ] `./api/.dockerignore` - Exclude unnecessary files from API container +- [ ] `./frontend/Dockerfile` - For React frontend (containerized App Service) +- [ ] `./frontend/.dockerignore` - Exclude unnecessary files from frontend container -- Use `azd_azure_yaml_generation` tool for azure.yaml configuration -- Use `azd_infrastructure_generation` tool for Bicep templates -- Use `azd_docker_generation` tool for container configurations -- Use `azd_project_validation` tool for final project validation +Total Docker files to generate: 4 +``` -**IMPORTANT:** Keep `azd-arch-plan.md` updated as the central reference for all architecture decisions. This document guides subsequent phases and serves as implementation documentation. 
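+
+The hosting decisions that drive both checklists can also be recorded in `azd-arch-plan.md` as a short mapping table. The rows below are illustrative only; substitute the components discovered during analysis:
+
+```markdown
+| Component | Current Tech | Azure Service | Rationale |
+|-----------|-------------|---------------|-----------|
+| Web App | React | Static Web Apps | Frontend SPA |
+| API Service | Node.js | Container Apps | Microservice architecture |
+| Database | PostgreSQL | Azure Database for PostgreSQL | Existing dependency |
+```
+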
+📌 **Completion Checklist** + +- [ ] Azure service selected for each discovered component with documented rationale +- [ ] Hosting strategies defined and documented in `azd-arch-plan.md` +- [ ] Containerization plans documented for applicable services +- [ ] Data storage strategies planned and documented +- [ ] Resource group organization strategy defined +- [ ] Integration patterns between services documented +- [ ] **IaC file checklist generated** and added to `azd-arch-plan.md` based on selected services +- [ ] **Docker file checklist generated** and added to `azd-arch-plan.md` based on containerization strategy +- [ ] `azd-arch-plan.md` created or updated while preserving existing content +- [ ] Ready to proceed to infrastructure generation phase diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md index 84a7618ea0a..7496351c81e 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md +++ b/cli/azd/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md @@ -1,67 +1,37 @@ -# AZD Azure.yaml Generation Tool +# AZD Azure.yaml Generation Instructions -This specialized tool generates the `azure.yaml` configuration file for Azure Developer CLI (AZD) projects. +✅ **Agent Task List** -## Overview +1. Check if `azd-arch-plan.md` exists and review architecture decisions +2. Identify all application services (frontend, backend, functions, etc.) +3. Determine hosting requirements for each service based on Azure service selections +4. Analyze build requirements (language, package manager, build commands) +5. Create complete `azure.yaml` file in root directory following required patterns +6. Validate file against AZD schema using available tools +7. Update existing `azd-arch-plan.md` with generated configuration details while preserving existing content -Generate a valid `azure.yaml` configuration file with proper service hosting, build, and deployment settings. +📄 **Required Outputs** -**IMPORTANT:** Before starting, check if `azd-arch-plan.md` exists in your current working directory. If it exists, review it to understand previous analysis and architecture decisions. Use the existing `azd_yaml_schema` tool for schema validation. +- Valid `azure.yaml` file created in root directory +- Service configurations matching Azure service selections from architecture planning +- Build and deployment instructions for all services +- Configuration validated against AZD schema +- Update existing `azd-arch-plan.md` with configuration details while preserving existing content -## Success Criteria +🧠 **Execution Guidelines** -- [ ] Valid `azure.yaml` file created in root directory -- [ ] All application services properly configured -- [ ] Service hosting configurations match Azure service selections -- [ ] Build and deployment instructions complete -- [ ] File validates against AZD schema (use `azd_yaml_schema` tool) - -## Service Analysis Requirements - -**REQUIRED ACTIONS:** - -1. **Identify Application Services:** - - Frontend applications (React, Angular, Vue.js, static sites) - - Backend services (REST APIs, microservices, GraphQL, gRPC) - - Function-based services (Azure Functions) - - Background services and workers - -2. 
**Determine Hosting Requirements:** - - **Container Apps:** Microservices, APIs, containerized web apps - - **App Service:** Traditional web applications, APIs - - **Static Web Apps:** Frontend SPAs, static sites - - **Functions:** Event-driven, serverless workloads - -3. **Analyze Build Requirements:** - - Programming language and framework - - Package manager (npm, pip, dotnet, maven) - - Build commands and output directories - - Dependency management needs +**Service Analysis Requirements:** -## Azure.yaml Configuration Requirements +Identify and configure these service types: -**REQUIRED ACTIONS:** +- **Frontend applications:** React, Angular, Vue.js, static sites +- **Backend services:** REST APIs, microservices, GraphQL, gRPC +- **Function-based services:** Azure Functions for event-driven workloads +- **Background services:** Workers and long-running processes -Create a complete `azure.yaml` file in the root directory following these patterns: +**Hosting Configuration Patterns:** -### Basic Structure Requirements - -**IMPORTANT:** Use the `azd_yaml_schema` tool for complete schema definition, structure requirements, and validation rules. - -Basic structure: - -```yaml -name: [project-name] -services: - # Service configurations -infra: - provider: bicep - path: infra -``` - -### Service Configuration Patterns - -**Azure Container Apps (for microservices, APIs, containerized apps):** +**Azure Container Apps** (for microservices, APIs, containerized apps): ```yaml services: @@ -70,10 +40,10 @@ services: language: js host: containerapp docker: - path: ./src/api/Dockerfile + path: ./Dockerfile ``` -**Azure App Service (for traditional web apps):** +**Azure App Service** (for traditional web apps): ```yaml services: @@ -83,7 +53,7 @@ services: host: appservice ``` -**Azure Functions (for serverless workloads):** +**Azure Functions** (for serverless workloads): ```yaml services: @@ -93,7 +63,7 @@ services: host: function ``` -**Azure Static Web Apps (for SPAs, static sites):** +**Azure Static Web Apps** (for SPAs, static sites): ```yaml services: @@ -104,97 +74,28 @@ services: dist: build ``` -### Advanced Configuration Options - -**Environment Variables:** - -```yaml -services: - api: - env: - - name: NODE_ENV - value: production - - name: DATABASE_URL - value: "{{ .Env.DATABASE_URL }}" -``` - -**Custom Build Commands:** - -```yaml -services: - frontend: - hooks: - prebuild: - posix: npm install - build: - posix: npm run build -``` - -## Configuration Requirements - -**CRITICAL REQUIREMENTS:** +**Critical Configuration Requirements:** -- Service names must be valid Azure resource names (alphanumeric, hyphens only) +- Service names must be alphanumeric with hyphens only - All `project` paths must point to existing directories -- All `docker.path` references must point to existing Dockerfiles +- All `docker.path` references must point to existing Dockerfiles **relative to the service project path** - Host types must be: `containerapp`, `appservice`, `function`, or `staticwebapp` - Language must match detected programming language - `dist` paths must match build output directories -## Validation Requirements - -**VALIDATION STEPS:** - -1. **Schema Validation:** Use `azd_yaml_schema` tool for authoritative schema validation -2. **Path Validation:** Ensure all referenced paths exist -3. 
**Configuration Testing:** Run `azd show` to test service discovery - -**Validation Commands:** - -```bash -# Validate configuration -azd config show - -# Test service discovery -azd show -``` - -## Common Patterns +**Important Note:** For Container Apps with Docker configurations, the `docker.path` is relative to the service's `project` directory, not the repository root. For example, if your service project is `./src/api` and the Dockerfile is located at `./src/api/Dockerfile`, the `docker.path` should be `./Dockerfile`. -**Multi-Service Microservices:** +**Advanced Configuration Options:** -- Frontend: Static Web App -- APIs: Container Apps with Dockerfiles -- Background Services: Container Apps or Functions +- Environment variables using `${VARIABLE_NAME}` syntax +- Custom commands using hooks (prebuild, postbuild, prepackage, postpackage, preprovision, postprovision) +- Service dependencies and startup order -**Full-Stack Application:** +📌 **Completion Checklist** -- Frontend: Static Web App -- Backend: Container App or App Service - -**Serverless Application:** - -- Frontend: Static Web App -- APIs: Azure Functions - -## Update Documentation - -**REQUIRED ACTIONS:** - -Update `azd-arch-plan.md` with: - -- Generated azure.yaml location and schema version -- Service configuration table (service, type, host, language, path) -- Hosting strategy summary by Azure service type -- Build and deployment configuration decisions -- Docker configuration details -- Validation results - -## Next Steps - -After azure.yaml generation is complete: - -1. Validate configuration using `azd_yaml_schema` tool -2. Test service discovery with `azd show` - -**IMPORTANT:** Reference existing tools for specific functionality. Use `azd_yaml_schema` for schema validation. +- [ ] Valid `azure.yaml` file created in root directory +- [ ] All discovered services properly configured with correct host types +- [ ] Service hosting configurations match Azure service selections from architecture planning +- [ ] Build and deployment instructions complete for all services +- [ ] File validates against any available AZD schema tools +- [ ] `azd-arch-plan.md` updated with configuration details while preserving existing content diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_discovery_analysis.md b/cli/azd/internal/agent/tools/azd/prompts/azd_discovery_analysis.md index 10ff9e4e49c..d14f99ca52e 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_discovery_analysis.md +++ b/cli/azd/internal/agent/tools/azd/prompts/azd_discovery_analysis.md @@ -1,200 +1,66 @@ -# AZD Application Discovery and Analysis Tool +# AZD Application Discovery and Analysis Instructions -This tool performs comprehensive discovery and analysis of applications to prepare them for Azure Developer CLI (AZD) initialization. This is Phase 1 of the AZD migration process. +✅ **Agent Task List** -Always use Azure best practices with intelligent defaults. +1. Check if `azd-arch-plan.md` exists and review previous analysis if present +2. Scan current directory recursively for all files and document structure +3. Identify programming languages, frameworks, and configuration files +4. Classify discovered components by type (web apps, APIs, databases, etc.) +5. Map dependencies and communication patterns between components +6. 
Create `azd-arch-plan.md` if it doesn't exist, or update existing file with complete discovery report while preserving existing content -## Overview +📄 **Required Outputs** -This tool analyzes your current codebase and architecture to: -1. Identify all application components and dependencies -2. Classify components by type and hosting requirements -3. Map dependencies and communication patterns -4. Provide foundation for architecture planning +- Complete file system inventory documented in `azd-arch-plan.md` (create file if missing, update existing while preserving content) +- Component classification table with Type | Technology | Location | Purpose (add to existing file) +- Dependency map showing inter-component communication (add to existing file) +- External dependencies list with required environment variables (add to existing file) +- Discovery report ready for architecture planning phase -**IMPORTANT:** Before starting, check if `azd-arch-plan.md` exists in your current working directory. If it exists, review it to understand what analysis has already been completed and build upon that work. +🧠 **Execution Guidelines** -## Success Criteria +**File System Analysis - Document:** -The discovery and analysis is successful when: +- Programming languages and frameworks detected +- Configuration files (package.json, requirements.txt, pom.xml, Dockerfile, docker-compose.yml) +- API endpoints, service definitions, application entry points +- Database configurations and connection strings +- CI/CD pipeline files (.github/workflows, azure-pipelines.yml) +- Documentation files and existing architecture docs -- [ ] Complete file system inventory is documented -- [ ] All application components are identified and classified -- [ ] Component dependencies are mapped -- [ ] Results are documented in `azd-arch-plan.md` -- [ ] Ready to proceed to architecture planning phase +**Component Classification Categories:** -## Step 1: Deep File System Analysis +- **Web Applications:** React/Angular/Vue.js apps, static sites, server-rendered apps +- **API Services:** REST APIs, GraphQL endpoints, gRPC services, microservices +- **Background Services:** Message queue processors, scheduled tasks, data pipelines +- **Databases:** SQL/NoSQL databases, caching layers, migration scripts +- **Messaging Systems:** Message queues, event streaming, pub/sub systems +- **AI/ML Components:** Models, inference endpoints, training pipelines +- **Supporting Services:** Authentication, logging, monitoring, configuration -**REQUIRED ACTIONS:** +**Dependency Analysis - Identify:** -- Scan all files in the current working directory recursively -- Document file structure, programming languages, and frameworks detected -- Identify configuration files (package.json, requirements.txt, pom.xml, etc.) -- Locate any existing Docker files, docker-compose files, or containerization configs -- Find database configuration files and connection strings -- Identify API endpoints, service definitions, and application entry points -- Look for existing CI/CD pipeline files (.github/workflows, azure-pipelines.yml, etc.) 
-- Identify documentation files (README.md, API docs, architecture docs) +- Internal dependencies (component-to-component communication) +- External dependencies (third-party APIs, SaaS services) +- Data dependencies (shared databases, file systems, caches) +- Configuration dependencies (shared settings, secrets, environment variables) +- Runtime dependencies (required services for startup) -**ANALYSIS QUESTIONS TO ANSWER:** - -- What programming languages and frameworks are used? -- What build systems and package managers are in use? -- Are there existing containerization configurations? -- What ports and endpoints are exposed? -- What external dependencies are required? -- Are there existing deployment or infrastructure configurations? - -**OUTPUT:** Complete inventory of all discoverable application artifacts - -## Step 2: Component Classification - -**REQUIRED ACTIONS:** - -Categorize each discovered component into one of these types: - -- **Web Applications** (frontend, SPA, static sites) - - React, Angular, Vue.js applications - - Static HTML/CSS/JavaScript sites - - Server-rendered web applications - -- **API Services** (REST APIs, GraphQL, gRPC services) - - RESTful web APIs - - GraphQL endpoints - - gRPC services - - Microservices - -- **Background Services** (workers, processors, scheduled jobs) - - Message queue processors - - Scheduled task runners - - Data processing pipelines - - Event handlers - -- **Databases** (relational, NoSQL, caching) - - SQL Server, PostgreSQL, MySQL databases - - NoSQL databases (MongoDB, CosmosDB) - - Caching layers (Redis, Memcached) - - Database migration scripts - -- **Messaging Systems** (queues, topics, event streams) - - Message queues - - Event streaming platforms - - Pub/sub systems - -- **AI/ML Components** (models, inference endpoints, training jobs) - - Machine learning models - - AI inference endpoints - - Training pipelines - - Data preprocessing services - -- **Supporting Services** (authentication, logging, monitoring) - - Authentication services - - Logging aggregators - - Monitoring and metrics - - Configuration services - -**CLASSIFICATION CRITERIA:** - -For each component, determine: -- Primary function and responsibility -- Runtime requirements -- Scalability needs -- Security considerations -- Integration points - -**OUTPUT:** Structured component inventory with classifications - -## Step 3: Dependency Mapping - -**REQUIRED ACTIONS:** - -- Map inter-component dependencies and communication patterns -- Identify external service dependencies (third-party APIs, SaaS services) -- Document data flow between components -- Identify shared resources and configuration -- Analyze network communication requirements -- Document authentication and authorization flows - -**DEPENDENCY ANALYSIS:** - -- **Internal Dependencies:** How components communicate with each other -- **External Dependencies:** Third-party services, APIs, databases -- **Data Dependencies:** Shared databases, file systems, caches -- **Configuration Dependencies:** Shared settings, secrets, environment variables -- **Runtime Dependencies:** Required services for startup and operation - -**COMMUNICATION PATTERNS TO IDENTIFY:** +**Communication Patterns to Document:** - Synchronous HTTP/HTTPS calls -- Asynchronous messaging -- Database connections -- File system access -- Caching patterns -- Authentication flows - -**OUTPUT:** Component dependency graph and communication matrix - -## Step 4: Generate Discovery Report - -**REQUIRED ACTIONS:** - -Create or update 
`azd-arch-plan.md` with the following sections: - -```markdown -# AZD Architecture Plan - -## Discovery and Analysis Results - -### Application Overview -- [Summary of application type and purpose] -- [Key technologies and frameworks identified] -- [Overall architecture pattern (monolith, microservices, etc.)] - -### Component Inventory -[For each component discovered:] -- **Component Name:** [name] -- **Type:** [classification] -- **Technology:** [language/framework] -- **Location:** [file path/directory] -- **Purpose:** [brief description] -- **Entry Points:** [how component is accessed] -- **Configuration:** [key config files] - -### Dependency Map -[Visual or text representation of dependencies] -- **Component A** → **Component B** (HTTP API) -- **Component B** → **Database** (SQL connection) -- **Component A** → **External API** (REST calls) - -### External Dependencies -- [List of third-party services] -- [Required environment variables] -- [External configuration requirements] - -### Next Steps -- [ ] Review discovery results -- [ ] Proceed to architecture planning phase -- [ ] Use `azd_architecture_planning` tool -``` - -## Validation and Next Steps - -**VALIDATION CHECKLIST:** - -- [ ] All major application components identified -- [ ] Component types and technologies documented -- [ ] Dependencies mapped and understood -- [ ] External services and APIs catalogued -- [ ] `azd-arch-plan.md` created or updated with findings - -**NEXT PHASE:** - -After completing this discovery phase, proceed to the **Architecture Planning** phase using the `azd_architecture_planning` tool. This next phase will use your discovery results to: - -- Select appropriate Azure services for each component -- Plan hosting strategies and containerization -- Design infrastructure architecture -- Prepare for configuration file generation - -**IMPORTANT:** Keep the `azd-arch-plan.md` file updated throughout the process as it serves as the central planning document for your AZD migration. +- Asynchronous messaging patterns +- Database connections and data access +- File system access patterns +- Caching patterns and session management +- Authentication and authorization flows + +📌 **Completion Checklist** + +- [ ] Complete inventory of all discoverable application artifacts documented +- [ ] All major application components identified and classified by type +- [ ] Component technologies and frameworks documented with file locations +- [ ] Dependencies mapped and communication patterns understood +- [ ] External services and APIs catalogued with requirements +- [ ] `azd-arch-plan.md` created or updated with comprehensive findings while preserving existing content +- [ ] Ready to proceed to architecture planning phase using `azd_architecture_planning` tool diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_docker_generation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_docker_generation.md index 38091d58d9a..b09aee91625 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_docker_generation.md +++ b/cli/azd/internal/agent/tools/azd/prompts/azd_docker_generation.md @@ -1,174 +1,115 @@ -# AZD Docker Generation Tool +# AZD Docker Generation Instructions -This specialized tool generates Dockerfiles and container configurations for Azure Developer CLI (AZD) projects. +✅ **Agent Task List** -## Overview +1. Read the **Docker File Generation Checklist** from `azd-arch-plan.md` +2. Identify containerizable services and required Docker files from the checklist +3. 
Detect programming language and framework for each containerizable service +4. Generate each Docker file specified in the checklist following language-specific best practices +5. Create .dockerignore files for build optimization +6. Implement health checks and security configurations +7. Update the Docker checklist section in existing `azd-arch-plan.md` by marking completed items as [x] while preserving existing content -Generate optimized Dockerfiles for different programming languages and frameworks with Azure Container Apps best practices. +📄 **Required Outputs** -**IMPORTANT:** Before starting, check if `azd-arch-plan.md` exists in your current working directory. If it exists, review it to understand discovered services and containerization requirements. +- All Docker files listed in the Docker File Generation Checklist from `azd-arch-plan.md` +- Dockerfiles created for all containerizable services +- .dockerignore files generated for each service +- Health check endpoints implemented +- Multi-stage builds with security best practices +- Update existing `azd-arch-plan.md` Docker checklist by marking completed items as [x] while preserving existing content -## Success Criteria +🧠 **Execution Guidelines** -- [ ] Dockerfiles created for all containerizable services -- [ ] .dockerignore files generated for build optimization -- [ ] Health checks and security configurations implemented -- [ ] Multi-stage builds used where appropriate -- [ ] Azure Container Apps best practices followed +**Read Docker Checklist:** -## Containerization Requirements Analysis +- Read the "Docker File Generation Checklist" section from `azd-arch-plan.md` +- This checklist specifies exactly which Docker files need to be generated +- Use this as the authoritative source for what to create +- Follow the exact file paths specified in the checklist -**REQUIRED ACTIONS:** +**Generate Files in Order:** -1. **Identify Containerization Candidates:** - - Microservices and APIs (REST, GraphQL, gRPC) - - Web applications needing runtime flexibility - - Background services and workers - - Custom applications with specific runtime requirements +- Create service Dockerfiles first (e.g., `{service-path}/Dockerfile`) +- Create corresponding .dockerignore files for each service (e.g., `{service-path}/.dockerignore`) +- Follow the exact file paths specified in the checklist from `azd-arch-plan.md` -2. **Services That Don't Need Containerization:** - - Static websites (use Azure Static Web Apps) - - Azure Functions (serverless, managed runtime) - - Database services (use managed Azure databases) +**Containerization Candidates:** -3. **Language and Framework Detection:** - - Programming language (Node.js, Python, .NET, Java, Go, etc.) - - Framework type (Express, FastAPI, ASP.NET Core, Spring Boot) - - Build requirements (npm, pip, dotnet, maven, gradle) - - Runtime dependencies and port configurations -- **Programming language** (Node.js, Python, .NET, Java, Go, etc.) 
+- **Include:** Microservices, REST APIs, GraphQL services, web applications, background workers +- **Exclude:** Static websites (use Static Web Apps), Azure Functions (serverless), databases (use managed services) -## Dockerfile Generation Requirements - -**REQUIRED ACTIONS:** - -For each containerizable service, generate optimized Dockerfiles following these patterns: - -### Language-Specific Requirements +**Language-Specific Dockerfile Patterns:** **Node.js Applications:** -- Use `node:18-alpine` base image -- Implement multi-stage build (build + runtime) + +- Base image: `node:18-alpine` +- Multi-stage build (build + runtime) - Copy package*.json first for layer caching - Use `npm ci --only=production` -- Create non-root user (`nodejs`) -- Expose appropriate port (typically 3000) -- Include health check endpoint -- Use `CMD ["npm", "start"]` +- Non-root user: `nodejs` +- Expose port 3000, health check `/health` **Python Applications:** -- Use `python:3.11-slim` base image -- Set environment variables: `PYTHONDONTWRITEBYTECODE=1`, `PYTHONUNBUFFERED=1` -- Copy requirements.txt first for caching + +- Base image: `python:3.11-slim` +- Environment: `PYTHONDONTWRITEBYTECODE=1`, `PYTHONUNBUFFERED=1` +- Copy requirements.txt first - Use `pip install --no-cache-dir` -- Create non-root user (`appuser`) -- Expose appropriate port (typically 8000) -- Include health check endpoint -- Use appropriate startup command (uvicorn, gunicorn, etc.) +- Non-root user: `appuser` +- Expose port 8000, health check `/health` **.NET Applications:** -- Use `mcr.microsoft.com/dotnet/sdk:8.0` for build stage -- Use `mcr.microsoft.com/dotnet/aspnet:8.0` for runtime -- Multi-stage build: restore → build → publish → runtime -- Copy .csproj first for layer caching -- Create non-root user (`appuser`) -- Expose port 8080 (standard for .NET in containers) -- Include health check endpoint -- Use `ENTRYPOINT ["dotnet", "AppName.dll"]` - -**Java/Spring Boot Applications:** -- Use `openjdk:17-jdk-slim` for build, `openjdk:17-jre-slim` for runtime -- Copy pom.xml/build.gradle first for dependency caching -- Multi-stage build pattern -- Create non-root user (`appuser`) -- Expose port 8080 -- Include actuator health check -- Use `CMD ["java", "-jar", "app.jar"]` - -## Security and Best Practices - -**CRITICAL REQUIREMENTS:** - -- **Always use non-root users** in production stage -- **Use minimal base images** (alpine, slim variants) -- **Implement multi-stage builds** to reduce image size -- **Include health check endpoints** for Container Apps -- **Set proper working directories** and file permissions -- **Use layer caching** by copying dependency files first -- **Never include secrets** in container images - -## .dockerignore Requirements - -**REQUIRED ACTIONS:** - -Create .dockerignore files with these patterns: - -**Universal Exclusions:** -- Version control: `.git`, `.gitignore` -- Documentation: `README.md`, `*.md` -- IDE files: `.vscode/`, `.idea/`, `*.swp` -- OS files: `.DS_Store`, `Thumbs.db` -- Docker files: `Dockerfile*`, `.dockerignore`, `docker-compose*.yml` -- Build artifacts and logs - -**Language-Specific Exclusions:** -- **Node.js:** `node_modules/`, `npm-debug.log*`, `coverage/`, `dist/` -- **Python:** `__pycache__/`, `*.pyc`, `venv/`, `.pytest_cache/`, `dist/` -- **.NET:** `bin/`, `obj/`, `*.user`, `packages/`, `.vs/` -- **Java:** `target/`, `*.class`, `.mvn/repository` - -## Health Check Implementation - -**REQUIRED ACTIONS:** - -Each containerized service must include a health check endpoint: - -- 
**Endpoint:** `/health` (standard convention) -- **Response:** JSON with status and timestamp -- **HTTP Status:** 200 for healthy, 503 for unhealthy -- **Timeout:** 3 seconds maximum response time -- **Content:** `{"status": "healthy", "timestamp": "ISO-8601"}` - -## Container Optimization - -**REQUIRED OPTIMIZATIONS:** - -- Use multi-stage builds to exclude build tools from production images -- Copy package/dependency files before source code for better caching -- Combine RUN commands to reduce layers -- Clean package manager caches in same RUN command -- Use specific versions for base images (avoid `latest`) -- Set resource limits appropriate for Azure Container Apps - -## Validation and Testing - -**VALIDATION REQUIREMENTS:** - -- All Dockerfiles must build successfully: `docker build -t test-image .` -- Containers must run with non-root users -- Health checks must respond correctly -- Images should be optimized for size (use `docker images` to verify) -- Container startup time should be reasonable (<30 seconds) - -## Update Documentation - -**REQUIRED ACTIONS:** - -Update `azd-arch-plan.md` with: - -- List of generated Dockerfiles and their languages -- Container configurations (ports, health checks, users) -- Security implementations (non-root users, minimal images) -- Build optimizations applied -- Local testing commands - -## Next Steps - -After Docker generation is complete: - -1. Test all containers build successfully locally -2. Integrate Dockerfile paths into `azure.yaml` service definitions -3. Configure Container Apps infrastructure to use these images -4. Set up Azure Container Registry for image storage -**IMPORTANT:** Reference existing tools for schema validation. For azure.yaml updates, use the `azd_azure_yaml_generation` tool. For infrastructure setup, use the `azd_infrastructure_generation` tool. 
+- Build: `mcr.microsoft.com/dotnet/sdk:8.0` +- Runtime: `mcr.microsoft.com/dotnet/aspnet:8.0` +- Multi-stage: restore → build → publish → runtime +- Non-root user: `appuser` +- Expose port 8080, health check `/health` + +**Java/Spring Boot:** + +- Build: `openjdk:17-jdk-slim`, Runtime: `openjdk:17-jre-slim` +- Copy dependency files first for caching +- Non-root user: `appuser` +- Expose port 8080, actuator health check + +**Security and Optimization Requirements:** + +- Always use non-root users in production stage +- Use minimal base images (alpine, slim variants) +- Implement multi-stage builds to reduce size +- Include health check endpoints for Container Apps +- Set proper working directories and file permissions +- Use layer caching by copying dependency files first +- Never include secrets in container images + +**.dockerignore Patterns:** + +- Universal: `.git`, `README.md`, `.vscode/`, `.DS_Store`, `Dockerfile*` +- Node.js: `node_modules/`, `npm-debug.log*`, `coverage/` +- Python: `__pycache__/`, `*.pyc`, `venv/`, `.pytest_cache/` +- .NET: `bin/`, `obj/`, `*.user`, `packages/` +- Java: `target/`, `*.class`, `.mvn/repository` + +**Health Check Implementation:** + +- Endpoint: `/health` (standard convention) +- Response: JSON with status and timestamp +- HTTP Status: 200 for healthy, 503 for unhealthy +- Timeout: 3 seconds maximum +- Content: `{"status": "healthy", "timestamp": "ISO-8601"}` + +📌 **Completion Checklist** + +- [ ] **Docker File Generation Checklist read** from `azd-arch-plan.md` +- [ ] **All files from Docker checklist generated** in the correct locations +- [ ] Dockerfiles created for all containerizable services identified in architecture planning +- [ ] .dockerignore files generated with appropriate exclusions for each language +- [ ] Multi-stage builds implemented to reduce image size +- [ ] Non-root users configured for security +- [ ] Health check endpoints implemented for all services +- [ ] Container startup optimization applied (dependency file caching) +- [ ] All Dockerfiles build successfully (`docker build` test) +- [ ] Security best practices followed (minimal images, no secrets) +- [ ] **Docker checklist in `azd-arch-plan.md` updated** by marking completed items as [x] while preserving existing content diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md b/cli/azd/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md index bff5ab8418d..49d7bb80107 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md +++ b/cli/azd/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md @@ -1,134 +1,109 @@ -# Infrastructure as Code (IaC) Generation Rules for Azure Developer CLI (AZD) +# Infrastructure as Code (IaC) Generation Rules -This document provides comprehensive rules and guidelines for generating Bicep Infrastructure as Code files and modules for AZD projects. Follow these rules strictly when generating Azure infrastructure code. +✅ **Agent Task List** -## Core Generation Rules +1. Reference these rules when generating any IaC files +2. Follow file structure and organization requirements +3. Implement naming conventions and tagging strategies +4. Apply security and compliance best practices +5. 
Validate all generated code against these requirements -### File Structure and Organization +📄 **Required Outputs** -- **REQUIRED**: Place all IaC files in the `./infra` folder within an AZD project -- **REQUIRED**: Name the main deployment file `main.bicep` - this is the primary deployment target -- **REQUIRED**: Create a `main.parameters.json` file alongside `main.bicep` containing all parameter defaults for the Bicep deployment -- **REQUIRED**: The root level `main.bicep` must be a subscription level deployment using `targetScope = 'subscription'` -- **REQUIRED**: The main.bicep file must create a resource group as the primary container for all resources -- **REQUIRED**: Pass the resource group scope to all child modules that deploy resources -- **REQUIRED**: Create modular, reusable Bicep files instead of monolithic templates -- **RECOMMENDED**: Organize modules by resource type or logical grouping +- IaC files following all specified rules and conventions +- Proper file structure in `./infra` directory +- Compliance with Azure Well-Architected Framework principles +- Security best practices implemented +- Validation passing without errors -### Azure Best Practices Compliance +🧠 **Execution Guidelines** -- **REQUIRED**: Follow Azure Well-Architected Framework principles -- **REQUIRED**: Use Bicep best practices including proper parameter validation and resource dependencies -- **REQUIRED**: Leverage Azure Verified Modules (AVM) when available - always check for existing AVM modules before creating custom ones -- **REQUIRED**: Implement least-privilege access principles +**File Structure and Organization:** -### Naming Conventions +- **REQUIRED:** Place all IaC files in `./infra` folder +- **REQUIRED:** Name main deployment file `main.bicep` +- **REQUIRED:** Create `main.parameters.json` with parameter defaults +- **REQUIRED:** Main.bicep must use `targetScope = 'subscription'` +- **REQUIRED:** Create resource group as primary container +- **REQUIRED:** Pass resource group scope to all child modules +- **REQUIRED:** Create modular, reusable Bicep files -- **REQUIRED**: Use consistent naming pattern: `{resourcePrefix}-{name}-{uniqueHash}` -- **REQUIRED**: Generate unique hash using combination of environment name, subscription ID, and resource group name -- **EXAMPLE**: `app-myservice-h3x9k2` where `h3x9k2` is generated from env/subscription/rg -- **FORBIDDEN**: Hard-code tenant IDs, subscription IDs, or resource group names +**Naming Conventions:** -### Module Parameters +- **REQUIRED:** Use pattern `{resourcePrefix}-{name}-{uniqueHash}` +- **REQUIRED:** Generate unique hash from environment name, subscription ID, and resource group name +- **EXAMPLE:** `app-myservice-h3x9k2` where `h3x9k2` is generated +- **FORBIDDEN:** Hard-code tenant IDs, subscription IDs, or resource group names -- **REQUIRED**: Every module must accept these standard parameters: - - `name` (string): Base name for the resource - - `location` (string): Azure region for deployment - - `tags` (object): Resource tags for governance -- **REQUIRED**: Modules that deploy Azure resources must use `targetScope = 'resourceGroup'` and be called with the resource group scope from main.bicep -- **REQUIRED**: Provide intelligent defaults for optional parameters -- **REQUIRED**: Use parameter decorators for validation (e.g., `@minLength`, `@allowed`) -- **RECOMMENDED**: Group related parameters using objects when appropriate +**Module Parameters (All modules must accept):** -### Tagging Strategy +- `name` (string): Base name for 
the resource +- `location` (string): Azure region for deployment +- `tags` (object): Resource tags for governance +- **REQUIRED:** Modules use `targetScope = 'resourceGroup'` +- **REQUIRED:** Provide intelligent defaults for optional parameters +- **REQUIRED:** Use parameter decorators for validation -- **REQUIRED**: Tag resource groups with `azd-env-name: {environment-name}` -- **REQUIRED**: Tag hosting resources with `azd-service-name: {service-name}` -- **RECOMMENDED**: Include additional governance tags (cost center, owner, etc.) +**Tagging Strategy:** -### Security and Compliance +- **REQUIRED:** Tag resource groups with `azd-env-name: {environment-name}` +- **REQUIRED:** Tag hosting resources with `azd-service-name: {service-name}` +- **RECOMMENDED:** Include governance tags (cost center, owner, etc.) -- **FORBIDDEN**: Hard-code secrets, connection strings, or sensitive values -- **REQUIRED**: Use Key Vault references for secrets -- **REQUIRED**: Enable diagnostic settings and logging where applicable -- **REQUIRED**: Follow principle of least privilege for managed identities +**Security and Compliance:** -### Quality Assurance +- **FORBIDDEN:** Hard-code secrets, connection strings, or sensitive values +- **REQUIRED:** Use Key Vault references for secrets +- **REQUIRED:** Enable diagnostic settings and logging where applicable +- **REQUIRED:** Follow principle of least privilege for managed identities +- **REQUIRED:** Follow Azure Well-Architected Framework principles -- **REQUIRED**: Validate all generated Bicep code using Bicep CLI -- **REQUIRED**: Address all warnings and errors before considering code complete -- **REQUIRED**: Test deployment in a sandbox environment when possible +**Container Resource Specifications:** -## Supported Azure Services +- **REQUIRED:** Wrap partial CPU values in `json()` function (e.g., `json('0.5')` for 0.5 CPU cores) +- **REQUIRED:** Memory values should be strings with units (e.g., `'0.5Gi'`, `'1Gi'`, `'2Gi'`) +- **EXAMPLE:** Container Apps resource specification: -### Primary Hosting Resources (Choose One) - -1. **Azure Container Apps** ⭐ **(PREFERRED)** - - Best for containerized applications - - Built-in scaling and networking - - Supports both HTTP and background services - -2. **Azure App Service** - - Best for web applications and APIs - - Supports multiple runtime stacks - - Built-in CI/CD integration - -3. **Azure Function Apps** - - Best for serverless and event-driven workloads - - Multiple hosting plans available - - Trigger-based execution model - -4. **Azure Static Web Apps** - - Best for frontend applications - - Built-in GitHub/Azure DevOps integration - - Free tier available - -5. 
**Azure Kubernetes Service (AKS)** - - Best for complex containerized workloads - - Full Kubernetes capabilities - - Requires advanced configuration - -### Essential Supporting Resources + ```bicep + resources: { + cpu: json('0.25') // Correct: wrapped in json() + memory: '0.5Gi' // Correct: string with units + } + ``` -**REQUIRED** - Include these resources in most AZD applications: +**Supported Azure Services:** -- **Log Analytics Workspace** - - Central logging and monitoring - - Required for Application Insights - - Enable diagnostic settings for all resources +**Primary Hosting Resources (Choose One):** -- **Application Insights** - - Application performance monitoring - - Dependency tracking and telemetry - - Link to Log Analytics workspace +- **Azure Container Apps** (PREFERRED): Containerized applications, built-in scaling +- **Azure App Service:** Web applications and APIs, multiple runtime stacks +- **Azure Function Apps:** Serverless and event-driven workloads +- **Azure Static Web Apps:** Frontend applications, built-in CI/CD +- **Azure Kubernetes Service (AKS):** Complex containerized workloads -- **Azure Key Vault** - - Secure storage for secrets, keys, and certificates - - Use managed identity for access - - Enable soft delete and purge protection +**Essential Supporting Resources (REQUIRED for most applications):** -**CONDITIONAL** - Include based on application requirements: +- **Log Analytics Workspace:** Central logging and monitoring +- **Application Insights:** Application performance monitoring +- **Azure Key Vault:** Secure storage for secrets and certificates -- **Azure Container Registry** (for container-based apps) -- **Azure Service Bus** (for messaging scenarios) -- **Azure Cosmos DB** (for NoSQL data storage) -- **Azure SQL Database** (for relational data storage) -- **Azure Storage Account** (for blob/file storage) -- **Azure Cache for Redis** (for caching scenarios) +**Conditional Resources (Include based on requirements):** -## Code Generation Examples +- Azure Container Registry (for container-based apps) +- Azure Service Bus (for messaging scenarios) +- Azure Cosmos DB (for NoSQL data storage) +- Azure SQL Database (for relational data storage) +- Azure Storage Account (for blob/file storage) +- Azure Cache for Redis (for caching scenarios) -### Main.bicep Structure Template +**Main.bicep Structure Template:** ```bicep targetScope = 'subscription' - @description('Name of the environment') param environmentName string - @description('Location for all resources') param location string - @description('Tags to apply to all resources') param tags object = {} @@ -157,37 +132,14 @@ module appService 'modules/app-service.bicep' = { } ``` -### Main.parameters.json Structure Template - -```json -{ - "$schema": "https://schema.management.azure.com/schemas/2018-05-01/subscriptionDeploymentParameters.json#", - "contentVersion": "1.0.0.0", - "parameters": { - "environmentName": { - "value": "${AZURE_ENV_NAME}" - }, - "location": { - "value": "${AZURE_LOCATION}" - }, - "tags": { - "value": {} - } - } -} -``` - -### Child Module Structure Template +**Child Module Structure Template:** ```bicep targetScope = 'resourceGroup' - @description('Base name for all resources') param name string - -@description('Location for all resources') +@description('Location for all resources') param location string = resourceGroup().location - @description('Tags to apply to all resources') param tags object = {} @@ -198,20 +150,16 @@ var resourceName = '${name}-${resourceSuffix}' // Resource 
definitions here... ``` -## Validation Checklist - -Before completing code generation, verify: +📌 **Completion Checklist** -- [ ] All files are in `./infra` folder -- [ ] `main.bicep` exists as primary deployment file with subscription scope -- [ ] `main.parameters.json` exists alongside `main.bicep` with parameter defaults -- [ ] Resource group is created in `main.bicep` and properly tagged +- [ ] All files placed in `./infra` folder with correct structure +- [ ] `main.bicep` exists with subscription scope and resource group creation +- [ ] `main.parameters.json` exists with parameter defaults - [ ] All child modules use `targetScope = 'resourceGroup'` and receive resource group scope -- [ ] All resources use consistent naming convention -- [ ] Required tags are applied correctly -- [ ] No hard-coded secrets or identifiers -- [ ] Parameters have appropriate validation -- [ ] Bicep CLI validation passes without errors -- [ ] AVM modules are used where available -- [ ] Supporting resources are included as needed -- [ ] Security best practices are followed +- [ ] Consistent naming convention applied: `{resourcePrefix}-{name}-{uniqueHash}` +- [ ] Required tags applied: `azd-env-name` and `azd-service-name` +- [ ] No hard-coded secrets, tenant IDs, or subscription IDs +- [ ] Parameters have appropriate validation decorators +- [ ] Security best practices followed (Key Vault, managed identities, diagnostics) +- [ ] Bicep CLI validation passes without errors (`az bicep build`) +- [ ] Deployment validation successful (`az deployment sub validate`) diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md index e7ff88ef55c..f7b902bb239 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md +++ b/cli/azd/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md @@ -1,75 +1,126 @@ -# AZD Infrastructure Generation Tool - -This specialized tool generates Bicep infrastructure templates for Azure Developer CLI (AZD) projects. - -## Overview - -Generate modular Bicep templates following Azure security and operational best practices. - -**IMPORTANT:** -- Before starting, check if `azd-arch-plan.md` exists to understand architecture decisions -- **Use the `azd_iac_generation_rules` tool for complete IaC rules, naming conventions, and best practices** - -## Success Criteria - -- [ ] Complete Bicep template structure created in `./infra` directory -- [ ] All templates compile without errors (`az bicep build --file infra/main.bicep`) -- [ ] Infrastructure supports all services defined in `azure.yaml` -- [ ] Follows all rules from `azd_iac_generation_rules` tool -- [ ] Parameter files configured appropriately - -## Requirements Analysis - -**REQUIRED ACTIONS:** - -1. **Review IaC Rules:** Use `azd_iac_generation_rules` tool to get complete file structure, naming conventions, and compliance requirements - -2. **Analyze Infrastructure Needs:** - - Map services from `azure.yaml` to required Azure resources - - Identify shared resources (Log Analytics, Container Registry, Key Vault) - - Determine connectivity and security requirements +# AZD Infrastructure Generation Instructions + +✅ **Agent Task List** + +1. Use `azd_iac_generation_rules` tool to get complete IaC rules and conventions +2. **Inventory existing IaC files** - scan current working directory for all `.bicep` files +3. Read `azd-arch-plan.md` to get the **IaC File Generation Checklist** +4. 
Create directory structure in `./infra` following IaC rules +5. For each file in the IaC checklist: + - **If file exists**: Intelligently update to match requirements, preserve user customizations where possible + - **If file missing**: Generate new file following templates and best practices + - **Flag conflicts**: Note any incompatible configurations but proceed with updates +6. Validate all generated bicep templates compile without errors or warnings +7. Update the IaC checklist section in existing `azd-arch-plan.md` by marking completed files as [x] while preserving existing content + +📄 **Required Outputs** + +- **Existing IaC inventory** documenting all current `.bicep` files found +- Complete Bicep template structure in `./infra` directory based on the IaC checklist +- All files listed in the IaC File Generation Checklist from `azd-arch-plan.md` (created or updated) +- Main.bicep file with subscription scope and modular deployment +- Service-specific modules for each Azure service from the checklist +- Parameter files with sensible defaults +- **Conflict report** highlighting any incompatible configurations that were updated +- All templates validated and error-free +- Update existing `azd-arch-plan.md` IaC checklist by marking completed files as [x] while preserving existing content + +🧠 **Execution Guidelines** + +**CRITICAL:** Use `azd_iac_generation_rules` tool first to get complete file structure, naming conventions, and compliance requirements. + +**Inventory Existing IaC Files:** + +- Scan current working directory recursively for all `.bicep` files +- Document existing files, their locations, and basic structure +- Note any existing modules, parameters, and resource definitions +- Identify which checklist files already exist vs. need to be created + +**Read IaC Checklist:** + +- Read the "Infrastructure as Code File Checklist" section from `azd-arch-plan.md` +- This checklist specifies exactly which Bicep files need to be generated +- Cross-reference with existing file inventory to determine update vs. 
create strategy + +**Smart File Generation Strategy:** + +**For Existing Files:** + +- **Preserve user customizations**: Keep existing resource configurations, naming, and parameters where compatible +- **Add missing components**: Inject required modules, resources, or configurations that are missing +- **Update outdated patterns**: Modernize to use current best practices +- **Maintain functionality**: Ensure existing deployments continue to work + +**For New Files:** + +- Create from templates following IaC generation rules +- Follow standard naming conventions and patterns + +**Conflict Resolution:** + +- **Document conflicts**: Log when existing configurations conflict with requirements +- **Prioritize functionality**: Make changes needed for AZD compatibility +- **Preserve intent**: Keep user's architectural decisions when possible +- **Flag major changes**: Clearly indicate significant modifications made + +**Generate Files in Order:** + +- Create `./infra/main.bicep` first (always required) +- Create `./infra/main.parameters.json` second (always required) +- Generate each module file listed in the checklist +- Follow the exact file paths specified in the checklist + +**Main Parameters File Requirements:** + +The `./infra/main.parameters.json` file is critical for AZD integration and must follow this exact structure: + +```json +{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "environmentName": { + "value": "${AZURE_ENV_NAME}" + }, + "location": { + "value": "${AZURE_LOCATION}" + }, + "principalId": { + "value": "${AZURE_PRINCIPAL_ID}" + } + } +} +``` -3. **Service Infrastructure Mapping:** - - **Container Apps:** Environment, Log Analytics, Container Registry, App Insights, Managed Identity - - **App Service:** Service Plan, App Service, App Insights - - **Functions:** Function App, Storage Account, App Insights - - **Static Web Apps:** Static Web App resource - - **Database:** SQL/CosmosDB/PostgreSQL with appropriate SKUs +**Key Features:** -## Generation Workflow +- **Environment Variable Substitution**: Uses `${VARIABLE_NAME}` syntax for dynamic values +- **Standard Parameters**: Always include `environmentName`, `location`, and `principalId` +- **AZD Integration**: These variables are automatically populated by AZD during deployment +- **Additional Parameters**: Add service-specific parameters as needed, using the same substitution pattern -**REQUIRED ACTIONS:** +**Service Infrastructure Mapping:** -1. **Create Directory Structure:** - Follow structure from `azd_iac_generation_rules` tool: - ``` - ./infra/ - ├── main.bicep - ├── main.parameters.json - ├── modules/ - └── [additional files per rules] - ``` +- **Container Apps:** Environment, Log Analytics, Container Registry, App Insights, Managed Identity +- **App Service:** Service Plan, App Service, App Insights, Managed Identity +- **Functions:** Function App, Storage Account, App Insights, Managed Identity +- **Static Web Apps:** Static Web App resource with configuration +- **Database:** SQL/CosmosDB/PostgreSQL with appropriate SKUs and security -2. **Generate Main Template:** - - Use subscription-level scope (`targetScope = 'subscription'`) - - Create resource group with proper tagging - - Deploy modules conditionally based on service requirements - - Follow naming conventions from IaC rules tool +**Module Template Requirements:** -3. 
**Generate Module Templates:** - - Create focused modules for each service type - - Use resource group scope for all modules - - Accept standardized parameters (environmentName, location, tags) - - Output connection information for applications +- Use `targetScope = 'resourceGroup'` for all modules +- Accept resource group scope from main template +- Use standardized parameters (name, location, tags) +- Follow naming convention: `{resourcePrefix}-{name}-{uniqueHash}` +- Output connection information for applications +- Include security best practices and monitoring -4. **Generate Parameter Files:** - - Provide sensible defaults for all parameters - - Use parameter references for environment-specific values - - Include all required parameters from IaC rules +**Required Directory Structure:** -``` +```text ./infra/ -├── main.bicep # Primary deployment template +├── main.bicep # Primary deployment template (subscription scope) ├── main.parameters.json # Default parameters ├── modules/ │ ├── container-apps.bicep @@ -82,78 +133,31 @@ Generate modular Bicep templates following Azure security and operational best p └── resources.bicep # Shared resources ``` -## Template Requirements - -### Main Template (main.bicep) - -**CRITICAL REQUIREMENTS:** +**Main Template Requirements:** - Use `targetScope = 'subscription'` - Accept standardized parameters: `environmentName`, `location`, `principalId` -- Include feature flags for conditional deployment (e.g., `deployDatabase`) -- Create resource group with proper tagging (`azd-env-name`, `azd-provisioned`) -- Call modules conditionally based on feature flags +- Include feature flags for conditional deployment +- Create resource group with proper tagging (`azd-env-name`) +- Call modules conditionally based on service requirements - Output connection strings and service endpoints -### Module Templates - -## Generate Infrastructure Files - -**WORKFLOW REQUIREMENTS:** - -1. **Create Directory Structure:** - - ```text - ./infra/ - ├── main.bicep - ├── main.parameters.json - ├── modules/ - └── [service-specific modules] - ``` - -2. **Generate Main Template (main.bicep):** - - Use `targetScope = 'subscription'` - - Create resource group with proper tagging - - Deploy modules conditionally based on service requirements - -3. **Generate Module Templates:** - - Create focused modules for each service type - - Use standardized parameters (`environmentName`, `location`, `tags`) - - Output connection information for applications - -4. **Generate Parameter Files:** - - Provide sensible defaults for all parameters - - Use parameter references for environment-specific values - -## Validation and Testing - -**VALIDATION REQUIREMENTS:** - -- All Bicep templates must compile without errors: `az bicep build --file infra/main.bicep` -- Validate deployment: `az deployment sub validate --template-file infra/main.bicep` -- Test with AZD: `azd provision --dry-run` -- Use existing tools for schema validation (reference `azd_yaml_schema` tool for azure.yaml validation) - -## Update Documentation - -**REQUIRED ACTIONS:** - -Update `azd-arch-plan.md` with: - -- List of generated infrastructure files -- Resource naming conventions used -- Security configurations implemented -- Parameter requirements -- Output variables available -- Validation results - -## Next Steps - -After infrastructure generation is complete: - -1. Validate all templates compile successfully -2. Test deployment with `azd provision --dry-run` -3. Deploy with `azd provision` (creates resources) -4. 
Proceed to application deployment with `azd deploy` +📌 **Completion Checklist** + +- [ ] `azd_iac_generation_rules` tool referenced for complete compliance requirements +- [ ] **Existing IaC inventory completed** - all `.bicep` files in current directory catalogued +- [ ] **IaC File Generation Checklist read** from `azd-arch-plan.md` +- [ ] **Update vs. create strategy determined** for each file in checklist +- [ ] **All files from checklist generated or updated** in the correct locations +- [ ] **User customizations preserved** where compatible with requirements +- [ ] **Conflicts documented** and resolved with functional priority +- [ ] Infrastructure directory structure created following IaC rules +- [ ] Main.bicep template created/updated with subscription scope and resource group +- [ ] Module templates generated/updated for all services listed in checklist +- [ ] Parameter files created/updated with appropriate defaults +- [ ] All Bicep templates compile without errors or warnings (`az bicep build`) +- [ ] Templates validate successfully (`az deployment sub validate`) +- [ ] Naming conventions and tagging implemented correctly +- [ ] Security best practices implemented (Key Vault, managed identities) +- [ ] **IaC checklist in `azd-arch-plan.md` updated** by marking completed files as [x] while preserving existing content -**IMPORTANT:** Reference existing tools instead of duplicating functionality. For azure.yaml validation, use the `azd_yaml_schema` tool. For Bicep best practices, follow the AZD IaC Generation Rules document. diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_plan_init.md b/cli/azd/internal/agent/tools/azd/prompts/azd_plan_init.md index 5859bf69840..fceea6c0f96 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_plan_init.md +++ b/cli/azd/internal/agent/tools/azd/prompts/azd_plan_init.md @@ -1,124 +1,93 @@ -# AZD Application Initialization and Migration Plan +# AZD Application Initialization and Migration Instructions -This document provides a comprehensive, step-by-step plan for initializing or migrating applications to use Azure Developer CLI (AZD). This is the orchestrating tool that guides you through using the specialized AZD tools. +✅ **Agent Task List** -**IMPORTANT: Before starting any workflow, ALWAYS check if `azd-arch-plan.md` exists in the current directory and review it to understand current progress, previous decisions, and what work has already been completed. This prevents duplicate work and ensures continuity.** +1. **Check Progress:** Review existing `azd-arch-plan.md` to understand completed work +2. **Phase 1:** Execute `azd_discovery_analysis` for component identification +3. **Phase 2:** Execute `azd_architecture_planning` for Azure service selection +4. **Phase 3:** Execute file generation tools (`azd_azure_yaml_generation`, `azd_infrastructure_generation`, `azd_docker_generation`) +5. **Phase 4:** Execute `azd_project_validation` for complete validation +6. **Final:** Confirm project readiness for deployment -Always use Azure best practices with intelligent defaults. 
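The `${VARIABLE_NAME}` placeholders required in `main.parameters.json` above are resolved from AZD environment values at provision time. A minimal sketch of that kind of expansion, assuming standard-library `os.Expand` semantics rather than azd's actual implementation (the values and JSON fragment below are illustrative):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Values AZD would source from the selected environment (illustrative only).
	envValues := map[string]string{
		"AZURE_ENV_NAME":     "myapp-dev",
		"AZURE_LOCATION":     "eastus2",
		"AZURE_PRINCIPAL_ID": "00000000-0000-0000-0000-000000000000",
	}

	raw := `{"parameters": {"environmentName": {"value": "${AZURE_ENV_NAME}"}}}`

	// os.Expand replaces ${NAME} references using the supplied mapping function.
	resolved := os.Expand(raw, func(name string) string { return envValues[name] })
	fmt.Println(resolved)
}
```

The same three standard parameters (`environmentName`, `location`, `principalId`) follow this substitution pattern, as do any service-specific parameters added later.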
+📄 **Required Outputs** -## Executive Summary +- Complete AZD-compatible project structure +- Valid `azure.yaml` configuration file +- Bicep infrastructure templates in `./infra` directory +- Dockerfiles for containerizable services +- Comprehensive `azd-arch-plan.md` documentation (created or updated while preserving existing content) +- Validated project ready for `azd up` deployment -Transform any application into an AZD-compatible project using a structured approach with specialized tools. Each tool has a focused responsibility and builds upon the previous phase to create a complete AZD deployment. +🧠 **Execution Guidelines** -## Success Criteria +**CRITICAL:** Always check if `azd-arch-plan.md` exists first to understand current progress and avoid duplicate work. If the file exists, preserve all existing content and user modifications while updating relevant sections. -The migration is successful when: +**Complete Workflow Phases:** -- [ ] All application components are identified and classified -- [ ] `azure.yaml` file is valid and complete -- [ ] All infrastructure files are generated and error-free -- [ ] Required Dockerfiles are created for containerizable components -- [ ] `azd-arch-plan.md` provides comprehensive documentation -- [ ] AZD environment is initialized and configured -- [ ] **All validation checks pass (use `azd_project_validation` tool)** +**Phase 1: Review Existing Progress** -## Complete Workflow Guide +- Check if `azd-arch-plan.md` exists in current directory +- If exists: Review thoroughly and skip completed phases +- If doesn't exist: Proceed to Phase 2 -### Phase 1: Review Existing Progress +**Phase 2: Discovery and Analysis** -Check if the file `azd-arch-plan.md` exists in the current directory and review it to understand current progress, previous decisions, and what work has already been completed. This prevents duplicate work and ensures continuity. +- Tool: `azd_discovery_analysis` +- Scans files recursively, documents structure/languages/frameworks +- Identifies entry points, maps dependencies, creates component inventory +- Updates `azd-arch-plan.md` with findings -- If file exists: Review thoroughly and skip completed phases -- If file doesn't exist: Proceed to Phase 2 +**Phase 3: Architecture Planning and Azure Service Selection** -### Phase 2: Discovery and Analysis +- Tool: `azd_architecture_planning` +- Maps components to Azure services, plans hosting strategies +- Designs database/messaging architecture, creates containerization strategies +- Updates `azd-arch-plan.md` with service selections -**Tool:** `azd_discovery_analysis` +**Phase 4: File Generation (Execute in Sequence)** -Scans files recursively, documents structure/languages/frameworks, identifies entry points, maps dependencies, and creates component inventory in `azd-arch-plan.md`. +1. **Azure.yaml Configuration:** `azd_azure_yaml_generation` (Required for all projects) +2. **Infrastructure Templates:** `azd_infrastructure_generation` (Required for all projects) +3. 
**Docker Configurations:** `azd_docker_generation` (Required for containerizable services) -### Phase 3: Architecture Planning and Azure Service Selection +**Phase 5: Project Validation and Environment Setup** -**Tool:** `azd_architecture_planning` +- Tool: `azd_project_validation` +- Validates azure.yaml against schema, compiles Bicep templates +- Ensures AZD environment exists, tests packaging, validates deployment preview +- Provides readiness confirmation -Maps components to Azure services, plans hosting strategies, designs database/messaging architecture, and creates containerization strategies. Updates `azd-arch-plan.md`. +**Usage Patterns:** -### Phase 4: File Generation - -Generate all necessary AZD files using these focused tools (most projects need all three): - -#### 1. Generate Azure.yaml Configuration - -**Tool:** `azd_azure_yaml_generation` (Required for all AZD projects) - -#### 2. Generate Infrastructure Templates - -**Tool:** `azd_infrastructure_generation` (Required for all AZD projects) - -#### 3. Generate Docker Configurations - -**Tool:** `azd_docker_generation` (Required for containerizable services) - -**Use in sequence:** azure.yaml → infrastructure → docker - -### Phase 5: Project Validation and Environment Setup - -**Tool:** `azd_project_validation` - -Validates azure.yaml against schema, compiles Bicep templates, ensures AZD environment exists, tests packaging, validates deployment with preview, and provides readiness confirmation. - -## Usage Patterns - -### Complete New Project Migration +**Complete New Project Migration:** ```text -1. Review existing azd-arch-plan.md (Phase 1) -2. azd_discovery_analysis -3. azd_architecture_planning -4. azd_azure_yaml_generation -5. azd_infrastructure_generation -6. azd_docker_generation (if containerization needed) +1. Review azd-arch-plan.md → 2. azd_discovery_analysis → 3. azd_architecture_planning → +4. azd_azure_yaml_generation → 5. azd_infrastructure_generation → 6. azd_docker_generation → 7. azd_project_validation ``` -### Update Existing AZD Project +**Update Existing AZD Project:** ```text -1. Review existing azd-arch-plan.md (Phase 1) -2. azd_azure_yaml_generation → azd_infrastructure_generation → azd_docker_generation → azd_project_validation +1. Review azd-arch-plan.md → 2. File generation tools → 3. azd_project_validation ``` -### Quick Service Addition +**Quick Service Addition:** ```text -1. Review existing azd-arch-plan.md (Phase 1) -2. azd_discovery_analysis → azd_azure_yaml_generation → azd_docker_generation → azd_project_validation +1. Review azd-arch-plan.md → 2. azd_discovery_analysis → 3. azd_azure_yaml_generation → +4. azd_docker_generation → 5. azd_project_validation ``` -## Central Planning Document - -**CRITICAL:** `azd-arch-plan.md` is the central coordination file that tracks progress, documents decisions, and maintains project state. Always review this file before starting any tool to understand current progress and avoid duplicate work. - -## Supporting Resources - -### Schema and Validation - -- Use `azd_yaml_schema` tool to get complete azure.yaml schema information -- Use `azd_iac_generation_rules` tool for Infrastructure as Code best practices - -### Troubleshooting - -Each tool includes: - -- Validation checklists -- Testing commands -- Common issues and solutions -- Next step guidance - -## Getting Started - -**Standard workflow:** -1. Review existing `azd-arch-plan.md` (Phase 1) -2. 
`azd_discovery_analysis` → `azd_architecture_planning` → File generation tools → `azd_project_validation` - -Keep `azd-arch-plan.md` updated throughout the process as the central coordination document. +📌 **Completion Checklist** + +- [ ] All application components identified and classified in discovery phase +- [ ] Azure service selections made for each component with rationale +- [ ] `azure.yaml` file generated and validates against schema +- [ ] Infrastructure files generated and compile without errors +- [ ] Dockerfiles created for containerizable components +- [ ] `azd-arch-plan.md` created or updated to provide comprehensive project documentation while preserving existing content +- [ ] AZD environment initialized and configured +- [ ] All validation checks pass using `azd_project_validation` tool +- [ ] Project confirmed ready for deployment with `azd up` diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md index 5197edcacbb..badbd8a58fb 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md +++ b/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md @@ -1,191 +1,100 @@ -# AZD Project Validation Tool +# AZD Project Validation Instructions -This tool validates an AZD project by programmatically running comprehensive checks on all components including azure.yaml schema validation, Bicep template validation, environment setup, packaging, and deployment preview. +✅ **Agent Task List** -## Purpose +1. Load existing `azd-arch-plan.md` to understand current project state and context +2. Execute azure.yaml against azd schema using available tool +3. Compile and validate all Bicep templates in ./infra directory +4. Verify AZD environment exists and is properly configured +5. Run `azd package` to validate service packaging +6. Execute `azd provision --preview` to test infrastructure deployment +7. Resolve ALL issues found in each validation step before proceeding +8. Update existing `azd-arch-plan.md` with validation results by adding/updating validation section while preserving existing content -This tool performs automated end-to-end validation of an AZD project to ensure all components are properly configured and the project is ready for deployment. The LLM should execute all validation steps directly using available tools and terminal commands, not just provide instructions to the user. +📄 **Required Outputs** -## Validation Workflow +- Complete validation report with all checks passed +- All identified issues resolved with zero remaining errors +- Confirmation that project is ready for deployment +- Update existing `azd-arch-plan.md` with validation results while preserving existing content +- Validation checklist added to or updated in architecture plan +- Clear next steps for deployment -The LLM must execute these validation steps programmatically using terminal commands and available tools: +🧠 **Execution Guidelines** -### 1. Azure.yaml Schema Validation +**CRITICAL REQUIREMENT:** Resolve ALL issues found during validation before proceeding to the next step. No validation step should be considered successful until all errors, warnings, and issues have been fully addressed. -**EXECUTE:** Use the `azd_yaml_schema` tool to validate the azure.yaml file against the official schema. +**Pre-Validation Setup:** -**Steps to Execute:** +**0. 
Load Architecture Plan:** -- Check if `azure.yaml` exists in current directory using file system tools -- Run `azd_yaml_schema` tool to validate schema compliance -- Parse and report any schema violations or missing required fields -- Verify service definitions and configurations are correct - -### 2. Bicep Template Validation - -**EXECUTE:** Run the following commands to validate Bicep templates: - -1. **Find Bicep Files:** Use file search to scan `./infra` directory for `.bicep` files -2. **Compile Templates:** Execute `az bicep build --file --stdout` for each template -3. **Validate Syntax:** Ensure all templates compile without errors -4. **Check Dependencies:** Verify module references and parameter passing - -**Commands to Execute:** - -```powershell -# Compile main template -az bicep build --file ./infra/main.bicep +- Read existing `azd-arch-plan.md` to understand current project architecture and context +- Review any previous validation results or known issues +- Understand the project structure and service configurations from the plan +- **MANDATORY:** Must load and review architecture plan before starting validation -# Validate deployment (requires Azure CLI login) -az deployment sub validate --template-file ./infra/main.bicep --parameters ./infra/main.parameters.json --location -``` +**Validation Execution Steps:** -### 3. AZD Environment Validation +**1. Azure.yaml Schema Validation:** -**EXECUTE:** Run these commands to validate AZD environment setup: - -1. **Check Environment Exists:** Execute `azd env list` to see available environments -2. **Create Environment if Missing:** - - If no environments exist, execute `azd env new ` - - Use current directory name as environment name (sanitized) -3. **Verify Environment Selection:** Ensure an environment is currently selected - -**Commands to Execute:** - -```powershell -# List existing environments -azd env list +- Check if `azure.yaml` exists in current directory +- Validate `azure.yaml` against AZD schema using available tools +- Parse and report any schema violations or missing fields +- Verify service definitions and configurations are correct +- **MANDATORY:** Fix ALL schema violations before proceeding -# Create new environment if none exist (replace with directory name) -azd env new +**2. Bicep Template Validation:** -# Select environment if not already selected -azd env select -``` +- Scan `./infra` directory for `.bicep` files using file search +- Execute `az bicep build --file ` for each template +- Run `az deployment sub validate --template-file ./infra/main.bicep --parameters ./infra/main.parameters.json --location ` +- Verify all templates compile without errors and dependencies are correct +- **MANDATORY:** Fix ALL compilation errors before proceeding +- Clean up any generated `` files generated during bicep validation -### 4. Package Validation +**3. AZD Environment Validation:** -**EXECUTE:** Run `azd package` to validate all services can be packaged successfully. +- Execute `azd env list` to check available environments +- If no environments exist, create one: `azd env new -dev` +- Ensure environment is selected and configured +- Ensure `AZURE_LOCATION` azd environment variable is set to a valid Azure location value +- Ensure `AZURE_SUBSCRIPTION_ID` azd environment variable is set to the users current Azure subscription +- **MANDATORY:** Fix environment issues before proceeding -**Steps to Execute:** +**4. 
Package Validation:** -- Execute `azd package` command -- Monitor output for errors or warnings +- Execute `azd package` command and monitor output - Verify all service source paths are valid -- Check Docker builds complete successfully (for containerized services) -- Ensure all build artifacts are created -- Validate package manifests - -**Command to Execute:** +- Check Docker builds complete successfully for containerized services +- Ensure all build artifacts are created correctly +- **MANDATORY:** Fix ALL packaging errors before proceeding -```powershell -azd package -``` - -### 5. Deployment Preview Validation - -**EXECUTE:** Run `azd provision --preview` to validate infrastructure deployment without actually creating resources. - -**Steps to Execute:** +**5. Deployment Preview Validation:** - Execute `azd provision --preview` command -- Monitor output for errors or warnings - Verify Azure authentication is working -- Check resource group creation plan -- Validate all Bicep modules deploy correctly +- Check resource group creation plan and Bicep module deployment - Ensure parameter values are properly resolved -- Confirm no deployment conflicts - -**Command to Execute:** - -```powershell -azd provision --preview -``` - -## Success Criteria - -The LLM must verify that project validation is successful when all of the following are true: - -- [ ] `azure.yaml` passes schema validation (executed via `azd_yaml_schema` tool) -- [ ] All Bicep templates compile without errors or warnings (verified via `az bicep build`) -- [ ] AZD environment exists and is properly configured (verified via `azd env list`) -- [ ] `azd package` completes without errors or warnings -- [ ] `azd provision --preview` completes without errors or warnings -- [ ] All service configurations are valid -- [ ] No missing dependencies or configuration issues - -The LLM should report the status of each validation step and provide a summary of the overall validation results. - -## Error Handling - -The LLM must handle common validation errors by executing appropriate remediation steps: - -### Common Issues and Automated Solutions - -**Azure.yaml Schema Errors:** - -- Execute `azd_yaml_schema` tool to get correct schema format -- Check service names match directory structure using file system tools -- Verify all required fields are present and report missing fields - -**Bicep Compilation Errors:** - -- Parse compilation error output and identify specific issues -- Check module paths and parameter names programmatically -- Verify resource naming conventions follow Azure requirements -- Ensure all required parameters have values - -**Environment Issues:** - -- Execute `azd auth login` if authentication fails -- Check Azure subscription access and permissions via Azure CLI -- Verify location parameter is valid Azure region - -**Package Errors:** - -- Check service source paths in azure.yaml programmatically -- Verify Docker builds work locally for containerized services by executing build commands -- Ensure all build dependencies are available - -**Provision Preview Errors:** - -- Verify Azure subscription has sufficient permissions via Azure CLI -- Check resource quotas and limits -- Ensure resource names are globally unique where required - -The LLM should attempt to resolve issues automatically where possible and provide clear error reporting for issues that require manual intervention. 
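The automated remediation described above assumes the agent can shell out to the CLI and inspect exit codes and output directly. A minimal Go sketch of that pattern, assuming only the standard library and that `az` is available on PATH (the helper name and paths are illustrative):

```go
package main

import (
	"fmt"
	"os/exec"
)

// runBicepBuild compiles a Bicep template and returns the compiler output on
// failure, so an agent can report the errors and attempt remediation before retrying.
func runBicepBuild(templatePath string) error {
	cmd := exec.Command("az", "bicep", "build", "--file", templatePath)
	out, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("bicep build failed for %s: %w\n%s", templatePath, err, out)
	}
	return nil
}

func main() {
	if err := runBicepBuild("./infra/main.bicep"); err != nil {
		fmt.Println(err)
	}
}
```

The same pattern applies to `azd package` and `azd provision --preview`; only the command arguments change.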
- -## Update Documentation - -**EXECUTE:** The LLM must update `azd-arch-plan.md` with validation results by: - -- Writing validation results for each component to the documentation -- Recording any issues found and resolutions applied -- Documenting environment configuration details -- Including deployment preview summary -- Updating project readiness status - -Use file editing tools to update the documentation with the validation results. - -## Next Steps - -The LLM should inform the user that after successful validation, they can proceed with: - -1. **Deploy Infrastructure:** Execute `azd provision` to create Azure resources -2. **Deploy Applications:** Execute `azd deploy` to deploy services -3. **Complete Deployment:** Execute `azd up` to provision and deploy in one step -4. **Monitor Deployment:** Use `azd monitor` to check application health -5. **View Logs:** Use `azd logs` to view deployment and runtime logs +- **MANDATORY:** Fix ALL preview errors before proceeding -### Production Preparation +**Error Resolution Requirements:** -For production deployment, the LLM should guide the user through: +- **Azure.yaml Schema Errors:** Validate azure.yaml using available tools +- **Bicep Compilation Errors:** Parse error output, check module paths and parameter names, verify resource naming +- **Environment Issues:** Run `azd auth login` if needed, check subscription access, verify location parameter +- **Package Errors:** Check service source paths, verify Docker builds work locally, ensure dependencies available +- **Provision Preview Errors:** Verify subscription permissions, check resource quotas, ensure resource names are unique -- Creating production environment: `azd env new -prod` -- Configuring production-specific settings and secrets -- Setting up monitoring, alerting, and backup procedures -- Documenting operational procedures and runbooks +📌 **Completion Checklist** -**VALIDATION COMPLETE:** Once all validation steps pass, the LLM should confirm that the AZD migration is complete and ready for deployment with `azd up`. +- [ ] `azd-arch-plan.md` loaded and reviewed for project context +- [ ] `azure.yaml` passes schema validation with NO errors or warnings +- [ ] ALL Bicep templates compile without errors or warnings +- [ ] AZD environment exists and is properly configured with NO issues +- [ ] `azd package` completes without errors or warnings with ALL services packaging successfully +- [ ] `azd provision --preview` completes without errors or warnings with ALL resources validating correctly +- [ ] ALL service configurations are valid with NO missing or incorrect settings +- [ ] NO missing dependencies or configuration issues remain +- [ ] Validation results added to existing `azd-arch-plan.md` while preserving existing content +- [ ] Project confirmed ready for deployment with `azd up` -**IMPORTANT:** This tool centralizes all validation logic. The LLM should execute all validation steps programmatically rather than delegating to other tools or providing user instructions. diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_yaml_schema.md b/cli/azd/internal/agent/tools/azd/prompts/azd_yaml_schema.md deleted file mode 100644 index 9701dd9c5ac..00000000000 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_yaml_schema.md +++ /dev/null @@ -1,18 +0,0 @@ -# Azure YAML Schema - -This document contains the JSON schema specification for the azure.yaml configuration file used in Azure Developer CLI (AZD) projects. 
- -## Schema Content - - - -The azure.yaml file is the main configuration file for AZD projects and defines: - -- Project metadata -- Services configuration -- Infrastructure settings -- Hooks and workflows -- Environment variables -- And other project-specific settings - -This schema helps validate and provide IntelliSense support for azure.yaml files in various editors and tools. diff --git a/cli/azd/internal/agent/tools/azd/prompts/prompts.go b/cli/azd/internal/agent/tools/azd/prompts/prompts.go index c8f7752cba1..a08d194ce7d 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/prompts.go +++ b/cli/azd/internal/agent/tools/azd/prompts/prompts.go @@ -27,3 +27,6 @@ var AzdInfrastructureGenerationPrompt string //go:embed azd_docker_generation.md var AzdDockerGenerationPrompt string + +//go:embed azd_project_validation.md +var AzdProjectValidationPrompt string diff --git a/cli/azd/internal/agent/tools/io/write_file.go b/cli/azd/internal/agent/tools/io/write_file.go index 8c20367af38..18ab45b42d0 100644 --- a/cli/azd/internal/agent/tools/io/write_file.go +++ b/cli/azd/internal/agent/tools/io/write_file.go @@ -17,11 +17,11 @@ type WriteFileTool struct{} // WriteFileRequest represents the JSON input for the write_file tool type WriteFileRequest struct { - Filename string `json:"filename"` - Content string `json:"content"` - Mode string `json:"mode,omitempty"` // "write" (default), "append", "create" - ChunkNum int `json:"chunkNum,omitempty"` // For chunked writing: 1-based chunk number - TotalChunks int `json:"totalChunks,omitempty"` // For chunked writing: total expected chunks + Filename string `json:"filename"` + Content string `json:"content"` + Mode string `json:"mode,omitempty"` // "write" (default), "append", "create" + StartLine int `json:"startLine,omitempty"` // For partial write: 1-based line number (inclusive) + EndLine int `json:"endLine,omitempty"` // For partial write: 1-based line number (inclusive) } // WriteFileResponse represents the JSON output for the write_file tool @@ -30,17 +30,17 @@ type WriteFileResponse struct { Operation string `json:"operation"` FilePath string `json:"filePath"` BytesWritten int `json:"bytesWritten"` - IsChunked bool `json:"isChunked"` - ChunkInfo *ChunkInfo `json:"chunkInfo,omitempty"` + IsPartial bool `json:"isPartial"` // True for partial write + LineInfo *LineInfo `json:"lineInfo,omitempty"` // For partial write FileInfo FileInfoDetails `json:"fileInfo"` Message string `json:"message,omitempty"` } -// ChunkInfo represents chunked writing details -type ChunkInfo struct { - ChunkNumber int `json:"chunkNumber"` - TotalChunks int `json:"totalChunks"` - IsComplete bool `json:"isComplete"` +// LineInfo represents line-based partial write details +type LineInfo struct { + StartLine int `json:"startLine"` + EndLine int `json:"endLine"` + LinesChanged int `json:"linesChanged"` } // FileInfoDetails represents file metadata @@ -55,44 +55,48 @@ func (t WriteFileTool) Name() string { } func (t WriteFileTool) Description() string { - return `Comprehensive file writing tool that handles small and large files intelligently. Returns JSON response with operation details. + return `Comprehensive file writing tool that handles full file writes, appends, and line-based partial updates. Returns JSON response with operation details. 
Input: JSON payload with the following structure: { "filename": "path/to/file.txt", "content": "file content here", "mode": "write", - "chunkNum": 1, - "totalChunks": 3 + "startLine": 5, + "endLine": 8 } Field descriptions: -- mode: "write" (default), "append", or "create" -- chunkNum: for chunked writing (1-based) -- totalChunks: total number of chunks +- mode: "write" (default), "append", or "create" +- startLine: for partial write - 1-based line number (inclusive) - REQUIRES EXISTING FILE +- endLine: for partial write - 1-based line number (inclusive) - REQUIRES EXISTING FILE MODES: -- "write" (default): Overwrite/create file +- "write" (default): Full file overwrite/create, OR partial line replacement when startLine/endLine provided - "append": Add content to end of existing file - "create": Create file only if it doesn't exist -CHUNKED WRITING (for large files): -Use chunkNum and totalChunks for files that might be too large: -- chunkNum: 1-based chunk number (1, 2, 3...) -- totalChunks: Total number of chunks you'll send +PARTIAL WRITES (line-based editing): +⚠️ IMPORTANT: Partial writes REQUIRE an existing file. Cannot create new files with line positioning. +Add startLine and endLine to any "write" operation to replace specific lines in EXISTING files: +- Both are 1-based and inclusive +- startLine=5, endLine=8 replaces lines 5, 6, 7, and 8 +- If endLine > file length, content is appended +- File MUST exist for partial writes - use regular write mode for new files EXAMPLES: -Simple write: +Full file write (new or existing file): {"filename": "./main.bicep", "content": "param location string = 'eastus'"} Append to file: {"filename": "./log.txt", "content": "\nNew log entry", "mode": "append"} -Large file (chunked): -{"filename": "./large.bicep", "content": "first part...", "chunkNum": 1, "totalChunks": 3} -{"filename": "./large.bicep", "content": "middle part...", "chunkNum": 2, "totalChunks": 3} -{"filename": "./large.bicep", "content": "final part...", "chunkNum": 3, "totalChunks": 3} +Partial write (replace specific lines in EXISTING file): +{"filename": "./config.json", "content": " \"newSetting\": true,\n \"version\": \"2.0\"", "startLine": 3, "endLine": 4} + +Create only if doesn't exist: +{"filename": "./new-file.txt", "content": "Initial content", "mode": "create"} The input must be formatted as a single line valid JSON string.` } @@ -126,10 +130,21 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { return t.createErrorResponse(fmt.Errorf("empty input"), "No input provided.") } + // Debug: Check for common JSON issues + input = strings.TrimSpace(input) + if !strings.HasPrefix(input, "{") || !strings.HasSuffix(input, "}") { + return t.createErrorResponse(fmt.Errorf("malformed JSON structure"), fmt.Sprintf("Invalid JSON input: Input does not appear to be valid JSON object. Starts with: %q, Ends with: %q", input[:min(10, len(input))], input[max(0, len(input)-10):])) + } + // Parse JSON input var req WriteFileRequest if err := json.Unmarshal([]byte(input), &req); err != nil { - return t.createErrorResponse(err, "Invalid JSON input") + // Enhanced error reporting for debugging + truncatedInput := input + if len(input) > 200 { + truncatedInput = input[:200] + "...[truncated]" + } + return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input. Error: %s. 
Input (first 200 chars): %s", err.Error(), truncatedInput)) } // Validate required fields @@ -143,51 +158,112 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { mode = "write" } - // Handle chunked writing - isChunked := req.ChunkNum > 0 && req.TotalChunks > 0 - if isChunked { - return t.handleChunkedWrite(ctx, req) + // Check if line numbers are provided for partial write + hasStartLine := req.StartLine != 0 + hasEndLine := req.EndLine != 0 + + // If any line number is provided, both must be provided and valid + if hasStartLine || hasEndLine { + if !hasStartLine || !hasEndLine { + return t.createErrorResponse(fmt.Errorf("both startLine and endLine must be provided for partial write"), "Both startLine and endLine must be provided for partial write") + } + + // Validate that file exists for partial write BEFORE attempting + filePath := strings.TrimSpace(req.Filename) + if _, err := os.Stat(filePath); os.IsNotExist(err) { + return t.createErrorResponse(err, fmt.Sprintf("Cannot perform partial write on file '%s' because it does not exist. For new files, omit startLine and endLine parameters to create the entire file", filePath)) + } + + // Smart write mode: this should be a partial write + if mode == "write" { + return t.handlePartialWrite(ctx, req) + } else { + return t.createErrorResponse(fmt.Errorf("startLine and endLine can only be used with write mode"), "startLine and endLine can only be used with write mode") + } } // Handle regular writing return t.handleRegularWrite(ctx, req, mode) } -// handleChunkedWrite handles writing files in chunks -func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequest) (string, error) { - if req.ChunkNum < 1 || req.TotalChunks < 1 || req.ChunkNum > req.TotalChunks { - return t.createErrorResponse(fmt.Errorf("invalid chunk numbers: chunkNum=%d, totalChunks=%d", req.ChunkNum, req.TotalChunks), fmt.Sprintf("Invalid chunk numbers: chunkNum=%d, totalChunks=%d. 
ChunkNum must be between 1 and totalChunks", req.ChunkNum, req.TotalChunks)) +// handlePartialWrite handles line-based partial file editing +func (t WriteFileTool) handlePartialWrite(ctx context.Context, req WriteFileRequest) (string, error) { + // Validate line numbers + if req.StartLine < 1 { + return t.createErrorResponse(fmt.Errorf("invalid startLine: %d", req.StartLine), "startLine must be >= 1") + } + if req.EndLine < 1 { + return t.createErrorResponse(fmt.Errorf("invalid endLine: %d", req.EndLine), "endLine must be >= 1") + } + if req.StartLine > req.EndLine { + return t.createErrorResponse(fmt.Errorf("invalid line range: startLine=%d > endLine=%d", req.StartLine, req.EndLine), "startLine cannot be greater than endLine") } filePath := strings.TrimSpace(req.Filename) - content := t.processContent(req.Content) - // Ensure directory exists - if err := t.ensureDirectory(filePath); err != nil { - return t.createErrorResponse(err, fmt.Sprintf("Failed to create directory for file %s: %s", filePath, err.Error())) + // Read existing file + fileBytes, err := os.ReadFile(filePath) + if err != nil { + return t.createErrorResponse(err, fmt.Sprintf("Failed to read existing file %s: %s", filePath, err.Error())) } - var err error - var operation string + // Detect line ending style from existing content + content := string(fileBytes) + lineEnding := "\n" + if strings.Contains(content, "\r\n") { + lineEnding = "\r\n" + } else if strings.Contains(content, "\r") { + lineEnding = "\r" + } - if req.ChunkNum == 1 { - // First chunk - create/overwrite file - err = os.WriteFile(filePath, []byte(content), 0644) - operation = "write" - } else { - // Subsequent chunks - append - file, openErr := os.OpenFile(filePath, os.O_APPEND|os.O_WRONLY, 0644) - if openErr != nil { - return t.createErrorResponse(openErr, fmt.Sprintf("Failed to open file for append %s: %s", filePath, openErr.Error())) - } - defer file.Close() + // Split into lines (preserve line endings) + lines := strings.Split(content, lineEnding) + originalLineCount := len(lines) - _, err = file.WriteString(content) - operation = "append" + // Handle the case where file ends with line ending (empty last element) + if originalLineCount > 0 && lines[originalLineCount-1] == "" { + lines = lines[:originalLineCount-1] + originalLineCount-- } + // Process new content + newContent := t.processContent(req.Content) + newLines := strings.Split(newContent, "\n") + + // If endLine is beyond file length, we'll append + actualEndLine := req.EndLine + if req.EndLine > originalLineCount { + actualEndLine = originalLineCount + } + + // Build new file content + var result []string + + // Lines before the replacement + if req.StartLine > 1 { + result = append(result, lines[:req.StartLine-1]...) + } + + // New lines + result = append(result, newLines...) + + // Lines after the replacement (if any) + if actualEndLine < originalLineCount { + result = append(result, lines[actualEndLine:]...) 
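+		// Example: at this point result holds lines[:startLine-1], then newLines,
+		// then lines[actualEndLine:]. For a 5-line file with startLine=2 and
+		// endLine=3 that is line 1, the new content, then lines 4 and 5; the
+		// 1-based request maps to 0-based slices via startLine-1 and actualEndLine.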
+ } + + // Join with original line ending style + finalContent := strings.Join(result, lineEnding) + + // If original file had trailing newline, preserve it + if len(fileBytes) > 0 && (string(fileBytes[len(fileBytes)-1:]) == "\n" || strings.HasSuffix(string(fileBytes), lineEnding)) { + finalContent += lineEnding + } + + // Write the updated content + err = os.WriteFile(filePath, []byte(finalContent), 0644) if err != nil { - return t.createErrorResponse(err, fmt.Sprintf("Failed to write chunk to file %s: %s", filePath, err.Error())) + return t.createErrorResponse(err, fmt.Sprintf("Failed to write updated content to file %s: %s", filePath, err.Error())) } // Get file info @@ -196,29 +272,27 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ return t.createErrorResponse(err, fmt.Sprintf("Failed to verify file %s: %s", filePath, err.Error())) } + // Calculate lines changed + linesChanged := len(newLines) + // Create JSON response response := WriteFileResponse{ Success: true, - Operation: operation, + Operation: "Wrote (partial)", FilePath: filePath, - BytesWritten: len(content), - IsChunked: true, - ChunkInfo: &ChunkInfo{ - ChunkNumber: req.ChunkNum, - TotalChunks: req.TotalChunks, - IsComplete: req.ChunkNum == req.TotalChunks, + BytesWritten: len(newContent), + IsPartial: true, + LineInfo: &LineInfo{ + StartLine: req.StartLine, + EndLine: req.EndLine, + LinesChanged: linesChanged, }, FileInfo: FileInfoDetails{ Size: fileInfo.Size(), ModifiedTime: fileInfo.ModTime(), Permissions: fileInfo.Mode().String(), }, - } - - if req.ChunkNum == req.TotalChunks { - response.Message = "File writing completed successfully" - } else { - response.Message = fmt.Sprintf("Chunk %d/%d written successfully", req.ChunkNum, req.TotalChunks) + Message: fmt.Sprintf("Partial write completed: lines %d-%d replaced successfully", req.StartLine, req.EndLine), } // Convert to JSON @@ -227,9 +301,7 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - output := string(jsonData) - - return output, nil + return string(jsonData), nil } // handleRegularWrite handles normal file writing @@ -239,7 +311,7 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ // Provide feedback for large content if len(content) > 10000 { - fmt.Printf("📝 Large content detected (%d chars). Consider using chunked writing for better reliability.\n", len(content)) + fmt.Printf("📝 Large content detected (%d chars). 
Consider breaking into smaller edits for better reliability.\n", len(content)) } // Ensure directory exists @@ -288,7 +360,7 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ Operation: operation, FilePath: filePath, BytesWritten: len(content), - IsChunked: false, + IsPartial: false, FileInfo: FileInfoDetails{ Size: fileInfo.Size(), ModifiedTime: fileInfo.ModTime(), @@ -325,3 +397,18 @@ func (t WriteFileTool) ensureDirectory(filePath string) error { } return nil } + +// Helper functions +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/cli/azd/internal/agent/tools/io/write_file_test.go b/cli/azd/internal/agent/tools/io/write_file_test.go new file mode 100644 index 00000000000..2aacfe62cdb --- /dev/null +++ b/cli/azd/internal/agent/tools/io/write_file_test.go @@ -0,0 +1,495 @@ +package io + +import ( + "context" + "encoding/json" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestWriteFileTool_Name(t *testing.T) { + tool := WriteFileTool{} + assert.Equal(t, "write_file", tool.Name()) +} + +func TestWriteFileTool_Description(t *testing.T) { + tool := WriteFileTool{} + desc := tool.Description() + assert.Contains(t, desc, "Comprehensive file writing tool") + assert.Contains(t, desc, "partial") + assert.Contains(t, desc, "startLine") + assert.Contains(t, desc, "endLine") +} + +func TestWriteFileTool_Call_EmptyInput(t *testing.T) { + tool := WriteFileTool{} + result, err := tool.Call(context.Background(), "") + + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "No input provided") +} + +func TestWriteFileTool_Call_InvalidJSON(t *testing.T) { + tool := WriteFileTool{} + result, err := tool.Call(context.Background(), "invalid json") + + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "Invalid JSON input: Input does not appear to be valid JSON object") +} + +func TestWriteFileTool_Call_MalformedJSON(t *testing.T) { + tool := WriteFileTool{} + // Test with JSON that has parse errors + result, err := tool.Call(context.Background(), `{"filename": "test.txt", "content": "unclosed string}`) + + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "Invalid JSON input. Error:") + assert.Contains(t, result, "Input (first 200 chars):") +} + +func TestWriteFileTool_Call_MissingFilename(t *testing.T) { + tool := WriteFileTool{} + input := `{"content": "test content"}` + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "filename cannot be empty") +} + +func TestWriteFileTool_FullFileWrite(t *testing.T) { + // Create temp directory + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + + tool := WriteFileTool{} + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "Hello, World!"}` + + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Verify response + var response WriteFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, "Wrote", response.Operation) + assert.Equal(t, testFile, response.FilePath) + assert.Equal(t, 13, response.BytesWritten) // "Hello, World!" 
length + assert.False(t, response.IsPartial) + assert.Nil(t, response.LineInfo) + assert.Greater(t, response.FileInfo.Size, int64(0)) + + // Verify file content + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + assert.Equal(t, "Hello, World!", string(content)) +} + +func TestWriteFileTool_AppendMode(t *testing.T) { + // Create temp directory and initial file + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + + // Create initial file + err := os.WriteFile(testFile, []byte("Initial content"), 0644) + require.NoError(t, err) + + tool := WriteFileTool{} + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "\nAppended content", "mode": "append"}` + + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Verify response + var response WriteFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, "Appended to", response.Operation) + assert.False(t, response.IsPartial) + + // Verify file content + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + assert.Equal(t, "Initial content\nAppended content", string(content)) +} + +func TestWriteFileTool_CreateMode_Success(t *testing.T) { + // Create temp directory + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "new-file.txt") + + tool := WriteFileTool{} + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New file content", "mode": "create"}` + + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Verify response + var response WriteFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, "Created", response.Operation) + + // Verify file content + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + assert.Equal(t, "New file content", string(content)) +} + +func TestWriteFileTool_CreateMode_FileExists(t *testing.T) { + // Create temp directory and existing file + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "existing.txt") + + err := os.WriteFile(testFile, []byte("Existing content"), 0644) + require.NoError(t, err) + + tool := WriteFileTool{} + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New content", "mode": "create"}` + + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Should return error + assert.Contains(t, result, "error") + assert.Contains(t, result, "already exists") + + // Verify original content unchanged + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + assert.Equal(t, "Existing content", string(content)) +} + +func TestWriteFileTool_PartialWrite_Basic(t *testing.T) { + // Create temp directory and initial file + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + + // Create initial file with multiple lines + initialContent := "Line 1\nLine 2\nLine 3\nLine 4\nLine 5" + err := os.WriteFile(testFile, []byte(initialContent), 0644) + require.NoError(t, err) + + tool := WriteFileTool{} + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "Modified Line 2\nModified Line 3", "startLine": 2, "endLine": 3}` + + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Verify response + var response WriteFileResponse + err = json.Unmarshal([]byte(result), &response) 
+ require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, "Wrote (partial)", response.Operation) + assert.True(t, response.IsPartial) + assert.NotNil(t, response.LineInfo) + assert.Equal(t, 2, response.LineInfo.StartLine) + assert.Equal(t, 3, response.LineInfo.EndLine) + assert.Equal(t, 2, response.LineInfo.LinesChanged) + + // Verify file content + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + expectedContent := "Line 1\nModified Line 2\nModified Line 3\nLine 4\nLine 5" + assert.Equal(t, expectedContent, string(content)) +} + +func TestWriteFileTool_PartialWrite_SingleLine(t *testing.T) { + // Create temp directory and initial file + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + + // Create initial file + initialContent := "Line 1\nLine 2\nLine 3" + err := os.WriteFile(testFile, []byte(initialContent), 0644) + require.NoError(t, err) + + tool := WriteFileTool{} + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "Replaced Line 2", "startLine": 2, "endLine": 2}` + + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Verify response + var response WriteFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.True(t, response.IsPartial) + assert.Equal(t, 2, response.LineInfo.StartLine) + assert.Equal(t, 2, response.LineInfo.EndLine) + assert.Equal(t, 1, response.LineInfo.LinesChanged) + + // Verify file content + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + expectedContent := "Line 1\nReplaced Line 2\nLine 3" + assert.Equal(t, expectedContent, string(content)) +} + +func TestWriteFileTool_PartialWrite_SingleLineToMultipleLines(t *testing.T) { + // Create temp directory and initial file + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + + // Create initial file + initialContent := "Line 1\nLine 2\nLine 3\nLine 4" + err := os.WriteFile(testFile, []byte(initialContent), 0644) + require.NoError(t, err) + + tool := WriteFileTool{} + // Replace single line 2 with multiple lines + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New Line 2a\nNew Line 2b\nNew Line 2c", "startLine": 2, "endLine": 2}` + + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Verify response + var response WriteFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, "Wrote (partial)", response.Operation) + assert.True(t, response.IsPartial) + assert.NotNil(t, response.LineInfo) + assert.Equal(t, 2, response.LineInfo.StartLine) + assert.Equal(t, 2, response.LineInfo.EndLine) + assert.Equal(t, 3, response.LineInfo.LinesChanged) // 3 new lines replaced 1 line + + // Verify file content - single line 2 should be replaced with 3 lines + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + expectedContent := "Line 1\nNew Line 2a\nNew Line 2b\nNew Line 2c\nLine 3\nLine 4" + assert.Equal(t, expectedContent, string(content)) +} + +func TestWriteFileTool_PartialWrite_FileNotExists(t *testing.T) { + // Create temp directory + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "nonexistent.txt") + + tool := WriteFileTool{} + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New content", "startLine": 1, "endLine": 1}` + + result, err := 
tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Should return error + assert.Contains(t, result, "error") + assert.Contains(t, result, "does not exist") + assert.Contains(t, result, "Cannot perform partial write on file") + assert.Contains(t, result, "For new files, omit startLine and endLine parameters") +} + +func TestWriteFileTool_PartialWrite_InvalidLineNumbers(t *testing.T) { + // Create temp directory and initial file + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + + err := os.WriteFile(testFile, []byte("Line 1\nLine 2"), 0644) + require.NoError(t, err) + + tool := WriteFileTool{} + + // Test startLine provided but not endLine + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "startLine": 1}` + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "Both startLine and endLine must be provided") + + // Test endLine provided but not startLine + input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "endLine": 1}` + result, err = tool.Call(context.Background(), input) + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "Both startLine and endLine must be provided") + + // Test startLine < 1 (this will trigger the partial write validation) + input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "startLine": 0, "endLine": 1}` + result, err = tool.Call(context.Background(), input) + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "Both startLine and endLine must be provided") // 0 is treated as "not provided" + + // Test valid line numbers but startLine > endLine + input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "startLine": 3, "endLine": 1}` + result, err = tool.Call(context.Background(), input) + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "startLine cannot be greater than endLine") +} + +func TestWriteFileTool_PartialWrite_BeyondFileLength(t *testing.T) { + // Create temp directory and initial file + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + + // Create initial file with 3 lines + initialContent := "Line 1\nLine 2\nLine 3" + err := os.WriteFile(testFile, []byte(initialContent), 0644) + require.NoError(t, err) + + tool := WriteFileTool{} + // Try to replace lines 2-5 (beyond file length) + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New content", "startLine": 2, "endLine": 5}` + + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Verify response + var response WriteFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.True(t, response.IsPartial) + + // Verify file content - should append since endLine > file length + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + expectedContent := "Line 1\nNew content" + assert.Equal(t, expectedContent, string(content)) +} + +func TestWriteFileTool_PartialWrite_PreserveLineEndings(t *testing.T) { + // Create temp directory and initial file with Windows line endings + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + + // Create initial file with CRLF line endings + 
initialContent := "Line 1\r\nLine 2\r\nLine 3\r\n" + err := os.WriteFile(testFile, []byte(initialContent), 0644) + require.NoError(t, err) + + tool := WriteFileTool{} + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "Modified Line 2", "startLine": 2, "endLine": 2}` + + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Verify response + var response WriteFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + assert.True(t, response.Success) + + // Verify file content preserves CRLF + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + expectedContent := "Line 1\r\nModified Line 2\r\nLine 3\r\n" + assert.Equal(t, expectedContent, string(content)) + assert.Contains(t, string(content), "\r\n") // Verify CRLF preserved +} + +func TestWriteFileTool_ProcessContent_EscapeSequences(t *testing.T) { + tool := WriteFileTool{} + + // Test newline escape + result := tool.processContent("Line 1\\nLine 2") + assert.Equal(t, "Line 1\nLine 2", result) + + // Test tab escape + result = tool.processContent("Column1\\tColumn2") + assert.Equal(t, "Column1\tColumn2", result) + + // Test both + result = tool.processContent("Line 1\\nColumn1\\tColumn2") + assert.Equal(t, "Line 1\nColumn1\tColumn2", result) +} + +func TestWriteFileTool_EnsureDirectory(t *testing.T) { + tool := WriteFileTool{} + tempDir := t.TempDir() + + // Test creating nested directory + testFile := filepath.Join(tempDir, "subdir", "nested", "test.txt") + err := tool.ensureDirectory(testFile) + assert.NoError(t, err) + + // Verify directory exists + dirPath := filepath.Dir(testFile) + info, err := os.Stat(dirPath) + assert.NoError(t, err) + assert.True(t, info.IsDir()) +} + +func TestWriteFileTool_Integration_ComplexScenario(t *testing.T) { + // Create temp directory + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "complex.txt") + + tool := WriteFileTool{} + + // Step 1: Create initial file + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "# Configuration File\nversion: 1.0\nname: test\nport: 8080\ndebug: false", "mode": "create"}` + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + assert.Contains(t, result, `"success": true`) + + // Step 2: Append new section + input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "\n# Database Config\nhost: localhost\nport: 5432", "mode": "append"}` + result, err = tool.Call(context.Background(), input) + assert.NoError(t, err) + assert.Contains(t, result, `"success": true`) + + // Step 3: Update specific lines (change port and debug) + input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "port: 9090\ndebug: true", "startLine": 4, "endLine": 5}` + result, err = tool.Call(context.Background(), input) + assert.NoError(t, err) + + var response WriteFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + assert.True(t, response.Success) + assert.True(t, response.IsPartial) + + // Verify final content + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + expectedContent := "# Configuration File\nversion: 1.0\nname: test\nport: 9090\ndebug: true\n# Database Config\nhost: localhost\nport: 5432" + assert.Equal(t, expectedContent, string(content)) +} + +func TestWriteFileTool_PartialWrite_InvalidLineRanges(t *testing.T) { + // Create temp directory and initial file + tempDir := t.TempDir() + 
testFile := filepath.Join(tempDir, "test.txt") + + err := os.WriteFile(testFile, []byte("Line 1\nLine 2"), 0644) + require.NoError(t, err) + + tool := WriteFileTool{} + + // Test negative startLine (will be handled by partial write validation) + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "startLine": -1, "endLine": 1}` + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "startLine must be") + + // Test negative endLine + input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "startLine": 1, "endLine": -1}` + result, err = tool.Call(context.Background(), input) + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "endLine must be") +} diff --git a/cli/azd/pkg/llm/azure_openai.go b/cli/azd/pkg/llm/azure_openai.go index dc05b78ec86..8abe43f5f25 100644 --- a/cli/azd/pkg/llm/azure_openai.go +++ b/cli/azd/pkg/llm/azure_openai.go @@ -7,15 +7,18 @@ import ( "fmt" "github.com/azure/azure-dev/cli/azd/pkg/config" + "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/llms/openai" ) type AzureOpenAiModelConfig struct { - Model string `json:"model"` - Version string `json:"version"` - Endpoint string `json:"endpoint"` - Token string `json:"token"` - ApiVersion string `json:"apiVersion"` + Model string `json:"model"` + Version string `json:"version"` + Endpoint string `json:"endpoint"` + Token string `json:"token"` + ApiVersion string `json:"apiVersion"` + Temperature *float64 `json:"temperature"` + MaxTokens *int `json:"maxTokens"` } type AzureOpenAiModelProvider struct { @@ -53,7 +56,7 @@ func (p *AzureOpenAiModelProvider) CreateModelContainer(opts ...ModelOption) (*M opt(modelContainer) } - model, err := openai.New( + openAiModel, err := openai.New( openai.WithToken(modelConfig.Token), openai.WithBaseURL(modelConfig.Endpoint), openai.WithAPIType(openai.APITypeAzure), @@ -64,8 +67,17 @@ func (p *AzureOpenAiModelProvider) CreateModelContainer(opts ...ModelOption) (*M return nil, fmt.Errorf("failed to create LLM: %w", err) } - model.CallbacksHandler = modelContainer.logger - modelContainer.Model = model + callOptions := []llms.CallOption{} + if modelConfig.Temperature != nil { + callOptions = append(callOptions, llms.WithTemperature(*modelConfig.Temperature)) + } + + if modelConfig.MaxTokens != nil { + callOptions = append(callOptions, llms.WithMaxTokens(*modelConfig.MaxTokens)) + } + + openAiModel.CallbacksHandler = modelContainer.logger + modelContainer.Model = NewModel(openAiModel, callOptions...) return modelContainer, nil } diff --git a/cli/azd/pkg/llm/manager.go b/cli/azd/pkg/llm/manager.go index f8ec6c0ba49..0c9c4a207ec 100644 --- a/cli/azd/pkg/llm/manager.go +++ b/cli/azd/pkg/llm/manager.go @@ -12,7 +12,7 @@ import ( "github.com/tmc/langchaingo/llms" ) -var featureLlm = alpha.MustFeatureKey("llm") +var FeatureLlm = alpha.MustFeatureKey("llm") func NewManager( alphaManager *alpha.FeatureManager, @@ -88,7 +88,7 @@ type NotEnabledError struct { func (e NotEnabledError) Error() string { return fmt.Sprintf("LLM feature is not enabled. 
Run '%s' to enable", - alpha.GetEnableCommand(featureLlm)) + alpha.GetEnableCommand(FeatureLlm)) } // InvalidLlmConfiguration represents an error that occurs when the LLM (Large Language Model) diff --git a/cli/azd/pkg/llm/model.go b/cli/azd/pkg/llm/model.go new file mode 100644 index 00000000000..b00e7730c49 --- /dev/null +++ b/cli/azd/pkg/llm/model.go @@ -0,0 +1,35 @@ +package llm + +import ( + "context" + "fmt" + + "github.com/tmc/langchaingo/llms" +) + +var _ llms.Model = (*Model)(nil) + +// Model wraps a langchaingo model to allow specifying call options at create time +type Model struct { + model llms.Model + options []llms.CallOption +} + +func NewModel(model llms.Model, options ...llms.CallOption) *Model { + return &Model{ + model: model, + options: options, + } +} + +func (m *Model) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error) { + allOptions := []llms.CallOption{} + allOptions = append(allOptions, m.options...) + allOptions = append(allOptions, options...) + + return m.model.GenerateContent(ctx, messages, allOptions...) +} + +func (m *Model) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) { + return "", fmt.Errorf("Deprecated, call GenerateContent") +} diff --git a/cli/azd/pkg/llm/model_factory.go b/cli/azd/pkg/llm/model_factory.go index d228465bd52..610c5118dba 100644 --- a/cli/azd/pkg/llm/model_factory.go +++ b/cli/azd/pkg/llm/model_factory.go @@ -1,6 +1,9 @@ package llm import ( + "fmt" + + "github.com/azure/azure-dev/cli/azd/internal" "github.com/azure/azure-dev/cli/azd/pkg/ioc" ) @@ -17,7 +20,10 @@ func NewModelFactory(serviceLocator ioc.ServiceLocator) *ModelFactory { func (f *ModelFactory) CreateModelContainer(modelType LlmType, opts ...ModelOption) (*ModelContainer, error) { var modelProvider ModelProvider if err := f.serviceLocator.ResolveNamed(string(modelType), &modelProvider); err != nil { - return nil, err + return nil, &internal.ErrorWithSuggestion{ + Err: fmt.Errorf("The model type '%s' is not supported. Supported types include: azure, ollama", modelType), + Suggestion: "Use `azd config set` to set the model type and any model-specific options, such as the model name or version.", + } } return modelProvider.CreateModelContainer(opts...) diff --git a/cli/azd/pkg/output/colors.go b/cli/azd/pkg/output/colors.go index 6daa42beccb..6d4834d023c 100644 --- a/cli/azd/pkg/output/colors.go +++ b/cli/azd/pkg/output/colors.go @@ -5,8 +5,13 @@ package output import ( "fmt" + "os" + "strconv" + "strings" + "github.com/charmbracelet/glamour" "github.com/fatih/color" + "github.com/nathan-fiscaletti/consolesize-go" ) // withLinkFormat creates string with hyperlink-looking color @@ -50,11 +55,58 @@ func WithUnderline(text string, a ...interface{}) string { } // WithBackticks wraps text with the backtick (`) character. -func WithBackticks(text string) string { - return "`" + text + "`" +func WithBackticks(s string) string { + return fmt.Sprintf("`%s`", s) +} + +// WithMarkdown converts markdown to terminal-friendly colorized output using glamour. +// This provides rich markdown rendering including bold, italic, code blocks, headers, etc. 
+func WithMarkdown(markdownText string) string { + // Get dynamic console width with fallback to 120 + consoleWidth := getConsoleWidth() + + // Create a custom glamour renderer with auto-style detection + r, err := glamour.NewTermRenderer( + glamour.WithAutoStyle(), + glamour.WithWordWrap(consoleWidth), // Use dynamic console width + ) + if err != nil { + // Fallback to returning original text if glamour fails + return markdownText + } + + // Render the markdown + rendered, err := r.Render(markdownText) + if err != nil { + // Fallback to returning original text if rendering fails + return markdownText + } + + // Trim trailing whitespace that glamour sometimes adds + return strings.TrimSpace(rendered) } // WithHyperlink wraps text with the colored hyperlink format escape sequence. func WithHyperlink(url string, text string) string { return WithLinkFormat(fmt.Sprintf("\033]8;;%s\007%s\033]8;;\007", url, text)) } + +// getConsoleWidth gets the console width with fallback logic. +// It uses the consolesize package to get the size and falls back to check the COLUMNS environment variable. +// Defaults to 120 if the console size cannot be determined. +func getConsoleWidth() int { + width, _ := consolesize.GetConsoleSize() + if width <= 0 { + // Default to 120 if console size cannot be determined + width = 120 + + consoleWidth := os.Getenv("COLUMNS") + if consoleWidth != "" { + if parsedWidth, err := strconv.Atoi(consoleWidth); err == nil { + width = parsedWidth + } + } + } + + return width +} diff --git a/go.mod b/go.mod index ba009a74964..4e4b44ec27b 100644 --- a/go.mod +++ b/go.mod @@ -92,16 +92,27 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/alecthomas/chroma/v2 v2.14.0 // indirect + github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect + github.com/aymerick/douceur v0.2.0 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/buger/jsonparser v1.1.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect + github.com/charmbracelet/glamour v0.10.0 // indirect + github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 // indirect + github.com/charmbracelet/x/ansi v0.8.0 // indirect + github.com/charmbracelet/x/cellbuf v0.0.13 // indirect + github.com/charmbracelet/x/exp/slice v0.0.0-20250327172914-2fdc97757edf // indirect + github.com/charmbracelet/x/term v0.2.1 // indirect github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/dlclark/regexp2 v1.10.0 // indirect + github.com/dlclark/regexp2 v1.11.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/goph/emperror v0.17.2 // indirect + github.com/gorilla/css v1.0.1 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect github.com/huandu/xstrings v1.3.3 // indirect github.com/imdario/mergo v0.3.13 // indirect @@ -110,14 +121,18 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kylelemons/godebug v1.1.0 // indirect + github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-ieproxy v0.0.12 
// indirect github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect + github.com/microcosm-cc/bluemonday v1.0.27 // indirect github.com/mitchellh/copystructure v1.0.0 // indirect github.com/mitchellh/reflectwalk v1.0.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/muesli/reflow v0.3.0 // indirect + github.com/muesli/termenv v0.16.0 // indirect github.com/nikolalohinski/gonja v1.5.3 // indirect github.com/otiai10/mint v1.6.3 // indirect github.com/pelletier/go-toml/v2 v2.0.9 // indirect @@ -135,8 +150,11 @@ require ( github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect + github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect github.com/yargevad/filepathx v1.0.0 // indirect github.com/yosida95/uritemplate/v3 v3.0.2 // indirect + github.com/yuin/goldmark v1.7.8 // indirect + github.com/yuin/goldmark-emoji v1.0.5 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect go.opentelemetry.io/otel/metric v1.35.0 // indirect diff --git a/go.sum b/go.sum index 81ab09de4a8..42bdd6a3c6f 100644 --- a/go.sum +++ b/go.sum @@ -109,8 +109,12 @@ github.com/PuerkitoBio/goquery v1.8.1/go.mod h1:Q8ICL1kNUJ2sXGoAhPGUdYDJvgQgHzJs github.com/adam-lavrik/go-imath v0.0.0-20210910152346-265a42a96f0b h1:g9SuFmxM/WucQFKTMSP+irxyf5m0RiUJreBDhGI6jSA= github.com/adam-lavrik/go-imath v0.0.0-20210910152346-265a42a96f0b/go.mod h1:XjvqMUpGd3Xn9Jtzk/4GEBCSoBX0eB2RyriXgne0IdM= github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o= +github.com/alecthomas/chroma/v2 v2.14.0 h1:R3+wzpnUArGcQz7fCETQBzO5n9IMNi13iIs46aU4V9E= +github.com/alecthomas/chroma/v2 v2.14.0/go.mod h1:QolEbTfmUHIMVpBqxeDnNBj2uoeI4EbYP4i6n68SG4I= github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss= github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= +github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= +github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= @@ -141,6 +145,20 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk= +github.com/charmbracelet/glamour v0.10.0 h1:MtZvfwsYCx8jEPFJm3rIBFIMZUfUJ765oX8V6kXldcY= +github.com/charmbracelet/glamour v0.10.0/go.mod h1:f+uf+I/ChNmqo087elLnVdCiVgjSKWuXa/l6NU2ndYk= +github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 
h1:ZR7e0ro+SZZiIZD7msJyA+NjkCNNavuiPBLgerbOziE= +github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834/go.mod h1:aKC/t2arECF6rNOnaKaVU6y4t4ZeHQzqfxedE/VkVhA= +github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE= +github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q= +github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k= +github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= +github.com/charmbracelet/x/exp/slice v0.0.0-20250327172914-2fdc97757edf h1:rLG0Yb6MQSDKdB52aGX55JT1oi0P0Kuaj7wi1bLUpnI= +github.com/charmbracelet/x/exp/slice v0.0.0-20250327172914-2fdc97757edf/go.mod h1:B3UgsnsBZS/eX42BlaNiJkD1pPOUa+oF1IYC6Yd2CEU= +github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= +github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -161,6 +179,8 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0= github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= +github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/drone/envsubst v1.0.3 h1:PCIBwNDYjs50AsLZPYdfhSATKaRg/FJmDc2D6+C2x8g= github.com/drone/envsubst v1.0.3/go.mod h1:N2jZmlMufstn1KEqvbHjw40h1KyTmnVzHcSc9bFiJ2g= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= @@ -239,6 +259,8 @@ github.com/goph/emperror v0.17.2 h1:yLapQcmEsO0ipe9p5TaN22djm3OFV/TfM/fcYP0/J18= github.com/goph/emperror v0.17.2/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic= github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= +github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8= +github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= @@ -282,6 +304,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80 h1:6Yzfa6GP0rIo/kULo2bwGEkFvCePZ3qHDDTC3/J9Swo= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/magefile/mage v1.15.0 
h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= @@ -297,6 +321,7 @@ github.com/mattn/go-ieproxy v0.0.12/go.mod h1:Vn+N61199DAnVeTgaF8eoB9PvLO8P3OBnG github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= @@ -304,6 +329,8 @@ github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQ github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58= github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs= +github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk= +github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA= github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY= github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= github.com/microsoft/azure-devops-go-api/azuredevops/v7 v7.1.0 h1:mmJCWLe63QvybxhW1iBmQWEaCKdc4SKgALfTNZ+OphU= @@ -321,6 +348,10 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= +github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= +github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= +github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= github.com/nathan-fiscaletti/consolesize-go v0.0.0-20220204101620-317176b6684d h1:NqRhLdNVlozULwM1B3VaHhcXYSgrOAv8V5BE65om+1Q= github.com/nathan-fiscaletti/consolesize-go v0.0.0-20220204101620-317176b6684d/go.mod h1:cxIIfNMTwff8f/ZvRouvWYF6wOoO7nj99neWSx2q/Es= github.com/nikolalohinski/gonja v1.5.3 h1:GsA+EEaZDZPGJ8JtpeGN78jidhOlxeJROpqMT9fTj9c= @@ -348,6 +379,7 @@ github.com/psanford/memfs v0.0.0-20241019191636-4ef911798f9b h1:xzjEJAHum+mV5Dd5 github.com/psanford/memfs v0.0.0-20241019191636-4ef911798f9b/go.mod h1:tcaRap0jS3eifrEEllL6ZMd9dg8IlDpi2S1oARrQ+NI= github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg 
v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -409,11 +441,18 @@ github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/ github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc= github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA= github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/goldmark v1.7.1/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E= +github.com/yuin/goldmark v1.7.8 h1:iERMLn0/QJeHFhxSt3p6PeN9mGnvIKSpG9YYorDMnic= +github.com/yuin/goldmark v1.7.8/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E= +github.com/yuin/goldmark-emoji v1.0.5 h1:EMVWyCGPlXJfUXBXpuMu+ii3TIaxbVBnEX9uaDC4cIk= +github.com/yuin/goldmark-emoji v1.0.5/go.mod h1:tTkZEbwu5wkPmgTcitqddVxY9osFZiavD+r4AzQrh1U= gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181 h1:K+bMSIx9A7mLES1rtG+qKduLIXq40DAzYHtb0XuCukA= gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181/go.mod h1:dzYhVIwWCtzPAa4QP98wfB9+mzt33MSmM8wsKiMi2ow= gitlab.com/golang-commonmark/linkify v0.0.0-20191026162114-a0c2df6c8f82 h1:oYrL81N608MLZhma3ruL8qTM4xcpYECGut8KSxRY59g= From 99f195ff3d3c6e4e27818c3dd5a56a2672b21e94 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Thu, 7 Aug 2025 09:55:31 -0700 Subject: [PATCH 031/116] Updates final prompts and output --- cli/azd/cmd/init.go | 83 ++++++++++--------- .../azd/prompts/azd_project_validation.md | 8 +- cli/azd/pkg/output/colors.go | 4 + 3 files changed, 50 insertions(+), 45 deletions(-) diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go index 3c128cb5c53..0116b9901e7 100644 --- a/cli/azd/cmd/init.go +++ b/cli/azd/cmd/init.go @@ -9,6 +9,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "time" "github.com/MakeNowJust/heredoc/v2" @@ -33,6 +34,7 @@ import ( "github.com/azure/azure-dev/cli/azd/pkg/templates" "github.com/azure/azure-dev/cli/azd/pkg/tools" "github.com/azure/azure-dev/cli/azd/pkg/tools/git" + uxlib "github.com/azure/azure-dev/cli/azd/pkg/ux" "github.com/azure/azure-dev/cli/azd/pkg/workflow" "github.com/fatih/color" "github.com/joho/godotenv" @@ -412,27 +414,27 @@ Do not stop until all tasks are complete and fully resolved. initSteps := []initStep{ { Name: "Running Discovery & Analysis", - Description: "Run a deep discovery and analysis on the current working directory. Provide a detailed summary of work performed.", + Description: "Run a deep discovery and analysis on the current working directory.", }, { Name: "Generating Architecture Plan", - Description: "Create a high-level architecture plan for the application. 
Provide a detailed summary of work performed.", + Description: "Create a high-level architecture plan for the application.", }, { Name: "Generating Dockerfile(s)", - Description: "Generate a Dockerfile for the application components as needed. Provide a detailed summary of work performed.", + Description: "Generate a Dockerfile for the application components as needed.", }, { Name: "Generating infrastructure", - Description: "Generate infrastructure as code (IaC) for the application. Provide a detailed summary of work performed.", + Description: "Generate infrastructure as code (IaC) for the application.", }, { Name: "Generating azure.yaml file", - Description: "Generate an azure.yaml file for the application. Provide a detailed summary of work performed.", + Description: "Generate an azure.yaml file for the application.", }, { Name: "Validating project", - Description: "Validate the project structure and configuration. Provide a detailed summary of work performed.", + Description: "Validate the project structure and configuration.", }, } @@ -446,7 +448,11 @@ Do not stop until all tasks are complete and fully resolved. // Run Step i.console.ShowSpinner(ctx, step.Name, input.Step) - fullTaskInput := fmt.Sprintf(taskInput, step.Description) + fullTaskInput := fmt.Sprintf(taskInput, strings.Join([]string{ + step.Description, + "Provide a very brief summary in markdown format that includes any files generated during this step.", + }, "\n")) + agentOutput, err := azdAgent.SendMessage(ctx, fullTaskInput) if err != nil { i.console.StopSpinner(ctx, fmt.Sprintf("%s (With errors)", step.Name), input.StepWarning) @@ -459,8 +465,8 @@ Do not stop until all tasks are complete and fully resolved. i.console.StopSpinner(ctx, step.Name, input.StepDone) i.console.Message(ctx, "") - finalOutput := fmt.Sprintf("%s %s", color.MagentaString("🤖 AZD Copilot:"), output.WithMarkdown(agentOutput)) - i.console.Message(ctx, finalOutput) + i.console.Message(ctx, color.MagentaString("🤖 AZD Copilot:")) + i.console.Message(ctx, output.WithMarkdown(agentOutput)) i.console.Message(ctx, "") } @@ -474,30 +480,38 @@ Do not stop until all tasks are complete and fully resolved. 
// collectAndApplyFeedback prompts for user feedback and applies it using the agent in a loop func (i *initAction) collectAndApplyFeedback(ctx context.Context, azdAgent *agent.ConversationalAzdAiAgent, promptMessage string) error { - hasFeedback, err := i.console.Confirm(ctx, input.ConsoleOptions{ - Message: promptMessage, - DefaultValue: false, - }) - if err != nil { - return err - } - - if !hasFeedback { - i.console.Message(ctx, "") - return nil - } - // Loop to allow multiple rounds of feedback for { + confirmFeedback := uxlib.NewConfirm(&uxlib.ConfirmOptions{ + Message: promptMessage, + DefaultValue: uxlib.Ptr(false), + HelpMessage: "You will be able to provide any feedback or changes after each step.", + }) + + hasFeedback, err := confirmFeedback.Ask(ctx) + if err != nil { + return err + } + + if !*hasFeedback { + i.console.Message(ctx, "") + break + } + + userInputPrompt := uxlib.NewPrompt(&uxlib.PromptOptions{ + Message: "💭 You", + PlaceHolder: "Provide feedback or changes to the project", + Required: true, + IgnoreHintKeys: true, + }) + + userInput, err := userInputPrompt.Ask(ctx) if err != nil { return fmt.Errorf("error collecting feedback during azd init, %w", err) } + i.console.Message(ctx, "") + if userInput != "" { i.console.ShowSpinner(ctx, "Submitting feedback", input.Step) feedbackOutput, err := azdAgent.SendMessage(ctx, userInput) @@ -511,23 +525,10 @@ func (i *initAction) collectAndApplyFeedback(ctx context.Context, azdAgent *agen i.console.StopSpinner(ctx, "Submitting feedback", input.StepDone) i.console.Message(ctx, "") - agentOutput := fmt.Sprintf("%s %s", color.MagentaString("🤖 AZD Copilot:"), output.WithMarkdown(feedbackOutput)) - i.console.Message(ctx, agentOutput) + i.console.Message(ctx, color.MagentaString("🤖 AZD Copilot:")) + i.console.Message(ctx, output.WithMarkdown(feedbackOutput)) + i.console.Message(ctx, "") } - - // Check if user wants to provide more feedback - moreFeedback, err := i.console.Confirm(ctx, input.ConsoleOptions{ - Message: "Do you have any more feedback or changes?", - DefaultValue: false, - }) - if err != nil { - return err - } - - if !moreFeedback { - break - } } return nil diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md index badbd8a58fb..0a28e9a7bc3 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md +++ b/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md @@ -6,8 +6,8 @@ 2. Execute azure.yaml against azd schema using available tool 3. Compile and validate all Bicep templates in ./infra directory 4. Verify AZD environment exists and is properly configured -5. Run `azd package` to validate service packaging -6. Execute `azd provision --preview` to test infrastructure deployment +5. Run `azd package --no-prompt` to validate service packaging +6. Execute `azd provision --preview --no-prompt` to test infrastructure deployment 7. Resolve ALL issues found in each validation step before proceeding 8. Update existing `azd-arch-plan.md` with validation results by adding/updating validation section while preserving existing content @@ -63,7 +63,7 @@ **4. 
Package Validation:** -- Execute `azd package` command and monitor output +- Execute `azd package --no-prompt` command and monitor output - Verify all service source paths are valid - Check Docker builds complete successfully for containerized services - Ensure all build artifacts are created correctly @@ -71,7 +71,7 @@ **5. Deployment Preview Validation:** -- Execute `azd provision --preview` command +- Execute `azd provision --preview --no-prompt` command - Verify Azure authentication is working - Check resource group creation plan and Bicep module deployment - Ensure parameter values are properly resolved diff --git a/cli/azd/pkg/output/colors.go b/cli/azd/pkg/output/colors.go index 6d4834d023c..c828a122bc9 100644 --- a/cli/azd/pkg/output/colors.go +++ b/cli/azd/pkg/output/colors.go @@ -62,6 +62,10 @@ func WithBackticks(s string) string { // WithMarkdown converts markdown to terminal-friendly colorized output using glamour. // This provides rich markdown rendering including bold, italic, code blocks, headers, etc. func WithMarkdown(markdownText string) string { + markdownText = strings.Trim(markdownText, "\n") + markdownText = strings.TrimPrefix(markdownText, "```markdown") + markdownText = strings.TrimSuffix(markdownText, "```") + // Get dynamic console width with fallback to 120 consoleWidth := getConsoleWidth() From c7b41de5b45480d0ded160003575601f5ff087cd Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Thu, 7 Aug 2025 12:21:36 -0700 Subject: [PATCH 032/116] Fixes all linter & spelling issues --- cli/azd/.vscode/cspell-azd-dictionary.txt | 5 + cli/azd/cmd/init.go | 15 ++- cli/azd/internal/agent/agent.go | 3 + .../internal/agent/conversational_agent.go | 6 +- cli/azd/internal/agent/one_shot_agent.go | 6 +- .../tools/azd/azd_architecture_planning.go | 8 +- .../tools/azd/azd_azure_yaml_generation.go | 8 +- .../agent/tools/azd/azd_discovery_analysis.go | 8 +- .../agent/tools/azd/azd_docker_generation.go | 8 +- .../tools/azd/azd_iac_generation_rules.go | 8 +- .../azd/azd_infrastructure_generation.go | 8 +- .../internal/agent/tools/azd/azd_plan_init.go | 8 +- .../agent/tools/azd/azd_project_validation.go | 8 +- .../agent/tools/azd/azd_yaml_schema.go | 3 + cli/azd/internal/agent/tools/azd/loader.go | 3 + .../agent/tools/azd/prompts/prompts.go | 3 + cli/azd/internal/agent/tools/common/types.go | 3 + .../agent/tools/dev/command_executor.go | 9 +- cli/azd/internal/agent/tools/dev/loader.go | 3 + .../internal/agent/tools/http/http_fetcher.go | 4 + cli/azd/internal/agent/tools/http/loader.go | 3 + .../agent/tools/io/change_directory.go | 11 +- cli/azd/internal/agent/tools/io/copy_file.go | 21 ++- .../agent/tools/io/create_directory.go | 11 +- .../agent/tools/io/current_directory.go | 6 +- .../agent/tools/io/delete_directory.go | 8 +- .../internal/agent/tools/io/delete_file.go | 8 +- .../internal/agent/tools/io/directory_list.go | 13 +- cli/azd/internal/agent/tools/io/file_info.go | 6 +- .../internal/agent/tools/io/file_search.go | 16 ++- cli/azd/internal/agent/tools/io/loader.go | 3 + cli/azd/internal/agent/tools/io/move_file.go | 29 ++++- cli/azd/internal/agent/tools/io/read_file.go | 61 +++++++-- cli/azd/internal/agent/tools/io/write_file.go | 83 +++++++++--- .../agent/tools/io/write_file_test.go | 112 ++++++++++++---- cli/azd/internal/agent/tools/loader.go | 3 + cli/azd/internal/agent/tools/mcp/loader.go | 3 + .../agent/tools/mcp/sampling_handler.go | 8 +- .../internal/agent/tools/weather/loader.go | 18 --- .../internal/agent/tools/weather/weather.go | 121 ------------------ 
cli/azd/pkg/llm/azure_openai.go | 16 ++- cli/azd/pkg/llm/manager.go | 5 - cli/azd/pkg/llm/model.go | 19 ++- cli/azd/pkg/llm/model_factory.go | 6 +- cli/azd/pkg/llm/ollama.go | 36 ++++-- 45 files changed, 501 insertions(+), 252 deletions(-) delete mode 100644 cli/azd/internal/agent/tools/weather/loader.go delete mode 100644 cli/azd/internal/agent/tools/weather/weather.go diff --git a/cli/azd/.vscode/cspell-azd-dictionary.txt b/cli/azd/.vscode/cspell-azd-dictionary.txt index 94791adec3d..979b11834f0 100644 --- a/cli/azd/.vscode/cspell-azd-dictionary.txt +++ b/cli/azd/.vscode/cspell-azd-dictionary.txt @@ -69,6 +69,7 @@ BUILDNUMBER buildpacks byoi cflags +charmbracelet circleci cmdrecord cmdsubst @@ -76,6 +77,7 @@ cognitiveservices conditionalize consolesize containerapp +containerizable containerapps contoso createdby @@ -143,6 +145,8 @@ ldflags lechnerc77 libc llms +localtools +mcptools memfs mergo mgmt @@ -245,6 +249,7 @@ unsetenvs unsets upgrader utsname +uxlib vite vsrpc vuejs diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go index 0116b9901e7..0b35a210631 100644 --- a/cli/azd/cmd/init.go +++ b/cli/azd/cmd/init.go @@ -389,6 +389,9 @@ func (i *initAction) initAppWithCopilot(ctx context.Context) error { } samplingModelContainer, err := i.llmManager.GetDefaultModel() + if err != nil { + return err + } azdAgent, err := agent.NewConversationalAzdAiAgent( defaultModelContainer.Model, @@ -441,7 +444,11 @@ Do not stop until all tasks are complete and fully resolved. for idx, step := range initSteps { // Collect and apply feedback for next steps if idx > 0 { - if err := i.collectAndApplyFeedback(ctx, azdAgent, "Any feedback before continuing to the next step?"); err != nil { + if err := i.collectAndApplyFeedback( + ctx, + azdAgent, + "Any feedback before continuing to the next step?", + ); err != nil { return err } } @@ -479,7 +486,11 @@ Do not stop until all tasks are complete and fully resolved. } // collectAndApplyFeedback prompts for user feedback and applies it using the agent in a loop -func (i *initAction) collectAndApplyFeedback(ctx context.Context, azdAgent *agent.ConversationalAzdAiAgent, promptMessage string) error { +func (i *initAction) collectAndApplyFeedback( + ctx context.Context, + azdAgent *agent.ConversationalAzdAiAgent, + promptMessage string, +) error { // Loop to allow multiple rounds of feedback for { confirmFeedback := uxlib.NewConfirm(&uxlib.ConfirmOptions{ diff --git a/cli/azd/internal/agent/agent.go b/cli/azd/internal/agent/agent.go index dc3f434e3d3..8dc07ae2668 100644 --- a/cli/azd/internal/agent/agent.go +++ b/cli/azd/internal/agent/agent.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ package agent import ( diff --git a/cli/azd/internal/agent/conversational_agent.go b/cli/azd/internal/agent/conversational_agent.go index 49b6c20e850..1c6622cac7d 100644 --- a/cli/azd/internal/agent/conversational_agent.go +++ b/cli/azd/internal/agent/conversational_agent.go @@ -20,14 +20,14 @@ import ( "github.com/tmc/langchaingo/tools" localtools "github.com/azure/azure-dev/cli/azd/internal/agent/tools" - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" mcptools "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" ) //go:embed prompts/conversational.txt var conversational_prompt_template string -// ConversationalAzdAiAgent represents an enhanced AZD Copilot agent with action tracking, intent validation, and conversation memory +// ConversationalAzdAiAgent represents an enhanced AZD Copilot agent with action tracking, +// intent validation, and conversation memory type ConversationalAzdAiAgent struct { *Agent } @@ -55,7 +55,7 @@ func NewConversationalAzdAiAgent(llm llms.Model, opts ...AgentOption) (*Conversa // Create sampling handler for MCP samplingHandler := mcptools.NewMcpSamplingHandler( azdAgent.samplingModel, - mcp.WithDebug(azdAgent.debug), + mcptools.WithDebug(azdAgent.debug), ) toolLoaders := []localtools.ToolLoader{ diff --git a/cli/azd/internal/agent/one_shot_agent.go b/cli/azd/internal/agent/one_shot_agent.go index e6b5adf427f..e2d8c9adcd2 100644 --- a/cli/azd/internal/agent/one_shot_agent.go +++ b/cli/azd/internal/agent/one_shot_agent.go @@ -15,11 +15,11 @@ import ( "github.com/tmc/langchaingo/tools" localtools "github.com/azure/azure-dev/cli/azd/internal/agent/tools" - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" mcptools "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" ) -// OneShotAzdAiAgent represents an enhanced AZD Copilot agent with action tracking, intent validation, and conversation memory +// OneShotAzdAiAgent represents an enhanced AZD Copilot agent with action tracking, +// intent validation, and conversation memory type OneShotAzdAiAgent struct { *Agent } @@ -43,7 +43,7 @@ func NewOneShotAzdAiAgent(llm llms.Model, opts ...AgentOption) (*OneShotAzdAiAge // Create sampling handler for MCP samplingHandler := mcptools.NewMcpSamplingHandler( azdAgent.samplingModel, - mcp.WithDebug(azdAgent.debug), + mcptools.WithDebug(azdAgent.debug), ) toolLoaders := []localtools.ToolLoader{ diff --git a/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go b/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go index 0368400fabd..aa6ea409a26 100644 --- a/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go +++ b/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( @@ -17,7 +20,10 @@ func (t *AzdArchitecturePlanningTool) Name() string { } func (t *AzdArchitecturePlanningTool) Description() string { - return `Returns instructions for selecting appropriate Azure services for discovered application components and designing infrastructure architecture. The LLM agent should execute these instructions using available tools. + return `Returns instructions for selecting appropriate Azure services for discovered application components and +designing infrastructure architecture. + +The LLM agent should execute these instructions using available tools. 
Use this tool when: - Discovery analysis has been completed and azd-arch-plan.md exists diff --git a/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go b/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go index 5b1f5adb84e..2590eb2c0ba 100644 --- a/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go +++ b/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( @@ -17,7 +20,10 @@ func (t *AzdAzureYamlGenerationTool) Name() string { } func (t *AzdAzureYamlGenerationTool) Description() string { - return `Returns instructions for generating the azure.yaml configuration file with proper service hosting, build, and deployment settings for AZD projects. The LLM agent should execute these instructions using available tools. + return `Returns instructions for generating the azure.yaml configuration file with proper service hosting, +build, and deployment settings for AZD projects. + +The LLM agent should execute these instructions using available tools. Use this tool when: - Architecture planning has been completed and Azure services selected diff --git a/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go b/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go index 5e24f5727fc..f8b13425eea 100644 --- a/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go +++ b/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( @@ -17,7 +20,10 @@ func (t *AzdDiscoveryAnalysisTool) Name() string { } func (t *AzdDiscoveryAnalysisTool) Description() string { - return `Returns instructions for performing comprehensive discovery and analysis of application components to prepare for Azure Developer CLI (AZD) initialization. The LLM agent should execute these instructions using available tools. + return `Returns instructions for performing comprehensive discovery and analysis of application components +to prepare for Azure Developer CLI (AZD) initialization. + +The LLM agent should execute these instructions using available tools. Use this tool when: - Starting Phase 1 of AZD migration process diff --git a/cli/azd/internal/agent/tools/azd/azd_docker_generation.go b/cli/azd/internal/agent/tools/azd/azd_docker_generation.go index c22b590c779..57c03e2d807 100644 --- a/cli/azd/internal/agent/tools/azd/azd_docker_generation.go +++ b/cli/azd/internal/agent/tools/azd/azd_docker_generation.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( @@ -17,7 +20,10 @@ func (t *AzdDockerGenerationTool) Name() string { } func (t *AzdDockerGenerationTool) Description() string { - return `Returns instructions for generating optimized Dockerfiles and container configurations for containerizable services in AZD projects. The LLM agent should execute these instructions using available tools. + return `Returns instructions for generating optimized Dockerfiles and container configurations for containerizable +services in AZD projects. + +The LLM agent should execute these instructions using available tools. 
Use this tool when: - Architecture planning identified services requiring containerization diff --git a/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go b/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go index 2fe68dbeaeb..d55f903e4d2 100644 --- a/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go +++ b/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( @@ -17,7 +20,10 @@ func (t *AzdIacGenerationRulesTool) Name() string { } func (t *AzdIacGenerationRulesTool) Description() string { - return `Returns comprehensive rules and guidelines for generating Bicep Infrastructure as Code files and modules for AZD projects. The LLM agent should reference these rules when generating infrastructure code. + return `Returns comprehensive rules and guidelines for generating Bicep Infrastructure as Code files and modules +for AZD projects. + +The LLM agent should reference these rules when generating infrastructure code. Use this tool when: - Generating any Bicep infrastructure templates for AZD projects diff --git a/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go b/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go index b147a99b3aa..3c3e7cf52b9 100644 --- a/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go +++ b/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( @@ -17,7 +20,10 @@ func (t *AzdInfrastructureGenerationTool) Name() string { } func (t *AzdInfrastructureGenerationTool) Description() string { - return `Returns instructions for generating modular Bicep infrastructure templates following Azure security and operational best practices for AZD projects. The LLM agent should execute these instructions using available tools. + return `Returns instructions for generating modular Bicep infrastructure templates following Azure security and +operational best practices for AZD projects. + +The LLM agent should execute these instructions using available tools. Use this tool when: - Architecture planning completed with Azure services selected diff --git a/cli/azd/internal/agent/tools/azd/azd_plan_init.go b/cli/azd/internal/agent/tools/azd/azd_plan_init.go index 0c246d46b92..a6eb422ab78 100644 --- a/cli/azd/internal/agent/tools/azd/azd_plan_init.go +++ b/cli/azd/internal/agent/tools/azd/azd_plan_init.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( @@ -17,7 +20,10 @@ func (t *AzdPlanInitTool) Name() string { } func (t *AzdPlanInitTool) Description() string { - return `Returns instructions for orchestrating complete AZD application initialization using structured phases with specialized tools. The LLM agent should execute these instructions using available tools. + return `Returns instructions for orchestrating complete AZD application initialization using structured phases +with specialized tools. + +The LLM agent should execute these instructions using available tools. 
Use this tool when: - Starting new AZD project initialization or migration diff --git a/cli/azd/internal/agent/tools/azd/azd_project_validation.go b/cli/azd/internal/agent/tools/azd/azd_project_validation.go index 2a856a5596a..7645fac9ca0 100644 --- a/cli/azd/internal/agent/tools/azd/azd_project_validation.go +++ b/cli/azd/internal/agent/tools/azd/azd_project_validation.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( @@ -20,7 +23,10 @@ func (t *AzdProjectValidationTool) Name() string { // Description returns the description of the tool. func (t *AzdProjectValidationTool) Description() string { - return `Returns instructions for validating AZD project by running comprehensive checks on azure.yaml schema, Bicep templates, environment setup, packaging, and deployment preview. The LLM agent should execute these instructions using available tools. + return `Returns instructions for validating AZD project by running comprehensive checks on azure.yaml schema, +Bicep templates, environment setup, packaging, and deployment preview. + +The LLM agent should execute these instructions using available tools. Use this tool when: - All AZD configuration files have been generated diff --git a/cli/azd/internal/agent/tools/azd/azd_yaml_schema.go b/cli/azd/internal/agent/tools/azd/azd_yaml_schema.go index d9577f92af8..678d268d7ea 100644 --- a/cli/azd/internal/agent/tools/azd/azd_yaml_schema.go +++ b/cli/azd/internal/agent/tools/azd/azd_yaml_schema.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( diff --git a/cli/azd/internal/agent/tools/azd/loader.go b/cli/azd/internal/agent/tools/azd/loader.go index 6d81740a6ac..f55b9c93e77 100644 --- a/cli/azd/internal/agent/tools/azd/loader.go +++ b/cli/azd/internal/agent/tools/azd/loader.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( diff --git a/cli/azd/internal/agent/tools/azd/prompts/prompts.go b/cli/azd/internal/agent/tools/azd/prompts/prompts.go index a08d194ce7d..7273140321a 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/prompts.go +++ b/cli/azd/internal/agent/tools/azd/prompts/prompts.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package prompts import ( diff --git a/cli/azd/internal/agent/tools/common/types.go b/cli/azd/internal/agent/tools/common/types.go index 47f14eea64e..b8740f01b06 100644 --- a/cli/azd/internal/agent/tools/common/types.go +++ b/cli/azd/internal/agent/tools/common/types.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package common // ErrorResponse represents a JSON error response structure that can be reused across all tools diff --git a/cli/azd/internal/agent/tools/dev/command_executor.go b/cli/azd/internal/agent/tools/dev/command_executor.go index 6f0fc33bdaa..1ecc9041318 100644 --- a/cli/azd/internal/agent/tools/dev/command_executor.go +++ b/cli/azd/internal/agent/tools/dev/command_executor.go @@ -1,8 +1,12 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ package dev import ( "context" "encoding/json" + "errors" "fmt" "os" "os/exec" @@ -168,6 +172,7 @@ func (t CommandExecutorTool) executeCommand(ctx context.Context, command string, if runtime.GOOS == "windows" { // On Windows, use cmd.exe to handle built-in commands and path resolution allArgs := append([]string{"/C", command}, args...) + // #nosec G204 - Command execution is the intended functionality of this tool cmd = exec.CommandContext(ctx, "cmd", allArgs...) } else { // On Unix-like systems, use sh for better command resolution @@ -175,6 +180,7 @@ func (t CommandExecutorTool) executeCommand(ctx context.Context, command string, if len(args) > 0 { fullCommand += " " + strings.Join(args, " ") } + // #nosec G204 - Command execution is the intended functionality of this tool cmd = exec.CommandContext(ctx, "sh", "-c", fullCommand) } @@ -198,7 +204,8 @@ func (t CommandExecutorTool) executeCommand(ctx context.Context, command string, var cmdError error if err != nil { - if exitError, ok := err.(*exec.ExitError); ok { + var exitError *exec.ExitError + if errors.As(err, &exitError) { // Command ran but exited with non-zero code - this is normal exitCode = exitError.ExitCode() cmdError = nil // Don't treat non-zero exit as a system error diff --git a/cli/azd/internal/agent/tools/dev/loader.go b/cli/azd/internal/agent/tools/dev/loader.go index 3b938213ed0..1028825fb22 100644 --- a/cli/azd/internal/agent/tools/dev/loader.go +++ b/cli/azd/internal/agent/tools/dev/loader.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package dev import ( diff --git a/cli/azd/internal/agent/tools/http/http_fetcher.go b/cli/azd/internal/agent/tools/http/http_fetcher.go index cbd3628506b..7a4cb1c9c24 100644 --- a/cli/azd/internal/agent/tools/http/http_fetcher.go +++ b/cli/azd/internal/agent/tools/http/http_fetcher.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package http import ( @@ -27,6 +30,7 @@ func (t HTTPFetcherTool) Call(ctx context.Context, input string) (string, error) t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("http_fetcher: %s", input)) } + // #nosec G107 - HTTP requests with variable URLs are the intended functionality of this tool resp, err := http.Get(input) if err != nil { toolErr := fmt.Errorf("failed to fetch URL %s: %w", input, err) diff --git a/cli/azd/internal/agent/tools/http/loader.go b/cli/azd/internal/agent/tools/http/loader.go index 2233455e3e8..32a1ce4dbc8 100644 --- a/cli/azd/internal/agent/tools/http/loader.go +++ b/cli/azd/internal/agent/tools/http/loader.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package http import ( diff --git a/cli/azd/internal/agent/tools/io/change_directory.go b/cli/azd/internal/agent/tools/io/change_directory.go index 889d07d8041..b942e09b458 100644 --- a/cli/azd/internal/agent/tools/io/change_directory.go +++ b/cli/azd/internal/agent/tools/io/change_directory.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -19,7 +22,8 @@ func (t ChangeDirectoryTool) Name() string { } func (t ChangeDirectoryTool) Description() string { - return "Change the current working directory. Input: directory path (e.g., '../parent' or './subfolder' or absolute path)" + return "Change the current working directory. 
" + + "Input: directory path (e.g., '../parent' or './subfolder' or absolute path)" } // createErrorResponse creates a JSON error response @@ -63,7 +67,10 @@ func (t ChangeDirectoryTool) Call(ctx context.Context, input string) (string, er return t.createErrorResponse(err, fmt.Sprintf("Directory %s does not exist: %s", absPath, err.Error())) } if !info.IsDir() { - return t.createErrorResponse(fmt.Errorf("%s is not a directory", absPath), fmt.Sprintf("%s is not a directory", absPath)) + return t.createErrorResponse( + fmt.Errorf("%s is not a directory", absPath), + fmt.Sprintf("%s is not a directory", absPath), + ) } // Change directory diff --git a/cli/azd/internal/agent/tools/io/copy_file.go b/cli/azd/internal/agent/tools/io/copy_file.go index 2d0d573295f..0272421454a 100644 --- a/cli/azd/internal/agent/tools/io/copy_file.go +++ b/cli/azd/internal/agent/tools/io/copy_file.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -60,14 +63,23 @@ func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { // Parse as JSON - this is now required if err := json.Unmarshal([]byte(cleanInput), ¶ms); err != nil { - return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"source\": \"file.txt\", \"destination\": \"backup.txt\"}", err.Error())) + return t.createErrorResponse( + err, + fmt.Sprintf( + "Invalid JSON input: %s. Expected format: {\"source\": \"file.txt\", \"destination\": \"backup.txt\"}", + err.Error(), + ), + ) } source := strings.TrimSpace(params.Source) destination := strings.TrimSpace(params.Destination) if source == "" || destination == "" { - return t.createErrorResponse(fmt.Errorf("both source and destination paths are required"), "Both source and destination paths are required") + return t.createErrorResponse( + fmt.Errorf("both source and destination paths are required"), + "Both source and destination paths are required", + ) } // Check if source file exists @@ -77,7 +89,10 @@ func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { } if sourceInfo.IsDir() { - return t.createErrorResponse(fmt.Errorf("source %s is a directory", source), fmt.Sprintf("Source %s is a directory. Use copy_directory for directories", source)) + return t.createErrorResponse( + fmt.Errorf("source %s is a directory", source), + fmt.Sprintf("Source %s is a directory. Use copy_directory for directories", source), + ) } // Open source file diff --git a/cli/azd/internal/agent/tools/io/create_directory.go b/cli/azd/internal/agent/tools/io/create_directory.go index 79db58865cb..57f2e83710e 100644 --- a/cli/azd/internal/agent/tools/io/create_directory.go +++ b/cli/azd/internal/agent/tools/io/create_directory.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -18,7 +21,8 @@ func (t CreateDirectoryTool) Name() string { } func (t CreateDirectoryTool) Description() string { - return "Create a directory (and any necessary parent directories). Input: directory path (e.g., 'docs' or './src/components')" + return "Create a directory (and any necessary parent directories). 
" + + "Input: directory path (e.g., 'docs' or './src/components')" } // createErrorResponse creates a JSON error response @@ -63,7 +67,10 @@ func (t CreateDirectoryTool) Call(ctx context.Context, input string) (string, er } if !info.IsDir() { - return t.createErrorResponse(fmt.Errorf("%s exists but is not a directory", input), fmt.Sprintf("%s exists but is not a directory", input)) + return t.createErrorResponse( + fmt.Errorf("%s exists but is not a directory", input), + fmt.Sprintf("%s exists but is not a directory", input), + ) } // Create success response diff --git a/cli/azd/internal/agent/tools/io/current_directory.go b/cli/azd/internal/agent/tools/io/current_directory.go index 56256b3ea56..0ba2d925c3e 100644 --- a/cli/azd/internal/agent/tools/io/current_directory.go +++ b/cli/azd/internal/agent/tools/io/current_directory.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -17,7 +20,8 @@ func (t CurrentDirectoryTool) Name() string { } func (t CurrentDirectoryTool) Description() string { - return "Get the current working directory to understand the project context. Input: use 'current' or '.' (any input works)" + return "Get the current working directory to understand the project context. " + + "Input: use 'current' or '.' (any input works)" } // createErrorResponse creates a JSON error response diff --git a/cli/azd/internal/agent/tools/io/delete_directory.go b/cli/azd/internal/agent/tools/io/delete_directory.go index 3066cd2d403..27ae1413ce5 100644 --- a/cli/azd/internal/agent/tools/io/delete_directory.go +++ b/cli/azd/internal/agent/tools/io/delete_directory.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -62,7 +65,10 @@ func (t DeleteDirectoryTool) Call(ctx context.Context, input string) (string, er // Make sure it's a directory, not a file if !info.IsDir() { - return t.createErrorResponse(fmt.Errorf("%s is a file, not a directory", input), fmt.Sprintf("%s is a file, not a directory. Use delete_file to remove files", input)) + return t.createErrorResponse( + fmt.Errorf("%s is a file, not a directory", input), + fmt.Sprintf("%s is a file, not a directory. Use delete_file to remove files", input), + ) } // Count contents before deletion for reporting diff --git a/cli/azd/internal/agent/tools/io/delete_file.go b/cli/azd/internal/agent/tools/io/delete_file.go index e5333526286..828c0180e4a 100644 --- a/cli/azd/internal/agent/tools/io/delete_file.go +++ b/cli/azd/internal/agent/tools/io/delete_file.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -62,7 +65,10 @@ func (t DeleteFileTool) Call(ctx context.Context, input string) (string, error) // Make sure it's a file, not a directory if info.IsDir() { - return t.createErrorResponse(fmt.Errorf("%s is a directory, not a file", input), fmt.Sprintf("%s is a directory, not a file. Use delete_directory to remove directories", input)) + return t.createErrorResponse( + fmt.Errorf("%s is a directory, not a file", input), + fmt.Sprintf("%s is a directory, not a file. 
Use delete_directory to remove directories", input), + ) } fileSize := info.Size() diff --git a/cli/azd/internal/agent/tools/io/directory_list.go b/cli/azd/internal/agent/tools/io/directory_list.go index a5f6b92d089..7833026b3ad 100644 --- a/cli/azd/internal/agent/tools/io/directory_list.go +++ b/cli/azd/internal/agent/tools/io/directory_list.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -60,7 +63,10 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro // Parse as JSON - this is now required if err := json.Unmarshal([]byte(cleanInput), ¶ms); err != nil { - return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"path\": \".\", \"includeHidden\": false}", err.Error())) + return t.createErrorResponse( + err, + fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"path\": \".\", \"includeHidden\": false}", err.Error()), + ) } // Validate required path field @@ -97,7 +103,10 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro } if !info.IsDir() { - return t.createErrorResponse(fmt.Errorf("%s is not a directory", absPath), fmt.Sprintf("%s is not a directory", absPath)) + return t.createErrorResponse( + fmt.Errorf("%s is not a directory", absPath), + fmt.Sprintf("%s is not a directory", absPath), + ) } // Read directory contents diff --git a/cli/azd/internal/agent/tools/io/file_info.go b/cli/azd/internal/agent/tools/io/file_info.go index 57d53ddb906..f05763acf8f 100644 --- a/cli/azd/internal/agent/tools/io/file_info.go +++ b/cli/azd/internal/agent/tools/io/file_info.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -19,7 +22,8 @@ func (t FileInfoTool) Name() string { } func (t FileInfoTool) Description() string { - return "Get information about a file (size, modification time, permissions). Input: file path (e.g., 'data.txt' or './docs/readme.md'). Returns JSON with file information." + return "Get information about a file (size, modification time, permissions). " + + "Input: file path (e.g., 'data.txt' or './docs/readme.md'). Returns JSON with file information." } // createErrorResponse creates a JSON error response diff --git a/cli/azd/internal/agent/tools/io/file_search.go b/cli/azd/internal/agent/tools/io/file_search.go index 84ea580c93e..700274d1479 100644 --- a/cli/azd/internal/agent/tools/io/file_search.go +++ b/cli/azd/internal/agent/tools/io/file_search.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -24,7 +27,8 @@ func (t FileSearchTool) Name() string { } func (t FileSearchTool) Description() string { - return `Search for files matching a glob pattern in the current working directory using the doublestar library for full glob support. + return `Searches for files matching a glob pattern in the current working directory +using the doublestar library for full glob support. Input: JSON payload with the following structure: { @@ -96,13 +100,19 @@ func (t FileSearchTool) createErrorResponse(err error, message string) (string, func (t FileSearchTool) Call(ctx context.Context, input string) (string, error) { if input == "" { - return t.createErrorResponse(fmt.Errorf("input is required"), "Input is required. 
Expected JSON format: {\"pattern\": \"*.go\"}") + return t.createErrorResponse( + fmt.Errorf("input is required"), + "Input is required. Expected JSON format: {\"pattern\": \"*.go\"}", + ) } // Parse JSON input var req FileSearchRequest if err := json.Unmarshal([]byte(input), &req); err != nil { - return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"pattern\": \"*.go\", \"maxResults\": 50}", err.Error())) + return t.createErrorResponse( + err, + fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"pattern\": \"*.go\", \"maxResults\": 50}", err.Error()), + ) } // Validate required fields diff --git a/cli/azd/internal/agent/tools/io/loader.go b/cli/azd/internal/agent/tools/io/loader.go index bf5e95a9f3f..1880f0e15d5 100644 --- a/cli/azd/internal/agent/tools/io/loader.go +++ b/cli/azd/internal/agent/tools/io/loader.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( diff --git a/cli/azd/internal/agent/tools/io/move_file.go b/cli/azd/internal/agent/tools/io/move_file.go index 98d77ac6cc3..9956580c381 100644 --- a/cli/azd/internal/agent/tools/io/move_file.go +++ b/cli/azd/internal/agent/tools/io/move_file.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -18,7 +21,8 @@ func (t MoveFileTool) Name() string { } func (t MoveFileTool) Description() string { - return "Move or rename a file. Input format: 'source|destination' (e.g., 'old.txt|new.txt' or './file.txt|./folder/file.txt')" + return "Move or rename a file.\n" + + "Input format: 'source|destination' (e.g., 'old.txt|new.txt' or './file.txt|./folder/file.txt')" } // createErrorResponse creates a JSON error response @@ -48,7 +52,10 @@ func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { input = strings.TrimSpace(input) if input == "" { - return t.createErrorResponse(fmt.Errorf("input is required in format 'source|destination'"), "Input is required in format 'source|destination'") + return t.createErrorResponse( + fmt.Errorf("input is required in format 'source|destination'"), + "Input is required in format 'source|destination'", + ) } // Split on first occurrence of '|' to separate source from destination @@ -61,7 +68,10 @@ func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { destination := strings.TrimSpace(parts[1]) if source == "" || destination == "" { - return t.createErrorResponse(fmt.Errorf("both source and destination paths are required"), "Both source and destination paths are required") + return t.createErrorResponse( + fmt.Errorf("both source and destination paths are required"), + "Both source and destination paths are required", + ) } // Check if source exists @@ -75,7 +85,10 @@ func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { // Check if destination already exists if _, err := os.Stat(destination); err == nil { - return t.createErrorResponse(fmt.Errorf("destination %s already exists", destination), fmt.Sprintf("Destination %s already exists", destination)) + return t.createErrorResponse( + fmt.Errorf("destination %s already exists", destination), + fmt.Sprintf("Destination %s already exists", destination), + ) } // Move/rename the file @@ -105,7 +118,13 @@ func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { Destination: destination, Type: fileType, Size: sourceInfo.Size(), - Message: 
fmt.Sprintf("Successfully moved %s from %s to %s (%d bytes)", fileType, source, destination, sourceInfo.Size()), + Message: fmt.Sprintf( + "Successfully moved %s from %s to %s (%d bytes)", + fileType, + source, + destination, + sourceInfo.Size(), + ), } // Convert to JSON diff --git a/cli/azd/internal/agent/tools/io/read_file.go b/cli/azd/internal/agent/tools/io/read_file.go index 9547d62054f..9a60c65c912 100644 --- a/cli/azd/internal/agent/tools/io/read_file.go +++ b/cli/azd/internal/agent/tools/io/read_file.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -54,7 +57,8 @@ func (t ReadFileTool) Name() string { } func (t ReadFileTool) Description() string { - return `Read file contents with intelligent handling for different file sizes and partial reads. Returns JSON response with file content and metadata. + return `Read file contents with intelligent handling for different file sizes and partial reads. +Returns JSON response with file content and metadata. Input: JSON payload with the following structure: { @@ -79,7 +83,8 @@ Examples: 5. Read single line: {"filePath": "package.json", "startLine": 42, "endLine": 42} -Files larger than 100KB are automatically truncated. Files over 1MB show size info only unless specific line range is requested. +Files larger than 100KB are automatically truncated. +Files over 1MB show size info only unless specific line range is requested. The input must be formatted as a single line valid JSON string.` } @@ -106,13 +111,23 @@ func (t ReadFileTool) createErrorResponse(err error, message string) (string, er func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { if input == "" { - return t.createErrorResponse(fmt.Errorf("empty input"), "No input provided. Expected JSON format: {\"filePath\": \"path/to/file.txt\"}") + return t.createErrorResponse( + fmt.Errorf("empty input"), + "No input provided. Expected JSON format: {\"filePath\": \"path/to/file.txt\"}", + ) } // Parse JSON input var req ReadFileRequest if err := json.Unmarshal([]byte(input), &req); err != nil { - return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"filePath\": \"path/to/file.txt\", \"startLine\": 1, \"endLine\": 50}", err.Error())) + return t.createErrorResponse( + err, + fmt.Sprintf( + "Invalid JSON input: %s. "+ + "Expected format: {\"filePath\": \"path/to/file.txt\", \"startLine\": 1, \"endLine\": 50}", + err.Error(), + ), + ) } // Validate required fields @@ -124,19 +139,32 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { fileInfo, err := os.Stat(req.FilePath) if err != nil { if os.IsNotExist(err) { - return t.createErrorResponse(err, fmt.Sprintf("File does not exist: %s. Please check file path spelling and location", req.FilePath)) + return t.createErrorResponse( + err, + fmt.Sprintf("File does not exist: %s. Please check file path spelling and location", req.FilePath), + ) } return t.createErrorResponse(err, fmt.Sprintf("Cannot access file %s: %s", req.FilePath, err.Error())) } if fileInfo.IsDir() { - return t.createErrorResponse(fmt.Errorf("path is a directory"), fmt.Sprintf("%s is a directory, not a file. Use directory_list tool for directories", req.FilePath)) + return t.createErrorResponse( + fmt.Errorf("path is a directory"), + fmt.Sprintf("%s is a directory, not a file. 
Use directory_list tool for directories", req.FilePath), + ) } // Handle very large files (>1MB) - require line range const maxFileSize = 1024 * 1024 // 1MB if fileInfo.Size() > maxFileSize && req.StartLine == 0 && req.EndLine == 0 { - return t.createErrorResponse(fmt.Errorf("file too large"), fmt.Sprintf("File %s is too large (%d bytes). Please specify startLine and endLine to read specific sections", req.FilePath, fileInfo.Size())) + return t.createErrorResponse( + fmt.Errorf("file too large"), + fmt.Sprintf( + "File %s is too large (%d bytes). Please specify startLine and endLine to read specific sections", + req.FilePath, + fileInfo.Size(), + ), + ) } // Read file content @@ -178,10 +206,16 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { // Validate line range if startLine > totalLines { - return t.createErrorResponse(fmt.Errorf("start line out of range"), fmt.Sprintf("Start line %d is greater than total lines %d in file", startLine, totalLines)) + return t.createErrorResponse( + fmt.Errorf("start line out of range"), + fmt.Sprintf("Start line %d is greater than total lines %d in file", startLine, totalLines), + ) } if startLine > endLine { - return t.createErrorResponse(fmt.Errorf("invalid line range"), fmt.Sprintf("Start line %d is greater than end line %d", startLine, endLine)) + return t.createErrorResponse( + fmt.Errorf("invalid line range"), + fmt.Sprintf("Start line %d is greater than end line %d", startLine, endLine), + ) } // Adjust endLine if it exceeds total lines @@ -231,9 +265,14 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { // Set appropriate message if isPartial && lineRange != nil { - response.Message = fmt.Sprintf("Successfully read %d lines (%d-%d) from file", lineRange.LinesRead, lineRange.StartLine, lineRange.EndLine) + response.Message = fmt.Sprintf( + "Successfully read %d lines (%d-%d) from file", + lineRange.LinesRead, + lineRange.StartLine, + lineRange.EndLine, + ) } else if isTruncated { - response.Message = fmt.Sprintf("Successfully read file (content truncated due to size)") + response.Message = "Successfully read file (content truncated due to size)" } else { response.Message = fmt.Sprintf("Successfully read entire file (%d lines)", totalLines) } diff --git a/cli/azd/internal/agent/tools/io/write_file.go b/cli/azd/internal/agent/tools/io/write_file.go index 18ab45b42d0..e6f4d3f5790 100644 --- a/cli/azd/internal/agent/tools/io/write_file.go +++ b/cli/azd/internal/agent/tools/io/write_file.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -55,7 +58,8 @@ func (t WriteFileTool) Name() string { } func (t WriteFileTool) Description() string { - return `Comprehensive file writing tool that handles full file writes, appends, and line-based partial updates. Returns JSON response with operation details. + return `Comprehensive file writing tool that handles full file writes, appends, and line-based partial updates. +Returns JSON response with operation details. Input: JSON payload with the following structure: { @@ -133,7 +137,14 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { // Debug: Check for common JSON issues input = strings.TrimSpace(input) if !strings.HasPrefix(input, "{") || !strings.HasSuffix(input, "}") { - return t.createErrorResponse(fmt.Errorf("malformed JSON structure"), fmt.Sprintf("Invalid JSON input: Input does not appear to be valid JSON object. 
Starts with: %q, Ends with: %q", input[:min(10, len(input))], input[max(0, len(input)-10):])) + return t.createErrorResponse( + fmt.Errorf("malformed JSON structure"), + fmt.Sprintf( + "Invalid JSON input: Input does not appear to be valid JSON object. Starts with: %q, Ends with: %q", + input[:min(10, len(input))], + input[max(0, len(input)-10):], + ), + ) } // Parse JSON input @@ -144,7 +155,10 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { if len(input) > 200 { truncatedInput = input[:200] + "...[truncated]" } - return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input. Error: %s. Input (first 200 chars): %s", err.Error(), truncatedInput)) + return t.createErrorResponse( + err, + fmt.Sprintf("Invalid JSON input. Error: %s. Input (first 200 chars): %s", err.Error(), truncatedInput), + ) } // Validate required fields @@ -165,20 +179,33 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { // If any line number is provided, both must be provided and valid if hasStartLine || hasEndLine { if !hasStartLine || !hasEndLine { - return t.createErrorResponse(fmt.Errorf("both startLine and endLine must be provided for partial write"), "Both startLine and endLine must be provided for partial write") + return t.createErrorResponse( + fmt.Errorf("both startLine and endLine must be provided for partial write"), + "Both startLine and endLine must be provided for partial write", + ) } // Validate that file exists for partial write BEFORE attempting filePath := strings.TrimSpace(req.Filename) if _, err := os.Stat(filePath); os.IsNotExist(err) { - return t.createErrorResponse(err, fmt.Sprintf("Cannot perform partial write on file '%s' because it does not exist. For new files, omit startLine and endLine parameters to create the entire file", filePath)) + return t.createErrorResponse( + err, + fmt.Sprintf( + "Cannot perform partial write on file '%s' because it does not exist. 
"+ + "For new files, omit startLine and endLine parameters to create the entire file", + filePath, + ), + ) } // Smart write mode: this should be a partial write if mode == "write" { return t.handlePartialWrite(ctx, req) } else { - return t.createErrorResponse(fmt.Errorf("startLine and endLine can only be used with write mode"), "startLine and endLine can only be used with write mode") + return t.createErrorResponse( + fmt.Errorf("startLine and endLine can only be used with write mode"), + "startLine and endLine can only be used with write mode", + ) } } @@ -196,7 +223,10 @@ func (t WriteFileTool) handlePartialWrite(ctx context.Context, req WriteFileRequ return t.createErrorResponse(fmt.Errorf("invalid endLine: %d", req.EndLine), "endLine must be >= 1") } if req.StartLine > req.EndLine { - return t.createErrorResponse(fmt.Errorf("invalid line range: startLine=%d > endLine=%d", req.StartLine, req.EndLine), "startLine cannot be greater than endLine") + return t.createErrorResponse( + fmt.Errorf("invalid line range: startLine=%d > endLine=%d", req.StartLine, req.EndLine), + "startLine cannot be greater than endLine", + ) } filePath := strings.TrimSpace(req.Filename) @@ -256,14 +286,18 @@ func (t WriteFileTool) handlePartialWrite(ctx context.Context, req WriteFileRequ finalContent := strings.Join(result, lineEnding) // If original file had trailing newline, preserve it - if len(fileBytes) > 0 && (string(fileBytes[len(fileBytes)-1:]) == "\n" || strings.HasSuffix(string(fileBytes), lineEnding)) { + if len(fileBytes) > 0 && + (string(fileBytes[len(fileBytes)-1:]) == "\n" || strings.HasSuffix(string(fileBytes), lineEnding)) { finalContent += lineEnding } // Write the updated content - err = os.WriteFile(filePath, []byte(finalContent), 0644) + err = os.WriteFile(filePath, []byte(finalContent), 0600) if err != nil { - return t.createErrorResponse(err, fmt.Sprintf("Failed to write updated content to file %s: %s", filePath, err.Error())) + return t.createErrorResponse( + err, + fmt.Sprintf("Failed to write updated content to file %s: %s", filePath, err.Error()), + ) } // Get file info @@ -311,7 +345,10 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ // Provide feedback for large content if len(content) > 10000 { - fmt.Printf("📝 Large content detected (%d chars). Consider breaking into smaller edits for better reliability.\n", len(content)) + fmt.Printf( + "📝 Large content detected (%d chars). Consider breaking into smaller edits for better reliability.\n", + len(content), + ) } // Ensure directory exists @@ -325,27 +362,39 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ switch mode { case "create": if _, err := os.Stat(filePath); err == nil { - return t.createErrorResponse(fmt.Errorf("file %s already exists (create mode)", filePath), fmt.Sprintf("File %s already exists. Cannot create file in 'create' mode when file already exists", filePath)) + return t.createErrorResponse( + fmt.Errorf("file %s already exists (create mode)", filePath), + fmt.Sprintf( + "File %s already exists. 
Cannot create file in 'create' mode when file already exists", + filePath, + ), + ) } - err = os.WriteFile(filePath, []byte(content), 0644) + err = os.WriteFile(filePath, []byte(content), 0600) operation = "Created" case "append": - file, openErr := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + file, openErr := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600) if openErr != nil { - return t.createErrorResponse(openErr, fmt.Sprintf("Failed to open file for append %s: %s", filePath, openErr.Error())) + return t.createErrorResponse( + openErr, + fmt.Sprintf("Failed to open file for append %s: %s", filePath, openErr.Error()), + ) } defer file.Close() _, err = file.WriteString(content) operation = "Appended to" default: // "write" - err = os.WriteFile(filePath, []byte(content), 0644) + err = os.WriteFile(filePath, []byte(content), 0600) operation = "Wrote" } if err != nil { - return t.createErrorResponse(err, fmt.Sprintf("Failed to %s file %s: %s", strings.ToLower(operation), filePath, err.Error())) + return t.createErrorResponse( + err, + fmt.Sprintf("Failed to %s file %s: %s", strings.ToLower(operation), filePath, err.Error()), + ) } // Get file size for verification diff --git a/cli/azd/internal/agent/tools/io/write_file_test.go b/cli/azd/internal/agent/tools/io/write_file_test.go index 2aacfe62cdb..05fb0cb4937 100644 --- a/cli/azd/internal/agent/tools/io/write_file_test.go +++ b/cli/azd/internal/agent/tools/io/write_file_test.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -101,11 +104,15 @@ func TestWriteFileTool_AppendMode(t *testing.T) { testFile := filepath.Join(tempDir, "test.txt") // Create initial file - err := os.WriteFile(testFile, []byte("Initial content"), 0644) + err := os.WriteFile(testFile, []byte("Initial content"), 0600) require.NoError(t, err) tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "\nAppended content", "mode": "append"}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "\nAppended content", "mode": "append"}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -131,7 +138,11 @@ func TestWriteFileTool_CreateMode_Success(t *testing.T) { testFile := filepath.Join(tempDir, "new-file.txt") tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New file content", "mode": "create"}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "New file content", "mode": "create"}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -155,7 +166,7 @@ func TestWriteFileTool_CreateMode_FileExists(t *testing.T) { tempDir := t.TempDir() testFile := filepath.Join(tempDir, "existing.txt") - err := os.WriteFile(testFile, []byte("Existing content"), 0644) + err := os.WriteFile(testFile, []byte("Existing content"), 0600) require.NoError(t, err) tool := WriteFileTool{} @@ -181,11 +192,15 @@ func TestWriteFileTool_PartialWrite_Basic(t *testing.T) { // Create initial file with multiple lines initialContent := "Line 1\nLine 2\nLine 3\nLine 4\nLine 5" - err := os.WriteFile(testFile, []byte(initialContent), 0644) + err := os.WriteFile(testFile, []byte(initialContent), 0600) require.NoError(t, err) tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", 
"\\\\") + `", "content": "Modified Line 2\nModified Line 3", "startLine": 2, "endLine": 3}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "Modified Line 2\nModified Line 3", "startLine": 2, "endLine": 3}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -217,11 +232,15 @@ func TestWriteFileTool_PartialWrite_SingleLine(t *testing.T) { // Create initial file initialContent := "Line 1\nLine 2\nLine 3" - err := os.WriteFile(testFile, []byte(initialContent), 0644) + err := os.WriteFile(testFile, []byte(initialContent), 0600) require.NoError(t, err) tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "Replaced Line 2", "startLine": 2, "endLine": 2}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "Replaced Line 2", "startLine": 2, "endLine": 2}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -251,12 +270,16 @@ func TestWriteFileTool_PartialWrite_SingleLineToMultipleLines(t *testing.T) { // Create initial file initialContent := "Line 1\nLine 2\nLine 3\nLine 4" - err := os.WriteFile(testFile, []byte(initialContent), 0644) + err := os.WriteFile(testFile, []byte(initialContent), 0600) require.NoError(t, err) tool := WriteFileTool{} // Replace single line 2 with multiple lines - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New Line 2a\nNew Line 2b\nNew Line 2c", "startLine": 2, "endLine": 2}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "New Line 2a\nNew Line 2b\nNew Line 2c", "startLine": 2, "endLine": 2}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -287,7 +310,11 @@ func TestWriteFileTool_PartialWrite_FileNotExists(t *testing.T) { testFile := filepath.Join(tempDir, "nonexistent.txt") tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New content", "startLine": 1, "endLine": 1}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "New content", "startLine": 1, "endLine": 1}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -304,7 +331,7 @@ func TestWriteFileTool_PartialWrite_InvalidLineNumbers(t *testing.T) { tempDir := t.TempDir() testFile := filepath.Join(tempDir, "test.txt") - err := os.WriteFile(testFile, []byte("Line 1\nLine 2"), 0644) + err := os.WriteFile(testFile, []byte("Line 1\nLine 2"), 0600) require.NoError(t, err) tool := WriteFileTool{} @@ -324,14 +351,22 @@ func TestWriteFileTool_PartialWrite_InvalidLineNumbers(t *testing.T) { assert.Contains(t, result, "Both startLine and endLine must be provided") // Test startLine < 1 (this will trigger the partial write validation) - input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "startLine": 0, "endLine": 1}` + input = `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "content", "startLine": 0, "endLine": 1}` result, err = tool.Call(context.Background(), input) assert.NoError(t, err) assert.Contains(t, result, "error") assert.Contains(t, result, "Both startLine and endLine must be provided") // 0 is treated as "not provided" // Test valid line numbers but startLine > endLine - input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", 
"content": "content", "startLine": 3, "endLine": 1}` + input = `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "content", "startLine": 3, "endLine": 1}` result, err = tool.Call(context.Background(), input) assert.NoError(t, err) assert.Contains(t, result, "error") @@ -345,12 +380,16 @@ func TestWriteFileTool_PartialWrite_BeyondFileLength(t *testing.T) { // Create initial file with 3 lines initialContent := "Line 1\nLine 2\nLine 3" - err := os.WriteFile(testFile, []byte(initialContent), 0644) + err := os.WriteFile(testFile, []byte(initialContent), 0600) require.NoError(t, err) tool := WriteFileTool{} // Try to replace lines 2-5 (beyond file length) - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New content", "startLine": 2, "endLine": 5}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "New content", "startLine": 2, "endLine": 5}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -377,11 +416,15 @@ func TestWriteFileTool_PartialWrite_PreserveLineEndings(t *testing.T) { // Create initial file with CRLF line endings initialContent := "Line 1\r\nLine 2\r\nLine 3\r\n" - err := os.WriteFile(testFile, []byte(initialContent), 0644) + err := os.WriteFile(testFile, []byte(initialContent), 0600) require.NoError(t, err) tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "Modified Line 2", "startLine": 2, "endLine": 2}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "Modified Line 2", "startLine": 2, "endLine": 2}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -440,19 +483,31 @@ func TestWriteFileTool_Integration_ComplexScenario(t *testing.T) { tool := WriteFileTool{} // Step 1: Create initial file - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "# Configuration File\nversion: 1.0\nname: test\nport: 8080\ndebug: false", "mode": "create"}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "# Configuration File\nversion: 1.0\nname: test\nport: 8080\ndebug: false", "mode": "create"}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) assert.Contains(t, result, `"success": true`) // Step 2: Append new section - input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "\n# Database Config\nhost: localhost\nport: 5432", "mode": "append"}` + input = `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "\n# Database Config\nhost: localhost\nport: 5432", "mode": "append"}` result, err = tool.Call(context.Background(), input) assert.NoError(t, err) assert.Contains(t, result, `"success": true`) // Step 3: Update specific lines (change port and debug) - input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "port: 9090\ndebug: true", "startLine": 4, "endLine": 5}` + input = `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "port: 9090\ndebug: true", "startLine": 4, "endLine": 5}` result, err = tool.Call(context.Background(), input) assert.NoError(t, err) @@ -465,6 +520,7 @@ func TestWriteFileTool_Integration_ComplexScenario(t *testing.T) { // Verify final content content, err := os.ReadFile(testFile) assert.NoError(t, err) + //nolint:lll 
expectedContent := "# Configuration File\nversion: 1.0\nname: test\nport: 9090\ndebug: true\n# Database Config\nhost: localhost\nport: 5432" assert.Equal(t, expectedContent, string(content)) } @@ -474,20 +530,28 @@ func TestWriteFileTool_PartialWrite_InvalidLineRanges(t *testing.T) { tempDir := t.TempDir() testFile := filepath.Join(tempDir, "test.txt") - err := os.WriteFile(testFile, []byte("Line 1\nLine 2"), 0644) + err := os.WriteFile(testFile, []byte("Line 1\nLine 2"), 0600) require.NoError(t, err) tool := WriteFileTool{} // Test negative startLine (will be handled by partial write validation) - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "startLine": -1, "endLine": 1}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "content", "startLine": -1, "endLine": 1}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) assert.Contains(t, result, "error") assert.Contains(t, result, "startLine must be") // Test negative endLine - input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "startLine": 1, "endLine": -1}` + input = `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "content", "startLine": 1, "endLine": -1}` result, err = tool.Call(context.Background(), input) assert.NoError(t, err) assert.Contains(t, result, "error") diff --git a/cli/azd/internal/agent/tools/loader.go b/cli/azd/internal/agent/tools/loader.go index 75be6baefe6..ec573ceac60 100644 --- a/cli/azd/internal/agent/tools/loader.go +++ b/cli/azd/internal/agent/tools/loader.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package tools import ( diff --git a/cli/azd/internal/agent/tools/mcp/loader.go b/cli/azd/internal/agent/tools/mcp/loader.go index 7ad5fdc1bd1..76346c11113 100644 --- a/cli/azd/internal/agent/tools/mcp/loader.go +++ b/cli/azd/internal/agent/tools/mcp/loader.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package mcp import ( diff --git a/cli/azd/internal/agent/tools/mcp/sampling_handler.go b/cli/azd/internal/agent/tools/mcp/sampling_handler.go index ffd948429c6..0af9f051f4a 100644 --- a/cli/azd/internal/agent/tools/mcp/sampling_handler.go +++ b/cli/azd/internal/agent/tools/mcp/sampling_handler.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ package mcp import ( @@ -46,7 +49,10 @@ func (h *McpSamplingHandler) cleanContent(content string) string { return content } -func (h *McpSamplingHandler) CreateMessage(ctx context.Context, request mcp.CreateMessageRequest) (*mcp.CreateMessageResult, error) { +func (h *McpSamplingHandler) CreateMessage( + ctx context.Context, + request mcp.CreateMessageRequest, +) (*mcp.CreateMessageResult, error) { if h.debug { requestJson, err := json.MarshalIndent(request, "", " ") if err != nil { diff --git a/cli/azd/internal/agent/tools/weather/loader.go b/cli/azd/internal/agent/tools/weather/loader.go deleted file mode 100644 index afdf7894f68..00000000000 --- a/cli/azd/internal/agent/tools/weather/loader.go +++ /dev/null @@ -1,18 +0,0 @@ -package weather - -import ( - "github.com/tmc/langchaingo/tools" -) - -// WeatherToolsLoader loads weather-related tools -type WeatherToolsLoader struct{} - -func NewWeatherToolsLoader() *WeatherToolsLoader { - return &WeatherToolsLoader{} -} - -func (l *WeatherToolsLoader) LoadTools() ([]tools.Tool, error) { - return []tools.Tool{ - &WeatherTool{}, - }, nil -} diff --git a/cli/azd/internal/agent/tools/weather/weather.go b/cli/azd/internal/agent/tools/weather/weather.go deleted file mode 100644 index 0f8837c5124..00000000000 --- a/cli/azd/internal/agent/tools/weather/weather.go +++ /dev/null @@ -1,121 +0,0 @@ -package weather - -import ( - "context" - "fmt" - "math/rand" - "strings" - "time" - - "github.com/tmc/langchaingo/callbacks" -) - -// WeatherTool implements the Tool interface for getting weather information -type WeatherTool struct { - CallbacksHandler callbacks.Handler -} - -func (t WeatherTool) Name() string { - return "weather" -} - -func (t WeatherTool) Description() string { - return "Get current weather conditions for a city. 
Input: city name (e.g., 'San Diego' or 'New York')" -} - -func (t WeatherTool) Call(ctx context.Context, input string) (string, error) { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("weather: %s", input)) - } - - city := strings.TrimSpace(input) - if city == "" { - err := fmt.Errorf("city name is required") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err - } - - // Initialize random seed based on current time - rand.Seed(time.Now().UnixNano()) - - // Generate more realistic temperature based on city - var temperature int - cityLower := strings.ToLower(city) - - // Assign temperature ranges based on typical climate - if strings.Contains(cityLower, "san diego") || strings.Contains(cityLower, "los angeles") || - strings.Contains(cityLower, "miami") || strings.Contains(cityLower, "phoenix") { - // Warm climate cities: 65-85°F - temperature = rand.Intn(21) + 65 - } else if strings.Contains(cityLower, "seattle") || strings.Contains(cityLower, "portland") || - strings.Contains(cityLower, "chicago") || strings.Contains(cityLower, "new york") { - // Moderate climate cities: 45-75°F - temperature = rand.Intn(31) + 45 - } else if strings.Contains(cityLower, "alaska") || strings.Contains(cityLower, "minneapolis") || - strings.Contains(cityLower, "denver") { - // Cold climate cities: 25-55°F - temperature = rand.Intn(31) + 25 - } else { - // Default range for unknown cities: 50-80°F - temperature = rand.Intn(31) + 50 - } - - // Weather conditions with probabilities - conditions := []string{ - "sunny", "sunny", "sunny", "sunny", // 40% chance - "partly cloudy", "partly cloudy", "partly cloudy", // 30% chance - "cloudy", "cloudy", // 20% chance - "rainy", // 10% chance - } - condition := conditions[rand.Intn(len(conditions))] - - // Add some variety to the response format - responseTemplates := []string{ - "It's %d°F and %s in %s", - "Current weather in %s: %d°F and %s", - "The weather in %s is %d°F with %s skies", - "%s is experiencing %s weather at %d°F", - } - - template := responseTemplates[rand.Intn(len(responseTemplates))] - - var response string - if strings.Contains(template, "It's %d°F and %s in %s") { - response = fmt.Sprintf(template, temperature, condition, city) - } else if strings.Contains(template, "Current weather in %s: %d°F and %s") { - response = fmt.Sprintf(template, city, temperature, condition) - } else if strings.Contains(template, "The weather in %s is %d°F with %s skies") { - response = fmt.Sprintf(template, city, temperature, condition) - } else { - // "%s is experiencing %s weather at %d°F" - response = fmt.Sprintf(template, city, condition, temperature) - } - - // Add some additional details occasionally - if rand.Intn(3) == 0 { - extras := []string{ - "Light breeze from the west.", - "Humidity is comfortable.", - "Perfect day to be outside!", - "Visibility is excellent.", - "No precipitation expected.", - } - if condition == "rainy" { - extras = []string{ - "Light rain expected to continue.", - "Bring an umbrella!", - "Rain should clear up by evening.", - } - } - extra := extras[rand.Intn(len(extras))] - response += ". 
" + extra - } - - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, response) - } - - return response, nil -} diff --git a/cli/azd/pkg/llm/azure_openai.go b/cli/azd/pkg/llm/azure_openai.go index 8abe43f5f25..e1108982377 100644 --- a/cli/azd/pkg/llm/azure_openai.go +++ b/cli/azd/pkg/llm/azure_openai.go @@ -42,6 +42,20 @@ func (p *AzureOpenAiModelProvider) CreateModelContainer(opts ...ModelOption) (*M return nil, err } + // Validate required attributes + requiredFields := map[string]string{ + "token": modelConfig.Token, + "endpoint": modelConfig.Endpoint, + "apiVersion": modelConfig.ApiVersion, + "model": modelConfig.Model, + } + + for fieldName, fieldValue := range requiredFields { + if fieldValue == "" { + return nil, fmt.Errorf("azure openai model configuration is missing required '%s' field", fieldName) + } + } + modelContainer := &ModelContainer{ Type: LlmTypeOpenAIAzure, IsLocal: false, @@ -77,7 +91,7 @@ func (p *AzureOpenAiModelProvider) CreateModelContainer(opts ...ModelOption) (*M } openAiModel.CallbacksHandler = modelContainer.logger - modelContainer.Model = NewModel(openAiModel, callOptions...) + modelContainer.Model = newModelWithCallOptions(openAiModel, callOptions...) return modelContainer, nil } diff --git a/cli/azd/pkg/llm/manager.go b/cli/azd/pkg/llm/manager.go index 0c9c4a207ec..377e5d2311f 100644 --- a/cli/azd/pkg/llm/manager.go +++ b/cli/azd/pkg/llm/manager.go @@ -120,8 +120,3 @@ func (m Manager) GetDefaultModel(opts ...ModelOption) (*ModelContainer, error) { func (m Manager) GetModel(modelType LlmType, opts ...ModelOption) (*ModelContainer, error) { return m.ModelFactory.CreateModelContainer(modelType, opts...) } - -var availableLlmTypes = []LlmType{ - LlmTypeOpenAIAzure, - LlmTypeOllama, -} diff --git a/cli/azd/pkg/llm/model.go b/cli/azd/pkg/llm/model.go index b00e7730c49..5bfba259675 100644 --- a/cli/azd/pkg/llm/model.go +++ b/cli/azd/pkg/llm/model.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package llm import ( @@ -7,22 +10,26 @@ import ( "github.com/tmc/langchaingo/llms" ) -var _ llms.Model = (*Model)(nil) +var _ llms.Model = (*modelWithCallOptions)(nil) // / Wraps an langchaingo model to allow specifying specific call options at create time -type Model struct { +type modelWithCallOptions struct { model llms.Model options []llms.CallOption } -func NewModel(model llms.Model, options ...llms.CallOption) *Model { - return &Model{ +func newModelWithCallOptions(model llms.Model, options ...llms.CallOption) *modelWithCallOptions { + return &modelWithCallOptions{ model: model, options: options, } } -func (m *Model) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error) { +func (m *modelWithCallOptions) GenerateContent( + ctx context.Context, + messages []llms.MessageContent, + options ...llms.CallOption, +) (*llms.ContentResponse, error) { allOptions := []llms.CallOption{} allOptions = append(allOptions, m.options...) allOptions = append(allOptions, options...) @@ -30,6 +37,6 @@ func (m *Model) GenerateContent(ctx context.Context, messages []llms.MessageCont return m.model.GenerateContent(ctx, messages, allOptions...) 
} -func (m *Model) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) { +func (m *modelWithCallOptions) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) { return "", fmt.Errorf("Deprecated, call GenerateContent") } diff --git a/cli/azd/pkg/llm/model_factory.go b/cli/azd/pkg/llm/model_factory.go index 610c5118dba..3994b1d3e08 100644 --- a/cli/azd/pkg/llm/model_factory.go +++ b/cli/azd/pkg/llm/model_factory.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package llm import ( @@ -21,7 +24,8 @@ func (f *ModelFactory) CreateModelContainer(modelType LlmType, opts ...ModelOpti var modelProvider ModelProvider if err := f.serviceLocator.ResolveNamed(string(modelType), &modelProvider); err != nil { return nil, &internal.ErrorWithSuggestion{ - Err: fmt.Errorf("The model type '%s' is not supported. Support types include: azure, ollama", modelType), + Err: fmt.Errorf("The model type '%s' is not supported. Support types include: azure, ollama", modelType), + //nolint:lll Suggestion: "Use `azd config set` to set the model type and any model specific options, such as the model name or version.", } } diff --git a/cli/azd/pkg/llm/ollama.go b/cli/azd/pkg/llm/ollama.go index 3c58cd970b8..27d6e1e83af 100644 --- a/cli/azd/pkg/llm/ollama.go +++ b/cli/azd/pkg/llm/ollama.go @@ -5,11 +5,15 @@ package llm import ( "github.com/azure/azure-dev/cli/azd/pkg/config" + "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/llms/ollama" ) type OllamaModelConfig struct { - Model string `json:"model"` + Model string `json:"model"` + Version string `json:"version"` + Temperature *float64 `json:"temperature"` + MaxTokens *int `json:"maxTokens"` } type OllamaModelProvider struct { @@ -28,7 +32,7 @@ func (p *OllamaModelProvider) CreateModelContainer(opts ...ModelOption) (*ModelC return nil, err } - defaultLlamaVersion := "llama3" + defaultModel := "llama3" var modelConfig OllamaModelConfig ok, err := userConfig.GetSection("ai.agent.model.ollama", &modelConfig) @@ -37,15 +41,20 @@ func (p *OllamaModelProvider) CreateModelContainer(opts ...ModelOption) (*ModelC } if ok { - defaultLlamaVersion = modelConfig.Model + defaultModel = modelConfig.Model + } + + // Set defaults if not defined + if modelConfig.Version == "" { + modelConfig.Version = "latest" } modelContainer := &ModelContainer{ Type: LlmTypeOllama, IsLocal: true, Metadata: ModelMetadata{ - Name: defaultLlamaVersion, - Version: "latest", + Name: defaultModel, + Version: modelConfig.Version, }, } @@ -53,15 +62,24 @@ func (p *OllamaModelProvider) CreateModelContainer(opts ...ModelOption) (*ModelC opt(modelContainer) } - model, err := ollama.New( - ollama.WithModel(defaultLlamaVersion), + ollamaModel, err := ollama.New( + ollama.WithModel(defaultModel), ) if err != nil { return nil, err } - model.CallbacksHandler = modelContainer.logger - modelContainer.Model = model + callOptions := []llms.CallOption{} + if modelConfig.Temperature != nil { + callOptions = append(callOptions, llms.WithTemperature(*modelConfig.Temperature)) + } + + if modelConfig.MaxTokens != nil { + callOptions = append(callOptions, llms.WithMaxTokens(*modelConfig.MaxTokens)) + } + + ollamaModel.CallbacksHandler = modelContainer.logger + modelContainer.Model = newModelWithCallOptions(ollamaModel, callOptions...) 
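// The wrapped model applies the configured call options (temperature, max tokens) from
// the ai.agent.model.ollama config section to every GenerateContent request.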
return modelContainer, nil } From 56a68eaf1b98e69386dcb1208181d7f940263a5a Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Thu, 7 Aug 2025 12:38:36 -0700 Subject: [PATCH 033/116] Fixes more spell linter issues --- cli/azd/.vscode/cspell-azd-dictionary.txt | 8 ++++++++ cli/azd/.vscode/cspell.yaml | 1 + 2 files changed, 9 insertions(+) diff --git a/cli/azd/.vscode/cspell-azd-dictionary.txt b/cli/azd/.vscode/cspell-azd-dictionary.txt index 979b11834f0..63560f9360c 100644 --- a/cli/azd/.vscode/cspell-azd-dictionary.txt +++ b/cli/azd/.vscode/cspell-azd-dictionary.txt @@ -17,6 +17,7 @@ appinsightsexporter appinsightsstorage appplatform appservice +appuser arget armapimanagement armappconfiguration @@ -109,6 +110,7 @@ envsubst errcheck errorinfo errorlint +eventhub eventhubs executil flexconsumption @@ -158,6 +160,8 @@ mockarmresources mockazcli mongojs mvnw +myapp +myservice mysqladmin mysqlclient mysqldb @@ -193,8 +197,12 @@ psanford psycopg psycopgbinary pulumi +pycache pyapp pyproject +pytest +PYTHONDONTWRITEBYTECODE +PYTHONUNBUFFERED pyvenv rabbitmq reauthentication diff --git a/cli/azd/.vscode/cspell.yaml b/cli/azd/.vscode/cspell.yaml index da90d181a54..628897e2098 100644 --- a/cli/azd/.vscode/cspell.yaml +++ b/cli/azd/.vscode/cspell.yaml @@ -19,6 +19,7 @@ words: - idxs # Looks like the protogen has a spelling error for panics - pancis + - proto - protobuf - protoc - protoreflect From 10638e357cdce462167d3dc832445099d32ec22f Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Thu, 7 Aug 2025 17:42:08 -0700 Subject: [PATCH 034/116] Moves azd commands to MCP server tools --- cli/azd/cmd/mcp.go | 112 ++ cli/azd/cmd/root.go | 1 + cli/azd/docs/new-azd-command.md | 1325 +++++++++++++++++ .../tools/azd/azd_architecture_planning.go | 39 - .../tools/azd/azd_azure_yaml_generation.go | 39 - .../agent/tools/azd/azd_discovery_analysis.go | 39 - .../agent/tools/azd/azd_docker_generation.go | 39 - .../tools/azd/azd_iac_generation_rules.go | 39 - .../azd/azd_infrastructure_generation.go | 38 - .../internal/agent/tools/azd/azd_plan_init.go | 39 - .../agent/tools/azd/azd_project_validation.go | 46 - .../agent/tools/azd/azd_yaml_schema.go | 32 - cli/azd/internal/agent/tools/azd/loader.go | 38 - cli/azd/internal/agent/tools/loader.go | 2 - cli/azd/internal/agent/tools/mcp/mcp.json | 9 +- .../mcp/tools/azd_architecture_planning.go | 36 + .../mcp/tools/azd_azure_yaml_generation.go | 36 + .../mcp/tools/azd_discovery_analysis.go | 36 + .../mcp/tools/azd_docker_generation.go | 36 + .../mcp/tools/azd_iac_generation_rules.go | 36 + .../tools/azd_infrastructure_generation.go | 35 + cli/azd/internal/mcp/tools/azd_plan_init.go | 36 + .../mcp/tools/azd_project_validation.go | 36 + cli/azd/internal/mcp/tools/azd_yaml_schema.go | 27 + .../prompts/azd_architecture_planning.md | 0 .../prompts/azd_azure_yaml_generation.md | 0 .../tools}/prompts/azd_discovery_analysis.md | 0 .../tools}/prompts/azd_docker_generation.md | 0 .../prompts/azd_iac_generation_rules.md | 0 .../prompts/azd_infrastructure_generation.md | 0 .../tools}/prompts/azd_plan_init.md | 0 .../tools}/prompts/azd_project_validation.md | 0 .../azd => mcp/tools}/prompts/azure.yaml.json | 0 .../azd => mcp/tools}/prompts/prompts.go | 0 34 files changed, 1759 insertions(+), 392 deletions(-) create mode 100644 cli/azd/cmd/mcp.go create mode 100644 cli/azd/docs/new-azd-command.md delete mode 100644 cli/azd/internal/agent/tools/azd/azd_architecture_planning.go delete mode 100644 cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go delete mode 100644 
cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go delete mode 100644 cli/azd/internal/agent/tools/azd/azd_docker_generation.go delete mode 100644 cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go delete mode 100644 cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go delete mode 100644 cli/azd/internal/agent/tools/azd/azd_plan_init.go delete mode 100644 cli/azd/internal/agent/tools/azd/azd_project_validation.go delete mode 100644 cli/azd/internal/agent/tools/azd/azd_yaml_schema.go delete mode 100644 cli/azd/internal/agent/tools/azd/loader.go create mode 100644 cli/azd/internal/mcp/tools/azd_architecture_planning.go create mode 100644 cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go create mode 100644 cli/azd/internal/mcp/tools/azd_discovery_analysis.go create mode 100644 cli/azd/internal/mcp/tools/azd_docker_generation.go create mode 100644 cli/azd/internal/mcp/tools/azd_iac_generation_rules.go create mode 100644 cli/azd/internal/mcp/tools/azd_infrastructure_generation.go create mode 100644 cli/azd/internal/mcp/tools/azd_plan_init.go create mode 100644 cli/azd/internal/mcp/tools/azd_project_validation.go create mode 100644 cli/azd/internal/mcp/tools/azd_yaml_schema.go rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/azd_architecture_planning.md (100%) rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/azd_azure_yaml_generation.md (100%) rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/azd_discovery_analysis.md (100%) rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/azd_docker_generation.md (100%) rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/azd_iac_generation_rules.md (100%) rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/azd_infrastructure_generation.md (100%) rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/azd_plan_init.md (100%) rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/azd_project_validation.md (100%) rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/azure.yaml.json (100%) rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/prompts.go (100%) diff --git a/cli/azd/cmd/mcp.go b/cli/azd/cmd/mcp.go new file mode 100644 index 00000000000..b9c7652f3ec --- /dev/null +++ b/cli/azd/cmd/mcp.go @@ -0,0 +1,112 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package cmd + +import ( + "context" + "fmt" + "io" + + "github.com/azure/azure-dev/cli/azd/cmd/actions" + "github.com/azure/azure-dev/cli/azd/internal" + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools" + "github.com/azure/azure-dev/cli/azd/pkg/input" + "github.com/azure/azure-dev/cli/azd/pkg/output" + "github.com/mark3labs/mcp-go/server" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +// Register MCP commands +func mcpActions(root *actions.ActionDescriptor) *actions.ActionDescriptor { + group := root.Add("mcp", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "mcp", + Short: "Manage Model Context Protocol (MCP) server.", + }, + GroupingOptions: actions.CommandGroupOptions{ + RootLevelHelp: actions.CmdGroupAlpha, + }, + }) + + // azd mcp start + group.Add("start", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "start", + Short: "Starts the MCP server.", + Long: `Starts the Model Context Protocol (MCP) server. 
+ +This command starts an MCP server that can be used by MCP clients to access +azd functionality through the Model Context Protocol interface.`, + Args: cobra.NoArgs, + }, + OutputFormats: []output.Format{output.NoneFormat}, + DefaultFormat: output.NoneFormat, + ActionResolver: newMcpStartAction, + FlagsResolver: newMcpStartFlags, + }) + + return group +} + +// Flags for MCP start command +type mcpStartFlags struct { + global *internal.GlobalCommandOptions +} + +func newMcpStartFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpStartFlags { + flags := &mcpStartFlags{} + flags.Bind(cmd.Flags(), global) + return flags +} + +func (f *mcpStartFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + f.global = global +} + +// Action for MCP start command +type mcpStartAction struct { + flags *mcpStartFlags + console input.Console + writer io.Writer +} + +func newMcpStartAction( + flags *mcpStartFlags, + console input.Console, + writer io.Writer, +) actions.Action { + return &mcpStartAction{ + flags: flags, + console: console, + writer: writer, + } +} + +func (a *mcpStartAction) Run(ctx context.Context) (*actions.ActionResult, error) { + s := server.NewMCPServer( + "AZD MCP Server 🚀", "1.0.0", + server.WithToolCapabilities(true), + ) + s.EnableSampling() + + s.AddTools( + tools.NewAzdPlanInitTool(), + tools.NewAzdDiscoveryAnalysisTool(), + tools.NewAzdArchitecturePlanningTool(), + tools.NewAzdAzureYamlGenerationTool(), + tools.NewAzdDockerGenerationTool(), + tools.NewAzdInfrastructureGenerationTool(), + tools.NewAzdIacGenerationRulesTool(), + tools.NewAzdProjectValidationTool(), + tools.NewAzdYamlSchemaTool(), + ) + + // Start the server using stdio transport + if err := server.ServeStdio(s); err != nil { + fmt.Printf("Server error: %v\n", err) + } + + return nil, nil +} diff --git a/cli/azd/cmd/root.go b/cli/azd/cmd/root.go index 0572a8984b8..70e3153ce72 100644 --- a/cli/azd/cmd/root.go +++ b/cli/azd/cmd/root.go @@ -129,6 +129,7 @@ func NewRootCmd( templatesActions(root) authActions(root) hooksActions(root) + mcpActions(root) root.Add("version", &actions.ActionDescriptorOptions{ Command: &cobra.Command{ diff --git a/cli/azd/docs/new-azd-command.md b/cli/azd/docs/new-azd-command.md new file mode 100644 index 00000000000..26ecc5d5534 --- /dev/null +++ b/cli/azd/docs/new-azd-command.md @@ -0,0 +1,1325 @@ +# Adding New azd Commands - Comprehensive Guide + +This document provides detailed instructions for adding new commands or command groups to the Azure Developer CLI (azd). It's designed to enable both human developers and LLMs to systematically create new commands that integrate seamlessly with the existing azd architecture. + +## Table of Contents + +1. [Architecture Overview](#architecture-overview) +2. [File Structure and Naming Conventions](#file-structure-and-naming-conventions) +3. [Adding a New Top-Level Command Group](#adding-a-new-top-level-command-group) +4. [Adding Commands to Existing Groups](#adding-commands-to-existing-groups) +5. [Action Implementation Patterns](#action-implementation-patterns) +6. [Flags and Input Handling](#flags-and-input-handling) +7. [Output Formatting](#output-formatting) +8. [Error Handling](#error-handling) +9. [Integration with IoC Container](#integration-with-ioc-container) +10. 
[Complete Examples](#complete-examples)

## Architecture Overview

azd uses a layered architecture built on top of the [Cobra CLI library](https://github.com/spf13/cobra):

```
ActionDescriptor Tree → CobraBuilder → Cobra Commands → CLI
```

**Key Components:**
- **ActionDescriptor**: Higher-order component that describes commands, flags, middleware, and relationships
- **Action Interface**: Contains the actual command logic (`Run(ctx context.Context) (*ActionResult, error)`)
- **Flags**: Input parameters and options for commands
- **IoC Container**: Dependency injection system for resolving services
- **Output Formatters**: Handle JSON, Table, and None output formats

## File Structure and Naming Conventions

### File Organization

Commands should be organized following these patterns:

```
cmd/
├── root.go                  # Root command registration
├── <group>.go               # Top-level command groups (e.g., env.go, extension.go)
├── <command>.go             # Single commands (e.g., version.go, monitor.go)
└── actions/
    ├── action.go            # Action interface definitions
    └── action_descriptor.go # ActionDescriptor framework
```

### Naming Conventions

| Component | Pattern | Example |
|-----------|---------|---------|
| **File Names** | `<command>.go` | `extension.go`, `monitor.go` |
| **Command Groups** | `<group>Actions(root *ActionDescriptor)` | `extensionActions()`, `envActions()` |
| **Action Types** | `<command>Action` | `extensionListAction`, `envNewAction` |
| **Flag Types** | `<command>Flags` | `extensionListFlags`, `envNewFlags` |
| **Constructors** | `new<Type>` | `newExtensionListAction`, `newExtensionListFlags` |
| **Cobra Commands** | `new<Command>Cmd()` (when needed) | `newMonitorCmd()`, `newEnvListCmd()` |

## Adding a New Top-Level Command Group

### Step 1: Create the Command File

Create a new file: `cmd/<group>.go`

```go
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+ +package cmd + +import ( + "context" + "fmt" + "io" + + "github.com/azure/azure-dev/cli/azd/cmd/actions" + "github.com/azure/azure-dev/cli/azd/internal" + "github.com/azure/azure-dev/cli/azd/pkg/input" + "github.com/azure/azure-dev/cli/azd/pkg/output" + "github.com/azure/azure-dev/cli/azd/pkg/output/ux" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +// Register commands +func Actions(root *actions.ActionDescriptor) *actions.ActionDescriptor { + group := root.Add("", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "", + Aliases: []string{""}, // Optional + Short: "Manage resources.", + }, + GroupingOptions: actions.CommandGroupOptions{ + RootLevelHelp: actions.CmdGroupAzure, // Or appropriate group + }, + }) + + // Add subcommands here + // Example: azd list + group.Add("list", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "list", + Short: "List items.", + }, + OutputFormats: []output.Format{output.JsonFormat, output.TableFormat}, + DefaultFormat: output.TableFormat, + ActionResolver: newListAction, + FlagsResolver: newListFlags, + }) + + // Example: azd create + group.Add("create", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "create ", + Short: "Create a new item.", + Args: cobra.ExactArgs(1), + }, + OutputFormats: []output.Format{output.JsonFormat, output.NoneFormat}, + DefaultFormat: output.NoneFormat, + ActionResolver: newCreateAction, + FlagsResolver: newCreateFlags, + }) + + return group +} + +// Flags for list command +type ListFlags struct { + global *internal.GlobalCommandOptions + filter string + all bool + internal.EnvFlag +} + +func newListFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *ListFlags { + flags := &ListFlags{} + flags.Bind(cmd.Flags(), global) + return flags +} + +func (f *ListFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + local.StringVar(&f.filter, "filter", "", "Filter results by name pattern") + local.BoolVar(&f.all, "all", false, "Show all items including hidden ones") + f.EnvFlag.Bind(local, global) + f.global = global +} + +// Action for list command +type ListAction struct { + flags *ListFlags + formatter output.Formatter + console input.Console + writer io.Writer + // Add your service dependencies here + // exampleService *services.ExampleService +} + +func newListAction( + flags *ListFlags, + formatter output.Formatter, + console input.Console, + writer io.Writer, + // Add your service dependencies here + // exampleService *services.ExampleService, +) actions.Action { + return &ListAction{ + flags: flags, + formatter: formatter, + console: console, + writer: writer, + // exampleService: exampleService, + } +} + +type ListItem struct { + Name string `json:"name"` + Description string `json:"description"` + Status string `json:"status"` + Created string `json:"created"` +} + +func (a *ListAction) Run(ctx context.Context) (*actions.ActionResult, error) { + a.console.MessageUxItem(ctx, &ux.MessageTitle{ + Title: "List items (azd list)", + TitleNote: "Retrieving available items", + }) + + // TODO: Implement actual list logic + // items, err := a.exampleService.List(ctx, a.flags.filter) + // if err != nil { + // return nil, fmt.Errorf("failed to list items: %w", err) + // } + + // Example placeholder data + items := []ListItem{ + { + Name: "example-item", + Description: "An example item", + Status: "active", + Created: "2024-01-01", + }, + } + + if len(items) == 0 { + a.console.Message(ctx, output.WithWarningFormat("No items found.")) + 
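// Returning a nil ActionResult here ends the command without a result message or formatted output.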
return nil, nil + } + + if a.formatter.Kind() == output.TableFormat { + columns := []output.Column{ + { + Heading: "Name", + ValueTemplate: "{{.Name}}", + }, + { + Heading: "Description", + ValueTemplate: "{{.Description}}", + }, + { + Heading: "Status", + ValueTemplate: "{{.Status}}", + }, + { + Heading: "Created", + ValueTemplate: "{{.Created}}", + }, + } + + return nil, a.formatter.Format(items, a.writer, output.TableFormatterOptions{ + Columns: columns, + }) + } + + return nil, a.formatter.Format(items, a.writer, nil) +} + +// Flags for create command +type CreateFlags struct { + global *internal.GlobalCommandOptions + description string + force bool + internal.EnvFlag +} + +func newCreateFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *CreateFlags { + flags := &CreateFlags{} + flags.Bind(cmd.Flags(), global) + return flags +} + +func (f *CreateFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + local.StringVarP(&f.description, "description", "d", "", "Description for the new item") + local.BoolVarP(&f.force, "force", "f", false, "Force creation even if item exists") + f.EnvFlag.Bind(local, global) + f.global = global +} + +// Action for create command +type CreateAction struct { + args []string + flags *CreateFlags + console input.Console + // Add your service dependencies here + // exampleService *services.ExampleService +} + +func newCreateAction( + args []string, + flags *CreateFlags, + console input.Console, + // Add your service dependencies here + // exampleService *services.ExampleService, +) actions.Action { + return &CreateAction{ + args: args, + flags: flags, + console: console, + // exampleService: exampleService, + } +} + +func (a *CreateAction) Run(ctx context.Context) (*actions.ActionResult, error) { + itemName := a.args[0] + + a.console.MessageUxItem(ctx, &ux.MessageTitle{ + Title: "Create item (azd create)", + TitleNote: fmt.Sprintf("Creating new item '%s'", itemName), + }) + + stepMessage := fmt.Sprintf("Creating %s", output.WithHighLightFormat(itemName)) + a.console.ShowSpinner(ctx, stepMessage, input.Step) + + // TODO: Implement actual creation logic + // err := a.exampleService.Create(ctx, itemName, a.flags.description, a.flags.force) + // if err != nil { + // a.console.StopSpinner(ctx, stepMessage, input.StepFailed) + // return nil, fmt.Errorf("failed to create item: %w", err) + // } + + a.console.StopSpinner(ctx, stepMessage, input.StepDone) + + return &actions.ActionResult{ + Message: &actions.ResultMessage{ + Header: fmt.Sprintf("Successfully created item '%s'", itemName), + FollowUp: "Use 'azd list' to see all items.", + }, + }, nil +} +``` + +### Step 2: Register the Command Group + +Add the command group registration to `cmd/root.go`: + +```go +// In the NewRootCmd function, add your command group registration +func NewRootCmd(...) *cobra.Command { + // ... existing code ... + + configActions(root, opts) + envActions(root) + infraActions(root) + pipelineActions(root) + telemetryActions(root) + templatesActions(root) + authActions(root) + hooksActions(root) + Actions(root) // Add this line + + // ... rest of function ... 
+} +``` + +## Adding Commands to Existing Groups + +To add a new command to an existing command group (e.g., adding to `azd extension`): + +### Step 1: Add the Command to the Group + +In the existing command file (e.g., `cmd/extension.go`), add to the group registration function: + +```go +func extensionActions(root *actions.ActionDescriptor) *actions.ActionDescriptor { + group := root.Add("extension", &actions.ActionDescriptorOptions{ + // ... existing options ... + }) + + // ... existing commands ... + + // Add your new command + group.Add("validate", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "validate ", + Short: "Validate an extension configuration.", + Args: cobra.ExactArgs(1), + }, + OutputFormats: []output.Format{output.JsonFormat, output.NoneFormat}, + DefaultFormat: output.NoneFormat, + ActionResolver: newExtensionValidateAction, + FlagsResolver: newExtensionValidateFlags, + }) + + return group +} +``` + +### Step 2: Implement Flags and Action + +Add the flags and action implementation to the same file: + +```go +// Flags for the new command +type extensionValidateFlags struct { + strict bool + output string + global *internal.GlobalCommandOptions +} + +func newExtensionValidateFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *extensionValidateFlags { + flags := &extensionValidateFlags{} + flags.Bind(cmd.Flags(), global) + return flags +} + +func (f *extensionValidateFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + local.BoolVar(&f.strict, "strict", false, "Enable strict validation mode") + local.StringVar(&f.output, "output-file", "", "Write validation results to file") + f.global = global +} + +// Action implementation +type extensionValidateAction struct { + args []string + flags *extensionValidateFlags + console input.Console + extensionManager *extensions.Manager // Use existing service dependencies +} + +func newExtensionValidateAction( + args []string, + flags *extensionValidateFlags, + console input.Console, + extensionManager *extensions.Manager, +) actions.Action { + return &extensionValidateAction{ + args: args, + flags: flags, + console: console, + extensionManager: extensionManager, + } +} + +func (a *extensionValidateAction) Run(ctx context.Context) (*actions.ActionResult, error) { + extensionName := a.args[0] + + a.console.MessageUxItem(ctx, &ux.MessageTitle{ + Title: "Validate extension (azd extension validate)", + TitleNote: fmt.Sprintf("Validating extension '%s'", extensionName), + }) + + stepMessage := fmt.Sprintf("Validating %s", output.WithHighLightFormat(extensionName)) + a.console.ShowSpinner(ctx, stepMessage, input.Step) + + // TODO: Implement validation logic + // validationResult, err := a.extensionManager.Validate(ctx, extensionName, a.flags.strict) + // if err != nil { + // a.console.StopSpinner(ctx, stepMessage, input.StepFailed) + // return nil, fmt.Errorf("validation failed: %w", err) + // } + + a.console.StopSpinner(ctx, stepMessage, input.StepDone) + + return &actions.ActionResult{ + Message: &actions.ResultMessage{ + Header: fmt.Sprintf("Extension '%s' validation completed successfully", extensionName), + FollowUp: "Extension is ready for use.", + }, + }, nil +} +``` + +## Action Implementation Patterns + +### Basic Action Structure + +```go +type myCommandAction struct { + // Dependencies + console input.Console + flags *myCommandFlags + + // Services (injected via IoC) + someService *services.SomeService + formatter output.Formatter + writer io.Writer +} + +func 
newMyCommandAction( + console input.Console, + flags *myCommandFlags, + someService *services.SomeService, + formatter output.Formatter, + writer io.Writer, +) actions.Action { + return &myCommandAction{ + console: console, + flags: flags, + someService: someService, + formatter: formatter, + writer: writer, + } +} + +func (a *myCommandAction) Run(ctx context.Context) (*actions.ActionResult, error) { + // 1. Display command start message + a.console.MessageUxItem(ctx, &ux.MessageTitle{ + Title: "My Command (azd mycommand)", + TitleNote: "Performing operation", + }) + + // 2. Show progress for long operations + stepMessage := "Processing request" + a.console.ShowSpinner(ctx, stepMessage, input.Step) + + // 3. Perform the actual work + result, err := a.someService.DoWork(ctx, a.flags.someFlag) + if err != nil { + a.console.StopSpinner(ctx, stepMessage, input.StepFailed) + return nil, fmt.Errorf("operation failed: %w", err) + } + + a.console.StopSpinner(ctx, stepMessage, input.StepDone) + + // 4. Format and display results + if a.formatter.Kind() != output.NoneFormat { + if err := a.formatter.Format(result, a.writer, nil); err != nil { + return nil, fmt.Errorf("failed to format output: %w", err) + } + } + + // 5. Return success result + return &actions.ActionResult{ + Message: &actions.ResultMessage{ + Header: "Operation completed successfully", + FollowUp: "Next steps: run 'azd mycommand list' to see results", + }, + }, nil +} +``` + +### Action with Complex Output Formatting + +```go +func (a *myListAction) Run(ctx context.Context) (*actions.ActionResult, error) { + items, err := a.service.List(ctx) + if err != nil { + return nil, fmt.Errorf("failed to retrieve items: %w", err) + } + + // Handle empty results + if len(items) == 0 { + a.console.Message(ctx, output.WithWarningFormat("No items found.")) + a.console.Message(ctx, fmt.Sprintf( + "Create one with %s", + output.WithHighLightFormat("azd mycommand create "), + )) + return nil, nil + } + + // Format output based on format type + switch a.formatter.Kind() { + case output.TableFormat: + columns := []output.Column{ + { + Heading: "Name", + ValueTemplate: "{{.Name}}", + }, + { + Heading: "Status", + ValueTemplate: "{{.Status}}", + }, + { + Heading: "Created", + ValueTemplate: "{{.CreatedAt | date}}", + }, + } + + return nil, a.formatter.Format(items, a.writer, output.TableFormatterOptions{ + Columns: columns, + }) + default: + return nil, a.formatter.Format(items, a.writer, nil) + } +} +``` + +## Flags and Input Handling + +### Standard Flag Patterns + +```go +type myCommandFlags struct { + // Basic types + stringFlag string + intFlag int + boolFlag bool + sliceFlag []string + + // Common azd patterns + subscription string + location string + environment string + + // Always include global options + global *internal.GlobalCommandOptions + + // Include environment flag for env-aware commands + internal.EnvFlag +} + +func newMyCommandFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *myCommandFlags { + flags := &myCommandFlags{} + flags.Bind(cmd.Flags(), global) + return flags +} + +func (f *myCommandFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + // String flags + local.StringVarP(&f.stringFlag, "name", "n", "", "Name of the resource") + local.StringVar(&f.stringFlag, "long-flag", "default", "Description of flag") + + // Boolean flags + local.BoolVar(&f.boolFlag, "force", false, "Force the operation") + local.BoolVarP(&f.boolFlag, "verbose", "v", false, "Enable verbose output") + + // Integer 
flags + local.IntVar(&f.intFlag, "timeout", 300, "Timeout in seconds") + + // String slice flags + local.StringSliceVar(&f.sliceFlag, "tags", nil, "Tags to apply (can specify multiple)") + + // Common Azure flags + local.StringVarP(&f.subscription, "subscription", "s", "", "Azure subscription ID") + local.StringVarP(&f.location, "location", "l", "", "Azure location") + + // Bind environment flag for env-aware commands + f.EnvFlag.Bind(local, global) + + // Always set global + f.global = global +} +``` + +### Flag Validation + +```go +func (a *myCommandAction) Run(ctx context.Context) (*actions.ActionResult, error) { + // Validate required flags + if a.flags.stringFlag == "" { + return nil, fmt.Errorf("--name flag is required") + } + + // Validate flag combinations + if a.flags.force && a.flags.interactive { + return nil, fmt.Errorf("cannot use --force and --interactive together") + } + + // Validate enum values + validValues := []string{"dev", "test", "prod"} + if !slices.Contains(validValues, a.flags.environment) { + return nil, fmt.Errorf("invalid environment '%s', must be one of: %s", + a.flags.environment, strings.Join(validValues, ", ")) + } + + // Continue with command logic... +} +``` + +## Output Formatting + +### Standard Output Formats + +```go +// Define your output model +type MyItemOutput struct { + Name string `json:"name"` + Status string `json:"status"` + CreatedAt time.Time `json:"createdAt"` + Description string `json:"description,omitempty"` +} + +// Configure output formats in ActionDescriptorOptions +&actions.ActionDescriptorOptions{ + OutputFormats: []output.Format{ + output.JsonFormat, // --output json + output.TableFormat, // --output table (default) + output.NoneFormat, // --output none + }, + DefaultFormat: output.TableFormat, + // ... 
other options +} + +// Handle formatting in your action +func (a *myAction) Run(ctx context.Context) (*actions.ActionResult, error) { + data := getMyData() // Your data retrieval logic + + switch a.formatter.Kind() { + case output.TableFormat: + columns := []output.Column{ + { + Heading: "Name", + ValueTemplate: "{{.Name}}", + }, + { + Heading: "Status", + ValueTemplate: "{{.Status}}", + Width: 10, + }, + { + Heading: "Created", + ValueTemplate: "{{.CreatedAt | date}}", + }, + } + + return nil, a.formatter.Format(data, a.writer, output.TableFormatterOptions{ + Columns: columns, + }) + + case output.NoneFormat: + // Custom formatting for none output + for _, item := range data { + fmt.Fprintf(a.writer, "%s (%s)\n", item.Name, item.Status) + } + return nil, nil + + default: // JsonFormat and others + return nil, a.formatter.Format(data, a.writer, nil) + } +} +``` + +### Custom Display Methods + +```go +type MyDetailedOutput struct { + Name string + Description string + Properties map[string]string +} + +// Implement custom display for complex output +func (o *MyDetailedOutput) Display(writer io.Writer) error { + tabs := tabwriter.NewWriter( + writer, + 0, + output.TableTabSize, + 1, + output.TablePadCharacter, + output.TableFlags) + + text := [][]string{ + {"Name", ":", o.Name}, + {"Description", ":", o.Description}, + {"", "", ""}, + {"Properties", ":", ""}, + } + + for key, value := range o.Properties { + text = append(text, []string{" " + key, ":", value}) + } + + for _, line := range text { + _, err := tabs.Write([]byte(strings.Join(line, "\t") + "\n")) + if err != nil { + return err + } + } + + return tabs.Flush() +} + +// Use in action +func (a *myShowAction) Run(ctx context.Context) (*actions.ActionResult, error) { + data := getDetailedData() + + if a.formatter.Kind() == output.NoneFormat { + return nil, data.Display(a.writer) + } + + return nil, a.formatter.Format(data, a.writer, nil) +} +``` + +## Error Handling + +### Standard Error Patterns + +```go +func (a *myAction) Run(ctx context.Context) (*actions.ActionResult, error) { + // Service/API errors + result, err := a.service.DoSomething(ctx) + if err != nil { + // Wrap with context + return nil, fmt.Errorf("failed to perform operation: %w", err) + } + + // Validation errors + if result == nil { + return nil, fmt.Errorf("operation returned no results") + } + + // Business logic errors + if !result.IsValid { + return nil, fmt.Errorf("operation completed but result is invalid: %s", result.ValidationMessage) + } + + // Stop spinner on errors + stepMessage := "Processing" + a.console.ShowSpinner(ctx, stepMessage, input.Step) + + _, err = a.service.Process(ctx) + if err != nil { + a.console.StopSpinner(ctx, stepMessage, input.StepFailed) + return nil, fmt.Errorf("processing failed: %w", err) + } + + a.console.StopSpinner(ctx, stepMessage, input.StepDone) + + return &actions.ActionResult{ + Message: &actions.ResultMessage{ + Header: "Operation completed successfully", + }, + }, nil +} +``` + +### Error Handling with User Guidance + +```go +func (a *myAction) Run(ctx context.Context) (*actions.ActionResult, error) { + // Check prerequisites + if !a.checkPrerequisites(ctx) { + return nil, fmt.Errorf("prerequisites not met. Run 'azd auth login' first") + } + + // Handle specific error types + err := a.service.Operate(ctx) + if err != nil { + var notFoundErr *services.NotFoundError + var authErr *services.AuthenticationError + + switch { + case errors.As(err, ¬FoundErr): + return nil, fmt.Errorf("resource not found: %s. 
Use 'azd mycommand list' to see available resources", notFoundErr.ResourceName) + + case errors.As(err, &authErr): + return nil, fmt.Errorf("authentication failed: %w. Run 'azd auth login' to re-authenticate", err) + + default: + return nil, fmt.Errorf("operation failed: %w", err) + } + } + + return &actions.ActionResult{ + Message: &actions.ResultMessage{ + Header: "Operation completed", + }, + }, nil +} +``` + +## Integration with IoC Container + +### Service Registration + +When your command requires new services, register them in the appropriate place: + +```go +// In pkg/ioc/container.go or appropriate service registration location +func RegisterMyServices(container *ioc.Container) { + // Register your service + ioc.RegisterSingleton(container, func() *services.MyService { + return services.NewMyService() + }) + + // Register service with dependencies + ioc.RegisterSingleton(container, func( + httpClient *http.Client, + config *config.Config, + ) *services.MyComplexService { + return services.NewMyComplexService(httpClient, config) + }) +} +``` + +### Using Services in Actions + +```go +// Your action constructor automatically receives services via DI +func newMyCommandAction( + flags *myCommandFlags, + console input.Console, + formatter output.Formatter, + writer io.Writer, + // Your custom services + myService *services.MyService, + azureService *azure.AzureService, + // Standard azd services + azdContext *azdcontext.AzdContext, + env *environment.Environment, +) actions.Action { + return &myCommandAction{ + flags: flags, + console: console, + formatter: formatter, + writer: writer, + myService: myService, + azureService: azureService, + azdContext: azdContext, + env: env, + } +} +``` + +### Common Service Dependencies + +```go +// Commonly used services in azd commands: + +// Environment and context +azdContext *azdcontext.AzdContext +env *environment.Environment + +// Azure services +accountManager account.Manager +subscriptionResolver account.SubscriptionTenantResolver +resourceManager infra.ResourceManager +resourceService *azapi.ResourceService + +// User interaction +console input.Console +formatter output.Formatter +writer io.Writer + +// Configuration +config *config.Config +alphaFeatureManager *alpha.FeatureManager + +// Project and templates +projectManager *project.ProjectManager +templateManager *templates.TemplateManager +``` + +## Complete Examples + +### Example 1: Simple Single Command + +File: `cmd/validate.go` + +```go +package cmd + +import ( + "context" + "fmt" + + "github.com/azure/azure-dev/cli/azd/cmd/actions" + "github.com/azure/azure-dev/cli/azd/internal" + "github.com/azure/azure-dev/cli/azd/pkg/input" + "github.com/azure/azure-dev/cli/azd/pkg/output" + "github.com/azure/azure-dev/cli/azd/pkg/project" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +// Add to root.go registration +// root.Add("validate", &actions.ActionDescriptorOptions{ +// Command: newValidateCmd(), +// ActionResolver: newValidateAction, +// FlagsResolver: newValidateFlags, +// OutputFormats: []output.Format{output.JsonFormat, output.NoneFormat}, +// DefaultFormat: output.NoneFormat, +// GroupingOptions: actions.CommandGroupOptions{ +// RootLevelHelp: actions.CmdGroupManage, +// }, +// }) + +func newValidateCmd() *cobra.Command { + return &cobra.Command{ + Use: "validate", + Short: "Validate the current project configuration.", + } +} + +type validateFlags struct { + strict bool + global *internal.GlobalCommandOptions + internal.EnvFlag +} + +func newValidateFlags(cmd 
*cobra.Command, global *internal.GlobalCommandOptions) *validateFlags {
+    flags := &validateFlags{}
+    flags.Bind(cmd.Flags(), global)
+    return flags
+}
+
+func (f *validateFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) {
+    local.BoolVar(&f.strict, "strict", false, "Enable strict validation mode")
+    f.EnvFlag.Bind(local, global)
+    f.global = global
+}
+
+type validateAction struct {
+    flags          *validateFlags
+    console        input.Console
+    projectManager *project.ProjectManager
+}
+
+func newValidateAction(
+    flags *validateFlags,
+    console input.Console,
+    projectManager *project.ProjectManager,
+) actions.Action {
+    return &validateAction{
+        flags:          flags,
+        console:        console,
+        projectManager: projectManager,
+    }
+}
+
+func (a *validateAction) Run(ctx context.Context) (*actions.ActionResult, error) {
+    a.console.Message(ctx, "Validating project configuration...")
+
+    // TODO: Implement validation logic
+    // isValid, errors := a.projectManager.Validate(ctx, a.flags.strict)
+    // if !isValid {
+    //     return nil, fmt.Errorf("validation failed: %v", errors)
+    // }
+
+    return &actions.ActionResult{
+        Message: &actions.ResultMessage{
+            Header:   "Project validation completed successfully",
+            FollowUp: "Your project is ready for deployment.",
+        },
+    }, nil
+}
+```
+
+### Example 2: Command Group with Multiple Subcommands
+
+File: `cmd/resource.go`
+
+```go
+package cmd
+
+import (
+    "context"
+    "fmt"
+    "io"
+    "time"
+
+    "github.com/azure/azure-dev/cli/azd/cmd/actions"
+    "github.com/azure/azure-dev/cli/azd/internal"
+    "github.com/azure/azure-dev/cli/azd/pkg/input"
+    "github.com/azure/azure-dev/cli/azd/pkg/output"
+    "github.com/azure/azure-dev/cli/azd/pkg/output/ux"
+    "github.com/spf13/cobra"
+    "github.com/spf13/pflag"
+)
+
+// Add to root.go: resourceActions(root)
+func resourceActions(root *actions.ActionDescriptor) *actions.ActionDescriptor {
+    group := root.Add("resource", &actions.ActionDescriptorOptions{
+        Command: &cobra.Command{
+            Use:   "resource",
+            Short: "Manage Azure resources for the current project.",
+        },
+        GroupingOptions: actions.CommandGroupOptions{
+            RootLevelHelp: actions.CmdGroupAzure,
+        },
+    })
+
+    group.Add("list", &actions.ActionDescriptorOptions{
+        Command: &cobra.Command{
+            Use:   "list",
+            Short: "List Azure resources for the current project.",
+        },
+        OutputFormats:  []output.Format{output.JsonFormat, output.TableFormat},
+        DefaultFormat:  output.TableFormat,
+        ActionResolver: newResourceListAction,
+        FlagsResolver:  newResourceListFlags,
+    })
+
+    group.Add("show", &actions.ActionDescriptorOptions{
+        Command: &cobra.Command{
+            Use:   "show <resource-id>",
+            Short: "Show details for a specific Azure resource.",
+            Args:  cobra.ExactArgs(1),
+        },
+        OutputFormats:  []output.Format{output.JsonFormat, output.NoneFormat},
+        DefaultFormat:  output.NoneFormat,
+        ActionResolver: newResourceShowAction,
+    })
+
+    group.Add("delete", &actions.ActionDescriptorOptions{
+        Command: &cobra.Command{
+            Use:   "delete <resource-id>",
+            Short: "Delete a specific Azure resource.",
+            Args:  cobra.ExactArgs(1),
+        },
+        OutputFormats:  []output.Format{output.JsonFormat, output.NoneFormat},
+        DefaultFormat:  output.NoneFormat,
+        ActionResolver: newResourceDeleteAction,
+        FlagsResolver:  newResourceDeleteFlags,
+    })
+
+    return group
+}
+
+// List command implementation
+type resourceListFlags struct {
+    resourceType string
+    location     string
+    global       *internal.GlobalCommandOptions
+    internal.EnvFlag
+}
+
+func newResourceListFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *resourceListFlags {
+    flags := 
&resourceListFlags{} + flags.Bind(cmd.Flags(), global) + return flags +} + +func (f *resourceListFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + local.StringVar(&f.resourceType, "type", "", "Filter by resource type") + local.StringVar(&f.location, "location", "", "Filter by location") + f.EnvFlag.Bind(local, global) + f.global = global +} + +type resourceListAction struct { + flags *resourceListFlags + formatter output.Formatter + console input.Console + writer io.Writer + // TODO: Add actual Azure resource service + // resourceService *azure.ResourceService +} + +func newResourceListAction( + flags *resourceListFlags, + formatter output.Formatter, + console input.Console, + writer io.Writer, +) actions.Action { + return &resourceListAction{ + flags: flags, + formatter: formatter, + console: console, + writer: writer, + } +} + +type resourceInfo struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Location string `json:"location"` + Status string `json:"status"` +} + +func (a *resourceListAction) Run(ctx context.Context) (*actions.ActionResult, error) { + a.console.MessageUxItem(ctx, &ux.MessageTitle{ + Title: "List Azure resources (azd resource list)", + TitleNote: "Retrieving resources for current project", + }) + + // TODO: Implement actual resource listing + // resources, err := a.resourceService.ListForProject(ctx, a.flags.resourceType, a.flags.location) + // if err != nil { + // return nil, fmt.Errorf("failed to list resources: %w", err) + // } + + // Placeholder data + resources := []resourceInfo{ + { + ID: "/subscriptions/xxx/resourceGroups/rg-example/providers/Microsoft.Web/sites/example-app", + Name: "example-app", + Type: "Microsoft.Web/sites", + Location: "eastus", + Status: "Running", + }, + } + + if len(resources) == 0 { + a.console.Message(ctx, output.WithWarningFormat("No resources found.")) + return nil, nil + } + + if a.formatter.Kind() == output.TableFormat { + columns := []output.Column{ + { + Heading: "Name", + ValueTemplate: "{{.Name}}", + }, + { + Heading: "Type", + ValueTemplate: "{{.Type}}", + }, + { + Heading: "Location", + ValueTemplate: "{{.Location}}", + }, + { + Heading: "Status", + ValueTemplate: "{{.Status}}", + }, + } + + return nil, a.formatter.Format(resources, a.writer, output.TableFormatterOptions{ + Columns: columns, + }) + } + + return nil, a.formatter.Format(resources, a.writer, nil) +} + +// Show command implementation +type resourceShowAction struct { + args []string + formatter output.Formatter + console input.Console + writer io.Writer +} + +func newResourceShowAction( + args []string, + formatter output.Formatter, + console input.Console, + writer io.Writer, +) actions.Action { + return &resourceShowAction{ + args: args, + formatter: formatter, + console: console, + writer: writer, + } +} + +func (a *resourceShowAction) Run(ctx context.Context) (*actions.ActionResult, error) { + resourceID := a.args[0] + + a.console.MessageUxItem(ctx, &ux.MessageTitle{ + Title: "Show Azure resource (azd resource show)", + TitleNote: fmt.Sprintf("Retrieving details for '%s'", resourceID), + }) + + // TODO: Implement actual resource details retrieval + // resource, err := a.resourceService.Get(ctx, resourceID) + // if err != nil { + // return nil, fmt.Errorf("failed to get resource details: %w", err) + // } + + // For now, just show that the command structure works + a.console.Message(ctx, fmt.Sprintf("Resource ID: %s", resourceID)) + a.console.Message(ctx, "TODO: Implement resource details 
display") + + return nil, nil +} + +// Delete command implementation +type resourceDeleteFlags struct { + force bool + global *internal.GlobalCommandOptions +} + +func newResourceDeleteFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *resourceDeleteFlags { + flags := &resourceDeleteFlags{} + flags.Bind(cmd.Flags(), global) + return flags +} + +func (f *resourceDeleteFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + local.BoolVarP(&f.force, "force", "f", false, "Force deletion without confirmation") + f.global = global +} + +type resourceDeleteAction struct { + args []string + flags *resourceDeleteFlags + console input.Console +} + +func newResourceDeleteAction( + args []string, + flags *resourceDeleteFlags, + console input.Console, +) actions.Action { + return &resourceDeleteAction{ + args: args, + flags: flags, + console: console, + } +} + +func (a *resourceDeleteAction) Run(ctx context.Context) (*actions.ActionResult, error) { + resourceID := a.args[0] + + a.console.MessageUxItem(ctx, &ux.MessageTitle{ + Title: "Delete Azure resource (azd resource delete)", + TitleNote: fmt.Sprintf("Deleting resource '%s'", resourceID), + }) + + if !a.flags.force { + confirmed, err := a.console.Confirm(ctx, input.ConsoleOptions{ + Message: fmt.Sprintf("Are you sure you want to delete '%s'?", resourceID), + }) + if err != nil { + return nil, fmt.Errorf("failed to get confirmation: %w", err) + } + if !confirmed { + a.console.Message(ctx, "Deletion cancelled.") + return nil, nil + } + } + + stepMessage := fmt.Sprintf("Deleting %s", output.WithHighLightFormat(resourceID)) + a.console.ShowSpinner(ctx, stepMessage, input.Step) + + // TODO: Implement actual resource deletion + // err := a.resourceService.Delete(ctx, resourceID) + // if err != nil { + // a.console.StopSpinner(ctx, stepMessage, input.StepFailed) + // return nil, fmt.Errorf("failed to delete resource: %w", err) + // } + + // Simulate work + time.Sleep(1 * time.Second) + + a.console.StopSpinner(ctx, stepMessage, input.StepDone) + + return &actions.ActionResult{ + Message: &actions.ResultMessage{ + Header: fmt.Sprintf("Successfully deleted resource '%s'", resourceID), + FollowUp: "Use 'azd resource list' to see remaining resources.", + }, + }, nil +} +``` + +## Summary + +This guide provides a complete framework for adding new commands to azd. The key steps are: + +1. **Choose the pattern**: Single command or command group +2. **Create the file**: Follow naming conventions in `cmd/` directory +3. **Define the structure**: ActionDescriptor → Flags → Action +4. **Implement the logic**: Start with TODO comments for actual functionality +5. **Register the command**: Add to `root.go` or parent command group +6. **Handle dependencies**: Use IoC container for service injection +7. **Format output**: Support JSON, Table, and None formats appropriately +8. **Handle errors**: Provide clear error messages with guidance + +The generated command shells will compile and provide the basic CLI structure, allowing developers to focus on implementing the actual business logic within the marked TODO sections. diff --git a/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go b/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go deleted file mode 100644 index aa6ea409a26..00000000000 --- a/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package azd - -import ( - "context" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" - "github.com/tmc/langchaingo/tools" -) - -var _ tools.Tool = &AzdArchitecturePlanningTool{} - -type AzdArchitecturePlanningTool struct { -} - -func (t *AzdArchitecturePlanningTool) Name() string { - return "azd_architecture_planning" -} - -func (t *AzdArchitecturePlanningTool) Description() string { - return `Returns instructions for selecting appropriate Azure services for discovered application components and -designing infrastructure architecture. - -The LLM agent should execute these instructions using available tools. - -Use this tool when: -- Discovery analysis has been completed and azd-arch-plan.md exists -- Application components have been identified and classified -- Need to map components to Azure hosting services -- Ready to plan containerization and database strategies - -Input: "./azd-arch-plan.md"` -} - -func (t *AzdArchitecturePlanningTool) Call(ctx context.Context, input string) (string, error) { - return prompts.AzdArchitecturePlanningPrompt, nil -} diff --git a/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go b/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go deleted file mode 100644 index 2590eb2c0ba..00000000000 --- a/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azd - -import ( - "context" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" - "github.com/tmc/langchaingo/tools" -) - -var _ tools.Tool = &AzdAzureYamlGenerationTool{} - -type AzdAzureYamlGenerationTool struct { -} - -func (t *AzdAzureYamlGenerationTool) Name() string { - return "azd_azure_yaml_generation" -} - -func (t *AzdAzureYamlGenerationTool) Description() string { - return `Returns instructions for generating the azure.yaml configuration file with proper service hosting, -build, and deployment settings for AZD projects. - -The LLM agent should execute these instructions using available tools. - -Use this tool when: -- Architecture planning has been completed and Azure services selected -- Need to create or update azure.yaml configuration file -- Services have been mapped to Azure hosting platforms -- Ready to define build and deployment configurations - -Input: "./azd-arch-plan.md"` -} - -func (t *AzdAzureYamlGenerationTool) Call(ctx context.Context, input string) (string, error) { - return prompts.AzdAzureYamlGenerationPrompt, nil -} diff --git a/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go b/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go deleted file mode 100644 index f8b13425eea..00000000000 --- a/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azd - -import ( - "context" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" - "github.com/tmc/langchaingo/tools" -) - -var _ tools.Tool = &AzdDiscoveryAnalysisTool{} - -type AzdDiscoveryAnalysisTool struct { -} - -func (t *AzdDiscoveryAnalysisTool) Name() string { - return "azd_discovery_analysis" -} - -func (t *AzdDiscoveryAnalysisTool) Description() string { - return `Returns instructions for performing comprehensive discovery and analysis of application components -to prepare for Azure Developer CLI (AZD) initialization. 
- -The LLM agent should execute these instructions using available tools. - -Use this tool when: -- Starting Phase 1 of AZD migration process -- Need to identify all application components and dependencies -- Codebase analysis required before architecture planning -- azd-arch-plan.md does not exist or needs updating - -Input: "./azd-arch-plan.md"` -} - -func (t *AzdDiscoveryAnalysisTool) Call(ctx context.Context, input string) (string, error) { - return prompts.AzdDiscoveryAnalysisPrompt, nil -} diff --git a/cli/azd/internal/agent/tools/azd/azd_docker_generation.go b/cli/azd/internal/agent/tools/azd/azd_docker_generation.go deleted file mode 100644 index 57c03e2d807..00000000000 --- a/cli/azd/internal/agent/tools/azd/azd_docker_generation.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azd - -import ( - "context" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" - "github.com/tmc/langchaingo/tools" -) - -var _ tools.Tool = &AzdDockerGenerationTool{} - -type AzdDockerGenerationTool struct { -} - -func (t *AzdDockerGenerationTool) Name() string { - return "azd_docker_generation" -} - -func (t *AzdDockerGenerationTool) Description() string { - return `Returns instructions for generating optimized Dockerfiles and container configurations for containerizable -services in AZD projects. - -The LLM agent should execute these instructions using available tools. - -Use this tool when: -- Architecture planning identified services requiring containerization -- azd-arch-plan.md shows Container Apps or AKS as selected hosting platform -- Need Dockerfiles for microservices, APIs, or containerized web applications -- Ready to implement containerization strategy - -Input: "./azd-arch-plan.md"` -} - -func (t *AzdDockerGenerationTool) Call(ctx context.Context, input string) (string, error) { - return prompts.AzdDockerGenerationPrompt, nil -} diff --git a/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go b/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go deleted file mode 100644 index d55f903e4d2..00000000000 --- a/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azd - -import ( - "context" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" - "github.com/tmc/langchaingo/tools" -) - -var _ tools.Tool = &AzdIacGenerationRulesTool{} - -type AzdIacGenerationRulesTool struct { -} - -func (t *AzdIacGenerationRulesTool) Name() string { - return "azd_iac_generation_rules" -} - -func (t *AzdIacGenerationRulesTool) Description() string { - return `Returns comprehensive rules and guidelines for generating Bicep Infrastructure as Code files and modules -for AZD projects. - -The LLM agent should reference these rules when generating infrastructure code. 
- -Use this tool when: -- Generating any Bicep infrastructure templates for AZD projects -- Need compliance rules and naming conventions for Azure resources -- Creating modular, reusable Bicep files -- Ensuring security and operational best practices - -Input: "./azd-arch-plan.md"` -} - -func (t *AzdIacGenerationRulesTool) Call(ctx context.Context, input string) (string, error) { - return prompts.AzdIacRulesPrompt, nil -} diff --git a/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go b/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go deleted file mode 100644 index 3c3e7cf52b9..00000000000 --- a/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azd - -import ( - "context" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" - "github.com/tmc/langchaingo/tools" -) - -var _ tools.Tool = &AzdInfrastructureGenerationTool{} - -type AzdInfrastructureGenerationTool struct { -} - -func (t *AzdInfrastructureGenerationTool) Name() string { - return "azd_infrastructure_generation" -} - -func (t *AzdInfrastructureGenerationTool) Description() string { - return `Returns instructions for generating modular Bicep infrastructure templates following Azure security and -operational best practices for AZD projects. - -The LLM agent should execute these instructions using available tools. - -Use this tool when: -- Architecture planning completed with Azure services selected -- Need to create Bicep infrastructure templates -- Ready to implement infrastructure as code for deployment - -Input: "./azd-arch-plan.md"` -} - -func (t *AzdInfrastructureGenerationTool) Call(ctx context.Context, input string) (string, error) { - return prompts.AzdInfrastructureGenerationPrompt, nil -} diff --git a/cli/azd/internal/agent/tools/azd/azd_plan_init.go b/cli/azd/internal/agent/tools/azd/azd_plan_init.go deleted file mode 100644 index a6eb422ab78..00000000000 --- a/cli/azd/internal/agent/tools/azd/azd_plan_init.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azd - -import ( - "context" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" - "github.com/tmc/langchaingo/tools" -) - -var _ tools.Tool = &AzdPlanInitTool{} - -type AzdPlanInitTool struct { -} - -func (t *AzdPlanInitTool) Name() string { - return "azd_plan_init" -} - -func (t *AzdPlanInitTool) Description() string { - return `Returns instructions for orchestrating complete AZD application initialization using structured phases -with specialized tools. - -The LLM agent should execute these instructions using available tools. 
- -Use this tool when: -- Starting new AZD project initialization or migration -- Need structured approach to transform application into AZD-compatible project -- Want to ensure proper sequencing of discovery, planning, and file generation -- Require complete project orchestration guidance - -Input: "./azd-arch-plan.md"` -} - -func (t *AzdPlanInitTool) Call(ctx context.Context, input string) (string, error) { - return prompts.AzdPlanInitPrompt, nil -} diff --git a/cli/azd/internal/agent/tools/azd/azd_project_validation.go b/cli/azd/internal/agent/tools/azd/azd_project_validation.go deleted file mode 100644 index 7645fac9ca0..00000000000 --- a/cli/azd/internal/agent/tools/azd/azd_project_validation.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azd - -import ( - "context" - _ "embed" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" - "github.com/tmc/langchaingo/tools" -) - -// AzdProjectValidationTool validates an AZD project by running comprehensive checks on all components -// including azure.yaml schema validation, Bicep template validation, environment setup, packaging, -// and deployment preview. -type AzdProjectValidationTool struct{} - -// Name returns the name of the tool. -func (t *AzdProjectValidationTool) Name() string { - return "azd_project_validation" -} - -// Description returns the description of the tool. -func (t *AzdProjectValidationTool) Description() string { - return `Returns instructions for validating AZD project by running comprehensive checks on azure.yaml schema, -Bicep templates, environment setup, packaging, and deployment preview. - -The LLM agent should execute these instructions using available tools. - -Use this tool when: -- All AZD configuration files have been generated -- Ready to validate complete project before deployment -- Need to ensure azure.yaml, Bicep templates, and environment are properly configured -- Final validation step before running azd up - -Input: "./azd-arch-plan.md"` -} - -// Call executes the tool with the given arguments. -func (t *AzdProjectValidationTool) Call(ctx context.Context, args string) (string, error) { - return prompts.AzdProjectValidationPrompt, nil -} - -// Ensure AzdProjectValidationTool implements the Tool interface. -var _ tools.Tool = (*AzdProjectValidationTool)(nil) diff --git a/cli/azd/internal/agent/tools/azd/azd_yaml_schema.go b/cli/azd/internal/agent/tools/azd/azd_yaml_schema.go deleted file mode 100644 index 678d268d7ea..00000000000 --- a/cli/azd/internal/agent/tools/azd/azd_yaml_schema.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azd - -import ( - "context" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" - "github.com/tmc/langchaingo/tools" -) - -var _ tools.Tool = &AzdYamlSchemaTool{} - -type AzdYamlSchemaTool struct { -} - -func (t *AzdYamlSchemaTool) Name() string { - return "azd_yaml_schema" -} - -func (t *AzdYamlSchemaTool) Description() string { - return ` - Gets the Azure YAML JSON schema file specification and structure for azure.yaml configuration files used in AZD. 
- - Input: - ` -} - -func (t *AzdYamlSchemaTool) Call(ctx context.Context, input string) (string, error) { - return prompts.AzdYamlSchemaPrompt, nil -} diff --git a/cli/azd/internal/agent/tools/azd/loader.go b/cli/azd/internal/agent/tools/azd/loader.go deleted file mode 100644 index f55b9c93e77..00000000000 --- a/cli/azd/internal/agent/tools/azd/loader.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azd - -import ( - "github.com/tmc/langchaingo/tools" -) - -// AzdToolsLoader loads AZD-related tools -type AzdToolsLoader struct{} - -func NewAzdToolsLoader() *AzdToolsLoader { - return &AzdToolsLoader{} -} - -func (l *AzdToolsLoader) LoadTools() ([]tools.Tool, error) { - return []tools.Tool{ - // Original orchestrating tool - &AzdPlanInitTool{}, - - // Core workflow tools (use in sequence) - &AzdDiscoveryAnalysisTool{}, - &AzdArchitecturePlanningTool{}, - - // Focused file generation tools (use as needed) - &AzdAzureYamlGenerationTool{}, - &AzdInfrastructureGenerationTool{}, - &AzdDockerGenerationTool{}, - - // Validation tool (final step) - &AzdProjectValidationTool{}, - - // Supporting tools - &AzdIacGenerationRulesTool{}, - &AzdYamlSchemaTool{}, - }, nil -} diff --git a/cli/azd/internal/agent/tools/loader.go b/cli/azd/internal/agent/tools/loader.go index ec573ceac60..a0662c65c5c 100644 --- a/cli/azd/internal/agent/tools/loader.go +++ b/cli/azd/internal/agent/tools/loader.go @@ -6,7 +6,6 @@ package tools import ( "github.com/tmc/langchaingo/tools" - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/dev" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/io" ) @@ -23,7 +22,6 @@ type LocalToolsLoader struct { func NewLocalToolsLoader() *LocalToolsLoader { return &LocalToolsLoader{ loaders: []ToolLoader{ - azd.NewAzdToolsLoader(), dev.NewDevToolsLoader(), io.NewIoToolsLoader(), }, diff --git a/cli/azd/internal/agent/tools/mcp/mcp.json b/cli/azd/internal/agent/tools/mcp/mcp.json index efca4416be8..67aa3a5dd2d 100644 --- a/cli/azd/internal/agent/tools/mcp/mcp.json +++ b/cli/azd/internal/agent/tools/mcp/mcp.json @@ -2,8 +2,13 @@ "servers": { "Azure": { "type": "stdio", - "command": "azmcp", - "args": ["server", "start"] + "command": "npx", + "args": ["-y", "@azure/mcp@latest", "server", "start"] + }, + "azd": { + "type": "stdio", + "command": "azd", + "args": ["mcp", "start"] } } } diff --git a/cli/azd/internal/mcp/tools/azd_architecture_planning.go b/cli/azd/internal/mcp/tools/azd_architecture_planning.go new file mode 100644 index 00000000000..b22ae1dde7e --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_architecture_planning.go @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdArchitecturePlanningTool creates a new azd architecture planning tool +func NewAzdArchitecturePlanningTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_architecture_planning", + mcp.WithDescription(`Returns instructions for selecting appropriate Azure services for discovered application components and +designing infrastructure architecture. + +The LLM agent should execute these instructions using available tools. 
+ +Use this tool when: +- Discovery analysis has been completed and azd-arch-plan.md exists +- Application components have been identified and classified +- Need to map components to Azure hosting services +- Ready to plan containerization and database strategies`), + ), + Handler: handleAzdArchitecturePlanning, + } +} + +func handleAzdArchitecturePlanning(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdArchitecturePlanningPrompt), nil +} diff --git a/cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go b/cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go new file mode 100644 index 00000000000..50751d9246d --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdAzureYamlGenerationTool creates a new azd azure yaml generation tool +func NewAzdAzureYamlGenerationTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_azure_yaml_generation", + mcp.WithDescription(`Returns instructions for generating the azure.yaml configuration file with proper service hosting, +build, and deployment settings for AZD projects. + +The LLM agent should execute these instructions using available tools. + +Use this tool when: +- Architecture planning has been completed and Azure services selected +- Need to create or update azure.yaml configuration file +- Services have been mapped to Azure hosting platforms +- Ready to define build and deployment configurations`), + ), + Handler: handleAzdAzureYamlGeneration, + } +} + +func handleAzdAzureYamlGeneration(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdAzureYamlGenerationPrompt), nil +} diff --git a/cli/azd/internal/mcp/tools/azd_discovery_analysis.go b/cli/azd/internal/mcp/tools/azd_discovery_analysis.go new file mode 100644 index 00000000000..2581b21b152 --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_discovery_analysis.go @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdDiscoveryAnalysisTool creates a new azd discovery analysis tool +func NewAzdDiscoveryAnalysisTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_discovery_analysis", + mcp.WithDescription(`Returns instructions for performing comprehensive discovery and analysis of application components +to prepare for Azure Developer CLI (AZD) initialization. + +The LLM agent should execute these instructions using available tools. 
+ +Use this tool when: +- Starting Phase 1 of AZD migration process +- Need to identify all application components and dependencies +- Codebase analysis required before architecture planning +- azd-arch-plan.md does not exist or needs updating`), + ), + Handler: handleAzdDiscoveryAnalysis, + } +} + +func handleAzdDiscoveryAnalysis(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdDiscoveryAnalysisPrompt), nil +} diff --git a/cli/azd/internal/mcp/tools/azd_docker_generation.go b/cli/azd/internal/mcp/tools/azd_docker_generation.go new file mode 100644 index 00000000000..bca51b5b24b --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_docker_generation.go @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdDockerGenerationTool creates a new azd docker generation tool +func NewAzdDockerGenerationTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_docker_generation", + mcp.WithDescription(`Returns instructions for generating optimized Dockerfiles and container configurations for containerizable +services in AZD projects. + +The LLM agent should execute these instructions using available tools. + +Use this tool when: +- Architecture planning identified services requiring containerization +- azd-arch-plan.md shows Container Apps or AKS as selected hosting platform +- Need Dockerfiles for microservices, APIs, or containerized web applications +- Ready to implement containerization strategy`), + ), + Handler: handleAzdDockerGeneration, + } +} + +func handleAzdDockerGeneration(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdDockerGenerationPrompt), nil +} diff --git a/cli/azd/internal/mcp/tools/azd_iac_generation_rules.go b/cli/azd/internal/mcp/tools/azd_iac_generation_rules.go new file mode 100644 index 00000000000..3b9710088ff --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_iac_generation_rules.go @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdIacGenerationRulesTool creates a new azd iac generation rules tool +func NewAzdIacGenerationRulesTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_iac_generation_rules", + mcp.WithDescription(`Returns comprehensive rules and guidelines for generating Bicep Infrastructure as Code files and modules +for AZD projects. + +The LLM agent should reference these rules when generating infrastructure code. 
+ +Use this tool when: +- Generating any Bicep infrastructure templates for AZD projects +- Need compliance rules and naming conventions for Azure resources +- Creating modular, reusable Bicep files +- Ensuring security and operational best practices"`), + ), + Handler: handleAzdIacGenerationRules, + } +} + +func handleAzdIacGenerationRules(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdIacRulesPrompt), nil +} diff --git a/cli/azd/internal/mcp/tools/azd_infrastructure_generation.go b/cli/azd/internal/mcp/tools/azd_infrastructure_generation.go new file mode 100644 index 00000000000..ea04eba4701 --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_infrastructure_generation.go @@ -0,0 +1,35 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdInfrastructureGenerationTool creates a new azd infrastructure generation tool +func NewAzdInfrastructureGenerationTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_infrastructure_generation", + mcp.WithDescription(`Returns instructions for generating modular Bicep infrastructure templates following Azure security and +operational best practices for AZD projects. + +The LLM agent should execute these instructions using available tools. + +Use this tool when: +- Architecture planning completed with Azure services selected +- Need to create Bicep infrastructure templates +- Ready to implement infrastructure as code for deployment`), + ), + Handler: handleAzdInfrastructureGeneration, + } +} + +func handleAzdInfrastructureGeneration(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdInfrastructureGenerationPrompt), nil +} diff --git a/cli/azd/internal/mcp/tools/azd_plan_init.go b/cli/azd/internal/mcp/tools/azd_plan_init.go new file mode 100644 index 00000000000..b2b2cb8a143 --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_plan_init.go @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdPlanInitTool creates a new azd plan init tool +func NewAzdPlanInitTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_plan_init", + mcp.WithDescription(`Returns instructions for orchestrating complete AZD application initialization using structured phases +with specialized tools. + +The LLM agent should execute these instructions using available tools. 
+ +Use this tool when: +- Starting new AZD project initialization or migration +- Need structured approach to transform application into AZD-compatible project +- Want to ensure proper sequencing of discovery, planning, and file generation +- Require complete project orchestration guidance`), + ), + Handler: handleAzdPlanInit, + } +} + +func handleAzdPlanInit(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdPlanInitPrompt), nil +} diff --git a/cli/azd/internal/mcp/tools/azd_project_validation.go b/cli/azd/internal/mcp/tools/azd_project_validation.go new file mode 100644 index 00000000000..9620074e15a --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_project_validation.go @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdProjectValidationTool creates a new azd project validation tool +func NewAzdProjectValidationTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_project_validation", + mcp.WithDescription(`Returns instructions for validating AZD project by running comprehensive checks on azure.yaml schema, +Bicep templates, environment setup, packaging, and deployment preview. + +The LLM agent should execute these instructions using available tools. + +Use this tool when: +- All AZD configuration files have been generated +- Ready to validate complete project before deployment +- Need to ensure azure.yaml, Bicep templates, and environment are properly configured +- Final validation step before running azd up`), + ), + Handler: handleAzdProjectValidation, + } +} + +func handleAzdProjectValidation(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdProjectValidationPrompt), nil +} diff --git a/cli/azd/internal/mcp/tools/azd_yaml_schema.go b/cli/azd/internal/mcp/tools/azd_yaml_schema.go new file mode 100644 index 00000000000..6afaf884813 --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_yaml_schema.go @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdYamlSchemaTool creates a new azd yaml schema tool +func NewAzdYamlSchemaTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_yaml_schema", + mcp.WithDescription(`Gets the Azure YAML JSON schema file specification and structure for azure.yaml configuration files used in AZD.`), + ), + Handler: handleAzdYamlSchema, + } +} + +func handleAzdYamlSchema(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdYamlSchemaPrompt), nil +} diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_architecture_planning.md b/cli/azd/internal/mcp/tools/prompts/azd_architecture_planning.md similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/azd_architecture_planning.md rename to cli/azd/internal/mcp/tools/prompts/azd_architecture_planning.md diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md b/cli/azd/internal/mcp/tools/prompts/azd_azure_yaml_generation.md similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md rename to cli/azd/internal/mcp/tools/prompts/azd_azure_yaml_generation.md diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_discovery_analysis.md b/cli/azd/internal/mcp/tools/prompts/azd_discovery_analysis.md similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/azd_discovery_analysis.md rename to cli/azd/internal/mcp/tools/prompts/azd_discovery_analysis.md diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_docker_generation.md b/cli/azd/internal/mcp/tools/prompts/azd_docker_generation.md similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/azd_docker_generation.md rename to cli/azd/internal/mcp/tools/prompts/azd_docker_generation.md diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md b/cli/azd/internal/mcp/tools/prompts/azd_iac_generation_rules.md similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md rename to cli/azd/internal/mcp/tools/prompts/azd_iac_generation_rules.md diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md b/cli/azd/internal/mcp/tools/prompts/azd_infrastructure_generation.md similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md rename to cli/azd/internal/mcp/tools/prompts/azd_infrastructure_generation.md diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_plan_init.md b/cli/azd/internal/mcp/tools/prompts/azd_plan_init.md similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/azd_plan_init.md rename to cli/azd/internal/mcp/tools/prompts/azd_plan_init.md diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md b/cli/azd/internal/mcp/tools/prompts/azd_project_validation.md similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md rename to cli/azd/internal/mcp/tools/prompts/azd_project_validation.md diff --git a/cli/azd/internal/agent/tools/azd/prompts/azure.yaml.json b/cli/azd/internal/mcp/tools/prompts/azure.yaml.json similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/azure.yaml.json rename to cli/azd/internal/mcp/tools/prompts/azure.yaml.json diff --git 
a/cli/azd/internal/agent/tools/azd/prompts/prompts.go b/cli/azd/internal/mcp/tools/prompts/prompts.go similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/prompts.go rename to cli/azd/internal/mcp/tools/prompts/prompts.go From ed8df2667be834dc0b009cd0901abbc308bef33f Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Fri, 8 Aug 2025 10:19:11 -0700 Subject: [PATCH 035/116] Fixes lint issues --- cli/azd/.vscode/cspell.yaml | 13 +++++++++++++ .../internal/mcp/tools/azd_architecture_planning.go | 6 ++++-- .../internal/mcp/tools/azd_azure_yaml_generation.go | 6 ++++-- .../internal/mcp/tools/azd_discovery_analysis.go | 6 ++++-- cli/azd/internal/mcp/tools/azd_docker_generation.go | 6 ++++-- .../internal/mcp/tools/azd_iac_generation_rules.go | 6 ++++-- .../mcp/tools/azd_infrastructure_generation.go | 6 ++++-- cli/azd/internal/mcp/tools/azd_plan_init.go | 6 ++++-- .../internal/mcp/tools/azd_project_validation.go | 6 ++++-- cli/azd/internal/mcp/tools/azd_yaml_schema.go | 5 ++++- 10 files changed, 49 insertions(+), 17 deletions(-) diff --git a/cli/azd/.vscode/cspell.yaml b/cli/azd/.vscode/cspell.yaml index 628897e2098..10092e76ae4 100644 --- a/cli/azd/.vscode/cspell.yaml +++ b/cli/azd/.vscode/cspell.yaml @@ -164,6 +164,19 @@ overrides: - dall - datasource - vectorizing + - filename: docs/new-azd-command.md + words: + - pflag + - struct + - Errorf + - Sprintf + - mycommand + - omitempty + - Fprintf + - tabwriter + - azdcontext + - azapi + - eastus ignorePaths: - "**/*_test.go" - "**/mock*.go" diff --git a/cli/azd/internal/mcp/tools/azd_architecture_planning.go b/cli/azd/internal/mcp/tools/azd_architecture_planning.go index b22ae1dde7e..960c74d0cce 100644 --- a/cli/azd/internal/mcp/tools/azd_architecture_planning.go +++ b/cli/azd/internal/mcp/tools/azd_architecture_planning.go @@ -16,7 +16,8 @@ func NewAzdArchitecturePlanningTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_architecture_planning", - mcp.WithDescription(`Returns instructions for selecting appropriate Azure services for discovered application components and + mcp.WithDescription( + `Returns instructions for selecting appropriate Azure services for discovered application components and designing infrastructure architecture. The LLM agent should execute these instructions using available tools. @@ -25,7 +26,8 @@ Use this tool when: - Discovery analysis has been completed and azd-arch-plan.md exists - Application components have been identified and classified - Need to map components to Azure hosting services -- Ready to plan containerization and database strategies`), +- Ready to plan containerization and database strategies`, + ), ), Handler: handleAzdArchitecturePlanning, } diff --git a/cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go b/cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go index 50751d9246d..eaa0d51d705 100644 --- a/cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go +++ b/cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go @@ -16,7 +16,8 @@ func NewAzdAzureYamlGenerationTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_azure_yaml_generation", - mcp.WithDescription(`Returns instructions for generating the azure.yaml configuration file with proper service hosting, + mcp.WithDescription( + `Returns instructions for generating the azure.yaml configuration file with proper service hosting, build, and deployment settings for AZD projects. The LLM agent should execute these instructions using available tools. 
@@ -25,7 +26,8 @@ Use this tool when: - Architecture planning has been completed and Azure services selected - Need to create or update azure.yaml configuration file - Services have been mapped to Azure hosting platforms -- Ready to define build and deployment configurations`), +- Ready to define build and deployment configurations`, + ), ), Handler: handleAzdAzureYamlGeneration, } diff --git a/cli/azd/internal/mcp/tools/azd_discovery_analysis.go b/cli/azd/internal/mcp/tools/azd_discovery_analysis.go index 2581b21b152..1f4f6bc5087 100644 --- a/cli/azd/internal/mcp/tools/azd_discovery_analysis.go +++ b/cli/azd/internal/mcp/tools/azd_discovery_analysis.go @@ -16,7 +16,8 @@ func NewAzdDiscoveryAnalysisTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_discovery_analysis", - mcp.WithDescription(`Returns instructions for performing comprehensive discovery and analysis of application components + mcp.WithDescription( + `Returns instructions for performing comprehensive discovery and analysis of application components to prepare for Azure Developer CLI (AZD) initialization. The LLM agent should execute these instructions using available tools. @@ -25,7 +26,8 @@ Use this tool when: - Starting Phase 1 of AZD migration process - Need to identify all application components and dependencies - Codebase analysis required before architecture planning -- azd-arch-plan.md does not exist or needs updating`), +- azd-arch-plan.md does not exist or needs updating`, + ), ), Handler: handleAzdDiscoveryAnalysis, } diff --git a/cli/azd/internal/mcp/tools/azd_docker_generation.go b/cli/azd/internal/mcp/tools/azd_docker_generation.go index bca51b5b24b..c784cfe61cb 100644 --- a/cli/azd/internal/mcp/tools/azd_docker_generation.go +++ b/cli/azd/internal/mcp/tools/azd_docker_generation.go @@ -16,7 +16,8 @@ func NewAzdDockerGenerationTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_docker_generation", - mcp.WithDescription(`Returns instructions for generating optimized Dockerfiles and container configurations for containerizable + mcp.WithDescription( + `Returns instructions for generating optimized Dockerfiles and container configurations for containerizable services in AZD projects. The LLM agent should execute these instructions using available tools. @@ -25,7 +26,8 @@ Use this tool when: - Architecture planning identified services requiring containerization - azd-arch-plan.md shows Container Apps or AKS as selected hosting platform - Need Dockerfiles for microservices, APIs, or containerized web applications -- Ready to implement containerization strategy`), +- Ready to implement containerization strategy`, + ), ), Handler: handleAzdDockerGeneration, } diff --git a/cli/azd/internal/mcp/tools/azd_iac_generation_rules.go b/cli/azd/internal/mcp/tools/azd_iac_generation_rules.go index 3b9710088ff..8e2c3f1199b 100644 --- a/cli/azd/internal/mcp/tools/azd_iac_generation_rules.go +++ b/cli/azd/internal/mcp/tools/azd_iac_generation_rules.go @@ -16,7 +16,8 @@ func NewAzdIacGenerationRulesTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_iac_generation_rules", - mcp.WithDescription(`Returns comprehensive rules and guidelines for generating Bicep Infrastructure as Code files and modules + mcp.WithDescription( + `Returns comprehensive rules and guidelines for generating Bicep Infrastructure as Code files and modules for AZD projects. The LLM agent should reference these rules when generating infrastructure code. 
@@ -25,7 +26,8 @@ Use this tool when: - Generating any Bicep infrastructure templates for AZD projects - Need compliance rules and naming conventions for Azure resources - Creating modular, reusable Bicep files -- Ensuring security and operational best practices"`), +- Ensuring security and operational best practices"`, + ), ), Handler: handleAzdIacGenerationRules, } diff --git a/cli/azd/internal/mcp/tools/azd_infrastructure_generation.go b/cli/azd/internal/mcp/tools/azd_infrastructure_generation.go index ea04eba4701..bab4cfd9cf6 100644 --- a/cli/azd/internal/mcp/tools/azd_infrastructure_generation.go +++ b/cli/azd/internal/mcp/tools/azd_infrastructure_generation.go @@ -16,7 +16,8 @@ func NewAzdInfrastructureGenerationTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_infrastructure_generation", - mcp.WithDescription(`Returns instructions for generating modular Bicep infrastructure templates following Azure security and + mcp.WithDescription( + `Returns instructions for generating modular Bicep infrastructure templates following Azure security and operational best practices for AZD projects. The LLM agent should execute these instructions using available tools. @@ -24,7 +25,8 @@ The LLM agent should execute these instructions using available tools. Use this tool when: - Architecture planning completed with Azure services selected - Need to create Bicep infrastructure templates -- Ready to implement infrastructure as code for deployment`), +- Ready to implement infrastructure as code for deployment`, + ), ), Handler: handleAzdInfrastructureGeneration, } diff --git a/cli/azd/internal/mcp/tools/azd_plan_init.go b/cli/azd/internal/mcp/tools/azd_plan_init.go index b2b2cb8a143..051d233295f 100644 --- a/cli/azd/internal/mcp/tools/azd_plan_init.go +++ b/cli/azd/internal/mcp/tools/azd_plan_init.go @@ -16,7 +16,8 @@ func NewAzdPlanInitTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_plan_init", - mcp.WithDescription(`Returns instructions for orchestrating complete AZD application initialization using structured phases + mcp.WithDescription( + `Returns instructions for orchestrating complete AZD application initialization using structured phases with specialized tools. The LLM agent should execute these instructions using available tools. @@ -25,7 +26,8 @@ Use this tool when: - Starting new AZD project initialization or migration - Need structured approach to transform application into AZD-compatible project - Want to ensure proper sequencing of discovery, planning, and file generation -- Require complete project orchestration guidance`), +- Require complete project orchestration guidance`, + ), ), Handler: handleAzdPlanInit, } diff --git a/cli/azd/internal/mcp/tools/azd_project_validation.go b/cli/azd/internal/mcp/tools/azd_project_validation.go index 9620074e15a..af913410b72 100644 --- a/cli/azd/internal/mcp/tools/azd_project_validation.go +++ b/cli/azd/internal/mcp/tools/azd_project_validation.go @@ -16,7 +16,8 @@ func NewAzdProjectValidationTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_project_validation", - mcp.WithDescription(`Returns instructions for validating AZD project by running comprehensive checks on azure.yaml schema, + mcp.WithDescription( + `Returns instructions for validating AZD project by running comprehensive checks on azure.yaml schema, Bicep templates, environment setup, packaging, and deployment preview. The LLM agent should execute these instructions using available tools. 
@@ -25,7 +26,8 @@ Use this tool when: - All AZD configuration files have been generated - Ready to validate complete project before deployment - Need to ensure azure.yaml, Bicep templates, and environment are properly configured -- Final validation step before running azd up`), +- Final validation step before running azd up`, + ), ), Handler: handleAzdProjectValidation, } diff --git a/cli/azd/internal/mcp/tools/azd_yaml_schema.go b/cli/azd/internal/mcp/tools/azd_yaml_schema.go index 6afaf884813..8b65faaddbe 100644 --- a/cli/azd/internal/mcp/tools/azd_yaml_schema.go +++ b/cli/azd/internal/mcp/tools/azd_yaml_schema.go @@ -16,7 +16,10 @@ func NewAzdYamlSchemaTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_yaml_schema", - mcp.WithDescription(`Gets the Azure YAML JSON schema file specification and structure for azure.yaml configuration files used in AZD.`), + mcp.WithDescription( + `Gets the Azure YAML JSON schema file specification and structure for azure.yaml `+ + `configuration files used in AZD.`, + ), ), Handler: handleAzdYamlSchema, } From 718112b296eee70a34a8db53d57f48f2e8d7e398 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Fri, 8 Aug 2025 10:44:16 -0700 Subject: [PATCH 036/116] Adds github copilot instructions --- cli/azd/.github/copilot-instructions.md | 200 ++++++++++++++++++++++++ cli/azd/.vscode/cspell.yaml | 5 + cli/azd/cmd/mcp.go | 8 +- 3 files changed, 208 insertions(+), 5 deletions(-) create mode 100644 cli/azd/.github/copilot-instructions.md diff --git a/cli/azd/.github/copilot-instructions.md b/cli/azd/.github/copilot-instructions.md new file mode 100644 index 00000000000..36484b26f95 --- /dev/null +++ b/cli/azd/.github/copilot-instructions.md @@ -0,0 +1,200 @@ +# GitHub Copilot Instructions for Azure Developer CLI (azd) + +## Project Overview + +The Azure Developer CLI (azd) is a comprehensive command-line tool built in Go that streamlines Azure application development and deployment. The project follows Microsoft coding standards and uses a layered architecture with dependency injection, structured command patterns, and comprehensive testing. + +## Getting Started + +### Prerequisites +- [Go](https://go.dev/dl/) 1.24 +- [VS Code](https://code.visualstudio.com/) with [Go extension](https://marketplace.visualstudio.com/items?itemName=golang.Go) + +### Building & Testing +```bash +# Build +cd cli/azd +go build + +# Run tests (unit only) +go test ./... -short + +# Run all tests (including end-to-end) +go test ./... 
+``` + +### Development Guidelines +- Check existing [bug issues](https://github.com/Azure/azure-dev/issues?q=is%3Aopen+is%3Aissue+label%3Abug) or [enhancement issues](https://github.com/Azure/azure-dev/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement) +- Open an issue before starting work on significant changes +- Submit pull requests following the established patterns + +## Architecture & Design Patterns + +### Core Architecture +- **Layered Architecture**: `ActionDescriptor Tree → CobraBuilder → Cobra Commands → CLI` +- **Dependency Injection**: IoC container pattern for service resolution +- **Command Pattern**: Actions implement the `Action` interface with `Run(ctx context.Context) (*ActionResult, error)` +- **Model Context Protocol (MCP)**: Server implementation for AI agent interactions + +### Key Components +- **ActionDescriptor**: Higher-order component defining commands, flags, middleware, and relationships +- **Actions**: Application logic handling CLI commands (`cmd/actions/`) +- **Tools**: External tool integrations and MCP server tools +- **Packages**: Reusable business logic (`pkg/`) +- **Internal**: Internal implementation details (`internal/`) + +## Command Development + +For detailed guidance on adding new commands, see: +- **[docs/new-azd-command.md](./docs/new-azd-command.md)** - Comprehensive guide for adding new commands + +### Quick Reference +- Follow the ActionDescriptor pattern for new commands +- Use dependency injection for service resolution +- Implement proper error handling and output formatting +- Support multiple output formats (JSON, Table, None) + +## Code Quality Standards + +### Required Linting Pipeline +Always run this complete pipeline before submitting changes: +```bash +cspell lint '**/*.go' --config ./.vscode/cspell.yaml --root . --no-progress && \ +golines . -w -m 125 && \ +golangci-lint run --timeout 5m && \ +../../eng/scripts/copyright-check.sh . --fix +``` + +**Pipeline Components:** +- `cspell`: Spell checking for Go files +- `golines`: Line length formatting for Go files (125 char limit) +- `golangci-lint`: Go code quality and style checking +- `copyright-check.sh`: Ensures proper Microsoft copyright headers + +### Line Length & Formatting +- **Maximum line length for Go files**: 125 characters (enforced by `lll` linter) +- Use `golines` with `-m 125` flag for automatic formatting of Go code +- Break long strings in Go code using string concatenation with `+` +- **Documentation files (Markdown)**: No strict line length limit, prioritize readability + +### Spelling & Documentation +- Use cspell with project config: `--config ./.vscode/cspell.yaml` +- Add technical terms to document-specific overrides in `.vscode/cspell.yaml` +- Pattern for document-specific words: +```yaml +overrides: + - filename: path/to/file.ext + words: + - technicalterm1 + - technicalterm2 +``` + +### Copyright Headers +All Go files must include Microsoft copyright header: +```go +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +``` + +## MCP Tools Development + +### Tool Pattern +MCP tools follow the ServerTool interface pattern from `github.com/mark3labs/mcp-go/server`. 
Each tool should have: +- Constructor function: `NewXXXTool() server.ServerTool` +- Handler function: `handleXXX(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error)` +- Proper tool descriptions and parameter definitions +- Snake_case tool names (e.g., `azd_plan_init`) + +## Package Structure Guidelines + +### Import Organization +1. Standard library imports +2. External dependencies +3. Azure/azd internal packages +4. Local package imports + +### Internal vs Package Separation +- `internal/`: Implementation details, not meant for external use +- `pkg/`: Reusable business logic that could be imported by other projects +- Clear interface boundaries between packages + +## Testing Requirements + +### Test Commands +```bash +# Unit tests only +go test ./... -short + +# All tests including end-to-end +go test ./... +``` + +### Test File Patterns +- Unit tests: `*_test.go` alongside source files +- Functional tests: `test/functional/` directory +- Mock exclusions configured in `.golangci.yaml` + +## Error Handling & Logging + +### Error Patterns +- Use `fmt.Errorf` for error wrapping with context +- Return meaningful error messages for CLI users +- Handle context cancellation appropriately + +### Output Formatting +- Support multiple output formats: JSON, Table, None +- Use structured output for machine consumption +- Provide user-friendly messages for human consumption + +## Documentation Standards + +### Code Documentation +- Public functions and types must have Go doc comments +- Comments should start with the function/type name +- Provide context and usage examples where helpful + +### Inline Documentation +- Use clear variable and function names +- Add comments for complex business logic +- Document non-obvious dependencies or assumptions + +## Security & Best Practices + +### Enabled Linters +- `errorlint`: Error handling best practices +- `gosec`: Security vulnerability detection +- `lll`: Line length enforcement (125 chars) +- `staticcheck`: Advanced static analysis + +### Security Considerations +- Handle sensitive data appropriately (credentials, tokens) +- Validate all user inputs +- Use secure defaults for configuration +- Follow Azure security best practices + +## Validation Checklist + +Before submitting any changes, ensure: + +- [ ] All linting pipeline steps pass without errors +- [ ] Copyright headers are present on all new files +- [ ] Spelling check passes with appropriate dictionary entries +- [ ] Line length under 125 characters for Go files (Markdown files have no strict limit) +- [ ] Tests pass (unit and integration where applicable) +- [ ] Error handling is comprehensive and user-friendly +- [ ] Documentation is updated for new features +- [ ] Command patterns follow established conventions +- [ ] MCP tools follow ServerTool interface pattern +- [ ] Package organization follows internal/pkg separation +- [ ] Import statements are properly organized + +## Common Patterns to Follow + +### Key Principles +- Use ActionDescriptor pattern for command registration +- Leverage dependency injection through IoC container +- Follow established naming conventions (see docs/new-azd-command.md) +- Implement proper error handling and output formatting +- Use structured configuration with sensible defaults + +This instruction set ensures consistency with the established codebase patterns and helps maintain the high-quality standards expected in the Azure Developer CLI project. 
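To make the ServerTool conventions above concrete, here is a minimal sketch of a tool that follows them. The tool name `azd_example`, its description, and the returned text are hypothetical placeholders used only for illustration; the shape mirrors the schema tool added earlier in this series.

```go
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

package tools

import (
	"context"

	"github.com/mark3labs/mcp-go/mcp"
	"github.com/mark3labs/mcp-go/server"
)

// NewAzdExampleTool creates a new example tool using the ServerTool pattern:
// a constructor that returns server.ServerTool plus a separate handler function.
func NewAzdExampleTool() server.ServerTool {
	return server.ServerTool{
		Tool: mcp.NewTool(
			"azd_example",
			mcp.WithDescription(
				`Returns example instructions demonstrating the MCP tool pattern used in AZD. `+
					`The LLM agent should execute these instructions using available tools.`,
			),
		),
		Handler: handleAzdExample,
	}
}

// handleAzdExample returns static text content for the example tool.
func handleAzdExample(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
	return mcp.NewToolResultText("Example instructions go here."), nil
}
```

The snake_case tool name and the paired `NewXXXTool`/`handleXXX` functions match the existing tools under `cli/azd/internal/mcp/tools`.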
diff --git a/cli/azd/.vscode/cspell.yaml b/cli/azd/.vscode/cspell.yaml index 10092e76ae4..16fd4325393 100644 --- a/cli/azd/.vscode/cspell.yaml +++ b/cli/azd/.vscode/cspell.yaml @@ -177,6 +177,11 @@ overrides: - azdcontext - azapi - eastus + - filename: .github/copilot-instructions.md + words: + - golines + - technicalterm + - Errorf ignorePaths: - "**/*_test.go" - "**/mock*.go" diff --git a/cli/azd/cmd/mcp.go b/cli/azd/cmd/mcp.go index b9c7652f3ec..7e78c9e8a80 100644 --- a/cli/azd/cmd/mcp.go +++ b/cli/azd/cmd/mcp.go @@ -22,11 +22,9 @@ import ( func mcpActions(root *actions.ActionDescriptor) *actions.ActionDescriptor { group := root.Add("mcp", &actions.ActionDescriptorOptions{ Command: &cobra.Command{ - Use: "mcp", - Short: "Manage Model Context Protocol (MCP) server.", - }, - GroupingOptions: actions.CommandGroupOptions{ - RootLevelHelp: actions.CmdGroupAlpha, + Use: "mcp", + Short: "Manage Model Context Protocol (MCP) server.", + Hidden: true, }, }) From 36d888b0a02e5d6f934e488bf871646e1cfef70f Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Fri, 8 Aug 2025 11:00:52 -0700 Subject: [PATCH 037/116] Updates inline go docs --- cli/azd/internal/agent/agent.go | 11 +++++++ .../internal/agent/conversational_agent.go | 14 ++++++--- .../logging/{logger.go => action_logger.go} | 0 cli/azd/internal/agent/one_shot_agent.go | 11 ++++--- cli/azd/internal/agent/tools/loader.go | 5 ++- cli/azd/internal/agent/tools/mcp/loader.go | 31 ++++++++++++++----- .../agent/tools/mcp/sampling_handler.go | 31 +++++++++++++------ cli/azd/pkg/llm/azure_openai.go | 6 ++++ cli/azd/pkg/llm/manager.go | 2 +- cli/azd/pkg/llm/model.go | 6 +++- cli/azd/pkg/llm/model_factory.go | 6 ++++ cli/azd/pkg/llm/ollama.go | 6 ++++ 12 files changed, 101 insertions(+), 28 deletions(-) rename cli/azd/internal/agent/logging/{logger.go => action_logger.go} (100%) diff --git a/cli/azd/internal/agent/agent.go b/cli/azd/internal/agent/agent.go index 8dc07ae2668..e3ead707b6d 100644 --- a/cli/azd/internal/agent/agent.go +++ b/cli/azd/internal/agent/agent.go @@ -13,6 +13,8 @@ import ( "github.com/tmc/langchaingo/tools" ) +// Agent represents an AI agent that can execute tools and interact with language models. +// It manages multiple models for different purposes and maintains an executor for tool execution. 
type Agent struct { debug bool defaultModel llms.Model @@ -22,38 +24,45 @@ type Agent struct { callbacksHandler callbacks.Handler } +// AgentOption is a functional option for configuring an Agent type AgentOption func(*Agent) +// WithDebug returns an option that enables or disables debug logging for the agent func WithDebug(debug bool) AgentOption { return func(agent *Agent) { agent.debug = debug } } +// WithDefaultModel returns an option that sets the default language model for the agent func WithDefaultModel(model llms.Model) AgentOption { return func(agent *Agent) { agent.defaultModel = model } } +// WithSamplingModel returns an option that sets the sampling model for the agent func WithSamplingModel(model llms.Model) AgentOption { return func(agent *Agent) { agent.samplingModel = model } } +// WithTools returns an option that adds the specified tools to the agent's toolkit func WithTools(tools ...tools.Tool) AgentOption { return func(agent *Agent) { agent.tools = tools } } +// WithCallbacksHandler returns an option that sets the callbacks handler for the agent func WithCallbacksHandler(handler callbacks.Handler) AgentOption { return func(agent *Agent) { agent.callbacksHandler = handler } } +// toolNames returns a comma-separated string of all tool names in the provided slice func toolNames(tools []tools.Tool) string { var tn strings.Builder for i, tool := range tools { @@ -66,6 +75,8 @@ func toolNames(tools []tools.Tool) string { return tn.String() } +// toolDescriptions returns a formatted string containing the name and description +// of each tool in the provided slice, with each tool on a separate line func toolDescriptions(tools []tools.Tool) string { var ts strings.Builder for _, tool := range tools { diff --git a/cli/azd/internal/agent/conversational_agent.go b/cli/azd/internal/agent/conversational_agent.go index 1c6622cac7d..5fe070b2b02 100644 --- a/cli/azd/internal/agent/conversational_agent.go +++ b/cli/azd/internal/agent/conversational_agent.go @@ -26,12 +26,15 @@ import ( //go:embed prompts/conversational.txt var conversational_prompt_template string -// ConversationalAzdAiAgent represents an enhanced AZD Copilot agent with action tracking, -// intent validation, and conversation memory +// ConversationalAzdAiAgent represents an enhanced AZD Copilot agent with conversation memory, +// tool filtering, and interactive capabilities type ConversationalAzdAiAgent struct { *Agent } +// NewConversationalAzdAiAgent creates a new conversational agent with memory, tool loading, +// and MCP sampling capabilities. It filters out excluded tools and configures the agent +// for interactive conversations with a high iteration limit for complex tasks. func NewConversationalAzdAiAgent(llm llms.Model, opts ...AgentOption) (*ConversationalAzdAiAgent, error) { azdAgent := &ConversationalAzdAiAgent{ Agent: &Agent{ @@ -115,11 +118,14 @@ func NewConversationalAzdAiAgent(llm llms.Model, opts ...AgentOption) (*Conversa return azdAgent, nil } +// SendMessage processes a single message through the agent and returns the response func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, args ...string) (string, error) { return aai.runChain(ctx, strings.Join(args, "\n")) } -// RunConversationLoop runs the enhanced AZD Copilot agent with full capabilities +// StartConversation runs an interactive conversation loop with the agent. +// It accepts an optional initial query and handles user input/output with proper formatting. +// The conversation continues until the user types "exit" or "quit". 
func (aai *ConversationalAzdAiAgent) StartConversation(ctx context.Context, args ...string) (string, error) { fmt.Println("🤖 AZD Copilot - Interactive Mode") fmt.Println("═══════════════════════════════════════════════════════════") @@ -171,7 +177,7 @@ func (aai *ConversationalAzdAiAgent) StartConversation(ctx context.Context, args return "", nil } -// ProcessQuery processes a user query with full action tracking and validation +// runChain executes a user query through the agent's chain with memory and returns the response func (aai *ConversationalAzdAiAgent) runChain(ctx context.Context, userInput string) (string, error) { // Execute with enhanced input - agent should automatically handle memory output, err := chains.Run(ctx, aai.executor, userInput) diff --git a/cli/azd/internal/agent/logging/logger.go b/cli/azd/internal/agent/logging/action_logger.go similarity index 100% rename from cli/azd/internal/agent/logging/logger.go rename to cli/azd/internal/agent/logging/action_logger.go diff --git a/cli/azd/internal/agent/one_shot_agent.go b/cli/azd/internal/agent/one_shot_agent.go index e2d8c9adcd2..7137e6efccd 100644 --- a/cli/azd/internal/agent/one_shot_agent.go +++ b/cli/azd/internal/agent/one_shot_agent.go @@ -18,8 +18,8 @@ import ( mcptools "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" ) -// OneShotAzdAiAgent represents an enhanced AZD Copilot agent with action tracking, -// intent validation, and conversation memory +// OneShotAzdAiAgent represents an AZD Copilot agent designed for single-request processing +// without conversation memory, optimized for one-time queries and responses type OneShotAzdAiAgent struct { *Agent } @@ -27,6 +27,9 @@ type OneShotAzdAiAgent struct { //go:embed prompts/one_shot.txt var one_shot_prompt_template string +// NewOneShotAzdAiAgent creates a new one-shot agent optimized for single queries. +// It loads tools from multiple sources, filters excluded tools, and configures +// the agent for stateless operation without conversation memory. 
func NewOneShotAzdAiAgent(llm llms.Model, opts ...AgentOption) (*OneShotAzdAiAgent, error) { azdAgent := &OneShotAzdAiAgent{ Agent: &Agent{ @@ -100,12 +103,12 @@ func NewOneShotAzdAiAgent(llm llms.Model, opts ...AgentOption) (*OneShotAzdAiAge return azdAgent, nil } -// RunConversationLoop runs the enhanced AZD Copilot agent with full capabilities +// SendMessage processes a single message through the one-shot agent and returns the response func (aai *OneShotAzdAiAgent) SendMessage(ctx context.Context, args ...string) (string, error) { return aai.runChain(ctx, strings.Join(args, "\n")) } -// ProcessQuery processes a user query with full action tracking and validation +// runChain executes a user query through the one-shot agent without memory persistence func (aai *OneShotAzdAiAgent) runChain(ctx context.Context, userInput string) (string, error) { // Execute with enhanced input - agent should automatically handle memory output, err := chains.Run(ctx, aai.executor, userInput) diff --git a/cli/azd/internal/agent/tools/loader.go b/cli/azd/internal/agent/tools/loader.go index a0662c65c5c..d45098543a2 100644 --- a/cli/azd/internal/agent/tools/loader.go +++ b/cli/azd/internal/agent/tools/loader.go @@ -15,10 +15,12 @@ type ToolLoader interface { LoadTools() ([]tools.Tool, error) } +// LocalToolsLoader manages loading tools from multiple local tool categories type LocalToolsLoader struct { loaders []ToolLoader } +// NewLocalToolsLoader creates a new instance with default tool loaders for dev and io categories func NewLocalToolsLoader() *LocalToolsLoader { return &LocalToolsLoader{ loaders: []ToolLoader{ @@ -28,7 +30,8 @@ func NewLocalToolsLoader() *LocalToolsLoader { } } -// LoadLocalTools loads all tools from all categories with the provided callback handler +// LoadTools loads and returns all tools from all registered tool loaders. +// Returns an error if any individual loader fails to load its tools. 
func (l *LocalToolsLoader) LoadTools() ([]tools.Tool, error) { var allTools []tools.Tool diff --git a/cli/azd/internal/agent/tools/mcp/loader.go b/cli/azd/internal/agent/tools/mcp/loader.go index 76346c11113..af9e8321470 100644 --- a/cli/azd/internal/agent/tools/mcp/loader.go +++ b/cli/azd/internal/agent/tools/mcp/loader.go @@ -7,6 +7,7 @@ import ( "context" "encoding/json" "fmt" + "log" _ "embed" @@ -21,27 +22,40 @@ var _mcpJson string // McpConfig represents the overall MCP configuration structure type McpConfig struct { + // Servers maps server names to their configurations Servers map[string]ServerConfig `json:"servers"` } // ServerConfig represents an individual server configuration type ServerConfig struct { - Type string `json:"type"` - Command string `json:"command"` - Args []string `json:"args,omitempty"` - Env []string `json:"env,omitempty"` + // Type specifies the type of MCP server (e.g., "stdio") + Type string `json:"type"` + // Command is the executable path or command to run the MCP server + Command string `json:"command"` + // Args are optional command-line arguments for the server command + Args []string `json:"args,omitempty"` + // Env are optional environment variables for the server process + Env []string `json:"env,omitempty"` } +// McpToolsLoader manages the loading of tools from MCP (Model Context Protocol) servers type McpToolsLoader struct { + // samplingHandler handles sampling requests from MCP clients samplingHandler client.SamplingHandler } +// NewMcpToolsLoader creates a new instance of McpToolsLoader with the provided sampling handler func NewMcpToolsLoader(samplingHandler client.SamplingHandler) *McpToolsLoader { return &McpToolsLoader{ samplingHandler: samplingHandler, } } +// LoadTools loads and returns all available tools from configured MCP servers. +// It parses the embedded mcp.json configuration, connects to each server, +// and collects all tools from each successfully connected server. +// Returns an error if the configuration cannot be parsed, but continues +// processing other servers if individual server connections fail. 
func (l *McpToolsLoader) LoadTools() ([]tools.Tool, error) { // Deserialize the embedded mcp.json configuration var config McpConfig @@ -60,19 +74,22 @@ func (l *McpToolsLoader) LoadTools() ([]tools.Tool, error) { ctx := context.Background() if err := mcpClient.Start(ctx); err != nil { - return nil, err + log.Printf("Failed to start MCP client for server %s: %v", serverName, err) + continue } // Create the adapter adapter, err := langchaingo_mcp_adapter.New(mcpClient) if err != nil { - return nil, fmt.Errorf("failed to create adapter for server %s: %w", serverName, err) + log.Printf("Failed to create adapter for server %s: %v", serverName, err) + continue } // Get all tools from MCP server mcpTools, err := adapter.Tools() if err != nil { - return nil, fmt.Errorf("failed to get tools from server %s: %w", serverName, err) + log.Printf("Failed to get tools from server %s: %v", serverName, err) + continue } // Add the tools to our collection diff --git a/cli/azd/internal/agent/tools/mcp/sampling_handler.go b/cli/azd/internal/agent/tools/mcp/sampling_handler.go index 0af9f051f4a..0c0b3f7df0d 100644 --- a/cli/azd/internal/agent/tools/mcp/sampling_handler.go +++ b/cli/azd/internal/agent/tools/mcp/sampling_handler.go @@ -14,19 +14,25 @@ import ( "github.com/tmc/langchaingo/llms" ) +// McpSamplingHandler handles sampling requests from MCP clients by delegating +// to an underlying language model and converting responses to MCP format type McpSamplingHandler struct { llm llms.Model debug bool } +// SamplingHandlerOption is a functional option for configuring McpSamplingHandler type SamplingHandlerOption func(*McpSamplingHandler) +// WithDebug returns an option that enables or disables debug logging func WithDebug(debug bool) SamplingHandlerOption { return func(h *McpSamplingHandler) { h.debug = debug } } +// NewMcpSamplingHandler creates a new MCP sampling handler with the specified +// language model and applies any provided options func NewMcpSamplingHandler(llm llms.Model, opts ...SamplingHandlerOption) *McpSamplingHandler { handler := &McpSamplingHandler{ llm: llm, @@ -39,16 +45,10 @@ func NewMcpSamplingHandler(llm llms.Model, opts ...SamplingHandlerOption) *McpSa return handler } -// cleanContent converts literal line break escape sequences to actual line break characters -func (h *McpSamplingHandler) cleanContent(content string) string { - // Replace literal escape sequences with actual control characters - // Handle Windows-style \r\n first (most common), then individual ones - content = strings.ReplaceAll(content, "\\r\\n", "\r\n") - content = strings.ReplaceAll(content, "\\n", "\n") - content = strings.ReplaceAll(content, "\\r", "\r") - return content -} - +// CreateMessage handles MCP sampling requests by converting MCP messages to the +// language model format, generating a response, and converting back to MCP format. +// It supports various content types including text, maps, and arrays, and provides +// debug logging when enabled. Returns an error-wrapped response if LLM generation fails. func (h *McpSamplingHandler) CreateMessage( ctx context.Context, request mcp.CreateMessageRequest, @@ -156,3 +156,14 @@ func (h *McpSamplingHandler) CreateMessage( return samplingResponse, nil } + +// cleanContent converts literal line break escape sequences to actual line break characters. +// It handles Windows-style \r\n sequences first, then individual \n and \r sequences. 
+func (h *McpSamplingHandler) cleanContent(content string) string { + // Replace literal escape sequences with actual control characters + // Handle Windows-style \r\n first (most common), then individual ones + content = strings.ReplaceAll(content, "\\r\\n", "\r\n") + content = strings.ReplaceAll(content, "\\n", "\n") + content = strings.ReplaceAll(content, "\\r", "\r") + return content +} diff --git a/cli/azd/pkg/llm/azure_openai.go b/cli/azd/pkg/llm/azure_openai.go index e1108982377..7319e7c6087 100644 --- a/cli/azd/pkg/llm/azure_openai.go +++ b/cli/azd/pkg/llm/azure_openai.go @@ -11,6 +11,7 @@ import ( "github.com/tmc/langchaingo/llms/openai" ) +// AzureOpenAiModelConfig holds configuration settings for Azure OpenAI models type AzureOpenAiModelConfig struct { Model string `json:"model"` Version string `json:"version"` @@ -21,16 +22,21 @@ type AzureOpenAiModelConfig struct { MaxTokens *int `json:"maxTokens"` } +// AzureOpenAiModelProvider creates Azure OpenAI models from user configuration type AzureOpenAiModelProvider struct { userConfigManager config.UserConfigManager } +// NewAzureOpenAiModelProvider creates a new Azure OpenAI model provider func NewAzureOpenAiModelProvider(userConfigManager config.UserConfigManager) ModelProvider { return &AzureOpenAiModelProvider{ userConfigManager: userConfigManager, } } +// CreateModelContainer creates a model container for Azure OpenAI with configuration +// loaded from user settings. It validates required fields and applies optional parameters +// like temperature and max tokens before creating the OpenAI client. func (p *AzureOpenAiModelProvider) CreateModelContainer(opts ...ModelOption) (*ModelContainer, error) { userConfig, err := p.userConfigManager.Load() if err != nil { diff --git a/cli/azd/pkg/llm/manager.go b/cli/azd/pkg/llm/manager.go index 377e5d2311f..b00f98c53eb 100644 --- a/cli/azd/pkg/llm/manager.go +++ b/cli/azd/pkg/llm/manager.go @@ -116,7 +116,7 @@ func (m Manager) GetDefaultModel(opts ...ModelOption) (*ModelContainer, error) { return m.ModelFactory.CreateModelContainer(LlmType(defaultModelType), opts...) } -// GetModel returns the configured model from the global azd user configuration +// GetModel returns the specified model type from the global azd user configuration func (m Manager) GetModel(modelType LlmType, opts ...ModelOption) (*ModelContainer, error) { return m.ModelFactory.CreateModelContainer(modelType, opts...) 
} diff --git a/cli/azd/pkg/llm/model.go b/cli/azd/pkg/llm/model.go index 5bfba259675..aa55b628a6b 100644 --- a/cli/azd/pkg/llm/model.go +++ b/cli/azd/pkg/llm/model.go @@ -12,12 +12,13 @@ import ( var _ llms.Model = (*modelWithCallOptions)(nil) -// / Wraps an langchaingo model to allow specifying specific call options at create time +// modelWithCallOptions wraps a langchaingo model to allow specifying default call options at creation time type modelWithCallOptions struct { model llms.Model options []llms.CallOption } +// newModelWithCallOptions creates a new model wrapper with default call options func newModelWithCallOptions(model llms.Model, options ...llms.CallOption) *modelWithCallOptions { return &modelWithCallOptions{ model: model, @@ -25,6 +26,8 @@ func newModelWithCallOptions(model llms.Model, options ...llms.CallOption) *mode } } +// GenerateContent generates content using the wrapped model, combining default options +// with any additional options provided at call time func (m *modelWithCallOptions) GenerateContent( ctx context.Context, messages []llms.MessageContent, @@ -37,6 +40,7 @@ func (m *modelWithCallOptions) GenerateContent( return m.model.GenerateContent(ctx, messages, allOptions...) } +// Call is deprecated and returns an error directing users to use GenerateContent instead func (m *modelWithCallOptions) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) { return "", fmt.Errorf("Deprecated, call GenerateContent") } diff --git a/cli/azd/pkg/llm/model_factory.go b/cli/azd/pkg/llm/model_factory.go index 3994b1d3e08..7aa79c462cc 100644 --- a/cli/azd/pkg/llm/model_factory.go +++ b/cli/azd/pkg/llm/model_factory.go @@ -10,16 +10,21 @@ import ( "github.com/azure/azure-dev/cli/azd/pkg/ioc" ) +// ModelFactory creates model containers using registered model providers type ModelFactory struct { serviceLocator ioc.ServiceLocator } +// NewModelFactory creates a new model factory with the given service locator func NewModelFactory(serviceLocator ioc.ServiceLocator) *ModelFactory { return &ModelFactory{ serviceLocator: serviceLocator, } } +// CreateModelContainer creates a model container for the specified model type. +// It resolves the appropriate model provider and delegates container creation to it. +// Returns an error with suggestions if the model type is not supported. func (f *ModelFactory) CreateModelContainer(modelType LlmType, opts ...ModelOption) (*ModelContainer, error) { var modelProvider ModelProvider if err := f.serviceLocator.ResolveNamed(string(modelType), &modelProvider); err != nil { @@ -33,6 +38,7 @@ func (f *ModelFactory) CreateModelContainer(modelType LlmType, opts ...ModelOpti return modelProvider.CreateModelContainer(opts...) 
} +// ModelProvider defines the interface for creating model containers type ModelProvider interface { CreateModelContainer(opts ...ModelOption) (*ModelContainer, error) } diff --git a/cli/azd/pkg/llm/ollama.go b/cli/azd/pkg/llm/ollama.go index 27d6e1e83af..51015163e09 100644 --- a/cli/azd/pkg/llm/ollama.go +++ b/cli/azd/pkg/llm/ollama.go @@ -9,6 +9,7 @@ import ( "github.com/tmc/langchaingo/llms/ollama" ) +// OllamaModelConfig holds configuration settings for Ollama models type OllamaModelConfig struct { Model string `json:"model"` Version string `json:"version"` @@ -16,16 +17,21 @@ type OllamaModelConfig struct { MaxTokens *int `json:"maxTokens"` } +// OllamaModelProvider creates Ollama models from user configuration with sensible defaults type OllamaModelProvider struct { userConfigManager config.UserConfigManager } +// NewOllamaModelProvider creates a new Ollama model provider func NewOllamaModelProvider(userConfigManager config.UserConfigManager) ModelProvider { return &OllamaModelProvider{ userConfigManager: userConfigManager, } } +// CreateModelContainer creates a model container for Ollama with configuration from user settings. +// It defaults to "llama3" model if none specified and "latest" version if not configured. +// Applies optional parameters like temperature and max tokens to the Ollama client. func (p *OllamaModelProvider) CreateModelContainer(opts ...ModelOption) (*ModelContainer, error) { userConfig, err := p.userConfigManager.Load() if err != nil { From 6e1ad1b3bfc5bc1e230a35f211cd04b19e0d1115 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Fri, 25 Jul 2025 17:41:26 -0700 Subject: [PATCH 038/116] WIP: azd ai chat Get AI configuration from config Adds factory to create agent instances --- .../extensions/azd.ai.start/AZURE_AI_SETUP.md | 98 +++++ cli/azd/extensions/azd.ai.start/README.md | 33 ++ cli/azd/extensions/azd.ai.start/USAGE.md | 50 +++ cli/azd/extensions/azd.ai.start/build.ps1 | 71 ++++ cli/azd/extensions/azd.ai.start/build.sh | 66 ++++ cli/azd/extensions/azd.ai.start/changelog.md | 3 + .../extensions/azd.ai.start/extension.yaml | 9 + cli/azd/extensions/azd.ai.start/go.mod | 60 +++ cli/azd/extensions/azd.ai.start/go.sum | 352 +++++++++++++++++ .../azd.ai.start/internal/agent/agent.go | 362 ++++++++++++++++++ .../azd.ai.start/internal/agent/factory.go | 66 ++++ .../azd.ai.start/internal/agent/response.go | 25 ++ .../azd.ai.start/internal/agent/stats.go | 16 + .../internal/cmd/enhanced_integration.go | 100 +++++ .../azd.ai.start/internal/cmd/root.go | 97 +++++ .../azd.ai.start/internal/logging/logger.go | 134 +++++++ .../azd.ai.start/internal/session/action.go | 41 ++ .../azd.ai.start/internal/session/session.go | 48 +++ .../internal/tools/change_directory.go | 51 +++ .../azd.ai.start/internal/tools/copy_file.go | 71 ++++ .../internal/tools/create_directory.go | 41 ++ .../internal/tools/current_directory.go | 39 ++ .../internal/tools/delete_directory.go | 53 +++ .../internal/tools/delete_file.go | 43 +++ .../internal/tools/directory_list.go | 92 +++++ .../azd.ai.start/internal/tools/file_info.go | 67 ++++ .../internal/tools/http_fetcher.go | 43 +++ .../azd.ai.start/internal/tools/move_file.go | 62 +++ .../azd.ai.start/internal/tools/read_file.go | 37 ++ .../azd.ai.start/internal/tools/weather.go | 105 +++++ .../azd.ai.start/internal/tools/write_file.go | 101 +++++ .../azd.ai.start/internal/utils/helpers.go | 41 ++ .../internal/validation/parser.go | 93 +++++ .../azd.ai.start/internal/validation/types.go | 21 + .../internal/validation/validator.go | 68 ++++ 
cli/azd/extensions/azd.ai.start/main.go | 30 ++ 36 files changed, 2689 insertions(+) create mode 100644 cli/azd/extensions/azd.ai.start/AZURE_AI_SETUP.md create mode 100644 cli/azd/extensions/azd.ai.start/README.md create mode 100644 cli/azd/extensions/azd.ai.start/USAGE.md create mode 100644 cli/azd/extensions/azd.ai.start/build.ps1 create mode 100644 cli/azd/extensions/azd.ai.start/build.sh create mode 100644 cli/azd/extensions/azd.ai.start/changelog.md create mode 100644 cli/azd/extensions/azd.ai.start/extension.yaml create mode 100644 cli/azd/extensions/azd.ai.start/go.mod create mode 100644 cli/azd/extensions/azd.ai.start/go.sum create mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/agent.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/factory.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/response.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/stats.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/cmd/root.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/logging/logger.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/session/action.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/session/session.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/file_info.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/move_file.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/read_file.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/weather.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/write_file.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/utils/helpers.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/validation/parser.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/validation/types.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/validation/validator.go create mode 100644 cli/azd/extensions/azd.ai.start/main.go diff --git a/cli/azd/extensions/azd.ai.start/AZURE_AI_SETUP.md b/cli/azd/extensions/azd.ai.start/AZURE_AI_SETUP.md new file mode 100644 index 00000000000..9d985f2f36e --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/AZURE_AI_SETUP.md @@ -0,0 +1,98 @@ +# Azure AI Integration Setup + +This AI agent can work with both OpenAI and Azure OpenAI Service. Here's how to configure each: + +## Option 1: Azure OpenAI Service (Recommended for Azure users) + +Azure OpenAI provides the same models as OpenAI but hosted on Azure infrastructure with enterprise security and compliance. + +### Prerequisites +1. Azure subscription +2. Azure OpenAI resource created in Azure portal +3. 
GPT model deployed (e.g., GPT-3.5-turbo or GPT-4) + +### Environment Variables +```bash +# Set these environment variables for Azure OpenAI +export AZURE_OPENAI_ENDPOINT="https://your-resource-name.openai.azure.com" +export AZURE_OPENAI_API_KEY="your-azure-openai-api-key" +export AZURE_OPENAI_DEPLOYMENT_NAME="your-gpt-deployment-name" +``` + +### PowerShell (Windows) +```powershell +$env:AZURE_OPENAI_ENDPOINT="https://your-resource-name.openai.azure.com" +$env:AZURE_OPENAI_API_KEY="your-azure-openai-api-key" +$env:AZURE_OPENAI_DEPLOYMENT_NAME="your-gpt-deployment-name" +``` + +## Option 2: OpenAI API (Direct) + +### Environment Variables +```bash +export OPENAI_API_KEY="your-openai-api-key" +``` + +### PowerShell (Windows) +```powershell +$env:OPENAI_API_KEY="your-openai-api-key" +``` + +## Usage Examples + +```bash +# Interactive mode +azd ai.chat + +# Direct query +azd ai.chat "How do I deploy a Node.js app to Azure Container Apps?" + +# Azure-specific queries +azd ai.chat "What's the best way to set up CI/CD with Azure DevOps for my web app?" +azd ai.chat "How do I configure Azure Key Vault for my application secrets?" +``` + +## Azure OpenAI Advantages + +- **Enterprise Security**: Your data stays within your Azure tenant +- **Compliance**: Meets enterprise compliance requirements +- **Integration**: Better integration with other Azure services +- **Cost Control**: Better cost management and billing integration +- **Regional Deployment**: Deploy closer to your users for lower latency + +## Setup Steps for Azure OpenAI + +1. **Create Azure OpenAI Resource**: + ```bash + az cognitiveservices account create \ + --name myopenai \ + --resource-group myresourcegroup \ + --location eastus \ + --kind OpenAI \ + --sku s0 + ``` + +2. **Deploy a Model**: + - Go to Azure OpenAI Studio + - Navigate to "Deployments" + - Create a new deployment with your chosen model (e.g., gpt-35-turbo) + - Note the deployment name for the environment variable + +3. **Get API Key**: + ```bash + az cognitiveservices account keys list \ + --name myopenai \ + --resource-group myresourcegroup + ``` + +4. **Set Environment Variables** as shown above + +## Model Compatibility + +The agent supports various GPT models available in Azure OpenAI: +- GPT-3.5-turbo +- GPT-4 +- GPT-4-turbo +- And newer models as they become available + +Just make sure your deployment name matches the model you want to use. diff --git a/cli/azd/extensions/azd.ai.start/README.md b/cli/azd/extensions/azd.ai.start/README.md new file mode 100644 index 00000000000..9ff29633ea4 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/README.md @@ -0,0 +1,33 @@ +# Node.js Express App + +This is a simple Node.js application using Express with a basic routing setup. + +## Project Structure + +``` +. +├── app.js +├── package.json +├── README.md +└── routes + └── index.js +``` + +## Getting Started + +1. Install dependencies: + ```bash + npm install + ``` +2. Start the server: + ```bash + npm start + ``` +3. Visit [http://localhost:3000](http://localhost:3000) in your browser. + +## Features +- Express server setup +- Modular routing + +## License +ISC \ No newline at end of file diff --git a/cli/azd/extensions/azd.ai.start/USAGE.md b/cli/azd/extensions/azd.ai.start/USAGE.md new file mode 100644 index 00000000000..7218badc825 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/USAGE.md @@ -0,0 +1,50 @@ +# Azure AI Agent - Multi-turn Chat Demo + +Your Azure AI Agent now supports two modes: + +## 1. 
Single Query Mode +For one-time questions, pass the query as arguments: +```bash +azd.ai.start.exe "How do I deploy a Node.js app to Azure?" +``` + +## 2. Interactive Chat Mode +For multi-turn conversations, run without arguments: +```bash +azd.ai.start.exe +``` + +In interactive mode, you'll see: +- 🤖 Welcome message with instructions +- 💬 You: prompt for your input +- 🤖 AI Agent: responses with context awareness +- Type 'exit' or 'quit' to end the session +- Maintains conversation history for context + +### Features: +- ✅ **Context Aware**: Remembers previous messages in the conversation +- ✅ **Azure Focused**: Specialized for Azure development tasks +- ✅ **Easy Exit**: Type 'exit', 'quit', or Ctrl+C to quit +- ✅ **Memory Management**: Keeps last 10 exchanges to prevent context overflow +- ✅ **Error Handling**: Gracefully handles errors and continues the conversation + +### Example Interactive Session: +``` +🤖 Azure AI Agent - Interactive Chat Mode +Type 'exit', 'quit', or press Ctrl+C to exit +═══════════════════════════════════════════════ + +💬 You: What is Azure App Service? + +🤖 AI Agent: Azure App Service is a platform-as-a-service (PaaS)... + +💬 You: How do I deploy to it? + +🤖 AI Agent: Based on our previous discussion about App Service... + +💬 You: exit + +👋 Goodbye! Thanks for using Azure AI Agent! +``` + +The agent maintains conversation context, so follow-up questions work naturally! diff --git a/cli/azd/extensions/azd.ai.start/build.ps1 b/cli/azd/extensions/azd.ai.start/build.ps1 new file mode 100644 index 00000000000..8cdd4ae9281 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/build.ps1 @@ -0,0 +1,71 @@ +# Get the directory of the script +$EXTENSION_DIR = Split-Path -Parent $MyInvocation.MyCommand.Path + +# Change to the script directory +Set-Location -Path $EXTENSION_DIR + +# Create a safe version of EXTENSION_ID replacing dots with dashes +$EXTENSION_ID_SAFE = $env:EXTENSION_ID -replace '\.', '-' + +# Define output directory +$OUTPUT_DIR = if ($env:OUTPUT_DIR) { $env:OUTPUT_DIR } else { Join-Path $EXTENSION_DIR "bin" } + +# Create output directory if it doesn't exist +if (-not (Test-Path -Path $OUTPUT_DIR)) { + New-Item -ItemType Directory -Path $OUTPUT_DIR | Out-Null +} + +# Get Git commit hash and build date +$COMMIT = git rev-parse HEAD +$BUILD_DATE = (Get-Date -Format "yyyy-MM-ddTHH:mm:ssZ") + +# List of OS and architecture combinations +if ($env:EXTENSION_PLATFORM) { + $PLATFORMS = @($env:EXTENSION_PLATFORM) +} +else { + $PLATFORMS = @( + "windows/amd64", + "windows/arm64", + "darwin/amd64", + "darwin/arm64", + "linux/amd64", + "linux/arm64" + ) +} + +$APP_PATH = "$env:EXTENSION_ID/internal/cmd" + +# Loop through platforms and build +foreach ($PLATFORM in $PLATFORMS) { + $OS, $ARCH = $PLATFORM -split '/' + + $OUTPUT_NAME = Join-Path $OUTPUT_DIR "$EXTENSION_ID_SAFE-$OS-$ARCH" + + if ($OS -eq "windows") { + $OUTPUT_NAME += ".exe" + } + + Write-Host "Building for $OS/$ARCH..." + + # Delete the output file if it already exists + if (Test-Path -Path $OUTPUT_NAME) { + Remove-Item -Path $OUTPUT_NAME -Force + } + + # Set environment variables for Go build + $env:GOOS = $OS + $env:GOARCH = $ARCH + + go build ` + -ldflags="-X '$APP_PATH.Version=$env:EXTENSION_VERSION' -X '$APP_PATH.Commit=$COMMIT' -X '$APP_PATH.BuildDate=$BUILD_DATE'" ` + -o $OUTPUT_NAME + + if ($LASTEXITCODE -ne 0) { + Write-Host "An error occurred while building for $OS/$ARCH" + exit 1 + } +} + +Write-Host "Build completed successfully!" +Write-Host "Binaries are located in the $OUTPUT_DIR directory." 
diff --git a/cli/azd/extensions/azd.ai.start/build.sh b/cli/azd/extensions/azd.ai.start/build.sh new file mode 100644 index 00000000000..f1a995ec5e9 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/build.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +# Get the directory of the script +EXTENSION_DIR="$(cd "$(dirname "$0")" && pwd)" + +# Change to the script directory +cd "$EXTENSION_DIR" || exit + +# Create a safe version of EXTENSION_ID replacing dots with dashes +EXTENSION_ID_SAFE="${EXTENSION_ID//./-}" + +# Define output directory +OUTPUT_DIR="${OUTPUT_DIR:-$EXTENSION_DIR/bin}" + +# Create output and target directories if they don't exist +mkdir -p "$OUTPUT_DIR" + +# Get Git commit hash and build date +COMMIT=$(git rev-parse HEAD) +BUILD_DATE=$(date -u +%Y-%m-%dT%H:%M:%SZ) + +# List of OS and architecture combinations +if [ -n "$EXTENSION_PLATFORM" ]; then + PLATFORMS=("$EXTENSION_PLATFORM") +else + PLATFORMS=( + "windows/amd64" + "windows/arm64" + "darwin/amd64" + "darwin/arm64" + "linux/amd64" + "linux/arm64" + ) +fi + +APP_PATH="$EXTENSION_ID/internal/cmd" + +# Loop through platforms and build +for PLATFORM in "${PLATFORMS[@]}"; do + OS=$(echo "$PLATFORM" | cut -d'/' -f1) + ARCH=$(echo "$PLATFORM" | cut -d'/' -f2) + + OUTPUT_NAME="$OUTPUT_DIR/$EXTENSION_ID_SAFE-$OS-$ARCH" + + if [ "$OS" = "windows" ]; then + OUTPUT_NAME+='.exe' + fi + + echo "Building for $OS/$ARCH..." + + # Delete the output file if it already exists + [ -f "$OUTPUT_NAME" ] && rm -f "$OUTPUT_NAME" + + # Set environment variables for Go build + GOOS=$OS GOARCH=$ARCH go build \ + -ldflags="-X '$APP_PATH.Version=$EXTENSION_VERSION' -X '$APP_PATH.Commit=$COMMIT' -X '$APP_PATH.BuildDate=$BUILD_DATE'" \ + -o "$OUTPUT_NAME" + + if [ $? -ne 0 ]; then + echo "An error occurred while building for $OS/$ARCH" + exit 1 + fi +done + +echo "Build completed successfully!" +echo "Binaries are located in the $OUTPUT_DIR directory." 
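Both build scripts inject `Version`, `Commit`, and `BuildDate` through `-ldflags "-X ..."` into the extension's `internal/cmd` package; for that injection to take effect, the package must declare matching exported string variables. A minimal sketch of the receiving side, assuming those variable names; the default values and the `VersionString` helper are illustrative only.

```go
// Package cmd receives build metadata injected at compile time via
// -ldflags "-X '<module>/internal/cmd.Version=...'" from build.ps1 / build.sh.
package cmd

import "fmt"

var (
	// Version is replaced at build time with EXTENSION_VERSION.
	Version = "0.0.0-dev"
	// Commit is replaced at build time with the current git commit hash.
	Commit = "none"
	// BuildDate is replaced at build time with the UTC build timestamp.
	BuildDate = "unknown"
)

// VersionString formats the injected metadata for display.
func VersionString() string {
	return fmt.Sprintf("%s (commit: %s, built: %s)", Version, Commit, BuildDate)
}
```

Because `-X` only overrides package-level string variables, keeping these as plain `string` declarations (not constants) is what allows the scripts' version stamping to work.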
diff --git a/cli/azd/extensions/azd.ai.start/changelog.md b/cli/azd/extensions/azd.ai.start/changelog.md new file mode 100644 index 00000000000..b88d613cce0 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/changelog.md @@ -0,0 +1,3 @@ +# Release History + +## 0.0.1 - Initial Version \ No newline at end of file diff --git a/cli/azd/extensions/azd.ai.start/extension.yaml b/cli/azd/extensions/azd.ai.start/extension.yaml new file mode 100644 index 00000000000..2c645db27b3 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/extension.yaml @@ -0,0 +1,9 @@ +capabilities: + - custom-commands +description: Enables interactive AI agent through AZD +displayName: AZD AI Agent +id: azd.ai.start +language: go +namespace: ai.chat +usage: azd ai.chat [options] +version: 0.0.1 diff --git a/cli/azd/extensions/azd.ai.start/go.mod b/cli/azd/extensions/azd.ai.start/go.mod new file mode 100644 index 00000000000..c840c72262d --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/go.mod @@ -0,0 +1,60 @@ +module azd.ai.start + +go 1.24.1 + +require ( + github.com/fatih/color v1.18.0 + github.com/spf13/cobra v1.9.1 + github.com/tmc/langchaingo v0.1.13 +) + +require ( + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.3.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c // indirect + github.com/dlclark/regexp2 v1.10.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/goph/emperror v0.17.2 // indirect + github.com/huandu/xstrings v1.3.3 // indirect + github.com/imdario/mergo v0.3.13 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect + github.com/mitchellh/copystructure v1.0.0 // indirect + github.com/mitchellh/reflectwalk v1.0.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/nikolalohinski/gonja v1.5.3 // indirect + github.com/pelletier/go-toml/v2 v2.0.9 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pkoukk/tiktoken-go v0.1.6 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/shopspring/decimal v1.2.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spf13/cast v1.3.1 // indirect + github.com/spf13/pflag v1.0.6 // indirect + github.com/stretchr/testify v1.10.0 // indirect + github.com/yargevad/filepathx v1.0.0 // indirect + go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/otel/metric v1.35.0 // indirect + go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 // indirect + golang.org/x/crypto v0.37.0 // indirect + golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect + golang.org/x/net v0.39.0 // indirect + golang.org/x/oauth2 v0.25.0 // indirect + golang.org/x/sync v0.13.0 // indirect + golang.org/x/sys v0.32.0 // indirect + golang.org/x/text v0.24.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250407143221-ac9807e6c755 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250407143221-ac9807e6c755 // indirect + google.golang.org/grpc v1.71.1 // indirect + 
google.golang.org/protobuf v1.36.6 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/cli/azd/extensions/azd.ai.start/go.sum b/cli/azd/extensions/azd.ai.start/go.sum new file mode 100644 index 00000000000..bc863f91c5c --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/go.sum @@ -0,0 +1,352 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.114.0 h1:OIPFAdfrFDFO2ve2U7r/H5SwSbBzEdrBdE7xkgwc+kY= +cloud.google.com/go v0.114.0/go.mod h1:ZV9La5YYxctro1HTPug5lXH/GefROyW8PPD4T8n9J8E= +cloud.google.com/go/ai v0.7.0 h1:P6+b5p4gXlza5E+u7uvcgYlzZ7103ACg70YdZeC6oGE= +cloud.google.com/go/ai v0.7.0/go.mod h1:7ozuEcraovh4ABsPbrec3o4LmFl9HigNI3D5haxYeQo= +cloud.google.com/go/aiplatform v1.68.0 h1:EPPqgHDJpBZKRvv+OsB3cr0jYz3EL2pZ+802rBPcG8U= +cloud.google.com/go/aiplatform v1.68.0/go.mod h1:105MFA3svHjC3Oazl7yjXAmIR89LKhRAeNdnDKJczME= +cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw= +cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s= +cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= +cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= +cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU= +cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng= +cloud.google.com/go/vertexai v0.12.0 h1:zTadEo/CtsoyRXNx3uGCncoWAP1H2HakGqwznt+iMo8= +cloud.google.com/go/vertexai v0.12.0/go.mod h1:8u+d0TsvBfAAd2x5R6GMgbYhsLgo3J7lmP4bR8g2ig8= +github.com/AssemblyAI/assemblyai-go-sdk v1.3.0 h1:AtOVgGxUycvK4P4ypP+1ZupecvFgnfH+Jsum0o5ILoU= +github.com/AssemblyAI/assemblyai-go-sdk v1.3.0/go.mod h1:H0naZbvpIW49cDA5ZZ/gggeXqi7ojSGB1mqshRk6kNE= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= +github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/PuerkitoBio/goquery v1.8.1 h1:uQxhNlArOIdbrH1tr0UXwdVFgDcZDrZVdcpygAcwmWM= +github.com/PuerkitoBio/goquery v1.8.1/go.mod h1:Q8ICL1kNUJ2sXGoAhPGUdYDJvgQgHzJsnnd3H7Ho5jQ= +github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o= +github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss= +github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= +github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= 
+github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= +github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c h1:pi62a7GwfbxvZDXhV4DfhxeePzpVCoyr9/rZaWH5eow= +github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c/go.mod h1:mSTaPODklWyhruY0DZgPw1DI97K5cHXfU3afMqGf0IM= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0= +github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/getzep/zep-go v1.0.4 h1:09o26bPP2RAPKFjWuVWwUWLbtFDF/S8bfbilxzeZAAg= +github.com/getzep/zep-go v1.0.4/go.mod h1:HC1Gz7oiyrzOTvzeKC4dQKUiUy87zpIJl0ZFXXdHuss= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr 
v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/generative-ai-go v0.15.1 h1:n8aQUpvhPOlGVuM2DRkJ2jvx04zpp42B778AROJa+pQ= +github.com/google/generative-ai-go v0.15.1/go.mod h1:AAucpWZjXsDKhQYWvCYuP6d0yB1kX998pJlOW1rAesw= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 
h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= +github.com/goph/emperror v0.17.2 h1:yLapQcmEsO0ipe9p5TaN22djm3OFV/TfM/fcYP0/J18= +github.com/goph/emperror v0.17.2/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic= +github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= +github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= +github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80 h1:6Yzfa6GP0rIo/kULo2bwGEkFvCePZ3qHDDTC3/J9Swo= +github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= +github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58= 
+github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/nikolalohinski/gonja v1.5.3 h1:GsA+EEaZDZPGJ8JtpeGN78jidhOlxeJROpqMT9fTj9c= +github.com/nikolalohinski/gonja v1.5.3/go.mod h1:RmjwxNiXAEqcq1HeK5SSMmqFJvKOfTfXhkJv6YBtPa4= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0= +github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkoukk/tiktoken-go v0.1.6 h1:JF0TlJzhTbrI30wCvFuiw6FzP2+/bR+FIxUdgEAcUsw= +github.com/pkoukk/tiktoken-go v0.1.6/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6 
h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tmc/langchaingo v0.1.13 h1:rcpMWBIi2y3B90XxfE4Ao8dhCQPVDMaNPnN5cGB1CaA= +github.com/tmc/langchaingo v0.1.13/go.mod h1:vpQ5NOIhpzxDfTZK9B6tf2GM/MoaHewPWM5KXXGh7hg= +github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= +github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc= +github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181 h1:K+bMSIx9A7mLES1rtG+qKduLIXq40DAzYHtb0XuCukA= +gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181/go.mod h1:dzYhVIwWCtzPAa4QP98wfB9+mzt33MSmM8wsKiMi2ow= +gitlab.com/golang-commonmark/linkify v0.0.0-20191026162114-a0c2df6c8f82 h1:oYrL81N608MLZhma3ruL8qTM4xcpYECGut8KSxRY59g= +gitlab.com/golang-commonmark/linkify v0.0.0-20191026162114-a0c2df6c8f82/go.mod h1:Gn+LZmCrhPECMD3SOKlE+BOHwhOYD9j7WT9NUtkCrC8= +gitlab.com/golang-commonmark/markdown v0.0.0-20211110145824-bf3e522c626a h1:O85GKETcmnCNAfv4Aym9tepU8OE0NmcZNqPlXcsBKBs= +gitlab.com/golang-commonmark/markdown v0.0.0-20211110145824-bf3e522c626a/go.mod h1:LaSIs30YPGs1H5jwGgPhLzc8vkNc/k0rDX/fEZqiU/M= +gitlab.com/golang-commonmark/mdurl v0.0.0-20191124015652-932350d1cb84 h1:qqjvoVXdWIcZCLPMlzgA7P9FZWdPGPvP/l3ef8GzV6o= +gitlab.com/golang-commonmark/mdurl v0.0.0-20191124015652-932350d1cb84/go.mod h1:IJZ+fdMvbW2qW6htJx7sLJ04FEs4Ldl/MDsJtMKywfw= +gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f h1:Wku8eEdeJqIOFHtrfkYUByc4bCaTeA6fL0UJgfEiFMI= +gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f/go.mod h1:Tiuhl+njh/JIg0uS/sOJVYi0x2HEa5rc1OAaVsb5tAs= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= 
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 h1:A3SayB3rNyt+1S6qpI9mHPkeHTZbD7XILEqWnYZb2l0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0/go.mod h1:27iA5uvhuRNmalO+iEUdVn5ZMj2qy10Mm+XRIpRmyuU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 h1:Ss6D3hLXTM0KobyBYEAygXzFfGcjnmfEJOBgSbemCtg= +go.starlark.net v0.0.0-20230302034142-4b1e35fe2254/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= +golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= 
+golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE= +google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20240528184218-531527333157 h1:u7WMYrIrVvs0TF5yaKwKNbcJyySYf+HAIFXxWltJOXE= +google.golang.org/genproto v0.0.0-20240528184218-531527333157/go.mod h1:ubQlAQnzejB8uZzszhrTCU2Fyp6Vi7ZE5nn0c3W8+qQ= +google.golang.org/genproto/googleapis/api v0.0.0-20250407143221-ac9807e6c755 h1:AMLTAunltONNuzWgVPZXrjLWtXpsG6A3yLLPEoJ/IjU= +google.golang.org/genproto/googleapis/api v0.0.0-20250407143221-ac9807e6c755/go.mod h1:2R6XrVC8Oc08GlNh8ujEpc7HkLiEZ16QeY7FxIs20ac= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250407143221-ac9807e6c755 h1:TwXJCGVREgQ/cl18iY0Z4wJCTL/GmW+Um2oSwZiZPnc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250407143221-ac9807e6c755/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI= +google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go new file mode 100644 index 00000000000..ba8c284aa52 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go @@ -0,0 +1,362 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package agent + +import ( + "context" + "fmt" + "time" + + "github.com/tmc/langchaingo/agents" + "github.com/tmc/langchaingo/schema" + "github.com/tmc/langchaingo/tools" + + "azd.ai.start/internal/logging" + "azd.ai.start/internal/session" + "azd.ai.start/internal/utils" + "azd.ai.start/internal/validation" +) + +// AzureAIAgent represents an enhanced Azure AI agent with action tracking and intent validation +type AzureAIAgent struct { + agent *agents.ConversationalAgent + executor *agents.Executor + memory schema.Memory + tools []tools.Tool + intentValidator *validation.IntentValidator + actionLogger *logging.ActionLogger + currentSession *session.ActionSession +} + +// ProcessQuery processes a user query with full action tracking and validation +func (aai *AzureAIAgent) ProcessQuery(ctx context.Context, userInput string) (*AgentResponse, error) { + // Start new action session + sess := session.NewActionSession(userInput) + aai.currentSession = sess + + fmt.Printf("\n🎯 Intent: %s\n", userInput) + fmt.Printf("📋 Planning and executing actions...\n") + fmt.Println("═══════════════════════════════════════") + + // Clear previous actions + aai.actionLogger.Clear() + + // Enhanced user input with explicit completion requirements + enhancedInput := fmt.Sprintf(`%s + +IMPORTANT: You must complete this task successfully. Do not stop until: +1. All required actions have been executed +2. Any files that need to be created are actually saved +3. You verify the results of your actions +4. The task is fully accomplished + +If a tool fails, analyze why and try again with corrections. If you need to create files, use the write_file tool with the complete content.`, userInput) + + // Execute with enhanced input + result, err := aai.executor.Call(ctx, map[string]any{ + "input": enhancedInput, + }) + + if err != nil { + sess.End() + fmt.Printf("❌ Execution failed: %s\n", err.Error()) + return nil, err + } + + // Get executed actions from logger and intermediate steps + executedActions := aai.actionLogger.GetActions() + for _, action := range executedActions { + sess.AddExecutedAction(action) + } + + // If no actions in logger but we have intermediate steps, extract them + if len(sess.ExecutedActions) == 0 { + if steps, ok := result["intermediateSteps"].([]schema.AgentStep); ok && len(steps) > 0 { + for _, step := range steps { + actionLog := session.ActionLog{ + Timestamp: time.Now(), + Action: step.Action.Tool, + Tool: step.Action.Tool, + Input: step.Action.ToolInput, + Output: step.Observation, + Success: true, + Duration: time.Millisecond * 100, // Approximate + } + sess.AddExecutedAction(actionLog) + } + } + } + + // Check if any actions were taken - if not, this was likely conversational + if len(sess.ExecutedActions) == 0 { + fmt.Printf("💬 No tool actions needed - appears to be conversational\n") + + sess.End() + validationResult := &validation.ValidationResult{ + Status: validation.ValidationComplete, + Explanation: "Conversational response - no actions required", + Confidence: 1.0, + } + sess.SetValidationResult(validationResult) + + // Display simple summary for conversational responses + fmt.Println("\n📊 Session Summary") + fmt.Println("═══════════════════════════════════════") + duration := sess.EndTime.Sub(sess.StartTime) + fmt.Printf("⏱️ Duration: %v\n", duration.Round(time.Millisecond)) + fmt.Println("\n💬 Conversational response - no tool actions needed") + fmt.Printf("🎯 Intent Status: %s (%.1f%% confidence)\n", validationResult.Status, validationResult.Confidence*100) + 
fmt.Println("═══════════════════════════════════════") + + return NewAgentResponse(result["output"].(string), sess, validationResult), nil + } + + // Actions were taken, so validate and potentially retry + var lastResult = result + var lastValidation *validation.ValidationResult + maxAttempts := 3 // Maximum retry attempts for incomplete tasks + + for attempt := 1; attempt <= maxAttempts; attempt++ { + // Validate intent completion with enhanced validation + fmt.Printf("\n🔍 Validating completion...\n") + validationResult := aai.intentValidator.ValidateCompletion( + userInput, + sess.ExecutedActions, + ) + lastValidation = validationResult + sess.SetValidationResult(validationResult) + + // Check if task is complete + if validationResult.Status == validation.ValidationComplete { + fmt.Printf("✅ Task completed successfully!\n") + break + } + + // If task is incomplete and we have more attempts, retry + if attempt < maxAttempts { + if validationResult.Status == validation.ValidationIncomplete || validationResult.Status == validation.ValidationPartial { + fmt.Printf("⚠️ Task incomplete (attempt %d/%d): %s\n", attempt, maxAttempts, validationResult.Explanation) + fmt.Printf("🔄 Analyzing what's missing and taking corrective action...\n") + + // Clear previous actions for retry + aai.actionLogger.Clear() + + // Enhanced retry with feedback about what was incomplete + retryInput := fmt.Sprintf(`%s + +IMPORTANT: You must complete this task successfully. Do not stop until: +1. All required actions have been executed +2. Any files that need to be created are actually saved +3. You verify the results of your actions +4. The task is fully accomplished + +PREVIOUS ATTEMPT ANALYSIS: The previous attempt was marked as %s. +Reason: %s + +Please analyze what was missing or incomplete and take the necessary additional actions to fully complete the task.`, + userInput, validationResult.Status, validationResult.Explanation) + + // Execute retry + retryResult, err := aai.executor.Call(ctx, map[string]any{ + "input": retryInput, + }) + + if err != nil { + fmt.Printf("❌ Retry attempt %d failed: %s\n", attempt+1, err.Error()) + if attempt == maxAttempts-1 { + sess.End() + return nil, err + } + continue + } + + lastResult = retryResult + + // Get new actions from this retry + retryActions := aai.actionLogger.GetActions() + if len(retryActions) == 0 { + if steps, ok := retryResult["intermediateSteps"].([]schema.AgentStep); ok && len(steps) > 0 { + for _, step := range steps { + actionLog := session.ActionLog{ + Timestamp: time.Now(), + Action: step.Action.Tool, + Tool: step.Action.Tool, + Input: step.Action.ToolInput, + Output: step.Observation, + Success: true, + Duration: time.Millisecond * 100, + } + retryActions = append(retryActions, actionLog) + } + } + } + + // Accumulate actions from retry + for _, action := range retryActions { + sess.AddExecutedAction(action) + } + continue + } + } else { + // This was the last attempt and still incomplete + fmt.Printf("⚠️ Task still incomplete after %d attempts: %s\n", maxAttempts, validationResult.Explanation) + fmt.Printf("💡 Consider:\n") + fmt.Printf(" - Breaking the task into smaller, more specific steps\n") + fmt.Printf(" - Checking if all required files were actually created\n") + fmt.Printf(" - Verifying tool outputs were successful\n") + } + } + + sess.End() + + // Display comprehensive summary + aai.displayCompleteSummary(sess, lastResult) + + return NewAgentResponse(lastResult["output"].(string), sess, lastValidation), nil +} + +// ProcessQueryWithRetry 
processes a query with automatic retry on failure +func (aai *AzureAIAgent) ProcessQueryWithRetry(ctx context.Context, userInput string, maxRetries int) (*AgentResponse, error) { + var lastErr error + var lastResponse *AgentResponse + + for attempt := 1; attempt <= maxRetries; attempt++ { + fmt.Printf("\n🔄 Attempt %d/%d\n", attempt, maxRetries) + + response, err := aai.ProcessQuery(ctx, userInput) + if err != nil { + lastErr = err + fmt.Printf("❌ Attempt %d failed: %s\n", attempt, err.Error()) + continue + } + + lastResponse = response + + // Check if task completed successfully + if response.Validation.Status == validation.ValidationComplete { + fmt.Printf("✅ Task completed successfully on attempt %d\n", attempt) + return response, nil + } + + if response.Validation.Status == validation.ValidationPartial { + fmt.Printf("⚠️ Partial completion on attempt %d: %s\n", attempt, response.Validation.Explanation) + } else { + fmt.Printf("❌ Task incomplete on attempt %d: %s\n", attempt, response.Validation.Explanation) + } + + // Clear memory for fresh retry + aai.ClearMemory(ctx) + } + + if lastResponse != nil { + return lastResponse, nil + } + + return nil, fmt.Errorf("all %d attempts failed, last error: %w", maxRetries, lastErr) +} + +// GetSessionStats returns statistics about the current session +func (aai *AzureAIAgent) GetSessionStats() *SessionStats { + if aai.currentSession == nil { + return &SessionStats{} + } + + stats := &SessionStats{ + TotalActions: len(aai.currentSession.ExecutedActions), + SuccessfulActions: 0, + FailedActions: 0, + TotalDuration: aai.currentSession.EndTime.Sub(aai.currentSession.StartTime), + } + + for _, action := range aai.currentSession.ExecutedActions { + if action.Success { + stats.SuccessfulActions++ + } else { + stats.FailedActions++ + } + } + + return stats +} + +// GetMemoryContent returns the current memory content for debugging +func (aai *AzureAIAgent) GetMemoryContent(ctx context.Context) (map[string]any, error) { + return aai.memory.LoadMemoryVariables(ctx, map[string]any{}) +} + +// ClearMemory clears the conversation memory +func (aai *AzureAIAgent) ClearMemory(ctx context.Context) error { + return aai.memory.Clear(ctx) +} + +// EnableVerboseLogging enables detailed iteration logging +func (aai *AzureAIAgent) EnableVerboseLogging() { + // This would enable more detailed logging in the action logger + fmt.Println("🔍 Verbose logging enabled - you'll see detailed iteration steps") +} + +// displayCompleteSummary displays a comprehensive summary of the session +func (aai *AzureAIAgent) displayCompleteSummary(sess *session.ActionSession, result map[string]any) { + fmt.Println("\n📊 Session Summary") + fmt.Println("═══════════════════════════════════════") + + // Display timing + duration := sess.EndTime.Sub(sess.StartTime) + fmt.Printf("⏱️ Duration: %v\n", duration.Round(time.Millisecond)) + + // Display actions with attempt grouping + if len(sess.ExecutedActions) > 0 { + fmt.Println("\n🔧 Actions Executed:") + for i, action := range sess.ExecutedActions { + status := "✅" + if !action.Success { + status = "❌" + } + fmt.Printf(" %s %d. 
%s (%v)\n", + status, i+1, + utils.TruncateString(action.Input, 50), + action.Duration.Round(time.Millisecond)) + } + } else { + fmt.Println("\n🔧 No explicit tool actions required") + } + + // Display validation result with enhanced messaging + if validationResult, ok := sess.ValidationResult.(*validation.ValidationResult); ok { + fmt.Printf("\n🎯 Intent Status: %s", validationResult.Status) + if validationResult.Confidence > 0 { + fmt.Printf(" (%.1f%% confidence)", validationResult.Confidence*100) + } + fmt.Println() + + if validationResult.Explanation != "" { + fmt.Printf("💭 Assessment: %s\n", validationResult.Explanation) + } + + // Show completion status with actionable advice + switch validationResult.Status { + case validation.ValidationComplete: + fmt.Printf("🎉 Task completed successfully!\n") + case validation.ValidationPartial: + fmt.Printf("⚠️ Task partially completed. Some aspects may need attention.\n") + case validation.ValidationIncomplete: + fmt.Printf("❌ Task incomplete. Additional actions may be needed.\n") + case validation.ValidationError: + fmt.Printf("⚠️ Validation error. Please review the actions taken.\n") + } + } + + // Display intermediate steps if available + if steps, ok := result["intermediateSteps"].([]schema.AgentStep); ok && len(steps) > 0 { + fmt.Printf("\n🔍 Reasoning Steps: %d\n", len(steps)) + for i, step := range steps { + fmt.Printf("Step %d:\n", i+1) + fmt.Printf(" Tool: %s\n", step.Action.Tool) + fmt.Printf(" Input: %s\n", step.Action.ToolInput) + fmt.Printf(" Observation: %s\n", utils.TruncateString(step.Observation, 200)) + } + } + + fmt.Println("═══════════════════════════════════════") +} diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/factory.go b/cli/azd/extensions/azd.ai.start/internal/agent/factory.go new file mode 100644 index 00000000000..3b979181591 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/agent/factory.go @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package agent + +import ( + "github.com/tmc/langchaingo/agents" + "github.com/tmc/langchaingo/llms/openai" + "github.com/tmc/langchaingo/memory" + "github.com/tmc/langchaingo/tools" + + "azd.ai.start/internal/logging" + mytools "azd.ai.start/internal/tools" + "azd.ai.start/internal/validation" +) + +// CreateAzureAIAgent creates a new enhanced Azure AI agent +func CreateAzureAIAgent(llm *openai.LLM) *AzureAIAgent { + // 1. Smart Memory with conversation buffer + smartMemory := memory.NewConversationBuffer() + + // 2. Action Logger with comprehensive callbacks + actionLogger := logging.NewActionLogger() + + // 3. Enhanced Tools - just the essentials + tools := []tools.Tool{ + // Directory operations + mytools.DirectoryListTool{}, + mytools.CreateDirectoryTool{}, + mytools.DeleteDirectoryTool{}, + mytools.ChangeDirectoryTool{}, + mytools.CurrentDirectoryTool{}, + + // File operations + mytools.ReadFileTool{}, + mytools.WriteFileTool{}, + mytools.CopyFileTool{}, + mytools.MoveFileTool{}, + mytools.DeleteFileTool{}, + mytools.FileInfoTool{}, + + // Other tools + mytools.HTTPFetcherTool{}, + mytools.WeatherTool{}, + tools.Calculator{}, + } + + // 4. Create agent with default settings + agent := agents.NewConversationalAgent(llm, tools) + + // 5. 
Enhanced Executor with aggressive completion settings + executor := agents.NewExecutor(agent, + agents.WithMemory(smartMemory), + agents.WithMaxIterations(1000), // Much higher limit for complex multi-step processes + agents.WithReturnIntermediateSteps(), + ) + + return &AzureAIAgent{ + agent: agent, + executor: executor, + memory: smartMemory, + tools: tools, + intentValidator: validation.NewIntentValidator(llm), + actionLogger: actionLogger, + } +} diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/response.go b/cli/azd/extensions/azd.ai.start/internal/agent/response.go new file mode 100644 index 00000000000..5836fd13f50 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/agent/response.go @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package agent + +import ( + "azd.ai.start/internal/session" + "azd.ai.start/internal/validation" +) + +// AgentResponse represents the complete response from the agent +type AgentResponse struct { + Output string + Session *session.ActionSession + Validation *validation.ValidationResult +} + +// NewAgentResponse creates a new agent response +func NewAgentResponse(output string, sess *session.ActionSession, validationResult *validation.ValidationResult) *AgentResponse { + return &AgentResponse{ + Output: output, + Session: sess, + Validation: validationResult, + } +} diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/stats.go b/cli/azd/extensions/azd.ai.start/internal/agent/stats.go new file mode 100644 index 00000000000..d7649186ebc --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/agent/stats.go @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package agent + +import ( + "time" +) + +// SessionStats provides statistics about an agent session +type SessionStats struct { + TotalActions int + SuccessfulActions int + FailedActions int + TotalDuration time.Duration +} diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go new file mode 100644 index 00000000000..3ebc8c17eff --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go @@ -0,0 +1,100 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package cmd + +import ( + "bufio" + "context" + "fmt" + "os" + "strings" + + "github.com/tmc/langchaingo/llms/openai" + + "azd.ai.start/internal/agent" +) + +// RunEnhancedAzureAgent runs the enhanced Azure AI agent with full capabilities +func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) error { + // Create the enhanced agent + azureAgent := agent.CreateAzureAIAgent(llm) + + fmt.Println("🤖 Enhanced Azure AI Agent - Interactive Mode") + fmt.Println("Features: Action Tracking | Intent Validation | Smart Memory") + fmt.Println("═══════════════════════════════════════════════════════════") + + // Handle initial query if provided + var initialQuery string + if len(args) > 0 { + initialQuery = strings.Join(args, " ") + } + + scanner := bufio.NewScanner(os.Stdin) + + for { + var userInput string + + if initialQuery != "" { + userInput = initialQuery + initialQuery = "" // Clear after first use + fmt.Printf("💬 You: %s\n", userInput) + } else { + fmt.Print("\n💬 You: ") + if !scanner.Scan() { + break // EOF or error + } + userInput = strings.TrimSpace(scanner.Text()) + } + + // Check for exit commands + if userInput == "" { + continue + } + + if strings.ToLower(userInput) == "exit" || strings.ToLower(userInput) == "quit" { + fmt.Println("👋 Goodbye! Thanks for using the Enhanced Azure AI Agent!") + break + } + + // Special commands + if strings.ToLower(userInput) == "clear" { + err := azureAgent.ClearMemory(ctx) + if err != nil { + fmt.Printf("❌ Failed to clear memory: %s\n", err.Error()) + } else { + fmt.Println("🧹 Memory cleared!") + } + continue + } + + if strings.ToLower(userInput) == "stats" { + stats := azureAgent.GetSessionStats() + fmt.Printf("📊 Session Stats:\n") + fmt.Printf(" Total Actions: %d\n", stats.TotalActions) + fmt.Printf(" Successful: %d\n", stats.SuccessfulActions) + fmt.Printf(" Failed: %d\n", stats.FailedActions) + if stats.TotalDuration > 0 { + fmt.Printf(" Duration: %v\n", stats.TotalDuration) + } + continue + } + + // Process the query with the enhanced agent + fmt.Printf("\n🤖 Enhanced AI Agent:\n") + response, err := azureAgent.ProcessQuery(ctx, userInput) + if err != nil { + fmt.Printf("❌ Error: %v\n", err) + continue + } + + // Display the final response + fmt.Printf("\n💬 Final Response:\n%s\n", response.Output) + } + + if err := scanner.Err(); err != nil { + return fmt.Errorf("error reading input: %w", err) + } + + return nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go new file mode 100644 index 00000000000..a202faf1872 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go @@ -0,0 +1,97 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+
+package cmd
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/azure/azure-dev/cli/azd/pkg/azdext"
+	"github.com/spf13/cobra"
+	"github.com/tmc/langchaingo/llms/openai"
+)
+
+func NewRootCommand() *cobra.Command {
+	rootCmd := &cobra.Command{
+		Use:           "azd ai.chat [options]",
+		Short:         "Enables an interactive AI agent through AZD",
+		SilenceUsage:  true,
+		SilenceErrors: true,
+		CompletionOptions: cobra.CompletionOptions{
+			DisableDefaultCmd: true,
+		},
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return runAIAgent(cmd.Context(), args)
+		},
+	}
+
+	return rootCmd
+}
+
+type AiModelConfig struct {
+	Endpoint       string `json:"endpoint"`
+	ApiKey         string `json:"apiKey"`
+	DeploymentName string `json:"deploymentName"`
+}
+
+// runAIAgent creates and runs the enhanced AI agent using LangChain Go
+func runAIAgent(ctx context.Context, args []string) error {
+	// Create a new context that includes the AZD access token
+	ctx = azdext.WithAccessToken(ctx)
+
+	// Create a new AZD client
+	azdClient, err := azdext.NewAzdClient()
+	if err != nil {
+		return fmt.Errorf("failed to create azd client: %w", err)
+	}
+
+	defer azdClient.Close()
+
+	getSectionResponse, err := azdClient.
+		UserConfig().
+		GetSection(ctx, &azdext.GetUserConfigSectionRequest{
+			Path: "ai.chat.model",
+		})
+	if err != nil {
+		return fmt.Errorf("AI model configuration not found: %w", err)
+	}
+
+	var aiConfig *AiModelConfig
+	if err := json.Unmarshal(getSectionResponse.Section, &aiConfig); err != nil {
+		return fmt.Errorf("failed to unmarshal AI model configuration: %w", err)
+	}
+
+	// Azure OpenAI API version used for the connection
+	azureAPIVersion := "2024-02-15-preview"
+
+	var llm *openai.LLM
+
+	// Connect using the configured deployment
+	if aiConfig.Endpoint != "" && aiConfig.ApiKey != "" {
+		// Use Azure OpenAI with proper configuration
+		fmt.Printf("🔵 Trying Azure OpenAI with deployment: %s\n", aiConfig.DeploymentName)
+
+		llm, err = openai.New(
+			openai.WithToken(aiConfig.ApiKey),
+			openai.WithBaseURL(aiConfig.Endpoint+"/"),
+			openai.WithAPIType(openai.APITypeAzure),
+			openai.WithAPIVersion(azureAPIVersion),
+			openai.WithModel(aiConfig.DeploymentName),
+		)
+
+		if err == nil {
+			fmt.Printf("✅ Successfully connected with deployment: %s\n", aiConfig.DeploymentName)
+		} else {
+			fmt.Printf("❌ Failed with deployment %s: %v\n", aiConfig.DeploymentName, err)
+		}
+	}
+
+	if llm == nil {
+		return fmt.Errorf("failed to connect to the configured Azure OpenAI deployment")
+	}
+
+	// Use the enhanced Azure AI agent with full capabilities
+	return RunEnhancedAzureAgent(ctx, llm, args)
+}
diff --git a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go
new file mode 100644
index 00000000000..da3a48bd262
--- /dev/null
+++ b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go
@@ -0,0 +1,134 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+ +package logging + +import ( + "context" + "fmt" + "time" + + "github.com/tmc/langchaingo/schema" + + "azd.ai.start/internal/session" + "azd.ai.start/internal/utils" +) + +// ActionLogger tracks and logs all agent actions +type ActionLogger struct { + actions []session.ActionLog + current *session.ActionLog +} + +// NewActionLogger creates a new action logger +func NewActionLogger() *ActionLogger { + return &ActionLogger{ + actions: make([]session.ActionLog, 0), + } +} + +// HandleToolStart is called when a tool execution starts +func (al *ActionLogger) HandleToolStart(ctx context.Context, input string) { + al.current = &session.ActionLog{ + Timestamp: time.Now(), + Input: input, + } + fmt.Printf("🔧 Executing: %s\n", input) +} + +// HandleToolEnd is called when a tool execution ends +func (al *ActionLogger) HandleToolEnd(ctx context.Context, output string) { + if al.current != nil { + al.current.Output = output + al.current.Success = true + al.current.Duration = time.Since(al.current.Timestamp) + al.actions = append(al.actions, *al.current) + fmt.Printf("✅ Result: %s\n", utils.TruncateString(output, 100)) + } +} + +// HandleToolError is called when a tool execution fails +func (al *ActionLogger) HandleToolError(ctx context.Context, err error) { + if al.current != nil { + al.current.Output = err.Error() + al.current.Success = false + al.current.Duration = time.Since(al.current.Timestamp) + al.actions = append(al.actions, *al.current) + fmt.Printf("❌ Error: %s\n", err.Error()) + } +} + +// HandleAgentStart is called when agent planning starts +func (al *ActionLogger) HandleAgentStart(ctx context.Context, input map[string]any) { + if userInput, ok := input["input"].(string); ok { + fmt.Printf("🎯 Processing: %s\n", userInput) + } +} + +// HandleAgentEnd is called when agent planning ends +func (al *ActionLogger) HandleAgentEnd(ctx context.Context, output schema.AgentFinish) { + fmt.Printf("🏁 Agent completed planning\n") +} + +// HandleChainStart is called when chain execution starts +func (al *ActionLogger) HandleChainStart(ctx context.Context, input map[string]any) { + fmt.Printf("🔗 Starting chain execution\n") +} + +// HandleChainEnd is called when chain execution ends +func (al *ActionLogger) HandleChainEnd(ctx context.Context, output map[string]any) { + fmt.Printf("🔗 Chain execution completed\n") +} + +// HandleChainError is called when chain execution fails +func (al *ActionLogger) HandleChainError(ctx context.Context, err error) { + fmt.Printf("🔗 Chain execution failed: %s\n", err.Error()) +} + +// HandleLLMStart is called when LLM call starts +func (al *ActionLogger) HandleLLMStart(ctx context.Context, prompts []string) { + fmt.Printf("🤖 LLM thinking...\n") +} + +// HandleLLMEnd is called when LLM call ends +func (al *ActionLogger) HandleLLMEnd(ctx context.Context, result string) { + fmt.Printf("🤖 LLM response received\n") +} + +// HandleAgentAction is called when an agent action is planned +func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.AgentAction) { + al.current = &session.ActionLog{ + Timestamp: time.Now(), + Action: action.Tool, + Tool: action.Tool, + Input: action.ToolInput, + } + fmt.Printf("🎯 Agent planned action: %s with input: %s\n", action.Tool, action.ToolInput) +} + +// HandleAgentFinish is called when the agent finishes +func (al *ActionLogger) HandleAgentFinish(ctx context.Context, finish schema.AgentFinish) { + fmt.Printf("🏁 Agent finished with result\n") +} + +// HandleLLMError is called when LLM call fails +func (al *ActionLogger) 
HandleLLMError(ctx context.Context, err error) { + fmt.Printf("🤖 LLM error: %s\n", err.Error()) +} + +// HandleStreamingFunc handles streaming responses +func (al *ActionLogger) HandleStreamingFunc(ctx context.Context, chunk []byte) error { + // Optional: Handle streaming output + return nil +} + +// GetActions returns all logged actions +func (al *ActionLogger) GetActions() []session.ActionLog { + return al.actions +} + +// Clear clears all logged actions +func (al *ActionLogger) Clear() { + al.actions = al.actions[:0] + al.current = nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/session/action.go b/cli/azd/extensions/azd.ai.start/internal/session/action.go new file mode 100644 index 00000000000..1111c2c4fed --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/session/action.go @@ -0,0 +1,41 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package session + +import ( + "time" +) + +// ActionLog represents a single action taken by the agent +type ActionLog struct { + Timestamp time.Time + Action string + Tool string + Input string + Output string + Success bool + Duration time.Duration +} + +// NewActionLog creates a new action log +func NewActionLog(tool, input string) *ActionLog { + return &ActionLog{ + Timestamp: time.Now(), + Tool: tool, + Action: tool, + Input: input, + } +} + +// SetOutput sets the output and success status for the action +func (al *ActionLog) SetOutput(output string, success bool) { + al.Output = output + al.Success = success + al.Duration = time.Since(al.Timestamp) +} + +// SetDuration sets the duration for the action +func (al *ActionLog) SetDuration(duration time.Duration) { + al.Duration = duration +} diff --git a/cli/azd/extensions/azd.ai.start/internal/session/session.go b/cli/azd/extensions/azd.ai.start/internal/session/session.go new file mode 100644 index 00000000000..44f0156a912 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/session/session.go @@ -0,0 +1,48 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package session + +import ( + "time" +) + +// ActionSession tracks the current conversation session and actions +type ActionSession struct { + InitialIntent string + PlannedActions []string + ExecutedActions []ActionLog + ValidationResult interface{} // Use interface{} to avoid circular dependency + StartTime time.Time + EndTime time.Time +} + +// NewActionSession creates a new action session +func NewActionSession(initialIntent string) *ActionSession { + return &ActionSession{ + InitialIntent: initialIntent, + PlannedActions: []string{}, + ExecutedActions: []ActionLog{}, + StartTime: time.Now(), + } +} + +// Start marks the session as started +func (as *ActionSession) Start() { + as.StartTime = time.Now() +} + +// End marks the session as ended +func (as *ActionSession) End() { + as.EndTime = time.Now() +} + +// AddExecutedAction adds an executed action to the session +func (as *ActionSession) AddExecutedAction(action ActionLog) { + as.ExecutedActions = append(as.ExecutedActions, action) +} + +// SetValidationResult sets the validation result for the session +func (as *ActionSession) SetValidationResult(result interface{}) { + as.ValidationResult = result +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go new file mode 100644 index 00000000000..ac64b00f30f --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go @@ -0,0 +1,51 @@ +package tools + +import ( + "context" + "fmt" + "os" + "path/filepath" +) + +// ChangeDirectoryTool implements the Tool interface for changing the current working directory +type ChangeDirectoryTool struct{} + +func (t ChangeDirectoryTool) Name() string { + return "change_directory" +} + +func (t ChangeDirectoryTool) Description() string { + return "Change the current working directory. Input: directory path (e.g., '../parent' or './subfolder' or absolute path)" +} + +func (t ChangeDirectoryTool) Call(ctx context.Context, input string) (string, error) { + if input == "" { + return "", fmt.Errorf("directory path is required") + } + + // Get current directory for reference + currentDir, _ := os.Getwd() + + // Convert to absolute path + absPath, err := filepath.Abs(input) + if err != nil { + return "", fmt.Errorf("failed to resolve path %s: %w", input, err) + } + + // Check if directory exists + info, err := os.Stat(absPath) + if err != nil { + return "", fmt.Errorf("directory %s does not exist: %w", absPath, err) + } + if !info.IsDir() { + return "", fmt.Errorf("%s is not a directory", absPath) + } + + // Change directory + err = os.Chdir(absPath) + if err != nil { + return "", fmt.Errorf("failed to change directory to %s: %w", absPath, err) + } + + return fmt.Sprintf("Changed directory from %s to %s", currentDir, absPath), nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go new file mode 100644 index 00000000000..310866dfe40 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go @@ -0,0 +1,71 @@ +package tools + +import ( + "context" + "fmt" + "io" + "os" + "strings" +) + +// CopyFileTool implements the Tool interface for copying files +type CopyFileTool struct{} + +func (t CopyFileTool) Name() string { + return "copy_file" +} + +func (t CopyFileTool) Description() string { + return "Copy a file to a new location. 
Input format: 'source|destination' (e.g., 'file.txt|backup.txt' or './docs/readme.md|./backup/readme.md')" +} + +func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { + if input == "" { + return "", fmt.Errorf("input is required in format 'source|destination'") + } + + // Split on first occurrence of '|' to separate source from destination + parts := strings.SplitN(input, "|", 2) + if len(parts) != 2 { + return "", fmt.Errorf("invalid input format. Use 'source|destination'") + } + + source := strings.TrimSpace(parts[0]) + destination := strings.TrimSpace(parts[1]) + + if source == "" || destination == "" { + return "", fmt.Errorf("both source and destination paths are required") + } + + // Check if source file exists + sourceInfo, err := os.Stat(source) + if err != nil { + return "", fmt.Errorf("source file %s does not exist: %w", source, err) + } + + if sourceInfo.IsDir() { + return "", fmt.Errorf("source %s is a directory. Use copy_directory for directories", source) + } + + // Open source file + sourceFile, err := os.Open(source) + if err != nil { + return "", fmt.Errorf("failed to open source file %s: %w", source, err) + } + defer sourceFile.Close() + + // Create destination file + destFile, err := os.Create(destination) + if err != nil { + return "", fmt.Errorf("failed to create destination file %s: %w", destination, err) + } + defer destFile.Close() + + // Copy contents + bytesWritten, err := io.Copy(destFile, sourceFile) + if err != nil { + return "", fmt.Errorf("failed to copy file: %w", err) + } + + return fmt.Sprintf("Successfully copied %s to %s (%d bytes)", source, destination, bytesWritten), nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go new file mode 100644 index 00000000000..992eb1842fb --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go @@ -0,0 +1,41 @@ +package tools + +import ( + "context" + "fmt" + "os" +) + +// CreateDirectoryTool implements the Tool interface for creating directories +type CreateDirectoryTool struct{} + +func (t CreateDirectoryTool) Name() string { + return "create_directory" +} + +func (t CreateDirectoryTool) Description() string { + return "Create a directory (and any necessary parent directories). 
Input: directory path (e.g., 'docs' or './src/components')" +} + +func (t CreateDirectoryTool) Call(ctx context.Context, input string) (string, error) { + if input == "" { + return "", fmt.Errorf("directory path is required") + } + + err := os.MkdirAll(input, 0755) + if err != nil { + return "", fmt.Errorf("failed to create directory %s: %w", input, err) + } + + // Check if directory already existed or was newly created + info, err := os.Stat(input) + if err != nil { + return "", fmt.Errorf("failed to verify directory creation: %w", err) + } + + if !info.IsDir() { + return "", fmt.Errorf("%s exists but is not a directory", input) + } + + return fmt.Sprintf("Successfully created directory: %s", input), nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go new file mode 100644 index 00000000000..d3cd1ff67b8 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go @@ -0,0 +1,39 @@ +package tools + +import ( + "context" + "fmt" + "os" + + "github.com/tmc/langchaingo/callbacks" +) + +// CurrentDirectoryTool implements the Tool interface for getting current directory +type CurrentDirectoryTool struct { + CallbacksHandler callbacks.Handler +} + +func (t CurrentDirectoryTool) Name() string { + return "cwd" +} + +func (t CurrentDirectoryTool) Description() string { + return "Get the current working directory to understand the project context. Input: use 'current' or '.' (any input works)" +} + +func (t CurrentDirectoryTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, input) + } + + dir, err := os.Getwd() + if err != nil { + return "", fmt.Errorf("failed to get current directory: %w", err) + } + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, dir) + } + + return dir, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go new file mode 100644 index 00000000000..e3231003825 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go @@ -0,0 +1,53 @@ +package tools + +import ( + "context" + "fmt" + "os" +) + +// DeleteDirectoryTool implements the Tool interface for deleting directories +type DeleteDirectoryTool struct{} + +func (t DeleteDirectoryTool) Name() string { + return "delete_directory" +} + +func (t DeleteDirectoryTool) Description() string { + return "Delete a directory and all its contents. Input: directory path (e.g., 'temp-folder' or './old-docs')" +} + +func (t DeleteDirectoryTool) Call(ctx context.Context, input string) (string, error) { + if input == "" { + return "", fmt.Errorf("directory path is required") + } + + // Check if directory exists + info, err := os.Stat(input) + if err != nil { + return "", fmt.Errorf("directory %s does not exist: %w", input, err) + } + + // Make sure it's a directory, not a file + if !info.IsDir() { + return "", fmt.Errorf("%s is a file, not a directory. 
Use delete_file to remove files", input) + } + + // Count contents before deletion for reporting + files, err := os.ReadDir(input) + fileCount := 0 + if err == nil { + fileCount = len(files) + } + + // Delete the directory and all contents + err = os.RemoveAll(input) + if err != nil { + return "", fmt.Errorf("failed to delete directory %s: %w", input, err) + } + + if fileCount > 0 { + return fmt.Sprintf("Successfully deleted directory: %s (contained %d items)", input, fileCount), nil + } + return fmt.Sprintf("Successfully deleted empty directory: %s", input), nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go new file mode 100644 index 00000000000..71a5b7618d1 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go @@ -0,0 +1,43 @@ +package tools + +import ( + "context" + "fmt" + "os" +) + +// DeleteFileTool implements the Tool interface for deleting files +type DeleteFileTool struct{} + +func (t DeleteFileTool) Name() string { + return "delete_file" +} + +func (t DeleteFileTool) Description() string { + return "Delete a file. Input: file path (e.g., 'temp.txt' or './docs/old-file.md')" +} + +func (t DeleteFileTool) Call(ctx context.Context, input string) (string, error) { + if input == "" { + return "", fmt.Errorf("file path is required") + } + + // Check if file exists and get info + info, err := os.Stat(input) + if err != nil { + return "", fmt.Errorf("file %s does not exist: %w", input, err) + } + + // Make sure it's a file, not a directory + if info.IsDir() { + return "", fmt.Errorf("%s is a directory, not a file. Use delete_directory to remove directories", input) + } + + // Delete the file + err = os.Remove(input) + if err != nil { + return "", fmt.Errorf("failed to delete file %s: %w", input, err) + } + + return fmt.Sprintf("Successfully deleted file: %s (%d bytes)", input, info.Size()), nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go b/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go new file mode 100644 index 00000000000..133d52c8cea --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go @@ -0,0 +1,92 @@ +package tools + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" +) + +// DirectoryListTool implements the Tool interface for listing directory contents +type DirectoryListTool struct{} + +func (t DirectoryListTool) Name() string { + return "list_directory" +} + +func (t DirectoryListTool) Description() string { + return "List files and folders in a directory. Input: directory path (use '.' for current directory)" +} + +func (t DirectoryListTool) Call(ctx context.Context, input string) (string, error) { + path := strings.TrimSpace(input) + if path == "" { + path = "." 
+ } + + // Get absolute path for clarity + absPath, err := filepath.Abs(path) + if err != nil { + return "", fmt.Errorf("failed to get absolute path for %s: %w", path, err) + } + + // Check if directory exists + info, err := os.Stat(absPath) + if err != nil { + return "", fmt.Errorf("failed to access %s: %w", absPath, err) + } + if !info.IsDir() { + return "", fmt.Errorf("%s is not a directory", absPath) + } + + // List directory contents + files, err := os.ReadDir(absPath) + if err != nil { + return "", fmt.Errorf("failed to read directory %s: %w", absPath, err) + } + + var result strings.Builder + result.WriteString(fmt.Sprintf("Contents of %s:\n", absPath)) + result.WriteString(fmt.Sprintf("Total items: %d\n\n", len(files))) + + // Separate directories and files + var dirs []string + var regularFiles []string + + for _, file := range files { + if file.IsDir() { + dirs = append(dirs, file.Name()+"/") + } else { + info, err := file.Info() + if err != nil { + regularFiles = append(regularFiles, file.Name()) + } else { + regularFiles = append(regularFiles, fmt.Sprintf("%s (%d bytes)", file.Name(), info.Size())) + } + } + } + + // Display directories first + if len(dirs) > 0 { + result.WriteString("Directories:\n") + for _, dir := range dirs { + result.WriteString(fmt.Sprintf(" 📁 %s\n", dir)) + } + result.WriteString("\n") + } + + // Then display files + if len(regularFiles) > 0 { + result.WriteString("Files:\n") + for _, file := range regularFiles { + result.WriteString(fmt.Sprintf(" 📄 %s\n", file)) + } + } + + if len(dirs) == 0 && len(regularFiles) == 0 { + result.WriteString("Directory is empty.\n") + } + + return result.String(), nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go b/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go new file mode 100644 index 00000000000..8951b35bc77 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go @@ -0,0 +1,67 @@ +package tools + +import ( + "context" + "fmt" + "os" + "strings" + "time" +) + +// FileInfoTool implements the Tool interface for getting detailed file information +type FileInfoTool struct{} + +func (t FileInfoTool) Name() string { + return "file_info" +} + +func (t FileInfoTool) Description() string { + return "Get detailed information about a file or directory. 
Input: file or directory path (e.g., 'README.md' or './docs')" +} + +func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { + if input == "" { + return "", fmt.Errorf("file or directory path is required") + } + + info, err := os.Stat(input) + if err != nil { + return "", fmt.Errorf("failed to get info for %s: %w", input, err) + } + + var result strings.Builder + result.WriteString(fmt.Sprintf("Information for: %s\n", input)) + result.WriteString("═══════════════════════════════════\n") + + // Type + if info.IsDir() { + result.WriteString("Type: Directory\n") + + // Count contents if it's a directory + if files, err := os.ReadDir(input); err == nil { + result.WriteString(fmt.Sprintf("Contents: %d items\n", len(files))) + } + } else { + result.WriteString("Type: File\n") + result.WriteString(fmt.Sprintf("Size: %d bytes\n", info.Size())) + } + + // Permissions + result.WriteString(fmt.Sprintf("Permissions: %s\n", info.Mode().String())) + + // Timestamps + result.WriteString(fmt.Sprintf("Modified: %s\n", info.ModTime().Format(time.RFC3339))) + + // Additional file details + if !info.IsDir() { + if info.Size() == 0 { + result.WriteString("Note: File is empty\n") + } else if info.Size() > 1024*1024 { + result.WriteString(fmt.Sprintf("Size (human): %.2f MB\n", float64(info.Size())/(1024*1024))) + } else if info.Size() > 1024 { + result.WriteString(fmt.Sprintf("Size (human): %.2f KB\n", float64(info.Size())/1024)) + } + } + + return result.String(), nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go b/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go new file mode 100644 index 00000000000..e87c7131b54 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go @@ -0,0 +1,43 @@ +package tools + +import ( + "context" + "fmt" + "io" + "net/http" +) + +// HTTPFetcherTool implements the Tool interface for making HTTP requests +type HTTPFetcherTool struct{} + +func (t HTTPFetcherTool) Name() string { + return "http_fetcher" +} + +func (t HTTPFetcherTool) Description() string { + return "Make HTTP GET requests to fetch content from URLs. Input should be a valid URL." +} + +func (t HTTPFetcherTool) Call(ctx context.Context, input string) (string, error) { + resp, err := http.Get(input) + if err != nil { + return "", fmt.Errorf("failed to fetch URL %s: %w", input, err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("HTTP request failed with status: %s", resp.Status) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("failed to read response body: %w", err) + } + + // Limit response size to avoid overwhelming the context + if len(body) > 5000 { + return fmt.Sprintf("Content (first 5000 chars): %s...\n[Content truncated]", string(body[:5000])), nil + } + + return string(body), nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go new file mode 100644 index 00000000000..f39d6ede31b --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go @@ -0,0 +1,62 @@ +package tools + +import ( + "context" + "fmt" + "os" + "strings" +) + +// MoveFileTool implements the Tool interface for moving/renaming files +type MoveFileTool struct{} + +func (t MoveFileTool) Name() string { + return "move_file" +} + +func (t MoveFileTool) Description() string { + return "Move or rename a file. 
Input format: 'source|destination' (e.g., 'old.txt|new.txt' or './file.txt|./folder/file.txt')" +} + +func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { + if input == "" { + return "", fmt.Errorf("input is required in format 'source|destination'") + } + + // Split on first occurrence of '|' to separate source from destination + parts := strings.SplitN(input, "|", 2) + if len(parts) != 2 { + return "", fmt.Errorf("invalid input format. Use 'source|destination'") + } + + source := strings.TrimSpace(parts[0]) + destination := strings.TrimSpace(parts[1]) + + if source == "" || destination == "" { + return "", fmt.Errorf("both source and destination paths are required") + } + + // Check if source exists + sourceInfo, err := os.Stat(source) + if err != nil { + return "", fmt.Errorf("source %s does not exist: %w", source, err) + } + + // Check if destination already exists + if _, err := os.Stat(destination); err == nil { + return "", fmt.Errorf("destination %s already exists", destination) + } + + // Move/rename the file + err = os.Rename(source, destination) + if err != nil { + return "", fmt.Errorf("failed to move %s to %s: %w", source, destination, err) + } + + fileType := "file" + if sourceInfo.IsDir() { + fileType = "directory" + } + + return fmt.Sprintf("Successfully moved %s from %s to %s (%d bytes)", fileType, source, destination, sourceInfo.Size()), nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go new file mode 100644 index 00000000000..1aca867c288 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go @@ -0,0 +1,37 @@ +package tools + +import ( + "context" + "fmt" + "os" +) + +// ReadFileTool implements the Tool interface for reading file contents +type ReadFileTool struct{} + +func (t ReadFileTool) Name() string { + return "read_file" +} + +func (t ReadFileTool) Description() string { + return "Read the contents of a file. Input: file path (e.g., 'README.md' or './docs/setup.md')" +} + +func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { + if input == "" { + return "", fmt.Errorf("file path is required") + } + + content, err := os.ReadFile(input) + if err != nil { + return "", fmt.Errorf("failed to read file %s: %w", input, err) + } + + // Limit file size to avoid overwhelming context + if len(content) > 5000 { + return fmt.Sprintf("File: %s (first 5000 chars)\n%s...\n[File truncated - total size: %d bytes]", + input, string(content[:5000]), len(content)), nil + } + + return fmt.Sprintf("File: %s\n%s", input, string(content)), nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/weather.go b/cli/azd/extensions/azd.ai.start/internal/tools/weather.go new file mode 100644 index 00000000000..a88c14f74e0 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/weather.go @@ -0,0 +1,105 @@ +package tools + +import ( + "context" + "fmt" + "math/rand" + "strings" + "time" +) + +// WeatherTool implements the Tool interface for getting weather information +type WeatherTool struct{} + +func (t WeatherTool) Name() string { + return "weather" +} + +func (t WeatherTool) Description() string { + return "Get current weather conditions for a city. 
Input: city name (e.g., 'San Diego' or 'New York')" +} + +func (t WeatherTool) Call(ctx context.Context, input string) (string, error) { + city := strings.TrimSpace(input) + if city == "" { + return "", fmt.Errorf("city name is required") + } + + // Initialize random seed based on current time + rand.Seed(time.Now().UnixNano()) + + // Generate more realistic temperature based on city + var temperature int + cityLower := strings.ToLower(city) + + // Assign temperature ranges based on typical climate + if strings.Contains(cityLower, "san diego") || strings.Contains(cityLower, "los angeles") || + strings.Contains(cityLower, "miami") || strings.Contains(cityLower, "phoenix") { + // Warm climate cities: 65-85°F + temperature = rand.Intn(21) + 65 + } else if strings.Contains(cityLower, "seattle") || strings.Contains(cityLower, "portland") || + strings.Contains(cityLower, "chicago") || strings.Contains(cityLower, "new york") { + // Moderate climate cities: 45-75°F + temperature = rand.Intn(31) + 45 + } else if strings.Contains(cityLower, "alaska") || strings.Contains(cityLower, "minneapolis") || + strings.Contains(cityLower, "denver") { + // Cold climate cities: 25-55°F + temperature = rand.Intn(31) + 25 + } else { + // Default range for unknown cities: 50-80°F + temperature = rand.Intn(31) + 50 + } + + // Weather conditions with probabilities + conditions := []string{ + "sunny", "sunny", "sunny", "sunny", // 40% chance + "partly cloudy", "partly cloudy", "partly cloudy", // 30% chance + "cloudy", "cloudy", // 20% chance + "rainy", // 10% chance + } + condition := conditions[rand.Intn(len(conditions))] + + // Add some variety to the response format + responseTemplates := []string{ + "It's %d°F and %s in %s", + "Current weather in %s: %d°F and %s", + "The weather in %s is %d°F with %s skies", + "%s is experiencing %s weather at %d°F", + } + + template := responseTemplates[rand.Intn(len(responseTemplates))] + + var response string + if strings.Contains(template, "It's %d°F and %s in %s") { + response = fmt.Sprintf(template, temperature, condition, city) + } else if strings.Contains(template, "Current weather in %s: %d°F and %s") { + response = fmt.Sprintf(template, city, temperature, condition) + } else if strings.Contains(template, "The weather in %s is %d°F with %s skies") { + response = fmt.Sprintf(template, city, temperature, condition) + } else { + // "%s is experiencing %s weather at %d°F" + response = fmt.Sprintf(template, city, condition, temperature) + } + + // Add some additional details occasionally + if rand.Intn(3) == 0 { + extras := []string{ + "Light breeze from the west.", + "Humidity is comfortable.", + "Perfect day to be outside!", + "Visibility is excellent.", + "No precipitation expected.", + } + if condition == "rainy" { + extras = []string{ + "Light rain expected to continue.", + "Bring an umbrella!", + "Rain should clear up by evening.", + } + } + extra := extras[rand.Intn(len(extras))] + response += ". 
" + extra + } + + return response, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go new file mode 100644 index 00000000000..1020e9c7283 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go @@ -0,0 +1,101 @@ +package tools + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" +) + +// WriteFileTool implements the Tool interface for writing file contents +type WriteFileTool struct{} + +func (t WriteFileTool) Name() string { + return "write_file" +} + +func (t WriteFileTool) Description() string { + return `Write content to a file. Input format: 'filepath|content' + +For multi-line content, use literal \n for newlines: +- Single line: 'test.txt|Hello World' +- Multi-line: 'script.bicep|param name string\nparam location string\nresource myResource...' + +Example Bicep file: +'main.bicep|param name string\nparam location string\n\nresource appService ''Microsoft.Web/sites@2022-03-01'' = {\n name: name\n location: location\n kind: ''app''\n properties: {\n serverFarmId: serverFarmId\n }\n}\n\noutput appServiceId string = appService.id' + +The tool will convert \n to actual newlines automatically.` +} + +func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { + if input == "" { + return "", fmt.Errorf("input is required in format 'filepath|content'") + } + + // Split on first occurrence of '|' to separate path from content + parts := strings.SplitN(input, "|", 2) + if len(parts) != 2 { + return "", fmt.Errorf("invalid input format. Use 'filepath|content'") + } + + filePath := strings.TrimSpace(parts[0]) + content := parts[1] + + // Convert literal \n sequences to actual newlines (for agents that escape newlines) + content = strings.ReplaceAll(content, "\\n", "\n") + content = strings.ReplaceAll(content, "\\t", "\t") + + // Clean up any trailing quotes that might have been added during formatting + content = strings.TrimSuffix(content, "'") + content = strings.TrimSuffix(content, "\")") + + // Clean up any quotes around the filepath (from agent formatting) + filePath = strings.Trim(filePath, "\"'") + + if filePath == "" { + return "", fmt.Errorf("filepath cannot be empty") + } + + // Ensure the directory exists + dir := filepath.Dir(filePath) + if dir != "." && dir != "" { + if err := os.MkdirAll(dir, 0755); err != nil { + return "", fmt.Errorf("failed to create directory %s: %w", dir, err) + } + } + + // Write the file + err := os.WriteFile(filePath, []byte(content), 0644) + if err != nil { + return "", fmt.Errorf("failed to write file %s: %w", filePath, err) + } + + // Verify the file was written correctly + writtenContent, err := os.ReadFile(filePath) + if err != nil { + return "", fmt.Errorf("failed to verify written file %s: %w", filePath, err) + } + + lineCount := strings.Count(string(writtenContent), "\n") + 1 + if content != "" && !strings.HasSuffix(content, "\n") { + lineCount = strings.Count(content, "\n") + 1 + } + + return fmt.Sprintf("Successfully wrote %d bytes (%d lines) to %s. Content preview:\n%s", + len(content), lineCount, filePath, getContentPreview(content)), nil +} + +// getContentPreview returns a preview of the content for verification +func getContentPreview(content string) string { + lines := strings.Split(content, "\n") + if len(lines) <= 5 { + return content + } + + preview := strings.Join(lines[:3], "\n") + preview += fmt.Sprintf("\n... 
(%d more lines) ...\n", len(lines)-5) + preview += strings.Join(lines[len(lines)-2:], "\n") + + return preview +} diff --git a/cli/azd/extensions/azd.ai.start/internal/utils/helpers.go b/cli/azd/extensions/azd.ai.start/internal/utils/helpers.go new file mode 100644 index 00000000000..130734eff77 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/utils/helpers.go @@ -0,0 +1,41 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package utils + +import ( + "fmt" + "strings" + "time" + + "azd.ai.start/internal/session" +) + +// TruncateString truncates a string to a maximum length +func TruncateString(s string, maxLen int) string { + if len(s) <= maxLen { + return s + } + return s[:maxLen] + "..." +} + +// FormatActionsForValidation formats actions for the validation prompt +func FormatActionsForValidation(actions []session.ActionLog) string { + if len(actions) == 0 { + return "No actions executed" + } + + var formatted strings.Builder + for i, action := range actions { + status := "SUCCESS" + if !action.Success { + status = "FAILED" + } + formatted.WriteString(fmt.Sprintf("%d. Tool: %s | Input: %s | Status: %s | Duration: %v\n", + i+1, action.Tool, TruncateString(action.Input, 100), status, action.Duration.Round(time.Millisecond))) + if action.Output != "" { + formatted.WriteString(fmt.Sprintf(" Output: %s\n", TruncateString(action.Output, 200))) + } + } + return formatted.String() +} diff --git a/cli/azd/extensions/azd.ai.start/internal/validation/parser.go b/cli/azd/extensions/azd.ai.start/internal/validation/parser.go new file mode 100644 index 00000000000..2f814546798 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/validation/parser.go @@ -0,0 +1,93 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package validation + +import ( + "strings" +) + +// ParseValidationResult parses the validation result from LLM response +func ParseValidationResult(response string) *ValidationResult { + result := &ValidationResult{ + Status: ValidationError, + Explanation: "Failed to parse validation response", + Confidence: 0.0, + } + + lines := strings.Split(response, "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + + if strings.HasPrefix(line, "STATUS:") { + statusStr := strings.TrimSpace(strings.TrimPrefix(line, "STATUS:")) + switch strings.ToUpper(statusStr) { + case "COMPLETE": + result.Status = ValidationComplete + case "PARTIAL": + result.Status = ValidationPartial + case "INCOMPLETE": + result.Status = ValidationIncomplete + case "ERROR": + result.Status = ValidationError + } + } else if strings.HasPrefix(line, "EXPLANATION:") { + result.Explanation = strings.TrimSpace(strings.TrimPrefix(line, "EXPLANATION:")) + } else if strings.HasPrefix(line, "CONFIDENCE:") { + confidenceStr := strings.TrimSpace(strings.TrimPrefix(line, "CONFIDENCE:")) + if conf, err := parseFloat(confidenceStr); err == nil { + result.Confidence = conf + } + } + } + + // If we couldn't parse the status, try to infer from the response content + if result.Status == ValidationError { + responseUpper := strings.ToUpper(response) + if strings.Contains(responseUpper, "COMPLETE") { + result.Status = ValidationComplete + } else if strings.Contains(responseUpper, "PARTIAL") { + result.Status = ValidationPartial + } else if strings.Contains(responseUpper, "INCOMPLETE") { + result.Status = ValidationIncomplete + } + result.Explanation = response + result.Confidence = 0.7 + } + + return result +} + +// parseFloat safely parses a float from string +func parseFloat(s string) (float64, error) { + // Simple float parsing for confidence values + s = strings.TrimSpace(s) + if s == "1" || s == "1.0" { + return 1.0, nil + } else if s == "0" || s == "0.0" { + return 0.0, nil + } else if strings.HasPrefix(s, "0.") { + // Simple decimal parsing for common cases + switch s { + case "0.1": + return 0.1, nil + case "0.2": + return 0.2, nil + case "0.3": + return 0.3, nil + case "0.4": + return 0.4, nil + case "0.5": + return 0.5, nil + case "0.6": + return 0.6, nil + case "0.7": + return 0.7, nil + case "0.8": + return 0.8, nil + case "0.9": + return 0.9, nil + } + } + return 0.5, nil // Default confidence +} diff --git a/cli/azd/extensions/azd.ai.start/internal/validation/types.go b/cli/azd/extensions/azd.ai.start/internal/validation/types.go new file mode 100644 index 00000000000..4b0ebcd25bc --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/validation/types.go @@ -0,0 +1,21 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package validation + +// ValidationResult represents the result of intent validation +type ValidationResult struct { + Status ValidationStatus + Explanation string + Confidence float64 +} + +// ValidationStatus represents the completion status of the original intent +type ValidationStatus string + +const ( + ValidationComplete ValidationStatus = "COMPLETE" + ValidationPartial ValidationStatus = "PARTIAL" + ValidationIncomplete ValidationStatus = "INCOMPLETE" + ValidationError ValidationStatus = "ERROR" +) diff --git a/cli/azd/extensions/azd.ai.start/internal/validation/validator.go b/cli/azd/extensions/azd.ai.start/internal/validation/validator.go new file mode 100644 index 00000000000..f9ae0311062 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/validation/validator.go @@ -0,0 +1,68 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package validation + +import ( + "context" + "fmt" + + "github.com/tmc/langchaingo/llms" + + "azd.ai.start/internal/session" + "azd.ai.start/internal/utils" +) + +// IntentValidator validates whether the original intent was fulfilled +type IntentValidator struct { + llm llms.Model +} + +// NewIntentValidator creates a new intent validator +func NewIntentValidator(llm llms.Model) *IntentValidator { + return &IntentValidator{llm: llm} +} + +// ValidateCompletion validates whether the original intent was fulfilled +func (iv *IntentValidator) ValidateCompletion( + originalIntent string, + executedActions []session.ActionLog, +) *ValidationResult { + if len(executedActions) == 0 { + return &ValidationResult{ + Status: ValidationIncomplete, + Explanation: "No actions were executed", + Confidence: 1.0, + } + } + + validationPrompt := fmt.Sprintf(` +Original User Intent: %s + +Actions Executed: +%s + +Based on the original intent and the actions that were executed, evaluate whether the user's intent was fulfilled. + +Respond with one of: COMPLETE, PARTIAL, INCOMPLETE, ERROR + +Then provide a brief explanation of your assessment. + +Format your response as: +STATUS: [COMPLETE/PARTIAL/INCOMPLETE/ERROR] +EXPLANATION: [Your explanation] +CONFIDENCE: [0.0-1.0]`, + originalIntent, + utils.FormatActionsForValidation(executedActions)) + + result, err := iv.llm.Call(context.Background(), validationPrompt) + if err != nil { + return &ValidationResult{ + Status: ValidationError, + Explanation: fmt.Sprintf("Validation failed: %s", err.Error()), + Confidence: 0.0, + } + } + + return ParseValidationResult(result) +} diff --git a/cli/azd/extensions/azd.ai.start/main.go b/cli/azd/extensions/azd.ai.start/main.go new file mode 100644 index 00000000000..026e7c944e7 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/main.go @@ -0,0 +1,30 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package main + +import ( + "context" + "os" + + "azd.ai.start/internal/cmd" + "github.com/fatih/color" +) + +func init() { + forceColorVal, has := os.LookupEnv("FORCE_COLOR") + if has && forceColorVal == "1" { + color.NoColor = false + } +} + +func main() { + // Execute the root command + ctx := context.Background() + rootCmd := cmd.NewRootCommand() + + if err := rootCmd.ExecuteContext(ctx); err != nil { + color.Red("Error: %v", err) + os.Exit(1) + } +} From add633c88b8d64b26e4872cdd4bf1d3b176a34d6 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Mon, 28 Jul 2025 18:04:42 -0700 Subject: [PATCH 039/116] azd agent --- cli/azd/extensions/azd.ai.start/go.mod | 10 +- cli/azd/extensions/azd.ai.start/go.sum | 4 + .../azd.ai.start/internal/agent/agent.go | 436 ++++-------------- .../azd.ai.start/internal/agent/factory.go | 66 --- .../agent/prompts/default_agent_prefix.txt | 19 + .../azd.ai.start/internal/agent/response.go | 25 - .../azd.ai.start/internal/agent/stats.go | 16 - .../internal/cmd/enhanced_integration.go | 29 +- .../azd.ai.start/internal/cmd/root.go | 6 + .../azd.ai.start/internal/logging/logger.go | 164 ++++--- .../internal/tools/change_directory.go | 50 +- .../azd.ai.start/internal/tools/copy_file.go | 65 ++- .../internal/tools/create_directory.go | 44 +- .../internal/tools/delete_directory.go | 48 +- .../internal/tools/delete_file.go | 41 +- .../internal/tools/directory_list.go | 32 +- .../azd.ai.start/internal/tools/file_info.go | 63 ++- .../internal/tools/http_fetcher.go | 39 +- .../azd.ai.start/internal/tools/move_file.go | 53 ++- .../azd.ai.start/internal/tools/read_file.go | 35 +- .../azd.ai.start/internal/tools/weather.go | 20 +- .../azd.ai.start/internal/tools/write_file.go | 97 ++-- .../internal/validation/validator.go | 68 --- 23 files changed, 687 insertions(+), 743 deletions(-) delete mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/factory.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt delete mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/response.go delete mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/stats.go delete mode 100644 cli/azd/extensions/azd.ai.start/internal/validation/validator.go diff --git a/cli/azd/extensions/azd.ai.start/go.mod b/cli/azd/extensions/azd.ai.start/go.mod index c840c72262d..2a66f9854b7 100644 --- a/cli/azd/extensions/azd.ai.start/go.mod +++ b/cli/azd/extensions/azd.ai.start/go.mod @@ -3,6 +3,7 @@ module azd.ai.start go 1.24.1 require ( + github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c github.com/fatih/color v1.18.0 github.com/spf13/cobra v1.9.1 github.com/tmc/langchaingo v0.1.13 @@ -12,7 +13,6 @@ require ( github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.3.1 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect - github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c // indirect github.com/dlclark/regexp2 v1.10.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/google/go-cmp v0.7.0 // indirect @@ -24,7 +24,6 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/mitchellh/copystructure v1.0.0 // indirect github.com/mitchellh/reflectwalk v1.0.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -38,20 +37,13 @@ require ( 
github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/cast v1.3.1 // indirect github.com/spf13/pflag v1.0.6 // indirect - github.com/stretchr/testify v1.10.0 // indirect github.com/yargevad/filepathx v1.0.0 // indirect - go.opentelemetry.io/otel v1.35.0 // indirect - go.opentelemetry.io/otel/metric v1.35.0 // indirect - go.opentelemetry.io/otel/trace v1.35.0 // indirect go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 // indirect golang.org/x/crypto v0.37.0 // indirect golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect golang.org/x/net v0.39.0 // indirect - golang.org/x/oauth2 v0.25.0 // indirect - golang.org/x/sync v0.13.0 // indirect golang.org/x/sys v0.32.0 // indirect golang.org/x/text v0.24.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250407143221-ac9807e6c755 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250407143221-ac9807e6c755 // indirect google.golang.org/grpc v1.71.1 // indirect google.golang.org/protobuf v1.36.6 // indirect diff --git a/cli/azd/extensions/azd.ai.start/go.sum b/cli/azd/extensions/azd.ai.start/go.sum index bc863f91c5c..3faa8263d70 100644 --- a/cli/azd/extensions/azd.ai.start/go.sum +++ b/cli/azd/extensions/azd.ai.start/go.sum @@ -228,6 +228,10 @@ go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= +go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 h1:Ss6D3hLXTM0KobyBYEAygXzFfGcjnmfEJOBgSbemCtg= diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go index ba8c284aa52..e227fe85ae1 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go @@ -5,358 +5,124 @@ package agent import ( "context" + _ "embed" "fmt" - "time" "github.com/tmc/langchaingo/agents" + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/chains" + "github.com/tmc/langchaingo/llms/openai" + "github.com/tmc/langchaingo/memory" "github.com/tmc/langchaingo/schema" "github.com/tmc/langchaingo/tools" - "azd.ai.start/internal/logging" "azd.ai.start/internal/session" - "azd.ai.start/internal/utils" - "azd.ai.start/internal/validation" + mytools "azd.ai.start/internal/tools" ) -// AzureAIAgent represents an enhanced Azure AI agent with action tracking and intent validation -type AzureAIAgent struct { - agent *agents.ConversationalAgent - executor *agents.Executor - memory schema.Memory - tools []tools.Tool - intentValidator *validation.IntentValidator - actionLogger *logging.ActionLogger - currentSession *session.ActionSession -} - -// ProcessQuery processes a user query with full action tracking and validation -func (aai *AzureAIAgent) 
ProcessQuery(ctx context.Context, userInput string) (*AgentResponse, error) { - // Start new action session - sess := session.NewActionSession(userInput) - aai.currentSession = sess - - fmt.Printf("\n🎯 Intent: %s\n", userInput) - fmt.Printf("📋 Planning and executing actions...\n") - fmt.Println("═══════════════════════════════════════") - - // Clear previous actions - aai.actionLogger.Clear() - - // Enhanced user input with explicit completion requirements - enhancedInput := fmt.Sprintf(`%s - -IMPORTANT: You must complete this task successfully. Do not stop until: -1. All required actions have been executed -2. Any files that need to be created are actually saved -3. You verify the results of your actions -4. The task is fully accomplished - -If a tool fails, analyze why and try again with corrections. If you need to create files, use the write_file tool with the complete content.`, userInput) - - // Execute with enhanced input - result, err := aai.executor.Call(ctx, map[string]any{ - "input": enhancedInput, - }) - - if err != nil { - sess.End() - fmt.Printf("❌ Execution failed: %s\n", err.Error()) - return nil, err - } - - // Get executed actions from logger and intermediate steps - executedActions := aai.actionLogger.GetActions() - for _, action := range executedActions { - sess.AddExecutedAction(action) - } - - // If no actions in logger but we have intermediate steps, extract them - if len(sess.ExecutedActions) == 0 { - if steps, ok := result["intermediateSteps"].([]schema.AgentStep); ok && len(steps) > 0 { - for _, step := range steps { - actionLog := session.ActionLog{ - Timestamp: time.Now(), - Action: step.Action.Tool, - Tool: step.Action.Tool, - Input: step.Action.ToolInput, - Output: step.Observation, - Success: true, - Duration: time.Millisecond * 100, // Approximate - } - sess.AddExecutedAction(actionLog) - } - } - } - - // Check if any actions were taken - if not, this was likely conversational - if len(sess.ExecutedActions) == 0 { - fmt.Printf("💬 No tool actions needed - appears to be conversational\n") +//go:embed prompts/default_agent_prefix.txt +var _defaultAgentPrefix string - sess.End() - validationResult := &validation.ValidationResult{ - Status: validation.ValidationComplete, - Explanation: "Conversational response - no actions required", - Confidence: 1.0, - } - sess.SetValidationResult(validationResult) - - // Display simple summary for conversational responses - fmt.Println("\n📊 Session Summary") - fmt.Println("═══════════════════════════════════════") - duration := sess.EndTime.Sub(sess.StartTime) - fmt.Printf("⏱️ Duration: %v\n", duration.Round(time.Millisecond)) - fmt.Println("\n💬 Conversational response - no tool actions needed") - fmt.Printf("🎯 Intent Status: %s (%.1f%% confidence)\n", validationResult.Status, validationResult.Confidence*100) - fmt.Println("═══════════════════════════════════════") - - return NewAgentResponse(result["output"].(string), sess, validationResult), nil - } - - // Actions were taken, so validate and potentially retry - var lastResult = result - var lastValidation *validation.ValidationResult - maxAttempts := 3 // Maximum retry attempts for incomplete tasks - - for attempt := 1; attempt <= maxAttempts; attempt++ { - // Validate intent completion with enhanced validation - fmt.Printf("\n🔍 Validating completion...\n") - validationResult := aai.intentValidator.ValidateCompletion( - userInput, - sess.ExecutedActions, - ) - lastValidation = validationResult - sess.SetValidationResult(validationResult) - - // Check if task is complete - if 
validationResult.Status == validation.ValidationComplete { - fmt.Printf("✅ Task completed successfully!\n") - break - } - - // If task is incomplete and we have more attempts, retry - if attempt < maxAttempts { - if validationResult.Status == validation.ValidationIncomplete || validationResult.Status == validation.ValidationPartial { - fmt.Printf("⚠️ Task incomplete (attempt %d/%d): %s\n", attempt, maxAttempts, validationResult.Explanation) - fmt.Printf("🔄 Analyzing what's missing and taking corrective action...\n") - - // Clear previous actions for retry - aai.actionLogger.Clear() - - // Enhanced retry with feedback about what was incomplete - retryInput := fmt.Sprintf(`%s - -IMPORTANT: You must complete this task successfully. Do not stop until: -1. All required actions have been executed -2. Any files that need to be created are actually saved -3. You verify the results of your actions -4. The task is fully accomplished - -PREVIOUS ATTEMPT ANALYSIS: The previous attempt was marked as %s. -Reason: %s - -Please analyze what was missing or incomplete and take the necessary additional actions to fully complete the task.`, - userInput, validationResult.Status, validationResult.Explanation) - - // Execute retry - retryResult, err := aai.executor.Call(ctx, map[string]any{ - "input": retryInput, - }) - - if err != nil { - fmt.Printf("❌ Retry attempt %d failed: %s\n", attempt+1, err.Error()) - if attempt == maxAttempts-1 { - sess.End() - return nil, err - } - continue - } - - lastResult = retryResult - - // Get new actions from this retry - retryActions := aai.actionLogger.GetActions() - if len(retryActions) == 0 { - if steps, ok := retryResult["intermediateSteps"].([]schema.AgentStep); ok && len(steps) > 0 { - for _, step := range steps { - actionLog := session.ActionLog{ - Timestamp: time.Now(), - Action: step.Action.Tool, - Tool: step.Action.Tool, - Input: step.Action.ToolInput, - Output: step.Observation, - Success: true, - Duration: time.Millisecond * 100, - } - retryActions = append(retryActions, actionLog) - } - } - } - - // Accumulate actions from retry - for _, action := range retryActions { - sess.AddExecutedAction(action) - } - continue - } - } else { - // This was the last attempt and still incomplete - fmt.Printf("⚠️ Task still incomplete after %d attempts: %s\n", maxAttempts, validationResult.Explanation) - fmt.Printf("💡 Consider:\n") - fmt.Printf(" - Breaking the task into smaller, more specific steps\n") - fmt.Printf(" - Checking if all required files were actually created\n") - fmt.Printf(" - Verifying tool outputs were successful\n") - } - } - - sess.End() - - // Display comprehensive summary - aai.displayCompleteSummary(sess, lastResult) - - return NewAgentResponse(lastResult["output"].(string), sess, lastValidation), nil -} - -// ProcessQueryWithRetry processes a query with automatic retry on failure -func (aai *AzureAIAgent) ProcessQueryWithRetry(ctx context.Context, userInput string, maxRetries int) (*AgentResponse, error) { - var lastErr error - var lastResponse *AgentResponse - - for attempt := 1; attempt <= maxRetries; attempt++ { - fmt.Printf("\n🔄 Attempt %d/%d\n", attempt, maxRetries) - - response, err := aai.ProcessQuery(ctx, userInput) - if err != nil { - lastErr = err - fmt.Printf("❌ Attempt %d failed: %s\n", attempt, err.Error()) - continue - } - - lastResponse = response - - // Check if task completed successfully - if response.Validation.Status == validation.ValidationComplete { - fmt.Printf("✅ Task completed successfully on attempt %d\n", attempt) - return 
response, nil - } - - if response.Validation.Status == validation.ValidationPartial { - fmt.Printf("⚠️ Partial completion on attempt %d: %s\n", attempt, response.Validation.Explanation) - } else { - fmt.Printf("❌ Task incomplete on attempt %d: %s\n", attempt, response.Validation.Explanation) - } - - // Clear memory for fresh retry - aai.ClearMemory(ctx) - } - - if lastResponse != nil { - return lastResponse, nil - } - - return nil, fmt.Errorf("all %d attempts failed, last error: %w", maxRetries, lastErr) +// AzureAIAgent represents an enhanced Azure AI agent with action tracking, intent validation, and conversation memory +type AzureAIAgent struct { + agent *agents.ConversationalAgent + executor *agents.Executor + memory schema.Memory // Maintains conversation history for context-aware responses + tools []tools.Tool + actionLogger callbacks.Handler + currentSession *session.ActionSession } -// GetSessionStats returns statistics about the current session -func (aai *AzureAIAgent) GetSessionStats() *SessionStats { - if aai.currentSession == nil { - return &SessionStats{} - } - - stats := &SessionStats{ - TotalActions: len(aai.currentSession.ExecutedActions), - SuccessfulActions: 0, - FailedActions: 0, - TotalDuration: aai.currentSession.EndTime.Sub(aai.currentSession.StartTime), +func NewAzureAIAgent(llm *openai.LLM) *AzureAIAgent { + smartMemory := memory.NewConversationBuffer( + memory.WithInputKey("input"), + memory.WithOutputKey("output"), + memory.WithHumanPrefix("Human"), + memory.WithAIPrefix("AI"), + ) + + tools := []tools.Tool{ + // Directory operations + mytools.DirectoryListTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.CreateDirectoryTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.DeleteDirectoryTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.ChangeDirectoryTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.CurrentDirectoryTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + + // File operations + mytools.ReadFileTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.WriteFileTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.CopyFileTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.MoveFileTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.DeleteFileTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.FileInfoTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + + // Other tools + mytools.HTTPFetcherTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + mytools.WeatherTool{ + CallbacksHandler: llm.CallbacksHandler, + }, + tools.Calculator{ + CallbacksHandler: llm.CallbacksHandler, + }, } - for _, action := range aai.currentSession.ExecutedActions { - if action.Success { - stats.SuccessfulActions++ - } else { - stats.FailedActions++ - } + // 4. Create agent with memory directly integrated + agent := agents.NewConversationalAgent(llm, tools, + agents.WithPromptPrefix(_defaultAgentPrefix), + agents.WithMemory(smartMemory), + agents.WithCallbacksHandler(llm.CallbacksHandler), + ) + + // 5. 
Create executor without separate memory configuration since agent already has it + executor := agents.NewExecutor(agent, + agents.WithMaxIterations(1000), // Much higher limit for complex multi-step processes + agents.WithMemory(smartMemory), + agents.WithCallbacksHandler(llm.CallbacksHandler), + agents.WithReturnIntermediateSteps(), + ) + + return &AzureAIAgent{ + agent: agent, + executor: executor, + memory: smartMemory, + tools: tools, + actionLogger: llm.CallbacksHandler, } - - return stats -} - -// GetMemoryContent returns the current memory content for debugging -func (aai *AzureAIAgent) GetMemoryContent(ctx context.Context) (map[string]any, error) { - return aai.memory.LoadMemoryVariables(ctx, map[string]any{}) -} - -// ClearMemory clears the conversation memory -func (aai *AzureAIAgent) ClearMemory(ctx context.Context) error { - return aai.memory.Clear(ctx) } -// EnableVerboseLogging enables detailed iteration logging -func (aai *AzureAIAgent) EnableVerboseLogging() { - // This would enable more detailed logging in the action logger - fmt.Println("🔍 Verbose logging enabled - you'll see detailed iteration steps") -} - -// displayCompleteSummary displays a comprehensive summary of the session -func (aai *AzureAIAgent) displayCompleteSummary(sess *session.ActionSession, result map[string]any) { - fmt.Println("\n📊 Session Summary") - fmt.Println("═══════════════════════════════════════") - - // Display timing - duration := sess.EndTime.Sub(sess.StartTime) - fmt.Printf("⏱️ Duration: %v\n", duration.Round(time.Millisecond)) - - // Display actions with attempt grouping - if len(sess.ExecutedActions) > 0 { - fmt.Println("\n🔧 Actions Executed:") - for i, action := range sess.ExecutedActions { - status := "✅" - if !action.Success { - status = "❌" - } - fmt.Printf(" %s %d. %s (%v)\n", - status, i+1, - utils.TruncateString(action.Input, 50), - action.Duration.Round(time.Millisecond)) - } - } else { - fmt.Println("\n🔧 No explicit tool actions required") - } - - // Display validation result with enhanced messaging - if validationResult, ok := sess.ValidationResult.(*validation.ValidationResult); ok { - fmt.Printf("\n🎯 Intent Status: %s", validationResult.Status) - if validationResult.Confidence > 0 { - fmt.Printf(" (%.1f%% confidence)", validationResult.Confidence*100) - } - fmt.Println() - - if validationResult.Explanation != "" { - fmt.Printf("💭 Assessment: %s\n", validationResult.Explanation) - } - - // Show completion status with actionable advice - switch validationResult.Status { - case validation.ValidationComplete: - fmt.Printf("🎉 Task completed successfully!\n") - case validation.ValidationPartial: - fmt.Printf("⚠️ Task partially completed. Some aspects may need attention.\n") - case validation.ValidationIncomplete: - fmt.Printf("❌ Task incomplete. Additional actions may be needed.\n") - case validation.ValidationError: - fmt.Printf("⚠️ Validation error. 
Please review the actions taken.\n") - } - } - - // Display intermediate steps if available - if steps, ok := result["intermediateSteps"].([]schema.AgentStep); ok && len(steps) > 0 { - fmt.Printf("\n🔍 Reasoning Steps: %d\n", len(steps)) - for i, step := range steps { - fmt.Printf("Step %d:\n", i+1) - fmt.Printf(" Tool: %s\n", step.Action.Tool) - fmt.Printf(" Input: %s\n", step.Action.ToolInput) - fmt.Printf(" Observation: %s\n", utils.TruncateString(step.Observation, 200)) - } +// ProcessQuery processes a user query with full action tracking and validation +func (aai *AzureAIAgent) ProcessQuery(ctx context.Context, userInput string) (string, error) { + // Execute with enhanced input - agent should automatically handle memory + output, err := chains.Run(ctx, aai.executor, userInput) + if err != nil { + fmt.Printf("❌ Execution failed: %s\n", err.Error()) + return "", err } - fmt.Println("═══════════════════════════════════════") + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/factory.go b/cli/azd/extensions/azd.ai.start/internal/agent/factory.go deleted file mode 100644 index 3b979181591..00000000000 --- a/cli/azd/extensions/azd.ai.start/internal/agent/factory.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package agent - -import ( - "github.com/tmc/langchaingo/agents" - "github.com/tmc/langchaingo/llms/openai" - "github.com/tmc/langchaingo/memory" - "github.com/tmc/langchaingo/tools" - - "azd.ai.start/internal/logging" - mytools "azd.ai.start/internal/tools" - "azd.ai.start/internal/validation" -) - -// CreateAzureAIAgent creates a new enhanced Azure AI agent -func CreateAzureAIAgent(llm *openai.LLM) *AzureAIAgent { - // 1. Smart Memory with conversation buffer - smartMemory := memory.NewConversationBuffer() - - // 2. Action Logger with comprehensive callbacks - actionLogger := logging.NewActionLogger() - - // 3. Enhanced Tools - just the essentials - tools := []tools.Tool{ - // Directory operations - mytools.DirectoryListTool{}, - mytools.CreateDirectoryTool{}, - mytools.DeleteDirectoryTool{}, - mytools.ChangeDirectoryTool{}, - mytools.CurrentDirectoryTool{}, - - // File operations - mytools.ReadFileTool{}, - mytools.WriteFileTool{}, - mytools.CopyFileTool{}, - mytools.MoveFileTool{}, - mytools.DeleteFileTool{}, - mytools.FileInfoTool{}, - - // Other tools - mytools.HTTPFetcherTool{}, - mytools.WeatherTool{}, - tools.Calculator{}, - } - - // 4. Create agent with default settings - agent := agents.NewConversationalAgent(llm, tools) - - // 5. Enhanced Executor with aggressive completion settings - executor := agents.NewExecutor(agent, - agents.WithMemory(smartMemory), - agents.WithMaxIterations(1000), // Much higher limit for complex multi-step processes - agents.WithReturnIntermediateSteps(), - ) - - return &AzureAIAgent{ - agent: agent, - executor: executor, - memory: smartMemory, - tools: tools, - intentValidator: validation.NewIntentValidator(llm), - actionLogger: actionLogger, - } -} diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt new file mode 100644 index 00000000000..2acc4c20233 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt @@ -0,0 +1,19 @@ +You are an Azure Developer CLI (AZD) agent. +You are an expert is building, provisioning and deploying Azure applications. 
+Always use Azure best patterns and practices.
+If a tools exists that provides best practices and standards call this tool at the beginning of your workflow.
+
+IMPORTANT: You must complete this task successfully. Do not stop until:
+1. All required actions have been executed
+2. Any files that need to be created are actually saved
+3. You verify the results of your actions
+4. The task is fully accomplished
+
+If a tool fails, analyze why and try again with corrections.
+
+TOOLS:
+------
+
+Assistant has access to the following tools:
+
+{{.tool_descriptions}}
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/response.go b/cli/azd/extensions/azd.ai.start/internal/agent/response.go
deleted file mode 100644
index 5836fd13f50..00000000000
--- a/cli/azd/extensions/azd.ai.start/internal/agent/response.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package agent
-
-import (
-	"azd.ai.start/internal/session"
-	"azd.ai.start/internal/validation"
-)
-
-// AgentResponse represents the complete response from the agent
-type AgentResponse struct {
-	Output string
-	Session *session.ActionSession
-	Validation *validation.ValidationResult
-}
-
-// NewAgentResponse creates a new agent response
-func NewAgentResponse(output string, sess *session.ActionSession, validationResult *validation.ValidationResult) *AgentResponse {
-	return &AgentResponse{
-		Output: output,
-		Session: sess,
-		Validation: validationResult,
-	}
-}
diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/stats.go b/cli/azd/extensions/azd.ai.start/internal/agent/stats.go
deleted file mode 100644
index d7649186ebc..00000000000
--- a/cli/azd/extensions/azd.ai.start/internal/agent/stats.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package agent
-
-import (
-	"time"
-)
-
-// SessionStats provides statistics about an agent session
-type SessionStats struct {
-	TotalActions int
-	SuccessfulActions int
-	FailedActions int
-	TotalDuration time.Duration
-}
diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go
index 3ebc8c17eff..7382236b264 100644
--- a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go
+++ b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go
@@ -18,10 +18,9 @@ import (
 // RunEnhancedAzureAgent runs the enhanced Azure AI agent with full capabilities
 func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) error {
 	// Create the enhanced agent
-	azureAgent := agent.CreateAzureAIAgent(llm)
+	azureAgent := agent.NewAzureAIAgent(llm)
 
 	fmt.Println("🤖 Enhanced Azure AI Agent - Interactive Mode")
-	fmt.Println("Features: Action Tracking | Intent Validation | Smart Memory")
 	fmt.Println("═══════════════════════════════════════════════════════════")
 
 	// Handle initial query if provided
@@ -57,31 +56,7 @@ func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string)
 			break
 		}
 
-		// Special commands
-		if strings.ToLower(userInput) == "clear" {
-			err := azureAgent.ClearMemory(ctx)
-			if err != nil {
-				fmt.Printf("❌ Failed to clear memory: %s\n", err.Error())
-			} else {
-				fmt.Println("🧹 Memory cleared!")
-			}
-			continue
-		}
-
-		if strings.ToLower(userInput) == "stats" {
-			stats := azureAgent.GetSessionStats()
-			fmt.Printf("📊 Session Stats:\n")
-			fmt.Printf(" Total Actions: %d\n", stats.TotalActions)
-			fmt.Printf(" Successful: %d\n", stats.SuccessfulActions)
-			fmt.Printf(" Failed: %d\n", stats.FailedActions)
-			if stats.TotalDuration > 0 {
-				fmt.Printf(" Duration: %v\n", stats.TotalDuration)
-			}
-			continue
-		}
-
 		// Process the query with the enhanced agent
-		fmt.Printf("\n🤖 Enhanced AI Agent:\n")
 		response, err := azureAgent.ProcessQuery(ctx, userInput)
 		if err != nil {
 			fmt.Printf("❌ Error: %v\n", err)
@@ -89,7 +64,7 @@ func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string)
 		}
 
 		// Display the final response
-		fmt.Printf("\n💬 Final Response:\n%s\n", response.Output)
+		fmt.Printf("\n💬 Agent:\n%s\n", response)
 	}
 
 	if err := scanner.Err(); err != nil {
diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go
index a202faf1872..b411224ca34 100644
--- a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go
+++ b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go
@@ -8,6 +8,7 @@ import (
 	"encoding/json"
 	"fmt"
 
+	"azd.ai.start/internal/logging"
 	"github.com/azure/azure-dev/cli/azd/pkg/azdext"
 	"github.com/spf13/cobra"
 	"github.com/tmc/langchaingo/llms/openai"
@@ -73,12 +74,17 @@ func runAIAgent(ctx context.Context, args []string) error {
 		// Use Azure OpenAI with proper configuration
 		fmt.Printf("🔵 Trying Azure OpenAI with deployment: %s\n", aiConfig.DeploymentName)
 
+		actionLogger := logging.NewActionLogger(
+			logging.WithDebug(false),
+		)
+
 		llm, err = openai.New(
 			openai.WithToken(aiConfig.ApiKey),
 			openai.WithBaseURL(aiConfig.Endpoint+"/"),
 			openai.WithAPIType(openai.APITypeAzure),
 			openai.WithAPIVersion(azureAPIVersion),
 			openai.WithModel(aiConfig.DeploymentName),
+			openai.WithCallback(actionLogger),
 		)
 
 		if err == nil {
diff --git a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go
index da3a48bd262..edcf14d1de0 100644
--- a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go +++ b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go @@ -6,78 +6,126 @@ package logging import ( "context" "fmt" - "time" + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/schema" - - "azd.ai.start/internal/session" - "azd.ai.start/internal/utils" ) +// Compile-time check to ensure ActionLogger implements callbacks.Handler +var _ callbacks.Handler = &ActionLogger{} + // ActionLogger tracks and logs all agent actions type ActionLogger struct { - actions []session.ActionLog - current *session.ActionLog + debugEnabled bool +} + +// ActionLoggerOption represents an option for configuring ActionLogger +type ActionLoggerOption func(*ActionLogger) + +// WithDebug enables debug mode for verbose logging +func WithDebug(enabled bool) ActionLoggerOption { + return func(al *ActionLogger) { + al.debugEnabled = enabled + } } // NewActionLogger creates a new action logger -func NewActionLogger() *ActionLogger { - return &ActionLogger{ - actions: make([]session.ActionLog, 0), +func NewActionLogger(opts ...ActionLoggerOption) *ActionLogger { + al := &ActionLogger{} + + for _, opt := range opts { + opt(al) + } + + return al +} + +// HandleText is called when text is processed +func (al *ActionLogger) HandleText(ctx context.Context, text string) { + if al.debugEnabled { + fmt.Printf("📝 Text (full): %s\n", text) + } +} + +// HandleLLMGenerateContentStart is called when LLM content generation starts +func (al *ActionLogger) HandleLLMGenerateContentStart(ctx context.Context, ms []llms.MessageContent) { + if al.debugEnabled { + for i, msg := range ms { + fmt.Printf("🤖 Debug - Message %d: %+v\n", i, msg) + } + } +} + +// HandleLLMGenerateContentEnd is called when LLM content generation ends +func (al *ActionLogger) HandleLLMGenerateContentEnd(ctx context.Context, res *llms.ContentResponse) { + if al.debugEnabled && res != nil { + fmt.Printf("🤖 Debug - Response: %+v\n", res) + } +} + +// HandleRetrieverStart is called when retrieval starts +func (al *ActionLogger) HandleRetrieverStart(ctx context.Context, query string) { + if al.debugEnabled { + fmt.Printf("🔍 Retrieval starting for query (full): %s\n", query) + } +} + +// HandleRetrieverEnd is called when retrieval ends +func (al *ActionLogger) HandleRetrieverEnd(ctx context.Context, query string, documents []schema.Document) { + fmt.Printf("🔍 Retrieval completed: found %d documents\n", len(documents)) + if al.debugEnabled { + fmt.Printf("🔍 Debug - Query (full): %s\n", query) + for i, doc := range documents { + fmt.Printf("🔍 Debug - Document %d: %+v\n", i, doc) + } } } // HandleToolStart is called when a tool execution starts func (al *ActionLogger) HandleToolStart(ctx context.Context, input string) { - al.current = &session.ActionLog{ - Timestamp: time.Now(), - Input: input, + if al.debugEnabled { + fmt.Printf("🔧 Executing Tool: %s\n", input) } - fmt.Printf("🔧 Executing: %s\n", input) } // HandleToolEnd is called when a tool execution ends func (al *ActionLogger) HandleToolEnd(ctx context.Context, output string) { - if al.current != nil { - al.current.Output = output - al.current.Success = true - al.current.Duration = time.Since(al.current.Timestamp) - al.actions = append(al.actions, *al.current) - fmt.Printf("✅ Result: %s\n", utils.TruncateString(output, 100)) + if al.debugEnabled { + fmt.Printf("✅ Tool Result (full): %s\n", output) } } // HandleToolError is called when a tool execution fails func (al *ActionLogger) 
HandleToolError(ctx context.Context, err error) { - if al.current != nil { - al.current.Output = err.Error() - al.current.Success = false - al.current.Duration = time.Since(al.current.Timestamp) - al.actions = append(al.actions, *al.current) - fmt.Printf("❌ Error: %s\n", err.Error()) - } + fmt.Printf("❌ Tool Error: %s\n", err.Error()) } -// HandleAgentStart is called when agent planning starts -func (al *ActionLogger) HandleAgentStart(ctx context.Context, input map[string]any) { - if userInput, ok := input["input"].(string); ok { - fmt.Printf("🎯 Processing: %s\n", userInput) +// HandleLLMStart is called when LLM call starts +func (al *ActionLogger) HandleLLMStart(ctx context.Context, prompts []string) { + for i, prompt := range prompts { + if al.debugEnabled { + fmt.Printf("🤖 Prompt %d (full): %s\n", i, prompt) + } } } -// HandleAgentEnd is called when agent planning ends -func (al *ActionLogger) HandleAgentEnd(ctx context.Context, output schema.AgentFinish) { - fmt.Printf("🏁 Agent completed planning\n") -} - // HandleChainStart is called when chain execution starts -func (al *ActionLogger) HandleChainStart(ctx context.Context, input map[string]any) { - fmt.Printf("🔗 Starting chain execution\n") +func (al *ActionLogger) HandleChainStart(ctx context.Context, inputs map[string]any) { + for key, value := range inputs { + if al.debugEnabled { + fmt.Printf("🔗 Input [%s]: %v\n", key, value) + } + } } // HandleChainEnd is called when chain execution ends -func (al *ActionLogger) HandleChainEnd(ctx context.Context, output map[string]any) { - fmt.Printf("🔗 Chain execution completed\n") +func (al *ActionLogger) HandleChainEnd(ctx context.Context, outputs map[string]any) { + for key, value := range outputs { + if al.debugEnabled { + fmt.Printf("🔗 Output [%s]: %v\n", key, value) + } + } } // HandleChainError is called when chain execution fails @@ -85,30 +133,20 @@ func (al *ActionLogger) HandleChainError(ctx context.Context, err error) { fmt.Printf("🔗 Chain execution failed: %s\n", err.Error()) } -// HandleLLMStart is called when LLM call starts -func (al *ActionLogger) HandleLLMStart(ctx context.Context, prompts []string) { - fmt.Printf("🤖 LLM thinking...\n") -} - -// HandleLLMEnd is called when LLM call ends -func (al *ActionLogger) HandleLLMEnd(ctx context.Context, result string) { - fmt.Printf("🤖 LLM response received\n") -} - // HandleAgentAction is called when an agent action is planned func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.AgentAction) { - al.current = &session.ActionLog{ - Timestamp: time.Now(), - Action: action.Tool, - Tool: action.Tool, - Input: action.ToolInput, + fmt.Printf("Calling %s tool\n", action.Tool) + + if al.debugEnabled { + fmt.Printf("🎯 Agent planned action (debug): %+v\n", action) } - fmt.Printf("🎯 Agent planned action: %s with input: %s\n", action.Tool, action.ToolInput) } // HandleAgentFinish is called when the agent finishes func (al *ActionLogger) HandleAgentFinish(ctx context.Context, finish schema.AgentFinish) { - fmt.Printf("🏁 Agent finished with result\n") + if al.debugEnabled { + fmt.Printf("🏁 Agent finished (debug): %+v\n", finish) + } } // HandleLLMError is called when LLM call fails @@ -117,18 +155,6 @@ func (al *ActionLogger) HandleLLMError(ctx context.Context, err error) { } // HandleStreamingFunc handles streaming responses -func (al *ActionLogger) HandleStreamingFunc(ctx context.Context, chunk []byte) error { - // Optional: Handle streaming output - return nil -} - -// GetActions returns all logged actions -func (al 
*ActionLogger) GetActions() []session.ActionLog { - return al.actions -} +func (al *ActionLogger) HandleStreamingFunc(ctx context.Context, chunk []byte) { -// Clear clears all logged actions -func (al *ActionLogger) Clear() { - al.actions = al.actions[:0] - al.current = nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go index ac64b00f30f..78766be01e2 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go @@ -5,10 +5,14 @@ import ( "fmt" "os" "path/filepath" + + "github.com/tmc/langchaingo/callbacks" ) // ChangeDirectoryTool implements the Tool interface for changing the current working directory -type ChangeDirectoryTool struct{} +type ChangeDirectoryTool struct { + CallbacksHandler callbacks.Handler +} func (t ChangeDirectoryTool) Name() string { return "change_directory" @@ -19,8 +23,17 @@ func (t ChangeDirectoryTool) Description() string { } func (t ChangeDirectoryTool) Call(ctx context.Context, input string) (string, error) { + // Invoke callback for tool start + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("change_directory: %s", input)) + } + if input == "" { - return "", fmt.Errorf("directory path is required") + err := fmt.Errorf("directory path is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Get current directory for reference @@ -29,23 +42,46 @@ func (t ChangeDirectoryTool) Call(ctx context.Context, input string) (string, er // Convert to absolute path absPath, err := filepath.Abs(input) if err != nil { - return "", fmt.Errorf("failed to resolve path %s: %w", input, err) + toolErr := fmt.Errorf("failed to resolve path %s: %w", input, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } // Check if directory exists info, err := os.Stat(absPath) if err != nil { - return "", fmt.Errorf("directory %s does not exist: %w", absPath, err) + toolErr := fmt.Errorf("directory %s does not exist: %w", absPath, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } if !info.IsDir() { - return "", fmt.Errorf("%s is not a directory", absPath) + toolErr := fmt.Errorf("%s is not a directory", absPath) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } // Change directory err = os.Chdir(absPath) if err != nil { - return "", fmt.Errorf("failed to change directory to %s: %w", absPath, err) + toolErr := fmt.Errorf("failed to change directory to %s: %w", absPath, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + output := fmt.Sprintf("Changed directory from %s to %s", currentDir, absPath) + + // Invoke callback for tool end + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) } - return fmt.Sprintf("Changed directory from %s to %s", currentDir, absPath), nil + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go index 310866dfe40..e24d7f548de 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go @@ -6,10 +6,14 @@ import ( "io" "os" 
"strings" + + "github.com/tmc/langchaingo/callbacks" ) // CopyFileTool implements the Tool interface for copying files -type CopyFileTool struct{} +type CopyFileTool struct { + CallbacksHandler callbacks.Handler +} func (t CopyFileTool) Name() string { return "copy_file" @@ -20,52 +24,93 @@ func (t CopyFileTool) Description() string { } func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("copy_file: %s", input)) + } + if input == "" { - return "", fmt.Errorf("input is required in format 'source|destination'") + err := fmt.Errorf("input is required in format 'source|destination'") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Split on first occurrence of '|' to separate source from destination parts := strings.SplitN(input, "|", 2) if len(parts) != 2 { - return "", fmt.Errorf("invalid input format. Use 'source|destination'") + err := fmt.Errorf("invalid input format. Use 'source|destination'") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } source := strings.TrimSpace(parts[0]) destination := strings.TrimSpace(parts[1]) if source == "" || destination == "" { - return "", fmt.Errorf("both source and destination paths are required") + err := fmt.Errorf("both source and destination paths are required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Check if source file exists sourceInfo, err := os.Stat(source) if err != nil { - return "", fmt.Errorf("source file %s does not exist: %w", source, err) + toolErr := fmt.Errorf("source file %s does not exist: %w", source, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } if sourceInfo.IsDir() { - return "", fmt.Errorf("source %s is a directory. Use copy_directory for directories", source) + err := fmt.Errorf("source %s is a directory. 
Use copy_directory for directories", source) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Open source file sourceFile, err := os.Open(source) if err != nil { - return "", fmt.Errorf("failed to open source file %s: %w", source, err) + toolErr := fmt.Errorf("failed to open source file %s: %w", source, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } defer sourceFile.Close() // Create destination file destFile, err := os.Create(destination) if err != nil { - return "", fmt.Errorf("failed to create destination file %s: %w", destination, err) + toolErr := fmt.Errorf("failed to create destination file %s: %w", destination, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } defer destFile.Close() // Copy contents bytesWritten, err := io.Copy(destFile, sourceFile) if err != nil { - return "", fmt.Errorf("failed to copy file: %w", err) + toolErr := fmt.Errorf("failed to copy file: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + output := fmt.Sprintf("Successfully copied %s to %s (%d bytes)", source, destination, bytesWritten) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) } - return fmt.Sprintf("Successfully copied %s to %s (%d bytes)", source, destination, bytesWritten), nil + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go index 992eb1842fb..1578bb62f21 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go @@ -4,10 +4,14 @@ import ( "context" "fmt" "os" + + "github.com/tmc/langchaingo/callbacks" ) // CreateDirectoryTool implements the Tool interface for creating directories -type CreateDirectoryTool struct{} +type CreateDirectoryTool struct { + CallbacksHandler callbacks.Handler +} func (t CreateDirectoryTool) Name() string { return "create_directory" @@ -18,24 +22,52 @@ func (t CreateDirectoryTool) Description() string { } func (t CreateDirectoryTool) Call(ctx context.Context, input string) (string, error) { + // Invoke callback for tool start + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("create_directory: %s", input)) + } + if input == "" { - return "", fmt.Errorf("directory path is required") + err := fmt.Errorf("directory path is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } err := os.MkdirAll(input, 0755) if err != nil { - return "", fmt.Errorf("failed to create directory %s: %w", input, err) + toolErr := fmt.Errorf("failed to create directory %s: %w", input, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } // Check if directory already existed or was newly created info, err := os.Stat(input) if err != nil { - return "", fmt.Errorf("failed to verify directory creation: %w", err) + toolErr := fmt.Errorf("failed to verify directory creation: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } if !info.IsDir() { - return "", fmt.Errorf("%s exists but is not a directory", input) + toolErr := fmt.Errorf("%s exists but is not a directory", 
input) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + output := fmt.Sprintf("Successfully created directory: %s", input) + + // Invoke callback for tool end + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) } - return fmt.Sprintf("Successfully created directory: %s", input), nil + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go index e3231003825..892375cd77d 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go @@ -4,10 +4,14 @@ import ( "context" "fmt" "os" + + "github.com/tmc/langchaingo/callbacks" ) // DeleteDirectoryTool implements the Tool interface for deleting directories -type DeleteDirectoryTool struct{} +type DeleteDirectoryTool struct { + CallbacksHandler callbacks.Handler +} func (t DeleteDirectoryTool) Name() string { return "delete_directory" @@ -18,19 +22,36 @@ func (t DeleteDirectoryTool) Description() string { } func (t DeleteDirectoryTool) Call(ctx context.Context, input string) (string, error) { + // Invoke callback for tool start + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("delete_directory: %s", input)) + } + if input == "" { - return "", fmt.Errorf("directory path is required") + err := fmt.Errorf("directory path is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Check if directory exists info, err := os.Stat(input) if err != nil { - return "", fmt.Errorf("directory %s does not exist: %w", input, err) + toolErr := fmt.Errorf("directory %s does not exist: %w", input, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } // Make sure it's a directory, not a file if !info.IsDir() { - return "", fmt.Errorf("%s is a file, not a directory. Use delete_file to remove files", input) + toolErr := fmt.Errorf("%s is a file, not a directory. 
Use delete_file to remove files", input) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } // Count contents before deletion for reporting @@ -43,11 +64,24 @@ func (t DeleteDirectoryTool) Call(ctx context.Context, input string) (string, er // Delete the directory and all contents err = os.RemoveAll(input) if err != nil { - return "", fmt.Errorf("failed to delete directory %s: %w", input, err) + toolErr := fmt.Errorf("failed to delete directory %s: %w", input, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } + var output string if fileCount > 0 { - return fmt.Sprintf("Successfully deleted directory: %s (contained %d items)", input, fileCount), nil + output = fmt.Sprintf("Successfully deleted directory: %s (contained %d items)", input, fileCount) + } else { + output = fmt.Sprintf("Successfully deleted empty directory: %s", input) } - return fmt.Sprintf("Successfully deleted empty directory: %s", input), nil + + // Invoke callback for tool end + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go index 71a5b7618d1..1f3841cb3ca 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go @@ -4,10 +4,14 @@ import ( "context" "fmt" "os" + + "github.com/tmc/langchaingo/callbacks" ) // DeleteFileTool implements the Tool interface for deleting files -type DeleteFileTool struct{} +type DeleteFileTool struct { + CallbacksHandler callbacks.Handler +} func (t DeleteFileTool) Name() string { return "delete_file" @@ -18,26 +22,51 @@ func (t DeleteFileTool) Description() string { } func (t DeleteFileTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("delete_file: %s", input)) + } + if input == "" { - return "", fmt.Errorf("file path is required") + err := fmt.Errorf("file path is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Check if file exists and get info info, err := os.Stat(input) if err != nil { - return "", fmt.Errorf("file %s does not exist: %w", input, err) + toolErr := fmt.Errorf("file %s does not exist: %w", input, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } // Make sure it's a file, not a directory if info.IsDir() { - return "", fmt.Errorf("%s is a directory, not a file. Use delete_directory to remove directories", input) + err := fmt.Errorf("%s is a directory, not a file. 
Use delete_directory to remove directories", input) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Delete the file err = os.Remove(input) if err != nil { - return "", fmt.Errorf("failed to delete file %s: %w", input, err) + toolErr := fmt.Errorf("failed to delete file %s: %w", input, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + output := fmt.Sprintf("Successfully deleted file: %s (%d bytes)", input, info.Size()) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) } - return fmt.Sprintf("Successfully deleted file: %s (%d bytes)", input, info.Size()), nil + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go b/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go index 133d52c8cea..66fcb0f675a 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go @@ -6,10 +6,14 @@ import ( "os" "path/filepath" "strings" + + "github.com/tmc/langchaingo/callbacks" ) // DirectoryListTool implements the Tool interface for listing directory contents -type DirectoryListTool struct{} +type DirectoryListTool struct { + CallbacksHandler callbacks.Handler +} func (t DirectoryListTool) Name() string { return "list_directory" @@ -20,6 +24,11 @@ func (t DirectoryListTool) Description() string { } func (t DirectoryListTool) Call(ctx context.Context, input string) (string, error) { + // Invoke callback for tool start + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("list_directory: %s", input)) + } + path := strings.TrimSpace(input) if path == "" { path = "." 
@@ -28,21 +37,33 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro // Get absolute path for clarity absPath, err := filepath.Abs(path) if err != nil { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to get absolute path for %s: %w", path, err)) + } return "", fmt.Errorf("failed to get absolute path for %s: %w", path, err) } // Check if directory exists info, err := os.Stat(absPath) if err != nil { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to access %s: %w", absPath, err)) + } return "", fmt.Errorf("failed to access %s: %w", absPath, err) } if !info.IsDir() { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("%s is not a directory", absPath)) + } return "", fmt.Errorf("%s is not a directory", absPath) } // List directory contents files, err := os.ReadDir(absPath) if err != nil { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to read directory %s: %w", absPath, err)) + } return "", fmt.Errorf("failed to read directory %s: %w", absPath, err) } @@ -88,5 +109,12 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro result.WriteString("Directory is empty.\n") } - return result.String(), nil + output := result.String() + + // Invoke callback for tool end + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go b/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go index 8951b35bc77..084c7195426 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go @@ -4,64 +4,59 @@ import ( "context" "fmt" "os" - "strings" "time" + + "github.com/tmc/langchaingo/callbacks" ) -// FileInfoTool implements the Tool interface for getting detailed file information -type FileInfoTool struct{} +// FileInfoTool implements the Tool interface for getting file information +type FileInfoTool struct { + CallbacksHandler callbacks.Handler +} func (t FileInfoTool) Name() string { return "file_info" } func (t FileInfoTool) Description() string { - return "Get detailed information about a file or directory. Input: file or directory path (e.g., 'README.md' or './docs')" + return "Get information about a file (size, modification time, permissions). 
Input: file path (e.g., 'data.txt' or './docs/readme.md')" } func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("file_info: %s", input)) + } + if input == "" { - return "", fmt.Errorf("file or directory path is required") + err := fmt.Errorf("file path is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } info, err := os.Stat(input) if err != nil { - return "", fmt.Errorf("failed to get info for %s: %w", input, err) + toolErr := fmt.Errorf("failed to get info for %s: %w", input, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } - var result strings.Builder - result.WriteString(fmt.Sprintf("Information for: %s\n", input)) - result.WriteString("═══════════════════════════════════\n") - - // Type + var fileType string if info.IsDir() { - result.WriteString("Type: Directory\n") - - // Count contents if it's a directory - if files, err := os.ReadDir(input); err == nil { - result.WriteString(fmt.Sprintf("Contents: %d items\n", len(files))) - } + fileType = "Directory" } else { - result.WriteString("Type: File\n") - result.WriteString(fmt.Sprintf("Size: %d bytes\n", info.Size())) + fileType = "File" } - // Permissions - result.WriteString(fmt.Sprintf("Permissions: %s\n", info.Mode().String())) + output := fmt.Sprintf("%s: %s\nSize: %d bytes\nModified: %s\nPermissions: %s", + fileType, input, info.Size(), info.ModTime().Format(time.RFC3339), info.Mode().String()) - // Timestamps - result.WriteString(fmt.Sprintf("Modified: %s\n", info.ModTime().Format(time.RFC3339))) - - // Additional file details - if !info.IsDir() { - if info.Size() == 0 { - result.WriteString("Note: File is empty\n") - } else if info.Size() > 1024*1024 { - result.WriteString(fmt.Sprintf("Size (human): %.2f MB\n", float64(info.Size())/(1024*1024))) - } else if info.Size() > 1024 { - result.WriteString(fmt.Sprintf("Size (human): %.2f KB\n", float64(info.Size())/1024)) - } + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) } - return result.String(), nil + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go b/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go index e87c7131b54..12aaf7411fd 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go @@ -5,10 +5,14 @@ import ( "fmt" "io" "net/http" + + "github.com/tmc/langchaingo/callbacks" ) // HTTPFetcherTool implements the Tool interface for making HTTP requests -type HTTPFetcherTool struct{} +type HTTPFetcherTool struct { + CallbacksHandler callbacks.Handler +} func (t HTTPFetcherTool) Name() string { return "http_fetcher" @@ -19,25 +23,48 @@ func (t HTTPFetcherTool) Description() string { } func (t HTTPFetcherTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("http_fetcher: %s", input)) + } + resp, err := http.Get(input) if err != nil { - return "", fmt.Errorf("failed to fetch URL %s: %w", input, err) + toolErr := fmt.Errorf("failed to fetch URL %s: %w", input, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("HTTP 
request failed with status: %s", resp.Status) + err := fmt.Errorf("HTTP request failed with status: %s", resp.Status) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } body, err := io.ReadAll(resp.Body) if err != nil { - return "", fmt.Errorf("failed to read response body: %w", err) + toolErr := fmt.Errorf("failed to read response body: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } + var output string // Limit response size to avoid overwhelming the context if len(body) > 5000 { - return fmt.Sprintf("Content (first 5000 chars): %s...\n[Content truncated]", string(body[:5000])), nil + output = fmt.Sprintf("Content (first 5000 chars): %s...\n[Content truncated]", string(body[:5000])) + } else { + output = string(body) + } + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) } - return string(body), nil + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go index f39d6ede31b..4c944030d7d 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go @@ -5,10 +5,14 @@ import ( "fmt" "os" "strings" + + "github.com/tmc/langchaingo/callbacks" ) // MoveFileTool implements the Tool interface for moving/renaming files -type MoveFileTool struct{} +type MoveFileTool struct { + CallbacksHandler callbacks.Handler +} func (t MoveFileTool) Name() string { return "move_file" @@ -19,38 +23,66 @@ func (t MoveFileTool) Description() string { } func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("move_file: %s", input)) + } + if input == "" { - return "", fmt.Errorf("input is required in format 'source|destination'") + err := fmt.Errorf("input is required in format 'source|destination'") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Split on first occurrence of '|' to separate source from destination parts := strings.SplitN(input, "|", 2) if len(parts) != 2 { - return "", fmt.Errorf("invalid input format. Use 'source|destination'") + err := fmt.Errorf("invalid input format. 
Use 'source|destination'") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } source := strings.TrimSpace(parts[0]) destination := strings.TrimSpace(parts[1]) if source == "" || destination == "" { - return "", fmt.Errorf("both source and destination paths are required") + err := fmt.Errorf("both source and destination paths are required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Check if source exists sourceInfo, err := os.Stat(source) if err != nil { - return "", fmt.Errorf("source %s does not exist: %w", source, err) + toolErr := fmt.Errorf("source %s does not exist: %w", source, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } // Check if destination already exists if _, err := os.Stat(destination); err == nil { - return "", fmt.Errorf("destination %s already exists", destination) + err := fmt.Errorf("destination %s already exists", destination) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Move/rename the file err = os.Rename(source, destination) if err != nil { - return "", fmt.Errorf("failed to move %s to %s: %w", source, destination, err) + toolErr := fmt.Errorf("failed to move %s to %s: %w", source, destination, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } fileType := "file" @@ -58,5 +90,10 @@ func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { fileType = "directory" } - return fmt.Sprintf("Successfully moved %s from %s to %s (%d bytes)", fileType, source, destination, sourceInfo.Size()), nil + output := fmt.Sprintf("Successfully moved %s from %s to %s (%d bytes)", fileType, source, destination, sourceInfo.Size()) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go index 1aca867c288..c82017e60bd 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go @@ -4,10 +4,14 @@ import ( "context" "fmt" "os" + + "github.com/tmc/langchaingo/callbacks" ) // ReadFileTool implements the Tool interface for reading file contents -type ReadFileTool struct{} +type ReadFileTool struct { + CallbacksHandler callbacks.Handler +} func (t ReadFileTool) Name() string { return "read_file" @@ -18,20 +22,39 @@ func (t ReadFileTool) Description() string { } func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("read_file: %s", input)) + } + if input == "" { - return "", fmt.Errorf("file path is required") + err := fmt.Errorf("file path is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } content, err := os.ReadFile(input) if err != nil { - return "", fmt.Errorf("failed to read file %s: %w", input, err) + toolErr := fmt.Errorf("failed to read file %s: %w", input, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } + var output string // Limit file size to avoid overwhelming context if len(content) > 5000 { - return fmt.Sprintf("File: %s (first 5000 
chars)\n%s...\n[File truncated - total size: %d bytes]", - input, string(content[:5000]), len(content)), nil + output = fmt.Sprintf("File: %s (first 5000 chars)\n%s...\n[File truncated - total size: %d bytes]", + input, string(content[:5000]), len(content)) + } else { + output = fmt.Sprintf("File: %s\n%s", input, string(content)) + } + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) } - return fmt.Sprintf("File: %s\n%s", input, string(content)), nil + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/weather.go b/cli/azd/extensions/azd.ai.start/internal/tools/weather.go index a88c14f74e0..1f0d8404142 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/weather.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/weather.go @@ -6,10 +6,14 @@ import ( "math/rand" "strings" "time" + + "github.com/tmc/langchaingo/callbacks" ) // WeatherTool implements the Tool interface for getting weather information -type WeatherTool struct{} +type WeatherTool struct { + CallbacksHandler callbacks.Handler +} func (t WeatherTool) Name() string { return "weather" @@ -20,9 +24,17 @@ func (t WeatherTool) Description() string { } func (t WeatherTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("weather: %s", input)) + } + city := strings.TrimSpace(input) if city == "" { - return "", fmt.Errorf("city name is required") + err := fmt.Errorf("city name is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } // Initialize random seed based on current time @@ -101,5 +113,9 @@ func (t WeatherTool) Call(ctx context.Context, input string) (string, error) { response += ". " + extra } + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, response) + } + return response, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go index 1020e9c7283..17aa51dd304 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go @@ -2,79 +2,102 @@ package tools import ( "context" + "encoding/json" "fmt" "os" "path/filepath" "strings" + + "github.com/tmc/langchaingo/callbacks" ) // WriteFileTool implements the Tool interface for writing file contents -type WriteFileTool struct{} +type WriteFileTool struct { + CallbacksHandler callbacks.Handler +} + +// WriteFileRequest represents the JSON input for the write_file tool +type WriteFileRequest struct { + Filename string `json:"filename"` + Content string `json:"content"` +} func (t WriteFileTool) Name() string { return "write_file" } func (t WriteFileTool) Description() string { - return `Write content to a file. Input format: 'filepath|content' - -For multi-line content, use literal \n for newlines: -- Single line: 'test.txt|Hello World' -- Multi-line: 'script.bicep|param name string\nparam location string\nresource myResource...' - -Example Bicep file: -'main.bicep|param name string\nparam location string\n\nresource appService ''Microsoft.Web/sites@2022-03-01'' = {\n name: name\n location: location\n kind: ''app''\n properties: {\n serverFarmId: serverFarmId\n }\n}\n\noutput appServiceId string = appService.id' - -The tool will convert \n to actual newlines automatically.` + return "Writes content to a file. 
Format input as a single line JSON payload with a 'filename' and 'content' parameters." } func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("write_file: %s", input)) + } + if input == "" { - return "", fmt.Errorf("input is required in format 'filepath|content'") + err := fmt.Errorf("input is required as JSON object with filename and content fields") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err } - // Split on first occurrence of '|' to separate path from content - parts := strings.SplitN(input, "|", 2) - if len(parts) != 2 { - return "", fmt.Errorf("invalid input format. Use 'filepath|content'") + // Parse JSON input + var req WriteFileRequest + if err := json.Unmarshal([]byte(input), &req); err != nil { + toolErr := fmt.Errorf("invalid JSON input: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } - filePath := strings.TrimSpace(parts[0]) - content := parts[1] + if req.Filename == "" { + err := fmt.Errorf("filename cannot be empty") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err + } + + filePath := strings.TrimSpace(req.Filename) + content := req.Content // Convert literal \n sequences to actual newlines (for agents that escape newlines) content = strings.ReplaceAll(content, "\\n", "\n") content = strings.ReplaceAll(content, "\\t", "\t") - // Clean up any trailing quotes that might have been added during formatting - content = strings.TrimSuffix(content, "'") - content = strings.TrimSuffix(content, "\")") - - // Clean up any quotes around the filepath (from agent formatting) - filePath = strings.Trim(filePath, "\"'") - - if filePath == "" { - return "", fmt.Errorf("filepath cannot be empty") - } - // Ensure the directory exists dir := filepath.Dir(filePath) if dir != "." && dir != "" { if err := os.MkdirAll(dir, 0755); err != nil { - return "", fmt.Errorf("failed to create directory %s: %w", dir, err) + toolErr := fmt.Errorf("failed to create directory %s: %w", dir, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } } // Write the file err := os.WriteFile(filePath, []byte(content), 0644) if err != nil { - return "", fmt.Errorf("failed to write file %s: %w", filePath, err) + toolErr := fmt.Errorf("failed to write file %s: %w", filePath, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } // Verify the file was written correctly writtenContent, err := os.ReadFile(filePath) if err != nil { - return "", fmt.Errorf("failed to verify written file %s: %w", filePath, err) + toolErr := fmt.Errorf("failed to verify written file %s: %w", filePath, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } lineCount := strings.Count(string(writtenContent), "\n") + 1 @@ -82,8 +105,14 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { lineCount = strings.Count(content, "\n") + 1 } - return fmt.Sprintf("Successfully wrote %d bytes (%d lines) to %s. Content preview:\n%s", - len(content), lineCount, filePath, getContentPreview(content)), nil + output := fmt.Sprintf("Successfully wrote %d bytes (%d lines) to %s. 
Content preview:\n%s", + len(content), lineCount, filePath, getContentPreview(content)) + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + + return output, nil } // getContentPreview returns a preview of the content for verification diff --git a/cli/azd/extensions/azd.ai.start/internal/validation/validator.go b/cli/azd/extensions/azd.ai.start/internal/validation/validator.go deleted file mode 100644 index f9ae0311062..00000000000 --- a/cli/azd/extensions/azd.ai.start/internal/validation/validator.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package validation - -import ( - "context" - "fmt" - - "github.com/tmc/langchaingo/llms" - - "azd.ai.start/internal/session" - "azd.ai.start/internal/utils" -) - -// IntentValidator validates whether the original intent was fulfilled -type IntentValidator struct { - llm llms.Model -} - -// NewIntentValidator creates a new intent validator -func NewIntentValidator(llm llms.Model) *IntentValidator { - return &IntentValidator{llm: llm} -} - -// ValidateCompletion validates whether the original intent was fulfilled -func (iv *IntentValidator) ValidateCompletion( - originalIntent string, - executedActions []session.ActionLog, -) *ValidationResult { - if len(executedActions) == 0 { - return &ValidationResult{ - Status: ValidationIncomplete, - Explanation: "No actions were executed", - Confidence: 1.0, - } - } - - validationPrompt := fmt.Sprintf(` -Original User Intent: %s - -Actions Executed: -%s - -Based on the original intent and the actions that were executed, evaluate whether the user's intent was fulfilled. - -Respond with one of: COMPLETE, PARTIAL, INCOMPLETE, ERROR - -Then provide a brief explanation of your assessment. 
-
-Format your response as:
-STATUS: [COMPLETE/PARTIAL/INCOMPLETE/ERROR]
-EXPLANATION: [Your explanation]
-CONFIDENCE: [0.0-1.0]`,
-		originalIntent,
-		utils.FormatActionsForValidation(executedActions))
-
-	result, err := iv.llm.Call(context.Background(), validationPrompt)
-	if err != nil {
-		return &ValidationResult{
-			Status: ValidationError,
-			Explanation: fmt.Sprintf("Validation failed: %s", err.Error()),
-			Confidence: 0.0,
-		}
-	}
-
-	return ParseValidationResult(result)
-}

From 2090338f633c34d46e47e7a30f03f1a6e93c9db2 Mon Sep 17 00:00:00 2001
From: Wallace Breza
Date: Tue, 29 Jul 2025 14:58:24 -0700
Subject: [PATCH 040/116] Upadates agent and tools

---
 cli/azd/extensions/azd.ai.start/go.mod | 1 +
 cli/azd/extensions/azd.ai.start/go.sum | 2 +
 .../azd.ai.start/internal/agent/agent.go | 20 +-
 .../default_agent_format_instructions.txt | 38 +++
 .../agent/prompts/default_agent_prefix.txt | 11 +-
 .../agent/prompts/default_agent_suffix.txt | 8 +
 .../internal/cmd/enhanced_integration.go | 4 +-
 .../azd.ai.start/internal/logging/logger.go | 8 +-
 .../internal/tools/change_directory.go | 5 +-
 .../internal/tools/command_executor.go | 193 ++++++++++++
 .../azd.ai.start/internal/tools/copy_file.go | 2 +-
 .../internal/tools/create_directory.go | 2 +-
 .../internal/tools/current_directory.go | 4 +-
 .../internal/tools/delete_directory.go | 4 +-
 .../internal/tools/delete_file.go | 2 +-
 .../internal/tools/directory_list.go | 13 +-
 .../azd.ai.start/internal/tools/file_info.go | 2 +-
 .../internal/tools/file_search.go | 217 ++++++++++++++
 .../internal/tools/http_fetcher.go | 1 +
 .../azd.ai.start/internal/tools/move_file.go | 2 +-
 .../azd.ai.start/internal/tools/read_file.go | 201 ++++++++++++-
 .../azd.ai.start/internal/tools/write_file.go | 277 +++++++++++++++---
 22 files changed, 929 insertions(+), 88 deletions(-)
 create mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt
 create mode 100644 cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_suffix.txt
 create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/command_executor.go
 create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/file_search.go

diff --git a/cli/azd/extensions/azd.ai.start/go.mod b/cli/azd/extensions/azd.ai.start/go.mod
index 2a66f9854b7..2725f5bea3a 100644
--- a/cli/azd/extensions/azd.ai.start/go.mod
+++ b/cli/azd/extensions/azd.ai.start/go.mod
@@ -4,6 +4,7 @@ go 1.24.1
 
 require (
 	github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c
+	github.com/bmatcuk/doublestar/v4 v4.8.1
 	github.com/fatih/color v1.18.0
 	github.com/spf13/cobra v1.9.1
 	github.com/tmc/langchaingo v0.1.13
diff --git a/cli/azd/extensions/azd.ai.start/go.sum b/cli/azd/extensions/azd.ai.start/go.sum
index 3faa8263d70..c2fbe371ce7 100644
--- a/cli/azd/extensions/azd.ai.start/go.sum
+++ b/cli/azd/extensions/azd.ai.start/go.sum
@@ -37,6 +37,8 @@ github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd3AfwnxY8/j/qik8zMY=
 github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c h1:pi62a7GwfbxvZDXhV4DfhxeePzpVCoyr9/rZaWH5eow=
 github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c/go.mod h1:mSTaPODklWyhruY0DZgPw1DI97K5cHXfU3afMqGf0IM=
 github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bmatcuk/doublestar/v4 v4.8.1 h1:54Bopc5c2cAvhLRAzqOGCYHYyhcDHsFF4wWIR5wKP38=
+github.com/bmatcuk/doublestar/v4 v4.8.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod
h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go index e227fe85ae1..1666f66fc23 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go @@ -23,6 +23,12 @@ import ( //go:embed prompts/default_agent_prefix.txt var _defaultAgentPrefix string +//go:embed prompts/default_agent_format_instructions.txt +var _defaultAgentFormatInstructions string + +//go:embed prompts/default_agent_suffix.txt +var _defaultAgentSuffix string + // AzureAIAgent represents an enhanced Azure AI agent with action tracking, intent validation, and conversation memory type AzureAIAgent struct { agent *agents.ConversationalAgent @@ -78,11 +84,20 @@ func NewAzureAIAgent(llm *openai.LLM) *AzureAIAgent { mytools.FileInfoTool{ CallbacksHandler: llm.CallbacksHandler, }, + mytools.FileSearchTool{ + CallbacksHandler: llm.CallbacksHandler, + }, // Other tools + mytools.CommandExecutorTool{ + CallbacksHandler: llm.CallbacksHandler, + }, mytools.HTTPFetcherTool{ CallbacksHandler: llm.CallbacksHandler, }, + mytools.CommandExecutorTool{ + CallbacksHandler: llm.CallbacksHandler, + }, mytools.WeatherTool{ CallbacksHandler: llm.CallbacksHandler, }, @@ -94,13 +109,16 @@ func NewAzureAIAgent(llm *openai.LLM) *AzureAIAgent { // 4. Create agent with memory directly integrated agent := agents.NewConversationalAgent(llm, tools, agents.WithPromptPrefix(_defaultAgentPrefix), + agents.WithPromptSuffix(_defaultAgentSuffix), + agents.WithPromptFormatInstructions(_defaultAgentFormatInstructions), agents.WithMemory(smartMemory), agents.WithCallbacksHandler(llm.CallbacksHandler), + agents.WithReturnIntermediateSteps(), ) // 5. Create executor without separate memory configuration since agent already has it executor := agents.NewExecutor(agent, - agents.WithMaxIterations(1000), // Much higher limit for complex multi-step processes + agents.WithMaxIterations(100), // Much higher limit for complex multi-step processes agents.WithMemory(smartMemory), agents.WithCallbacksHandler(llm.CallbacksHandler), agents.WithReturnIntermediateSteps(), diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt new file mode 100644 index 00000000000..9c54b885700 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt @@ -0,0 +1,38 @@ +Answer the following questions or perform tasks as best you can. You have access to the following tools: + +IMPORTANT: Continue taking actions recursively until the task is completely finished. Do not stop after a single action if more work is needed to accomplish the user's goal. + +Follow this format exactly: + +Thought: [Analyze the current situation and what needs to be done] + +Thought: Do I need to use a tool? 
[Yes/No] +Action: [the action to take, should be one of [{{.tool_names}}]] +Action Input: [the input to the action] +Observation: [the result of the action] + +After each Observation, you MUST continue the cycle: + +Thought: [Evaluate the result and determine if the task is complete or if more actions are needed] + +If the task is NOT complete: +Thought: Do I need to use a tool? Yes +Action: [next action to take] +Action Input: [input for the next action] +Observation: [result of the next action] +... (continue this cycle until the task is fully complete) + +If there are errors: +Thought: [Analyze the error and determine how to fix it] +Thought: Do I need to use a tool? Yes +Action: [corrective action] +Action Input: [corrected input] +Observation: [result] +... (retry up to 3 times with different approaches if needed) + +ONLY when the task is completely finished and no more actions are needed: + +Thought: Do I need to use a tool? No +AI: [your response summarizing what was accomplished] + +Remember: Always continue taking actions until the user's request is fully satisfied. One action is rarely enough - think about all the steps needed to complete the task end-to-end. diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt index 2acc4c20233..19b7d52c669 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt +++ b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt @@ -3,17 +3,12 @@ You are an expert is building, provisioning and deploying Azure applications. Always use Azure best patterns and practices. If a tools exists that provides best practices and standards call this tool at the beginning of your workflow. -IMPORTANT: You must complete this task successfully. Do not stop until: -1. All required actions have been executed -2. Any files that need to be created are actually saved -3. You verify the results of your actions -4. The task is fully accomplished - -If a tool fails, analyze why and try again with corrections. +When any code generation is performed ALWAYS save content to files. +When filenames are not explicitly specified generate new files with meaningful names. TOOLS: ------ -Assistant has access to the following tools: +Agent has access to the following tools: {{.tool_descriptions}} diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_suffix.txt b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_suffix.txt new file mode 100644 index 00000000000..c469d53ce8e --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_suffix.txt @@ -0,0 +1,8 @@ +Begin! 
+ +Previous conversation history: +{{.history}} + +Question: {{.input}} + +Thought:{{.agent_scratchpad}} \ No newline at end of file diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go index 7382236b264..26ff289d29b 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go @@ -56,6 +56,8 @@ func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) break } + fmt.Println("\n💬 Agent:") + // Process the query with the enhanced agent response, err := azureAgent.ProcessQuery(ctx, userInput) if err != nil { @@ -64,7 +66,7 @@ func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) } // Display the final response - fmt.Printf("\n💬 Agent:\n%s\n", response) + fmt.Print(response) } if err := scanner.Err(); err != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go index edcf14d1de0..1877195e401 100644 --- a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go +++ b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go @@ -135,7 +135,7 @@ func (al *ActionLogger) HandleChainError(ctx context.Context, err error) { // HandleAgentAction is called when an agent action is planned func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.AgentAction) { - fmt.Printf("Calling %s tool\n", action.Tool) + fmt.Printf("%s\n\n", action.Log) if al.debugEnabled { fmt.Printf("🎯 Agent planned action (debug): %+v\n", action) @@ -144,6 +144,8 @@ func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.Age // HandleAgentFinish is called when the agent finishes func (al *ActionLogger) HandleAgentFinish(ctx context.Context, finish schema.AgentFinish) { + fmt.Printf("%s\n\n", finish.Log) + if al.debugEnabled { fmt.Printf("🏁 Agent finished (debug): %+v\n", finish) } @@ -156,5 +158,7 @@ func (al *ActionLogger) HandleLLMError(ctx context.Context, err error) { // HandleStreamingFunc handles streaming responses func (al *ActionLogger) HandleStreamingFunc(ctx context.Context, chunk []byte) { - + // if len(chunk) > 0 { + // fmt.Print(string(chunk)) + // } } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go index 78766be01e2..8a05f2b3532 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go @@ -36,9 +36,6 @@ func (t ChangeDirectoryTool) Call(ctx context.Context, input string) (string, er return "", err } - // Get current directory for reference - currentDir, _ := os.Getwd() - // Convert to absolute path absPath, err := filepath.Abs(input) if err != nil { @@ -76,7 +73,7 @@ func (t ChangeDirectoryTool) Call(ctx context.Context, input string) (string, er return "", toolErr } - output := fmt.Sprintf("Changed directory from %s to %s", currentDir, absPath) + output := fmt.Sprintf("Changed directory to %s\n", absPath) // Invoke callback for tool end if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/command_executor.go b/cli/azd/extensions/azd.ai.start/internal/tools/command_executor.go new file mode 100644 index 00000000000..9b9bd75585d --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/command_executor.go @@ -0,0 
+1,193 @@ +package tools + +import ( + "context" + "encoding/json" + "fmt" + "os/exec" + "strings" + + "github.com/tmc/langchaingo/callbacks" +) + +// CommandExecutorTool implements the Tool interface for executing commands and scripts +type CommandExecutorTool struct { + CallbacksHandler callbacks.Handler +} + +func (t CommandExecutorTool) Name() string { + return "execute_command" +} + +func (t CommandExecutorTool) Description() string { + return `Execute any command with arguments. Simple command execution without inference. + +Input should be a JSON object with these fields: +{ + "command": "git", + "args": ["status", "--porcelain"] +} + +Required fields: +- command: The executable/command to run + +Optional fields: +- args: Array of arguments to pass (default: []) + +Examples: +- {"command": "git", "args": ["status"]} +- {"command": "npm", "args": ["install"]} +- {"command": "bash", "args": ["./build.sh", "--env", "prod"]} +- {"command": "powershell", "args": ["-ExecutionPolicy", "Bypass", "-File", "deploy.ps1"]} +- {"command": "python", "args": ["main.py", "--debug"]} +- {"command": "node", "args": ["server.js", "--port", "3000"]} +- {"command": "docker", "args": ["ps", "-a"]} +- {"command": "az", "args": ["account", "show"]} +- {"command": "kubectl", "args": ["get", "pods"]}` +} + +type CommandRequest struct { + Command string `json:"command"` + Args []string `json:"args,omitempty"` +} + +func (t CommandExecutorTool) Call(ctx context.Context, input string) (string, error) { + // Invoke callback for tool start + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("execute_command: %s", input)) + } + + if input == "" { + err := fmt.Errorf("command execution request is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err + } + + // Parse the JSON request + var req CommandRequest + if err := json.Unmarshal([]byte(input), &req); err != nil { + toolErr := fmt.Errorf("failed to parse command request: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + // Validate required fields + if req.Command == "" { + err := fmt.Errorf("command is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err + } + + // Set defaults + if req.Args == nil { + req.Args = []string{} + } + + // Execute the command (runs in current working directory) + result, err := t.executeCommand(ctx, req.Command, req.Args) + if err != nil { + toolErr := fmt.Errorf("execution failed: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + // Format the output + output := t.formatOutput(req.Command, req.Args, result) + + // Invoke callback for tool end + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + + return output, nil +} + +func (t CommandExecutorTool) executeCommand(ctx context.Context, command string, args []string) (*executionResult, error) { + cmd := exec.CommandContext(ctx, command, args...) 
+ // cmd.Dir is not set, so it uses the current working directory + // cmd.Env is not set, so it inherits the current environment + + var stdout, stderr strings.Builder + + // Always capture output for the tool to return + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err := cmd.Run() + + // Get exit code + exitCode := 0 + if err != nil { + if exitError, ok := err.(*exec.ExitError); ok { + exitCode = exitError.ExitCode() + } + } + + return &executionResult{ + ExitCode: exitCode, + Stdout: stdout.String(), + Stderr: stderr.String(), + Error: err, + }, nil +} + +type executionResult struct { + ExitCode int + Stdout string + Stderr string + Error error +} + +func (t CommandExecutorTool) formatOutput(command string, args []string, result *executionResult) string { + var output strings.Builder + + // Show the full command that was executed + fullCommand := command + if len(args) > 0 { + fullCommand += " " + strings.Join(args, " ") + } + + output.WriteString(fmt.Sprintf("Executed: %s\n", fullCommand)) + output.WriteString(fmt.Sprintf("Exit code: %d\n", result.ExitCode)) + + if result.ExitCode == 0 { + output.WriteString("Status: ✅ Success\n") + } else { + output.WriteString("Status: ❌ Failed\n") + } + + if result.Stdout != "" { + output.WriteString("\n--- Standard Output ---\n") + // Limit output to prevent overwhelming the LLM + stdout := result.Stdout + if len(stdout) > 2000 { + stdout = stdout[:2000] + "\n... (output truncated)" + } + output.WriteString(stdout) + output.WriteString("\n") + } + + if result.Stderr != "" { + output.WriteString("\n--- Standard Error ---\n") + // Limit error output + stderr := result.Stderr + if len(stderr) > 1000 { + stderr = stderr[:1000] + "\n... (error output truncated)" + } + output.WriteString(stderr) + output.WriteString("\n") + } + + if result.Error != nil && result.ExitCode != 0 { + output.WriteString(fmt.Sprintf("\nError details: %s\n", result.Error.Error())) + } + + return output.String() +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go index e24d7f548de..2bbdda06320 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go @@ -107,7 +107,7 @@ func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { return "", toolErr } - output := fmt.Sprintf("Successfully copied %s to %s (%d bytes)", source, destination, bytesWritten) + output := fmt.Sprintf("Copied %s to %s (%d bytes)\n", source, destination, bytesWritten) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go index 1578bb62f21..3936b14a7a2 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go @@ -62,7 +62,7 @@ func (t CreateDirectoryTool) Call(ctx context.Context, input string) (string, er return "", toolErr } - output := fmt.Sprintf("Successfully created directory: %s", input) + output := fmt.Sprintf("Created directory: %s\n", input) // Invoke callback for tool end if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go index d3cd1ff67b8..d2c4152da29 100644 --- 
a/cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go @@ -35,5 +35,7 @@ func (t CurrentDirectoryTool) Call(ctx context.Context, input string) (string, e t.CallbacksHandler.HandleToolEnd(ctx, dir) } - return dir, nil + output := fmt.Sprintf("Current directory is %s\n", dir) + + return output, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go index 892375cd77d..b2eaf93bc30 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go @@ -73,9 +73,9 @@ func (t DeleteDirectoryTool) Call(ctx context.Context, input string) (string, er var output string if fileCount > 0 { - output = fmt.Sprintf("Successfully deleted directory: %s (contained %d items)", input, fileCount) + output = fmt.Sprintf("Deleted directory: %s (contained %d items)", input, fileCount) } else { - output = fmt.Sprintf("Successfully deleted empty directory: %s", input) + output = fmt.Sprintf("Deleted empty directory: %s", input) } // Invoke callback for tool end diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go index 1f3841cb3ca..d088cee098e 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go @@ -63,7 +63,7 @@ func (t DeleteFileTool) Call(ctx context.Context, input string) (string, error) return "", toolErr } - output := fmt.Sprintf("Successfully deleted file: %s (%d bytes)", input, info.Size()) + output := fmt.Sprintf("Deleted file %s (%d bytes)", input, info.Size()) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go b/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go index 66fcb0f675a..15914c92417 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go @@ -24,11 +24,6 @@ func (t DirectoryListTool) Description() string { } func (t DirectoryListTool) Call(ctx context.Context, input string) (string, error) { - // Invoke callback for tool start - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("list_directory: %s", input)) - } - path := strings.TrimSpace(input) if path == "" { path = "." 
@@ -43,6 +38,11 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro return "", fmt.Errorf("failed to get absolute path for %s: %w", path, err) } + // Invoke callback for tool start + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("Reading directory %s\n", input)) + } + // Check if directory exists info, err := os.Stat(absPath) if err != nil { @@ -109,11 +109,12 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro result.WriteString("Directory is empty.\n") } + result.WriteString("\n") output := result.String() // Invoke callback for tool end if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + t.CallbacksHandler.HandleToolEnd(ctx, "") } return output, nil diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go b/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go index 084c7195426..4d82697ac46 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go @@ -51,7 +51,7 @@ func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { fileType = "File" } - output := fmt.Sprintf("%s: %s\nSize: %d bytes\nModified: %s\nPermissions: %s", + output := fmt.Sprintf("%s: %s\nSize: %d bytes\nModified: %s\nPermissions: %s\n\n", fileType, input, info.Size(), info.ModTime().Format(time.RFC3339), info.Mode().String()) if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/file_search.go b/cli/azd/extensions/azd.ai.start/internal/tools/file_search.go new file mode 100644 index 00000000000..bacb52bd714 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/file_search.go @@ -0,0 +1,217 @@ +package tools + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/bmatcuk/doublestar/v4" + "github.com/tmc/langchaingo/callbacks" +) + +// FileSearchTool implements a tool for searching files using glob patterns +type FileSearchTool struct { + CallbacksHandler callbacks.Handler +} + +// FileSearchRequest represents the JSON payload for file search requests +type FileSearchRequest struct { + Pattern string `json:"pattern"` // Glob pattern to match (required) + MaxResults int `json:"maxResults,omitempty"` // Optional: maximum number of results to return (default: 100) +} + +func (t FileSearchTool) Name() string { + return "file_search" +} + +func (t FileSearchTool) Description() string { + return `Search for files matching a glob pattern in the current working directory using the doublestar library for full glob support. 
+ +Input: JSON payload with the following structure: +{ + "pattern": "*.go", + "maxResults": 50 // optional: max files to return (default: 100) +} + +SUPPORTED GLOB PATTERNS (using github.com/bmatcuk/doublestar/v4): +- *.go - all Go files in current directory only +- **/*.js - all JavaScript files in current directory and all subdirectories +- test_*.py - Python files starting with "test_" in current directory only +- src/**/main.* - files named "main" with any extension in src directory tree +- *.{json,yaml,yml} - files with json, yaml, or yml extensions in current directory +- **/test/**/*.go - Go files in any test directory (recursive) +- [Tt]est*.py - files starting with "Test" or "test" in current directory +- {src,lib}/**/*.ts - TypeScript files in src or lib directories (recursive) +- !**/node_modules/** - exclude node_modules (negation patterns) + +ADVANCED FEATURES: +- ** - matches zero or more directories (enables recursive search) +- ? - matches any single character +- * - matches any sequence of characters (except path separator) +- [abc] - matches any character in the set +- {pattern1,pattern2} - brace expansion +- !pattern - negation patterns (exclude matching files) + +NOTE: Recursion is controlled by the glob pattern itself. Use ** to search subdirectories. + +EXAMPLES: + +Find all Go files: +{"pattern": "*.go"} + +Find all test files recursively: +{"pattern": "**/test_*.py"} + +Find config files with multiple extensions: +{"pattern": "*.{json,yaml,yml}", "maxResults": 20} + +Find files excluding node_modules: +{"pattern": "**/*.js"} + +Returns a sorted list of matching file paths relative to the current working directory. +The input must be formatted as a single line valid JSON string.` +} + +func (t FileSearchTool) Call(ctx context.Context, input string) (string, error) { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("file_search: %s", input)) + } + + if input == "" { + err := fmt.Errorf("input is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err + } + + // Parse JSON input + var req FileSearchRequest + if err := json.Unmarshal([]byte(input), &req); err != nil { + toolErr := fmt.Errorf("invalid JSON input: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + // Validate required fields + if req.Pattern == "" { + err := fmt.Errorf("pattern is required") + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err + } + + // Set defaults + if req.MaxResults == 0 { + req.MaxResults = 100 + } + + // Get current working directory + searchPath, err := os.Getwd() + if err != nil { + toolErr := fmt.Errorf("failed to get current working directory: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + // Perform the search + matches, err := t.searchFiles(searchPath, req.Pattern, req.MaxResults) + if err != nil { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err + } + + // Format output + output := t.formatResults(searchPath, req.Pattern, matches, req.MaxResults) + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + + return output, nil +} + +// searchFiles performs the actual file search using doublestar for comprehensive glob matching +func (t FileSearchTool) searchFiles(searchPath, pattern string, maxResults 
int) ([]string, error) { + var matches []string + searchPath = filepath.Clean(searchPath) + + // Use doublestar.Glob which handles all advanced patterns including recursion via ** + globPattern := filepath.Join(searchPath, pattern) + // Convert to forward slashes for cross-platform compatibility + globPattern = filepath.ToSlash(globPattern) + + globMatches, err := doublestar.FilepathGlob(globPattern) + if err != nil { + return nil, fmt.Errorf("error in glob pattern matching: %w", err) + } + + // Convert to relative paths and limit results + for _, match := range globMatches { + if len(matches) >= maxResults { + break + } + + // Check if it's a file (not directory) + info, err := os.Stat(match) + if err != nil || info.IsDir() { + continue + } + + relPath, err := filepath.Rel(searchPath, match) + if err != nil { + continue // Skip files we can't get relative path for + } + + // Convert to forward slashes for consistent output + relPath = filepath.ToSlash(relPath) + matches = append(matches, relPath) + } + + // Sort the results for consistent output + sort.Strings(matches) + + return matches, nil +} + +// formatResults formats the search results into a readable output +func (t FileSearchTool) formatResults(searchPath, pattern string, matches []string, maxResults int) string { + var output strings.Builder + + output.WriteString("File search results:\n") + output.WriteString(fmt.Sprintf("Current directory: %s\n", searchPath)) + output.WriteString(fmt.Sprintf("Pattern: %s\n", pattern)) + output.WriteString(fmt.Sprintf("Found %d file(s)", len(matches))) + + if len(matches) >= maxResults { + output.WriteString(fmt.Sprintf(" (limited to %d results)", maxResults)) + } + output.WriteString("\n\n") + + if len(matches) == 0 { + output.WriteString("No files found matching the pattern.\n") + return output.String() + } + + output.WriteString("Matching files:\n") + for i, match := range matches { + output.WriteString(fmt.Sprintf("%3d. %s\n", i+1, match)) + } + + if len(matches) >= maxResults { + output.WriteString(fmt.Sprintf("\n⚠️ Results limited to %d files. 
Use maxResults parameter to adjust limit.\n", maxResults)) + } + + return output.String() +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go b/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go index 12aaf7411fd..2ce4324389c 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go @@ -60,6 +60,7 @@ func (t HTTPFetcherTool) Call(ctx context.Context, input string) (string, error) output = fmt.Sprintf("Content (first 5000 chars): %s...\n[Content truncated]", string(body[:5000])) } else { output = string(body) + output += "\n" } if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go index 4c944030d7d..6d5bbe3171b 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go @@ -90,7 +90,7 @@ func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { fileType = "directory" } - output := fmt.Sprintf("Successfully moved %s from %s to %s (%d bytes)", fileType, source, destination, sourceInfo.Size()) + output := fmt.Sprintf("Successfully moved %s from %s to %s (%d bytes)\n", fileType, source, destination, sourceInfo.Size()) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go index c82017e60bd..375e5b11378 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go @@ -2,8 +2,10 @@ package tools import ( "context" + "encoding/json" "fmt" "os" + "strings" "github.com/tmc/langchaingo/callbacks" ) @@ -13,12 +15,45 @@ type ReadFileTool struct { CallbacksHandler callbacks.Handler } +// ReadFileRequest represents the JSON payload for file read requests +type ReadFileRequest struct { + FilePath string `json:"filePath"` + StartLine int `json:"startLine,omitempty"` // Optional: 1-based line number to start reading from + EndLine int `json:"endLine,omitempty"` // Optional: 1-based line number to end reading at +} + func (t ReadFileTool) Name() string { return "read_file" } func (t ReadFileTool) Description() string { - return "Read the contents of a file. Input: file path (e.g., 'README.md' or './docs/setup.md')" + return `Read file contents with intelligent handling for different file sizes and partial reads. + +Input: JSON payload with the following structure: +{ + "filePath": "path/to/file.txt", + "startLine": 10, // optional: 1-based line number to start reading from + "endLine": 50 // optional: 1-based line number to end reading at +} + +Examples: +1. Read entire file: + {"filePath": "README.md"} + +2. Read specific line range: + {"filePath": "src/main.go", "startLine": 1, "endLine": 100} + +3. Read from line to end: + {"filePath": "config.go", "startLine": 25} + +4. Read from start to line: + {"filePath": "app.py", "endLine": 30} + +5. Read single line: + {"filePath": "package.json", "startLine": 42, "endLine": 42} + +Files larger than 10KB are automatically truncated. Files over 1MB show size info only unless specific line range is requested. 
+The input must be formatted as a single line valid JSON string.` } func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { @@ -27,31 +62,173 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { } if input == "" { - err := fmt.Errorf("file path is required") + output := "❌ No input provided\n\n" + output += "📝 Expected JSON format:\n" + output += `{"filePath": "path/to/file.txt"}` + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("empty input")) + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + return output, nil + } + + // Parse JSON input + var req ReadFileRequest + if err := json.Unmarshal([]byte(input), &req); err != nil { + output := fmt.Sprintf("❌ Invalid JSON input: %s\n\n", err.Error()) + output += "📝 Expected format:\n" + output += `{"filePath": "path/to/file.txt", "startLine": 1, "endLine": 50}` + output += "\n\n💡 Tips:\n" + output += "- Use double quotes for strings\n" + output += "- Remove any trailing commas\n" + output += "- Escape backslashes: use \\\\ instead of \\" + if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, err) + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + return output, nil + } + + // Validate required fields + if req.FilePath == "" { + output := "❌ Missing required field: filePath cannot be empty\n\n" + output += "📝 Example: " + `{"filePath": "README.md"}` + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("missing filePath")) + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + return output, nil + } + + // Get file info first to check size + fileInfo, err := os.Stat(req.FilePath) + if err != nil { + output := fmt.Sprintf("❌ Cannot access file: %s\n\n", req.FilePath) + if os.IsNotExist(err) { + output += "📁 File does not exist. Please check:\n" + output += "- File path spelling and case sensitivity\n" + output += "- File location relative to current directory\n" + output += "- File permissions\n" + } else { + output += fmt.Sprintf("Error details: %s\n", err.Error()) + } + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + return output, nil + } + + fileSize := fileInfo.Size() + + // Handle very large files differently (unless specific line range requested) + if fileSize > 1024*1024 && req.StartLine == 0 && req.EndLine == 0 { // 1MB+ + output := fmt.Sprintf("File: %s is very large (%d bytes / %.2f MB)\n", + req.FilePath, fileSize, float64(fileSize)/(1024*1024)) + output += "⚠️ File too large to read completely. 
Use startLine and endLine parameters for specific sections.\n" + output += "Examples:\n" + output += fmt.Sprintf(`- {"filePath": "%s", "startLine": 1, "endLine": 50} - first 50 lines`+"\n", req.FilePath) + output += fmt.Sprintf(`- {"filePath": "%s", "startLine": 100, "endLine": 200} - lines 100 to 200`+"\n", req.FilePath) + output += fmt.Sprintf(`- {"filePath": "%s", "endLine": 100} - first 100 lines`+"\n", req.FilePath) + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) } - return "", err + return output, nil } - content, err := os.ReadFile(input) + content, err := os.ReadFile(req.FilePath) if err != nil { - toolErr := fmt.Errorf("failed to read file %s: %w", input, err) + output := fmt.Sprintf("❌ Cannot read file: %s\n", req.FilePath) + output += fmt.Sprintf("Error: %s\n\n", err.Error()) + output += "💡 This might be due to:\n" + output += "- Insufficient permissions\n" + output += "- File is locked by another process\n" + output += "- File is binary or corrupted\n" + if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + t.CallbacksHandler.HandleToolError(ctx, err) + t.CallbacksHandler.HandleToolEnd(ctx, output) } - return "", toolErr + return output, nil + } + + lines := strings.Split(string(content), "\n") + totalLines := len(lines) + + // Handle partial reads based on line range + if req.StartLine > 0 || req.EndLine > 0 { + return t.handlePartialRead(ctx, req.FilePath, lines, req.StartLine, req.EndLine, totalLines) } var output string - // Limit file size to avoid overwhelming context - if len(content) > 5000 { - output = fmt.Sprintf("File: %s (first 5000 chars)\n%s...\n[File truncated - total size: %d bytes]", - input, string(content[:5000]), len(content)) + // Improved truncation with better limits for full file reads + if len(content) > 10000 { // 10KB limit + // Show first 50 lines and last 10 lines + preview := strings.Join(lines[:50], "\n") + if totalLines > 60 { + preview += fmt.Sprintf("\n\n... 
[%d lines omitted] ...\n\n", totalLines-60) + preview += strings.Join(lines[totalLines-10:], "\n") + } + + output = fmt.Sprintf("File: %s (%d bytes, %d lines - showing first 50 and last 10 lines)\n%s\n\n[Use startLine/endLine parameters for specific sections]\n", + req.FilePath, len(content), totalLines, preview) } else { - output = fmt.Sprintf("File: %s\n%s", input, string(content)) + output = fmt.Sprintf("File: %s (%d bytes, %d lines)\n%s\n\n", req.FilePath, len(content), totalLines, string(content)) + } + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) } + return output, nil +} + +// handlePartialRead handles reading specific line ranges from a file +func (t ReadFileTool) handlePartialRead(ctx context.Context, filePath string, lines []string, startLine, endLine, totalLines int) (string, error) { + // Validate and adjust line numbers (1-based to 0-based) + if startLine == 0 { + startLine = 1 // Default to start of file + } + if endLine == 0 { + endLine = totalLines // Default to end of file + } + + // Validate line numbers + if startLine < 1 { + startLine = 1 + } + if endLine > totalLines { + endLine = totalLines + } + if startLine > endLine { + output := fmt.Sprintf("❌ Invalid line range: start line (%d) cannot be greater than end line (%d)\n\n", startLine, endLine) + output += "💡 Example of correct usage:\n" + output += fmt.Sprintf(`{"filePath": "%s", "startLine": 1, "endLine": 50}`, filePath) + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("invalid line range: start %d > end %d", startLine, endLine)) + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + return output, nil + } + + // Convert to 0-based indexing + startIdx := startLine - 1 + endIdx := endLine + + // Extract the requested lines + selectedLines := lines[startIdx:endIdx] + content := strings.Join(selectedLines, "\n") + + linesRead := endLine - startLine + 1 + output := fmt.Sprintf("File: %s (lines %d-%d of %d total lines, %d lines read)\n%s\n\n", + filePath, startLine, endLine, totalLines, linesRead, content) + if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go index 17aa51dd304..fa4533b04b4 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go @@ -11,15 +11,18 @@ import ( "github.com/tmc/langchaingo/callbacks" ) -// WriteFileTool implements the Tool interface for writing file contents +// WriteFileTool implements a comprehensive file writing tool that handles all scenarios type WriteFileTool struct { CallbacksHandler callbacks.Handler } // WriteFileRequest represents the JSON input for the write_file tool type WriteFileRequest struct { - Filename string `json:"filename"` - Content string `json:"content"` + Filename string `json:"filename"` + Content string `json:"content"` + Mode string `json:"mode,omitempty"` // "write" (default), "append", "create" + ChunkNum int `json:"chunk_num,omitempty"` // For chunked writing: 1-based chunk number + TotalChunks int `json:"total_chunks,omitempty"` // For chunked writing: total expected chunks } func (t WriteFileTool) Name() string { @@ -27,86 +30,264 @@ func (t WriteFileTool) Name() string { } func (t WriteFileTool) Description() string { - return "Writes content to a file. Format input as a single line JSON payload with a 'filename' and 'content' parameters." 
+ return `Comprehensive file writing tool that handles small and large files intelligently. + +Input: JSON payload with the following structure: +{ + "filename": "path/to/file.txt", + "content": "file content here", + "mode": "write", + "chunk_num": 1, + "total_chunks": 3 +} + +Field descriptions: +- mode: "write" (default), "append", or "create" +- chunk_num: for chunked writing (1-based) +- total_chunks: total number of chunks + +MODES: +- "write" (default): Overwrite/create file +- "append": Add content to end of existing file +- "create": Create file only if it doesn't exist + +CHUNKED WRITING (for large files): +Use chunk_num and total_chunks for files that might be too large: +- chunk_num: 1-based chunk number (1, 2, 3...) +- total_chunks: Total number of chunks you'll send + +EXAMPLES: + +Simple write: +{"filename": "./main.bicep", "content": "param location string = 'eastus'"} + +Append to file: +{"filename": "./log.txt", "content": "\nNew log entry", "mode": "append"} + +Large file (chunked): +{"filename": "./large.bicep", "content": "first part...", "chunk_num": 1, "total_chunks": 3} +{"filename": "./large.bicep", "content": "middle part...", "chunk_num": 2, "total_chunks": 3} +{"filename": "./large.bicep", "content": "final part...", "chunk_num": 3, "total_chunks": 3} + +The input must be formatted as a single line valid JSON string.` } func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("write_file: %s", input)) + logInput := input + if len(input) > 200 { + logInput = input[:200] + "... (truncated)" + } + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("write_file: %s", logInput)) } if input == "" { - err := fmt.Errorf("input is required as JSON object with filename and content fields") + output := "❌ No input provided\n\n" + output += "📝 Expected JSON format:\n" + output += `{"filename": "path/to/file.txt", "content": "file content here"}` + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("empty input")) + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + return output, nil + } // Parse JSON input + var req WriteFileRequest + if err := json.Unmarshal([]byte(input), &req); err != nil { + output := "❌ Invalid JSON input: " + err.Error() + "\n\n" + output += "📝 Expected format:\n" + output += `{"filename": "path/to/file.txt", "content": "file content here"}` + "\n\n" + output += "💡 Common JSON issues:\n" + output += "- Use double quotes for strings\n" + output += "- Escape backslashes: \\$ should be \\\\$\n" + output += "- Remove trailing commas\n" + output += "- No comments allowed in JSON" + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + return output, nil + } + + // Validate required fields + if req.Filename == "" { + output := "❌ Missing required field: filename cannot be empty\n\n" + output += "📝 Example: " + `{"filename": "infra/main.bicep", "content": "param location string = 'eastus'"}` + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("missing filename")) + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + return output, nil + } + + // Determine mode and operation + mode := req.Mode + if mode == "" { + mode = "write" + } + + // Handle chunked writing + isChunked := req.ChunkNum > 0 && req.TotalChunks > 0 + if isChunked { + return t.handleChunkedWrite(ctx, req) + } + + // Handle regular 
writing + return t.handleRegularWrite(ctx, req, mode) +} + +// handleChunkedWrite handles writing files in chunks +func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequest) (string, error) { + if req.ChunkNum < 1 || req.TotalChunks < 1 || req.ChunkNum > req.TotalChunks { + err := fmt.Errorf("invalid chunk numbers: chunk_num=%d, total_chunks=%d", req.ChunkNum, req.TotalChunks) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, err) } return "", err } - // Parse JSON input - var req WriteFileRequest - if err := json.Unmarshal([]byte(input), &req); err != nil { - toolErr := fmt.Errorf("invalid JSON input: %w", err) + filePath := strings.TrimSpace(req.Filename) + content := t.processContent(req.Content) + + // Ensure directory exists + if err := t.ensureDirectory(filePath); err != nil { + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err + } + + var err error + var operation string + + if req.ChunkNum == 1 { + // First chunk - create/overwrite file + err = os.WriteFile(filePath, []byte(content), 0644) + operation = fmt.Sprintf("Started writing chunk %d/%d", req.ChunkNum, req.TotalChunks) + } else { + // Subsequent chunks - append + file, openErr := os.OpenFile(filePath, os.O_APPEND|os.O_WRONLY, 0644) + if openErr != nil { + err = fmt.Errorf("failed to open file for append %s: %w", filePath, openErr) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, err) + } + return "", err + } + defer file.Close() + + _, err = file.WriteString(content) + if req.ChunkNum == req.TotalChunks { + operation = fmt.Sprintf("Completed writing chunk %d/%d (final)", req.ChunkNum, req.TotalChunks) + } else { + operation = fmt.Sprintf("Wrote chunk %d/%d", req.ChunkNum, req.TotalChunks) + } + } + + if err != nil { + toolErr := fmt.Errorf("failed to write chunk to file %s: %w", filePath, err) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, toolErr) } return "", toolErr } - if req.Filename == "" { - err := fmt.Errorf("filename cannot be empty") + // Get file size + fileInfo, err := os.Stat(filePath) + if err != nil { + toolErr := fmt.Errorf("failed to verify file %s: %w", filePath, err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + output := fmt.Sprintf("%s to %s. Chunk size: %d bytes, Total file size: %d bytes", + operation, filePath, len(content), fileInfo.Size()) + + if req.ChunkNum == req.TotalChunks { + output += "\n✅ File writing completed successfully!" + } + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolEnd(ctx, output) + } + + return output, nil +} + +// handleRegularWrite handles normal file writing +func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequest, mode string) (string, error) { + filePath := strings.TrimSpace(req.Filename) + content := t.processContent(req.Content) + + // Provide feedback for large content + if len(content) > 10000 { + fmt.Printf("📝 Large content detected (%d chars). 
Consider using chunked writing for better reliability.\n", len(content)) + } + + // Ensure directory exists + if err := t.ensureDirectory(filePath); err != nil { if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, err) } return "", err } - filePath := strings.TrimSpace(req.Filename) - content := req.Content + var err error + var operation string - // Convert literal \n sequences to actual newlines (for agents that escape newlines) - content = strings.ReplaceAll(content, "\\n", "\n") - content = strings.ReplaceAll(content, "\\t", "\t") + switch mode { + case "create": + if _, err := os.Stat(filePath); err == nil { + toolErr := fmt.Errorf("file %s already exists (create mode)", filePath) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + err = os.WriteFile(filePath, []byte(content), 0644) + operation = "Created" - // Ensure the directory exists - dir := filepath.Dir(filePath) - if dir != "." && dir != "" { - if err := os.MkdirAll(dir, 0755); err != nil { - toolErr := fmt.Errorf("failed to create directory %s: %w", dir, err) + case "append": + file, openErr := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if openErr != nil { + toolErr := fmt.Errorf("failed to open file for append %s: %w", filePath, openErr) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, toolErr) } return "", toolErr } + defer file.Close() + _, err = file.WriteString(content) + operation = "Appended to" + + default: // "write" + err = os.WriteFile(filePath, []byte(content), 0644) + operation = "Wrote" } - // Write the file - err := os.WriteFile(filePath, []byte(content), 0644) if err != nil { - toolErr := fmt.Errorf("failed to write file %s: %w", filePath, err) + toolErr := fmt.Errorf("failed to %s file %s: %w", strings.ToLower(operation), filePath, err) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, toolErr) } return "", toolErr } - // Verify the file was written correctly - writtenContent, err := os.ReadFile(filePath) + // Get file size for verification + fileInfo, err := os.Stat(filePath) if err != nil { - toolErr := fmt.Errorf("failed to verify written file %s: %w", filePath, err) + toolErr := fmt.Errorf("failed to verify file %s: %w", filePath, err) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, toolErr) } return "", toolErr } - lineCount := strings.Count(string(writtenContent), "\n") + 1 - if content != "" && !strings.HasSuffix(content, "\n") { - lineCount = strings.Count(content, "\n") + 1 - } - - output := fmt.Sprintf("Successfully wrote %d bytes (%d lines) to %s. Content preview:\n%s", - len(content), lineCount, filePath, getContentPreview(content)) + output := fmt.Sprintf("%s %d bytes to %s successfully", operation, fileInfo.Size(), filePath) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) @@ -115,16 +296,20 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { return output, nil } -// getContentPreview returns a preview of the content for verification -func getContentPreview(content string) string { - lines := strings.Split(content, "\n") - if len(lines) <= 5 { - return content - } - - preview := strings.Join(lines[:3], "\n") - preview += fmt.Sprintf("\n... 
(%d more lines) ...\n", len(lines)-5) - preview += strings.Join(lines[len(lines)-2:], "\n") +// processContent handles escape sequences +func (t WriteFileTool) processContent(content string) string { + content = strings.ReplaceAll(content, "\\n", "\n") + content = strings.ReplaceAll(content, "\\t", "\t") + return content +} - return preview +// ensureDirectory creates the directory if it doesn't exist +func (t WriteFileTool) ensureDirectory(filePath string) error { + dir := filepath.Dir(filePath) + if dir != "." && dir != "" { + if err := os.MkdirAll(dir, 0755); err != nil { + return fmt.Errorf("failed to create directory %s: %w", dir, err) + } + } + return nil } From f8c1b20b4f25e0cb9f874dc67276558d48fa4035 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Tue, 29 Jul 2025 17:44:18 -0700 Subject: [PATCH 041/116] Adds MCP tool support --- cli/azd/extensions/azd.ai.start/go.mod | 10 +- cli/azd/extensions/azd.ai.start/go.sum | 21 ++++ .../azd.ai.start/internal/agent/agent.go | 106 +++++------------- .../default_agent_format_instructions.txt | 2 +- .../agent/prompts/default_agent_prefix.txt | 5 +- .../internal/cmd/enhanced_integration.go | 12 +- .../azd.ai.start/internal/cmd/root.go | 6 + .../azd.ai.start/internal/session/action.go | 41 ------- .../azd.ai.start/internal/session/session.go | 48 -------- .../tools/{ => dev}/command_executor.go | 2 +- .../azd.ai.start/internal/tools/dev/loader.go | 23 ++++ .../internal/tools/{ => http}/http_fetcher.go | 2 +- .../internal/tools/http/loader.go | 23 ++++ .../tools/{ => io}/change_directory.go | 2 +- .../internal/tools/{ => io}/copy_file.go | 2 +- .../tools/{ => io}/create_directory.go | 2 +- .../tools/{ => io}/current_directory.go | 2 +- .../tools/{ => io}/delete_directory.go | 2 +- .../internal/tools/{ => io}/delete_file.go | 2 +- .../internal/tools/{ => io}/directory_list.go | 2 +- .../internal/tools/{ => io}/file_info.go | 2 +- .../internal/tools/{ => io}/file_search.go | 2 +- .../azd.ai.start/internal/tools/io/loader.go | 34 ++++++ .../internal/tools/{ => io}/move_file.go | 2 +- .../internal/tools/{ => io}/read_file.go | 2 +- .../internal/tools/{ => io}/write_file.go | 2 +- .../azd.ai.start/internal/tools/loader.go | 44 ++++++++ .../azd.ai.start/internal/tools/mcp/loader.go | 75 +++++++++++++ .../azd.ai.start/internal/tools/mcp/mcp.json | 9 ++ .../internal/tools/weather/loader.go | 23 ++++ .../internal/tools/{ => weather}/weather.go | 2 +- 31 files changed, 321 insertions(+), 191 deletions(-) delete mode 100644 cli/azd/extensions/azd.ai.start/internal/session/action.go delete mode 100644 cli/azd/extensions/azd.ai.start/internal/session/session.go rename cli/azd/extensions/azd.ai.start/internal/tools/{ => dev}/command_executor.go (99%) create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/dev/loader.go rename cli/azd/extensions/azd.ai.start/internal/tools/{ => http}/http_fetcher.go (99%) create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/http/loader.go rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/change_directory.go (99%) rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/copy_file.go (99%) rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/create_directory.go (99%) rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/current_directory.go (98%) rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/delete_directory.go (99%) rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/delete_file.go (99%) rename 
cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/directory_list.go (99%) rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/file_info.go (99%) rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/file_search.go (99%) create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/move_file.go (99%) rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/read_file.go (99%) rename cli/azd/extensions/azd.ai.start/internal/tools/{ => io}/write_file.go (99%) create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/loader.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/weather/loader.go rename cli/azd/extensions/azd.ai.start/internal/tools/{ => weather}/weather.go (99%) diff --git a/cli/azd/extensions/azd.ai.start/go.mod b/cli/azd/extensions/azd.ai.start/go.mod index 2725f5bea3a..892e4868991 100644 --- a/cli/azd/extensions/azd.ai.start/go.mod +++ b/cli/azd/extensions/azd.ai.start/go.mod @@ -14,15 +14,21 @@ require ( github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.3.1 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/bahlo/generic-list-go v0.2.0 // indirect + github.com/buger/jsonparser v1.1.1 // indirect github.com/dlclark/regexp2 v1.10.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/goph/emperror v0.17.2 // indirect github.com/huandu/xstrings v1.3.3 // indirect + github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df // indirect github.com/imdario/mergo v0.3.13 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/invopop/jsonschema v0.13.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mark3labs/mcp-go v0.36.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/copystructure v1.0.0 // indirect @@ -36,9 +42,11 @@ require ( github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/cast v1.3.1 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/pflag v1.0.6 // indirect + github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect github.com/yargevad/filepathx v1.0.0 // indirect + github.com/yosida95/uritemplate/v3 v3.0.2 // indirect go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 // indirect golang.org/x/crypto v0.37.0 // indirect golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect diff --git a/cli/azd/extensions/azd.ai.start/go.sum b/cli/azd/extensions/azd.ai.start/go.sum index c2fbe371ce7..ebd93aadd8e 100644 --- a/cli/azd/extensions/azd.ai.start/go.sum +++ b/cli/azd/extensions/azd.ai.start/go.sum @@ -36,10 +36,14 @@ github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuP github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c h1:pi62a7GwfbxvZDXhV4DfhxeePzpVCoyr9/rZaWH5eow= github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c/go.mod 
h1:mSTaPODklWyhruY0DZgPw1DI97K5cHXfU3afMqGf0IM= +github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= +github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bmatcuk/doublestar/v4 v4.8.1 h1:54Bopc5c2cAvhLRAzqOGCYHYyhcDHsFF4wWIR5wKP38= github.com/bmatcuk/doublestar/v4 v4.8.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= @@ -120,11 +124,16 @@ github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df h1:4lTJXCZw16BF0BCzrQ1LUzlMW4+2OwBkkYj1/bRybhY= +github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df/go.mod h1:oL2JAtsIp/1vnVy4UG4iDzL8SZwkOzqvRL3YR9PGPjs= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E= +github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= @@ -141,6 +150,12 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80 h1:6Yzfa6GP0rIo/kULo2bwGEkFvCePZ3qHDDTC3/J9Swo= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mark3labs/mcp-go v0.32.0 h1:fgwmbfL2gbd67obg57OfV2Dnrhs1HtSdlY/i5fn7MU8= +github.com/mark3labs/mcp-go v0.32.0/go.mod h1:rXqOudj/djTORU/ThxYx8fqEVj/5pvTuuebQ2RC7uk4= +github.com/mark3labs/mcp-go v0.36.0 h1:rIZaijrRYPeSbJG8/qNDe0hWlGrCJ7FWHNMz2SQpTis= 
+github.com/mark3labs/mcp-go v0.36.0/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= @@ -184,6 +199,8 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= @@ -203,10 +220,14 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tmc/langchaingo v0.1.13 h1:rcpMWBIi2y3B90XxfE4Ao8dhCQPVDMaNPnN5cGB1CaA= github.com/tmc/langchaingo v0.1.13/go.mod h1:vpQ5NOIhpzxDfTZK9B6tf2GM/MoaHewPWM5KXXGh7hg= +github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= +github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc= github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA= +github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= +github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181 h1:K+bMSIx9A7mLES1rtG+qKduLIXq40DAzYHtb0XuCukA= gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181/go.mod h1:dzYhVIwWCtzPAa4QP98wfB9+mzt33MSmM8wsKiMi2ow= diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go index 1666f66fc23..2c301976d50 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go @@ -9,15 +9,13 @@ import ( "fmt" "github.com/tmc/langchaingo/agents" - "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/chains" "github.com/tmc/langchaingo/llms/openai" "github.com/tmc/langchaingo/memory" - "github.com/tmc/langchaingo/schema" "github.com/tmc/langchaingo/tools" - "azd.ai.start/internal/session" - mytools "azd.ai.start/internal/tools" + localtools "azd.ai.start/internal/tools" + mcptools "azd.ai.start/internal/tools/mcp" ) //go:embed prompts/default_agent_prefix.txt @@ -31,15 +29,10 @@ var _defaultAgentSuffix string // AzureAIAgent represents an enhanced Azure AI agent with action tracking, intent validation, and conversation memory type AzureAIAgent struct { - 
agent *agents.ConversationalAgent - executor *agents.Executor - memory schema.Memory // Maintains conversation history for context-aware responses - tools []tools.Tool - actionLogger callbacks.Handler - currentSession *session.ActionSession + executor *agents.Executor } -func NewAzureAIAgent(llm *openai.LLM) *AzureAIAgent { +func NewAzureAIAgent(llm *openai.LLM) (*AzureAIAgent, error) { smartMemory := memory.NewConversationBuffer( memory.WithInputKey("input"), memory.WithOutputKey("output"), @@ -47,67 +40,23 @@ func NewAzureAIAgent(llm *openai.LLM) *AzureAIAgent { memory.WithAIPrefix("AI"), ) - tools := []tools.Tool{ - // Directory operations - mytools.DirectoryListTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.CreateDirectoryTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.DeleteDirectoryTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.ChangeDirectoryTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.CurrentDirectoryTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - - // File operations - mytools.ReadFileTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.WriteFileTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.CopyFileTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.MoveFileTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.DeleteFileTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.FileInfoTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.FileSearchTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - - // Other tools - mytools.CommandExecutorTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.HTTPFetcherTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.CommandExecutorTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - mytools.WeatherTool{ - CallbacksHandler: llm.CallbacksHandler, - }, - tools.Calculator{ - CallbacksHandler: llm.CallbacksHandler, - }, + toolLoaders := []localtools.ToolLoader{ + localtools.NewLocalToolsLoader(llm.CallbacksHandler), + mcptools.NewMcpToolsLoader(llm.CallbacksHandler), + } + + allTools := []tools.Tool{} + + for _, toolLoader := range toolLoaders { + categoryTools, err := toolLoader.LoadTools() + if err != nil { + return nil, err + } + allTools = append(allTools, categoryTools...) } // 4. 
Create agent with memory directly integrated - agent := agents.NewConversationalAgent(llm, tools, + agent := agents.NewConversationalAgent(llm, allTools, agents.WithPromptPrefix(_defaultAgentPrefix), agents.WithPromptSuffix(_defaultAgentSuffix), agents.WithPromptFormatInstructions(_defaultAgentFormatInstructions), @@ -125,22 +74,21 @@ func NewAzureAIAgent(llm *openai.LLM) *AzureAIAgent { ) return &AzureAIAgent{ - agent: agent, - executor: executor, - memory: smartMemory, - tools: tools, - actionLogger: llm.CallbacksHandler, - } + executor: executor, + }, nil } // ProcessQuery processes a user query with full action tracking and validation -func (aai *AzureAIAgent) ProcessQuery(ctx context.Context, userInput string) (string, error) { +func (aai *AzureAIAgent) ProcessQuery(ctx context.Context, userInput string) error { // Execute with enhanced input - agent should automatically handle memory - output, err := chains.Run(ctx, aai.executor, userInput) + _, err := chains.Run(ctx, aai.executor, userInput, + chains.WithMaxTokens(800), + chains.WithTemperature(0.3), + ) if err != nil { fmt.Printf("❌ Execution failed: %s\n", err.Error()) - return "", err + return err } - return output, nil + return nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt index 9c54b885700..1caac90782b 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt +++ b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt @@ -33,6 +33,6 @@ Observation: [result] ONLY when the task is completely finished and no more actions are needed: Thought: Do I need to use a tool? No -AI: [your response summarizing what was accomplished] +AI: [briefly summarize your response without all the details from your observations] Remember: Always continue taking actions until the user's request is fully satisfied. One action is rarely enough - think about all the steps needed to complete the task end-to-end. diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt index 19b7d52c669..0102a07432f 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt +++ b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt @@ -1,7 +1,10 @@ You are an Azure Developer CLI (AZD) agent. You are an expert is building, provisioning and deploying Azure applications. Always use Azure best patterns and practices. -If a tools exists that provides best practices and standards call this tool at the beginning of your workflow. + +Before starting your initial task, review the available tools. +If any tools exist for best practices, invoke them to learn more. +Incorporate learned best practices in your work. When any code generation is performed ALWAYS save content to files. When filenames are not explicitly specified generate new files with meaningful names.
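As a rough illustration of how the tool-loading pattern above composes (a sketch only, not part of this patch): an additional tool category can be plugged into NewAzureAIAgent by implementing the ToolLoader interface introduced later in internal/tools/loader.go together with the langchaingo tools.Tool interface. The package name, EchoTool, and EchoToolsLoader below are hypothetical placeholders.

package echo

import (
	"context"

	"github.com/tmc/langchaingo/callbacks"
	"github.com/tmc/langchaingo/tools"
)

// EchoTool is a trivial tool that returns its input unchanged.
type EchoTool struct {
	CallbacksHandler callbacks.Handler
}

func (t EchoTool) Name() string        { return "echo" }
func (t EchoTool) Description() string { return "Echoes the input back. Input: any string." }

// Call reports start/end to the callbacks handler (mirroring the io/dev tools) and echoes the input.
func (t EchoTool) Call(ctx context.Context, input string) (string, error) {
	if t.CallbacksHandler != nil {
		t.CallbacksHandler.HandleToolStart(ctx, input)
		t.CallbacksHandler.HandleToolEnd(ctx, input)
	}
	return input, nil
}

// EchoToolsLoader mirrors the dev/io/mcp loaders so it can be appended to the
// toolLoaders slice in NewAzureAIAgent.
type EchoToolsLoader struct {
	callbackHandler callbacks.Handler
}

func NewEchoToolsLoader(callbackHandler callbacks.Handler) *EchoToolsLoader {
	return &EchoToolsLoader{callbackHandler: callbackHandler}
}

func (l *EchoToolsLoader) LoadTools() ([]tools.Tool, error) {
	return []tools.Tool{
		EchoTool{CallbacksHandler: l.callbackHandler},
	}, nil
}

With such a loader in place, it would be registered alongside localtools.NewLocalToolsLoader and mcptools.NewMcpToolsLoader in the toolLoaders slice shown above.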
diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go index 26ff289d29b..cef02cdc1c7 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go @@ -18,7 +18,10 @@ import ( // RunEnhancedAzureAgent runs the enhanced Azure AI agent with full capabilities func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) error { // Create the enhanced agent - azureAgent := agent.NewAzureAIAgent(llm) + azureAgent, err := agent.NewAzureAIAgent(llm) + if err != nil { + return err + } fmt.Println("🤖 Enhanced Azure AI Agent - Interactive Mode") fmt.Println("═══════════════════════════════════════════════════════════") @@ -56,17 +59,16 @@ func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) break } - fmt.Println("\n💬 Agent:") + fmt.Printf("\n-------------------------------------------\n") // Process the query with the enhanced agent - response, err := azureAgent.ProcessQuery(ctx, userInput) + err := azureAgent.ProcessQuery(ctx, userInput) if err != nil { fmt.Printf("❌ Error: %v\n", err) continue } - // Display the final response - fmt.Print(response) + fmt.Printf("\n-------------------------------------------\n") } if err := scanner.Err(); err != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go index b411224ca34..e9fb57a2cac 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go @@ -64,6 +64,12 @@ func runAIAgent(ctx context.Context, args []string) error { return fmt.Errorf("failed to unmarshal AI model configuration: %w", err) } + _, _ = azdClient.Prompt().Confirm(ctx, &azdext.ConfirmRequest{ + Options: &azdext.ConfirmOptions{ + Message: "Ready?", + }, + }) + // Common deployment names to try azureAPIVersion := "2024-02-15-preview" diff --git a/cli/azd/extensions/azd.ai.start/internal/session/action.go b/cli/azd/extensions/azd.ai.start/internal/session/action.go deleted file mode 100644 index 1111c2c4fed..00000000000 --- a/cli/azd/extensions/azd.ai.start/internal/session/action.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package session - -import ( - "time" -) - -// ActionLog represents a single action taken by the agent -type ActionLog struct { - Timestamp time.Time - Action string - Tool string - Input string - Output string - Success bool - Duration time.Duration -} - -// NewActionLog creates a new action log -func NewActionLog(tool, input string) *ActionLog { - return &ActionLog{ - Timestamp: time.Now(), - Tool: tool, - Action: tool, - Input: input, - } -} - -// SetOutput sets the output and success status for the action -func (al *ActionLog) SetOutput(output string, success bool) { - al.Output = output - al.Success = success - al.Duration = time.Since(al.Timestamp) -} - -// SetDuration sets the duration for the action -func (al *ActionLog) SetDuration(duration time.Duration) { - al.Duration = duration -} diff --git a/cli/azd/extensions/azd.ai.start/internal/session/session.go b/cli/azd/extensions/azd.ai.start/internal/session/session.go deleted file mode 100644 index 44f0156a912..00000000000 --- a/cli/azd/extensions/azd.ai.start/internal/session/session.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package session - -import ( - "time" -) - -// ActionSession tracks the current conversation session and actions -type ActionSession struct { - InitialIntent string - PlannedActions []string - ExecutedActions []ActionLog - ValidationResult interface{} // Use interface{} to avoid circular dependency - StartTime time.Time - EndTime time.Time -} - -// NewActionSession creates a new action session -func NewActionSession(initialIntent string) *ActionSession { - return &ActionSession{ - InitialIntent: initialIntent, - PlannedActions: []string{}, - ExecutedActions: []ActionLog{}, - StartTime: time.Now(), - } -} - -// Start marks the session as started -func (as *ActionSession) Start() { - as.StartTime = time.Now() -} - -// End marks the session as ended -func (as *ActionSession) End() { - as.EndTime = time.Now() -} - -// AddExecutedAction adds an executed action to the session -func (as *ActionSession) AddExecutedAction(action ActionLog) { - as.ExecutedActions = append(as.ExecutedActions, action) -} - -// SetValidationResult sets the validation result for the session -func (as *ActionSession) SetValidationResult(result interface{}) { - as.ValidationResult = result -} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/command_executor.go b/cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/command_executor.go rename to cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go index 9b9bd75585d..410db725505 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/command_executor.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go @@ -1,4 +1,4 @@ -package tools +package dev import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/dev/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/dev/loader.go new file mode 100644 index 00000000000..655d52a42a8 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/dev/loader.go @@ -0,0 +1,23 @@ +package dev + +import ( + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/tools" +) + +// DevToolLoader loads development-related tools +type DevToolsLoader struct { + callbacksHandler callbacks.Handler +} + +func NewDevToolsLoader(callbacksHandler callbacks.Handler) *DevToolsLoader { 
+ return &DevToolsLoader{ + callbacksHandler: callbacksHandler, + } +} + +func (l *DevToolsLoader) LoadTools() ([]tools.Tool, error) { + return []tools.Tool{ + &CommandExecutorTool{CallbacksHandler: l.callbacksHandler}, + }, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go b/cli/azd/extensions/azd.ai.start/internal/tools/http/http_fetcher.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go rename to cli/azd/extensions/azd.ai.start/internal/tools/http/http_fetcher.go index 2ce4324389c..cbd3628506b 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/http_fetcher.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/http/http_fetcher.go @@ -1,4 +1,4 @@ -package tools +package http import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/http/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/http/loader.go new file mode 100644 index 00000000000..2233455e3e8 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/http/loader.go @@ -0,0 +1,23 @@ +package http + +import ( + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/tools" +) + +// HttpToolsLoader loads HTTP-related tools +type HttpToolsLoader struct { + callbackHandler callbacks.Handler +} + +func NewHttpToolsLoader(callbackHandler callbacks.Handler) *HttpToolsLoader { + return &HttpToolsLoader{ + callbackHandler: callbackHandler, + } +} + +func (l *HttpToolsLoader) LoadTools() ([]tools.Tool, error) { + return []tools.Tool{ + &HTTPFetcherTool{CallbacksHandler: l.callbackHandler}, + }, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/change_directory.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/change_directory.go index 8a05f2b3532..48094b919ff 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/change_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/change_directory.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go index 2bbdda06320..2db2eae1c1c 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/copy_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/create_directory.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/create_directory.go index 3936b14a7a2..d100e7aa834 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/create_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/create_directory.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/current_directory.go similarity index 98% rename from 
cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/current_directory.go index d2c4152da29..59169eb24e9 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/current_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/current_directory.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_directory.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/delete_directory.go index b2eaf93bc30..72714f379b9 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/delete_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_directory.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_file.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/delete_file.go index d088cee098e..b893bb1ee29 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/delete_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_file.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go index 15914c92417..37458cf4531 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/directory_list.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/file_info.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go index 4d82697ac46..000828aa827 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/file_info.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/file_search.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_search.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/file_search.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/file_search.go index bacb52bd714..8b1d8b2ab20 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/file_search.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_search.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go new file mode 100644 index 00000000000..6818542a308 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go @@ -0,0 +1,34 @@ +package io + +import ( + "github.com/tmc/langchaingo/callbacks" + 
"github.com/tmc/langchaingo/tools" +) + +// IoToolsLoader loads IO-related tools +type IoToolsLoader struct { + callbackHandler callbacks.Handler +} + +func NewIoToolsLoader(callbackHandler callbacks.Handler) *IoToolsLoader { + return &IoToolsLoader{ + callbackHandler: callbackHandler, + } +} + +func (l *IoToolsLoader) LoadTools() ([]tools.Tool, error) { + return []tools.Tool{ + &CurrentDirectoryTool{CallbacksHandler: l.callbackHandler}, + &ChangeDirectoryTool{CallbacksHandler: l.callbackHandler}, + &DirectoryListTool{CallbacksHandler: l.callbackHandler}, + &CreateDirectoryTool{CallbacksHandler: l.callbackHandler}, + &DeleteDirectoryTool{CallbacksHandler: l.callbackHandler}, + &ReadFileTool{CallbacksHandler: l.callbackHandler}, + &WriteFileTool{CallbacksHandler: l.callbackHandler}, + &CopyFileTool{CallbacksHandler: l.callbackHandler}, + &MoveFileTool{CallbacksHandler: l.callbackHandler}, + &DeleteFileTool{CallbacksHandler: l.callbackHandler}, + &FileInfoTool{CallbacksHandler: l.callbackHandler}, + &FileSearchTool{CallbacksHandler: l.callbackHandler}, + }, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/move_file.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/move_file.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/move_file.go index 6d5bbe3171b..68db771d144 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/move_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/move_file.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/read_file.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/read_file.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/read_file.go index 375e5b11378..5983316aae9 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/read_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/read_file.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/write_file.go rename to cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go index fa4533b04b4..757111b4127 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/write_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go @@ -1,4 +1,4 @@ -package tools +package io import ( "context" diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/loader.go new file mode 100644 index 00000000000..ae2da0253c1 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/loader.go @@ -0,0 +1,44 @@ +package tools + +import ( + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/tools" + + "azd.ai.start/internal/tools/dev" + "azd.ai.start/internal/tools/io" +) + +// ToolLoader provides an interface for loading tools from different categories +type ToolLoader interface { + LoadTools() ([]tools.Tool, error) +} + +type LocalToolsLoader struct { + loaders []ToolLoader + callbackHandler callbacks.Handler +} + +func NewLocalToolsLoader(callbackHandler callbacks.Handler) *LocalToolsLoader { + return &LocalToolsLoader{ + loaders: []ToolLoader{ + 
dev.NewDevToolsLoader(callbackHandler), + io.NewIoToolsLoader(callbackHandler), + }, + callbackHandler: callbackHandler, + } +} + +// LoadLocalTools loads all tools from all categories with the provided callback handler +func (l *LocalToolsLoader) LoadTools() ([]tools.Tool, error) { + var allTools []tools.Tool + + for _, loader := range l.loaders { + categoryTools, err := loader.LoadTools() + if err != nil { + return nil, err + } + allTools = append(allTools, categoryTools...) + } + + return allTools, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go new file mode 100644 index 00000000000..5aee8932b06 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go @@ -0,0 +1,75 @@ +package mcp + +import ( + "encoding/json" + "fmt" + + _ "embed" + + langchaingo_mcp_adapter "github.com/i2y/langchaingo-mcp-adapter" + "github.com/mark3labs/mcp-go/client" + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/tools" +) + +//go:embed mcp.json +var _mcpJson string + +// McpConfig represents the overall MCP configuration structure +type McpConfig struct { + Servers map[string]ServerConfig `json:"servers"` +} + +// ServerConfig represents an individual server configuration +type ServerConfig struct { + Type string `json:"type"` + Command string `json:"command"` + Args []string `json:"args,omitempty"` + Env []string `json:"env,omitempty"` +} + +type McpToolsLoader struct { + callbackHandler callbacks.Handler +} + +func NewMcpToolsLoader(callbackHandler callbacks.Handler) *McpToolsLoader { + return &McpToolsLoader{ + callbackHandler: callbackHandler, + } +} + +func (l *McpToolsLoader) LoadTools() ([]tools.Tool, error) { + // Deserialize the embedded mcp.json configuration + var config McpConfig + if err := json.Unmarshal([]byte(_mcpJson), &config); err != nil { + return nil, fmt.Errorf("failed to parse mcp.json: %w", err) + } + + var allTools []tools.Tool + + // Iterate through each server configuration + for serverName, serverConfig := range config.Servers { + // Create MCP client for the server using stdio + mcpClient, err := client.NewStdioMCPClient(serverConfig.Command, serverConfig.Env, serverConfig.Args...) + if err != nil { + return nil, fmt.Errorf("failed to create MCP client for server %s: %w", serverName, err) + } + + // Create the adapter + adapter, err := langchaingo_mcp_adapter.New(mcpClient) + if err != nil { + return nil, fmt.Errorf("failed to create adapter for server %s: %w", serverName, err) + } + + // Get all tools from MCP server + mcpTools, err := adapter.Tools() + if err != nil { + return nil, fmt.Errorf("failed to get tools from server %s: %w", serverName, err) + } + + // Add the tools to our collection + allTools = append(allTools, mcpTools...) 
+ } + + return allTools, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json new file mode 100644 index 00000000000..efca4416be8 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json @@ -0,0 +1,9 @@ +{ + "servers": { + "Azure": { + "type": "stdio", + "command": "azmcp", + "args": ["server", "start"] + } + } +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/weather/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/weather/loader.go new file mode 100644 index 00000000000..ce283e18fb3 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/weather/loader.go @@ -0,0 +1,23 @@ +package weather + +import ( + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/tools" +) + +// WeatherToolsLoader loads weather-related tools +type WeatherToolsLoader struct { + callbackHandler callbacks.Handler +} + +func NewWeatherToolsLoader(callbackHandler callbacks.Handler) *WeatherToolsLoader { + return &WeatherToolsLoader{ + callbackHandler: callbackHandler, + } +} + +func (l *WeatherToolsLoader) LoadTools() ([]tools.Tool, error) { + return []tools.Tool{ + &WeatherTool{CallbacksHandler: l.callbackHandler}, + }, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/weather.go b/cli/azd/extensions/azd.ai.start/internal/tools/weather/weather.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/weather.go rename to cli/azd/extensions/azd.ai.start/internal/tools/weather/weather.go index 1f0d8404142..0f8837c5124 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/weather.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/weather/weather.go @@ -1,4 +1,4 @@ -package tools +package weather import ( "context" From 1761578596bf8a11d004596435fbcc38cbee4161 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Wed, 30 Jul 2025 13:31:14 -0700 Subject: [PATCH 042/116] UX updates, tool JSON payloads --- .../azd.ai.start/internal/agent/agent.go | 2 - .../default_agent_format_instructions.txt | 10 +- .../internal/cmd/enhanced_integration.go | 17 +- .../azd.ai.start/internal/cmd/root.go | 26 ++- .../azd.ai.start/internal/logging/logger.go | 121 ++++++----- .../internal/tools/io/directory_list.go | 191 ++++++++++++++---- .../internal/tools/io/file_info.go | 41 +++- .../internal/tools/io/file_search.go | 59 +++--- .../internal/tools/io/read_file.go | 161 +++++++++++++-- .../internal/tools/io/write_file.go | 130 +++++++++--- .../azd.ai.start/internal/tools/mcp/mcp.json | 14 +- 11 files changed, 572 insertions(+), 200 deletions(-) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go index 2c301976d50..a5822a677c2 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go @@ -6,7 +6,6 @@ package agent import ( "context" _ "embed" - "fmt" "github.com/tmc/langchaingo/agents" "github.com/tmc/langchaingo/chains" @@ -86,7 +85,6 @@ func (aai *AzureAIAgent) ProcessQuery(ctx context.Context, userInput string) err chains.WithTemperature(0.3), ) if err != nil { - fmt.Printf("❌ Execution failed: %s\n", err.Error()) return err } diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt index 1caac90782b..f7db6dd7e21 100644 --- 
a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt +++ b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt @@ -16,6 +16,7 @@ After each Observation, you MUST continue the cycle: Thought: [Evaluate the result and determine if the task is complete or if more actions are needed] If the task is NOT complete: + Thought: Do I need to use a tool? Yes Action: [next action to take] Action Input: [input for the next action] @@ -23,6 +24,7 @@ Observation: [result of the next action] ... (continue this cycle until the task is fully complete) If there are errors: + Thought: [Analyze the error and determine how to fix it] Thought: Do I need to use a tool? Yes Action: [corrective action] @@ -30,9 +32,9 @@ Action Input: [corrected input] Observation: [result] ... (retry up to 3 times with different approaches if needed) -ONLY when the task is completely finished and no more actions are needed: +Remember: Always continue taking actions until the user's request is fully satisfied. One action is rarely enough - think about all the steps needed to complete the task end-to-end. + +When you are done answering the questions and performing all your tasks you MUST use the following format: Thought: Do I need to use a tool? No -AI: [briefly summarize your response without all the details from your observations] - -Remember: Always continue taking actions until the user's request is fully satisfied. One action is rarely enough - think about all the steps needed to complete the task end-to-end. +AI: [briefly summarize your response without all the details from your observations] \ No newline at end of file diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go index cef02cdc1c7..dad0371268a 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go @@ -10,6 +10,7 @@ import ( "os" "strings" + "github.com/fatih/color" "github.com/tmc/langchaingo/llms/openai" "azd.ai.start/internal/agent" @@ -40,13 +41,16 @@ func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) if initialQuery != "" { userInput = initialQuery initialQuery = "" // Clear after first use - fmt.Printf("💬 You: %s\n", userInput) + color.Cyan("💬 You: %s\n", userInput) } else { - fmt.Print("\n💬 You: ") + fmt.Print(color.CyanString("\n💬 You: ")) + color.Set(color.FgCyan) // Set blue color for user input if !scanner.Scan() { - break // EOF or error + color.Unset() // Reset color + break // EOF or error } userInput = strings.TrimSpace(scanner.Text()) + color.Unset() // Reset color after input } // Check for exit commands @@ -59,16 +63,11 @@ func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) break } - fmt.Printf("\n-------------------------------------------\n") - // Process the query with the enhanced agent err := azureAgent.ProcessQuery(ctx, userInput) if err != nil { - fmt.Printf("❌ Error: %v\n", err) - continue + return err } - - fmt.Printf("\n-------------------------------------------\n") } if err := scanner.Err(); err != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go index e9fb57a2cac..d57df89a2ea 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go @@ -7,6 +7,7 @@ import ( 
"context" "encoding/json" "fmt" + "os" "azd.ai.start/internal/logging" "github.com/azure/azure-dev/cli/azd/pkg/azdext" @@ -15,6 +16,8 @@ import ( ) func NewRootCommand() *cobra.Command { + var debug bool + rootCmd := &cobra.Command{ Use: "azd ai.chat [options]", Short: "Enables interactive AI agent through AZD", @@ -24,10 +27,12 @@ func NewRootCommand() *cobra.Command { DisableDefaultCmd: true, }, RunE: func(cmd *cobra.Command, args []string) error { - return runAIAgent(cmd.Context(), args) + return runAIAgent(cmd.Context(), args, debug) }, } + rootCmd.Flags().BoolVar(&debug, "debug", false, "Enable debug logging") + return rootCmd } @@ -38,7 +43,7 @@ type AiModelConfig struct { } // runAIAgent creates and runs the enhanced AI agent using LangChain Go -func runAIAgent(ctx context.Context, args []string) error { +func runAIAgent(ctx context.Context, args []string, debug bool) error { // Create a new context that includes the AZD access token ctx = azdext.WithAccessToken(ctx) @@ -64,11 +69,16 @@ func runAIAgent(ctx context.Context, args []string) error { return fmt.Errorf("failed to unmarshal AI model configuration: %w", err) } - _, _ = azdClient.Prompt().Confirm(ctx, &azdext.ConfirmRequest{ - Options: &azdext.ConfirmOptions{ - Message: "Ready?", - }, - }) + if debug { + defaultValue := true + + _, _ = azdClient.Prompt().Confirm(ctx, &azdext.ConfirmRequest{ + Options: &azdext.ConfirmOptions{ + Message: fmt.Sprintf("Ready? (PID: %d - You can attach a debugger now)", os.Getpid()), + DefaultValue: &defaultValue, + }, + }) + } // Common deployment names to try azureAPIVersion := "2024-02-15-preview" @@ -81,7 +91,7 @@ func runAIAgent(ctx context.Context, args []string) error { fmt.Printf("🔵 Trying Azure OpenAI with deployment: %s\n", aiConfig.DeploymentName) actionLogger := logging.NewActionLogger( - logging.WithDebug(false), + logging.WithDebug(debug), ) llm, err = openai.New( diff --git a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go index 1877195e401..d148b9273b2 100644 --- a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go +++ b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go @@ -5,8 +5,11 @@ package logging import ( "context" - "fmt" + "encoding/json" + "regexp" + "strings" + "github.com/fatih/color" "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/schema" @@ -43,122 +46,132 @@ func NewActionLogger(opts ...ActionLoggerOption) *ActionLogger { // HandleText is called when text is processed func (al *ActionLogger) HandleText(ctx context.Context, text string) { - if al.debugEnabled { - fmt.Printf("📝 Text (full): %s\n", text) - } } // HandleLLMGenerateContentStart is called when LLM content generation starts func (al *ActionLogger) HandleLLMGenerateContentStart(ctx context.Context, ms []llms.MessageContent) { - if al.debugEnabled { - for i, msg := range ms { - fmt.Printf("🤖 Debug - Message %d: %+v\n", i, msg) - } - } } // HandleLLMGenerateContentEnd is called when LLM content generation ends func (al *ActionLogger) HandleLLMGenerateContentEnd(ctx context.Context, res *llms.ContentResponse) { - if al.debugEnabled && res != nil { - fmt.Printf("🤖 Debug - Response: %+v\n", res) + // Parse and print thoughts as "THOUGHT: " from content + // IF thought contains: "Do I need to use a tool?", omit this thought. 
+ + for _, choice := range res.Choices { + content := choice.Content + + if al.debugEnabled { + color.HiBlack("\nHandleLLMGenerateContentEnd\n%s\n", content) + } + + // Find all "Thought:" patterns and extract the content that follows + thoughtRegex := regexp.MustCompile(`(?i)thought:\s*(.*)`) + matches := thoughtRegex.FindAllStringSubmatch(content, -1) + + for _, match := range matches { + if len(match) > 1 { + thought := strings.TrimSpace(match[1]) + if thought != "" { + // Skip thoughts that contain "Do I need to use a tool?" + if !strings.Contains(strings.ToLower(thought), "do i need to use a tool?") { + color.White("\n🤖 Agent: %s\n", thought) + } + } + } + } } } // HandleRetrieverStart is called when retrieval starts func (al *ActionLogger) HandleRetrieverStart(ctx context.Context, query string) { - if al.debugEnabled { - fmt.Printf("🔍 Retrieval starting for query (full): %s\n", query) - } } // HandleRetrieverEnd is called when retrieval ends func (al *ActionLogger) HandleRetrieverEnd(ctx context.Context, query string, documents []schema.Document) { - fmt.Printf("🔍 Retrieval completed: found %d documents\n", len(documents)) - if al.debugEnabled { - fmt.Printf("🔍 Debug - Query (full): %s\n", query) - for i, doc := range documents { - fmt.Printf("🔍 Debug - Document %d: %+v\n", i, doc) - } - } } // HandleToolStart is called when a tool execution starts func (al *ActionLogger) HandleToolStart(ctx context.Context, input string) { - if al.debugEnabled { - fmt.Printf("🔧 Executing Tool: %s\n", input) - } } // HandleToolEnd is called when a tool execution ends func (al *ActionLogger) HandleToolEnd(ctx context.Context, output string) { - if al.debugEnabled { - fmt.Printf("✅ Tool Result (full): %s\n", output) - } } // HandleToolError is called when a tool execution fails func (al *ActionLogger) HandleToolError(ctx context.Context, err error) { - fmt.Printf("❌ Tool Error: %s\n", err.Error()) + color.Red("\nTool Error: %s\n", err.Error()) } // HandleLLMStart is called when LLM call starts func (al *ActionLogger) HandleLLMStart(ctx context.Context, prompts []string) { - for i, prompt := range prompts { - if al.debugEnabled { - fmt.Printf("🤖 Prompt %d (full): %s\n", i, prompt) - } - } } // HandleChainStart is called when chain execution starts func (al *ActionLogger) HandleChainStart(ctx context.Context, inputs map[string]any) { - for key, value := range inputs { - if al.debugEnabled { - fmt.Printf("🔗 Input [%s]: %v\n", key, value) - } - } } // HandleChainEnd is called when chain execution ends func (al *ActionLogger) HandleChainEnd(ctx context.Context, outputs map[string]any) { - for key, value := range outputs { - if al.debugEnabled { - fmt.Printf("🔗 Output [%s]: %v\n", key, value) - } - } } // HandleChainError is called when chain execution fails func (al *ActionLogger) HandleChainError(ctx context.Context, err error) { - fmt.Printf("🔗 Chain execution failed: %s\n", err.Error()) + color.Red("\n%s\n", err.Error()) } // HandleAgentAction is called when an agent action is planned func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.AgentAction) { - fmt.Printf("%s\n\n", action.Log) - + // Print "Calling " + // Inspect action.ToolInput. Attempt to parse input as JSON + // If is valid JSON and contains a param 'filename' then print filename. 
+ // example: "Calling read_file " if al.debugEnabled { - fmt.Printf("🎯 Agent planned action (debug): %+v\n", action) + color.HiBlack("\nHandleAgentAction\n%s\n", action.Log) + } + + var toolInput map[string]interface{} + if err := json.Unmarshal([]byte(action.ToolInput), &toolInput); err == nil { + // Successfully parsed JSON, check for filename parameter + if filename, ok := toolInput["filename"]; ok { + if filenameStr, ok := filename.(string); ok { + color.Green("\n🤖 Agent: Calling %s %s\n", action.Tool, filenameStr) + return + } + } + // JSON parsed but no filename found, use fallback format + color.Green("\n🤖 Agent: Calling %s tool\n", action.Tool) + } else { + // JSON parsing failed, show the input as text + color.Green("\n🤖 Agent: Calling %s tool with %s\n", action.Tool, action.ToolInput) } } // HandleAgentFinish is called when the agent finishes func (al *ActionLogger) HandleAgentFinish(ctx context.Context, finish schema.AgentFinish) { - fmt.Printf("%s\n\n", finish.Log) - + // Find summary from format "AI: " + // Print: if al.debugEnabled { - fmt.Printf("🏁 Agent finished (debug): %+v\n", finish) + color.HiBlack("\nHandleAgentFinish\n%s\n", finish.Log) + } + + // Use regex to find AI summary, capturing everything after "AI:" (including multi-line) + // The (?s) flag makes . match newlines, (.+) captures everything after "AI:" + aiRegex := regexp.MustCompile(`(?is)AI:\s*(.+)`) + matches := aiRegex.FindStringSubmatch(finish.Log) + + if len(matches) > 1 { + summary := strings.TrimSpace(matches[1]) + color.White("\n🤖 Agent: %s\n", summary) } + // If "AI:" not found, don't print anything } // HandleLLMError is called when LLM call fails func (al *ActionLogger) HandleLLMError(ctx context.Context, err error) { - fmt.Printf("🤖 LLM error: %s\n", err.Error()) + color.Red("\nLLM Error: %s\n", err.Error()) } // HandleStreamingFunc handles streaming responses func (al *ActionLogger) HandleStreamingFunc(ctx context.Context, chunk []byte) { - // if len(chunk) > 0 { - // fmt.Print(string(chunk)) - // } } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go index 37458cf4531..f02e8e8df5e 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go @@ -2,6 +2,7 @@ package io import ( "context" + "encoding/json" "fmt" "os" "path/filepath" @@ -15,102 +16,204 @@ type DirectoryListTool struct { CallbacksHandler callbacks.Handler } +// ErrorResponse represents a JSON error response structure that can be reused across all tools +type ErrorResponse struct { + Error bool `json:"error"` + Message string `json:"message"` +} + func (t DirectoryListTool) Name() string { return "list_directory" } func (t DirectoryListTool) Description() string { - return "List files and folders in a directory. Input: directory path (use '.' for current directory)" + return `List files and folders in a directory. +Input: JSON object with required 'path' field: {"path": ".", "includeHidden": false} +Returns: JSON with directory contents including file names, types, and sizes. +The input must be formatted as a single line valid JSON string.` } func (t DirectoryListTool) Call(ctx context.Context, input string) (string, error) { - path := strings.TrimSpace(input) - if path == "" { - path = "." 
+ // Parse JSON input + type InputParams struct { + Path string `json:"path"` + IncludeHidden bool `json:"includeHidden,omitempty"` } - // Get absolute path for clarity - absPath, err := filepath.Abs(path) - if err != nil { + var params InputParams + + // Clean the input first + cleanInput := strings.TrimSpace(input) + + // Parse as JSON - this is now required + if err := json.Unmarshal([]byte(cleanInput), ¶ms); err != nil { + errorResponse := ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"path\": \".\", \"include_hidden\": false}", err.Error()), + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to get absolute path for %s: %w", path, err)) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to parse JSON input: %w", err)) } - return "", fmt.Errorf("failed to get absolute path for %s: %w", path, err) + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } - // Invoke callback for tool start + // Validate required path field + if params.Path == "" { + params.Path = "." + } + + path := strings.TrimSpace(params.Path) + + // Add debug logging if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("Reading directory %s\n", input)) + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("Processing JSON input: path='%s', include_hidden=%v", path, params.IncludeHidden)) + } + + // Get absolute path for clarity - handle "." explicitly to avoid potential issues + var absPath string + var err error + + if path == "." { + // Explicitly get current working directory instead of relying on filepath.Abs(".") + absPath, err = os.Getwd() + if err != nil { + errorResponse := ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Failed to get current working directory: %s", err.Error()), + } + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to get current working directory: %w", err)) + } + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil + } + } else { + absPath, err = filepath.Abs(path) + if err != nil { + errorResponse := ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Failed to get absolute path for %s: %s", path, err.Error()), + } + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to get absolute path for %s: %w", path, err)) + } + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil + } + } + + // Invoke callback for tool execution start + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("Reading directory %s (absolute: %s)", path, absPath)) } // Check if directory exists + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("Checking if directory exists: '%s'", absPath)) + } + info, err := os.Stat(absPath) if err != nil { + var message string + if os.IsNotExist(err) { + message = fmt.Sprintf("Directory does not exist: %s", absPath) + } else { + message = fmt.Sprintf("Failed to access %s: %s (original input: '%s', cleaned path: '%s')", absPath, err.Error(), input, path) + } + + errorResponse := ErrorResponse{ + Error: true, + Message: message, + } if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to access %s: %w", absPath, err)) } - return "", fmt.Errorf("failed to access %s: %w", absPath, err) + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + 
return string(jsonData), nil } + if !info.IsDir() { + errorResponse := ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Path is not a directory: %s", absPath), + } if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("%s is not a directory", absPath)) } - return "", fmt.Errorf("%s is not a directory", absPath) + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } // List directory contents files, err := os.ReadDir(absPath) if err != nil { + errorResponse := ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Failed to read directory %s: %s", absPath, err.Error()), + } if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to read directory %s: %w", absPath, err)) } - return "", fmt.Errorf("failed to read directory %s: %w", absPath, err) + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } - var result strings.Builder - result.WriteString(fmt.Sprintf("Contents of %s:\n", absPath)) - result.WriteString(fmt.Sprintf("Total items: %d\n\n", len(files))) + // Prepare JSON response structure + type FileInfo struct { + Name string `json:"name"` + Type string `json:"type"` + Size int64 `json:"size,omitempty"` + IsDir bool `json:"isDirectory"` + } - // Separate directories and files - var dirs []string - var regularFiles []string + type DirectoryResponse struct { + Path string `json:"path"` + TotalItems int `json:"totalItems"` + Items []FileInfo `json:"items"` + } + + var items []FileInfo for _, file := range files { + fileInfo := FileInfo{ + Name: file.Name(), + IsDir: file.IsDir(), + } + if file.IsDir() { - dirs = append(dirs, file.Name()+"/") + fileInfo.Type = "directory" } else { - info, err := file.Info() - if err != nil { - regularFiles = append(regularFiles, file.Name()) - } else { - regularFiles = append(regularFiles, fmt.Sprintf("%s (%d bytes)", file.Name(), info.Size())) + fileInfo.Type = "file" + if info, err := file.Info(); err == nil { + fileInfo.Size = info.Size() } } - } - // Display directories first - if len(dirs) > 0 { - result.WriteString("Directories:\n") - for _, dir := range dirs { - result.WriteString(fmt.Sprintf(" 📁 %s\n", dir)) - } - result.WriteString("\n") + items = append(items, fileInfo) } - // Then display files - if len(regularFiles) > 0 { - result.WriteString("Files:\n") - for _, file := range regularFiles { - result.WriteString(fmt.Sprintf(" 📄 %s\n", file)) - } + response := DirectoryResponse{ + Path: absPath, + TotalItems: len(files), + Items: items, } - if len(dirs) == 0 && len(regularFiles) == 0 { - result.WriteString("Directory is empty.\n") + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + errorResponse := ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Failed to marshal JSON response: %s", err.Error()), + } + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to marshal JSON response: %w", err)) + } + errorJsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(errorJsonData), nil } - result.WriteString("\n") - output := result.String() + output := string(jsonData) // Invoke callback for tool end if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go index 000828aa827..98528d50b91 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go +++ 
b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go @@ -2,6 +2,7 @@ package io import ( "context" + "encoding/json" "fmt" "os" "time" @@ -19,7 +20,7 @@ func (t FileInfoTool) Name() string { } func (t FileInfoTool) Description() string { - return "Get information about a file (size, modification time, permissions). Input: file path (e.g., 'data.txt' or './docs/readme.md')" + return "Get information about a file (size, modification time, permissions). Input: file path (e.g., 'data.txt' or './docs/readme.md'). Returns JSON with file information." } func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { @@ -44,15 +45,45 @@ func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { return "", toolErr } + // Prepare JSON response structure + type FileInfoResponse struct { + Path string `json:"path"` + Name string `json:"name"` + Type string `json:"type"` + IsDirectory bool `json:"isDirectory"` + Size int64 `json:"size"` + ModifiedTime time.Time `json:"modifiedTime"` + Permissions string `json:"permissions"` + } + var fileType string if info.IsDir() { - fileType = "Directory" + fileType = "directory" } else { - fileType = "File" + fileType = "file" + } + + response := FileInfoResponse{ + Path: input, + Name: info.Name(), + Type: fileType, + IsDirectory: info.IsDir(), + Size: info.Size(), + ModifiedTime: info.ModTime(), + Permissions: info.Mode().String(), + } + + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } - output := fmt.Sprintf("%s: %s\nSize: %d bytes\nModified: %s\nPermissions: %s\n\n", - fileType, input, info.Size(), info.ModTime().Format(time.RFC3339), info.Mode().String()) + output := string(jsonData) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/file_search.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_search.go index 8b1d8b2ab20..dd7a7a0de9c 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/file_search.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_search.go @@ -7,7 +7,6 @@ import ( "os" "path/filepath" "sort" - "strings" "github.com/bmatcuk/doublestar/v4" "github.com/tmc/langchaingo/callbacks" @@ -37,6 +36,8 @@ Input: JSON payload with the following structure: "maxResults": 50 // optional: max files to return (default: 100) } +Returns JSON with search results and metadata. 
+ SUPPORTED GLOB PATTERNS (using github.com/bmatcuk/doublestar/v4): - *.go - all Go files in current directory only - **/*.js - all JavaScript files in current directory and all subdirectories @@ -132,8 +133,15 @@ func (t FileSearchTool) Call(ctx context.Context, input string) (string, error) return "", err } - // Format output - output := t.formatResults(searchPath, req.Pattern, matches, req.MaxResults) + // Format output as JSON + output, err := t.formatResults(searchPath, req.Pattern, matches, req.MaxResults) + if err != nil { + toolErr := fmt.Errorf("failed to format results: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) @@ -185,33 +193,32 @@ func (t FileSearchTool) searchFiles(searchPath, pattern string, maxResults int) return matches, nil } -// formatResults formats the search results into a readable output -func (t FileSearchTool) formatResults(searchPath, pattern string, matches []string, maxResults int) string { - var output strings.Builder - - output.WriteString("File search results:\n") - output.WriteString(fmt.Sprintf("Current directory: %s\n", searchPath)) - output.WriteString(fmt.Sprintf("Pattern: %s\n", pattern)) - output.WriteString(fmt.Sprintf("Found %d file(s)", len(matches))) - - if len(matches) >= maxResults { - output.WriteString(fmt.Sprintf(" (limited to %d results)", maxResults)) +// formatResults formats the search results into a JSON response +func (t FileSearchTool) formatResults(searchPath, pattern string, matches []string, maxResults int) (string, error) { + // Prepare JSON response structure + type FileSearchResponse struct { + CurrentDirectory string `json:"currentDirectory"` + Pattern string `json:"pattern"` + TotalFound int `json:"totalFound"` + MaxResults int `json:"maxResults"` + ResultsLimited bool `json:"resultsLimited"` + Matches []string `json:"matches"` } - output.WriteString("\n\n") - if len(matches) == 0 { - output.WriteString("No files found matching the pattern.\n") - return output.String() + response := FileSearchResponse{ + CurrentDirectory: searchPath, + Pattern: pattern, + TotalFound: len(matches), + MaxResults: maxResults, + ResultsLimited: len(matches) >= maxResults, + Matches: matches, } - output.WriteString("Matching files:\n") - for i, match := range matches { - output.WriteString(fmt.Sprintf("%3d. %s\n", i+1, match)) - } - - if len(matches) >= maxResults { - output.WriteString(fmt.Sprintf("\n⚠️ Results limited to %d files. 
Use maxResults parameter to adjust limit.\n", maxResults)) + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + return "", fmt.Errorf("failed to marshal JSON response: %w", err) } - return output.String() + return string(jsonData), nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/read_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/read_file.go index 5983316aae9..0890e127e76 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/read_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/read_file.go @@ -6,6 +6,7 @@ import ( "fmt" "os" "strings" + "time" "github.com/tmc/langchaingo/callbacks" ) @@ -22,12 +23,39 @@ type ReadFileRequest struct { EndLine int `json:"endLine,omitempty"` // Optional: 1-based line number to end reading at } +// ReadFileResponse represents the JSON output for the read_file tool +type ReadFileResponse struct { + Success bool `json:"success"` + FilePath string `json:"filePath"` + Content string `json:"content"` + IsTruncated bool `json:"isTruncated"` + IsPartial bool `json:"isPartial"` + LineRange *LineRange `json:"lineRange,omitempty"` + FileInfo ReadFileInfo `json:"fileInfo"` + Message string `json:"message,omitempty"` +} + +// LineRange represents the range of lines read +type LineRange struct { + StartLine int `json:"startLine"` + EndLine int `json:"endLine"` + TotalLines int `json:"totalLines"` + LinesRead int `json:"linesRead"` +} + +// ReadFileInfo represents file metadata for read operations +type ReadFileInfo struct { + Size int64 `json:"size"` + ModifiedTime time.Time `json:"modifiedTime"` + Permissions string `json:"permissions"` +} + func (t ReadFileTool) Name() string { return "read_file" } func (t ReadFileTool) Description() string { - return `Read file contents with intelligent handling for different file sizes and partial reads. + return `Read file contents with intelligent handling for different file sizes and partial reads. Returns JSON response with file content and metadata. Input: JSON payload with the following structure: { @@ -127,14 +155,30 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { // Handle very large files differently (unless specific line range requested) if fileSize > 1024*1024 && req.StartLine == 0 && req.EndLine == 0 { // 1MB+ - output := fmt.Sprintf("File: %s is very large (%d bytes / %.2f MB)\n", - req.FilePath, fileSize, float64(fileSize)/(1024*1024)) - output += "⚠️ File too large to read completely. Use startLine and endLine parameters for specific sections.\n" - output += "Examples:\n" - output += fmt.Sprintf(`- {"filePath": "%s", "startLine": 1, "endLine": 50} - first 50 lines`+"\n", req.FilePath) - output += fmt.Sprintf(`- {"filePath": "%s", "startLine": 100, "endLine": 200} - lines 100 to 200`+"\n", req.FilePath) - output += fmt.Sprintf(`- {"filePath": "%s", "endLine": 100} - first 100 lines`+"\n", req.FilePath) + response := ReadFileResponse{ + Success: false, + FilePath: req.FilePath, + Content: "", + IsTruncated: false, + IsPartial: false, + FileInfo: ReadFileInfo{ + Size: fileSize, + ModifiedTime: fileInfo.ModTime(), + Permissions: fileInfo.Mode().String(), + }, + Message: fmt.Sprintf("File is very large (%.2f MB). 
Use startLine and endLine parameters for specific sections.", float64(fileSize)/(1024*1024)), + } + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + output := string(jsonData) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) } @@ -162,10 +206,13 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { // Handle partial reads based on line range if req.StartLine > 0 || req.EndLine > 0 { - return t.handlePartialRead(ctx, req.FilePath, lines, req.StartLine, req.EndLine, totalLines) + return t.handlePartialRead(ctx, req.FilePath, lines, req.StartLine, req.EndLine, totalLines, fileInfo) } - var output string + var finalContent string + var isTruncated bool + var message string + // Improved truncation with better limits for full file reads if len(content) > 10000 { // 10KB limit // Show first 50 lines and last 10 lines @@ -174,13 +221,39 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { preview += fmt.Sprintf("\n\n... [%d lines omitted] ...\n\n", totalLines-60) preview += strings.Join(lines[totalLines-10:], "\n") } - - output = fmt.Sprintf("File: %s (%d bytes, %d lines - showing first 50 and last 10 lines)\n%s\n\n[Use startLine/endLine parameters for specific sections]\n", - req.FilePath, len(content), totalLines, preview) + finalContent = preview + isTruncated = true + message = "Large file truncated - showing first 50 and last 10 lines" } else { - output = fmt.Sprintf("File: %s (%d bytes, %d lines)\n%s\n\n", req.FilePath, len(content), totalLines, string(content)) + finalContent = string(content) + isTruncated = false + message = "File read successfully" + } + + response := ReadFileResponse{ + Success: true, + FilePath: req.FilePath, + Content: finalContent, + IsTruncated: isTruncated, + IsPartial: false, + FileInfo: ReadFileInfo{ + Size: fileSize, + ModifiedTime: fileInfo.ModTime(), + Permissions: fileInfo.Mode().String(), + }, + Message: message, + } + + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } + output := string(jsonData) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) } @@ -189,7 +262,7 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { } // handlePartialRead handles reading specific line ranges from a file -func (t ReadFileTool) handlePartialRead(ctx context.Context, filePath string, lines []string, startLine, endLine, totalLines int) (string, error) { +func (t ReadFileTool) handlePartialRead(ctx context.Context, filePath string, lines []string, startLine, endLine, totalLines int, fileInfo os.FileInfo) (string, error) { // Validate and adjust line numbers (1-based to 0-based) if startLine == 0 { startLine = 1 // Default to start of file @@ -206,10 +279,30 @@ func (t ReadFileTool) handlePartialRead(ctx context.Context, filePath string, li endLine = totalLines } if startLine > endLine { - output := fmt.Sprintf("❌ Invalid line range: start line (%d) cannot be greater than end line (%d)\n\n", startLine, endLine) - output += "💡 Example of correct usage:\n" - output += fmt.Sprintf(`{"filePath": "%s", "startLine": 1, "endLine": 50}`, 
filePath) + response := ReadFileResponse{ + Success: false, + FilePath: filePath, + Content: "", + IsTruncated: false, + IsPartial: false, + FileInfo: ReadFileInfo{ + Size: fileInfo.Size(), + ModifiedTime: fileInfo.ModTime(), + Permissions: fileInfo.Mode().String(), + }, + Message: fmt.Sprintf("Invalid line range: start line (%d) cannot be greater than end line (%d)", startLine, endLine), + } + + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + output := string(jsonData) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("invalid line range: start %d > end %d", startLine, endLine)) t.CallbacksHandler.HandleToolEnd(ctx, output) @@ -226,9 +319,37 @@ func (t ReadFileTool) handlePartialRead(ctx context.Context, filePath string, li content := strings.Join(selectedLines, "\n") linesRead := endLine - startLine + 1 - output := fmt.Sprintf("File: %s (lines %d-%d of %d total lines, %d lines read)\n%s\n\n", - filePath, startLine, endLine, totalLines, linesRead, content) + response := ReadFileResponse{ + Success: true, + FilePath: filePath, + Content: content, + IsTruncated: false, + IsPartial: true, + LineRange: &LineRange{ + StartLine: startLine, + EndLine: endLine, + TotalLines: totalLines, + LinesRead: linesRead, + }, + FileInfo: ReadFileInfo{ + Size: fileInfo.Size(), + ModifiedTime: fileInfo.ModTime(), + Permissions: fileInfo.Mode().String(), + }, + Message: fmt.Sprintf("Successfully read %d lines (%d-%d) from file", linesRead, startLine, endLine), + } + + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + output := string(jsonData) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go index 757111b4127..f0413af9e75 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go @@ -7,6 +7,7 @@ import ( "os" "path/filepath" "strings" + "time" "github.com/tmc/langchaingo/callbacks" ) @@ -20,9 +21,35 @@ type WriteFileTool struct { type WriteFileRequest struct { Filename string `json:"filename"` Content string `json:"content"` - Mode string `json:"mode,omitempty"` // "write" (default), "append", "create" - ChunkNum int `json:"chunk_num,omitempty"` // For chunked writing: 1-based chunk number - TotalChunks int `json:"total_chunks,omitempty"` // For chunked writing: total expected chunks + Mode string `json:"mode,omitempty"` // "write" (default), "append", "create" + ChunkNum int `json:"chunkNum,omitempty"` // For chunked writing: 1-based chunk number + TotalChunks int `json:"totalChunks,omitempty"` // For chunked writing: total expected chunks +} + +// WriteFileResponse represents the JSON output for the write_file tool +type WriteFileResponse struct { + Success bool `json:"success"` + Operation string `json:"operation"` + FilePath string `json:"filePath"` + BytesWritten int `json:"bytesWritten"` + IsChunked bool `json:"isChunked"` + ChunkInfo *ChunkInfo `json:"chunkInfo,omitempty"` + FileInfo FileInfoDetails 
`json:"fileInfo"` + Message string `json:"message,omitempty"` +} + +// ChunkInfo represents chunked writing details +type ChunkInfo struct { + ChunkNumber int `json:"chunkNumber"` + TotalChunks int `json:"totalChunks"` + IsComplete bool `json:"isComplete"` +} + +// FileInfoDetails represents file metadata +type FileInfoDetails struct { + Size int64 `json:"size"` + ModifiedTime time.Time `json:"modifiedTime"` + Permissions string `json:"permissions"` } func (t WriteFileTool) Name() string { @@ -30,21 +57,21 @@ func (t WriteFileTool) Name() string { } func (t WriteFileTool) Description() string { - return `Comprehensive file writing tool that handles small and large files intelligently. + return `Comprehensive file writing tool that handles small and large files intelligently. Returns JSON response with operation details. Input: JSON payload with the following structure: { "filename": "path/to/file.txt", "content": "file content here", "mode": "write", - "chunk_num": 1, - "total_chunks": 3 + "chunkNum": 1, + "totalChunks": 3 } Field descriptions: - mode: "write" (default), "append", or "create" -- chunk_num: for chunked writing (1-based) -- total_chunks: total number of chunks +- chunkNum: for chunked writing (1-based) +- totalChunks: total number of chunks MODES: - "write" (default): Overwrite/create file @@ -52,9 +79,9 @@ MODES: - "create": Create file only if it doesn't exist CHUNKED WRITING (for large files): -Use chunk_num and total_chunks for files that might be too large: -- chunk_num: 1-based chunk number (1, 2, 3...) -- total_chunks: Total number of chunks you'll send +Use chunkNum and totalChunks for files that might be too large: +- chunkNum: 1-based chunk number (1, 2, 3...) +- totalChunks: Total number of chunks you'll send EXAMPLES: @@ -65,9 +92,9 @@ Append to file: {"filename": "./log.txt", "content": "\nNew log entry", "mode": "append"} Large file (chunked): -{"filename": "./large.bicep", "content": "first part...", "chunk_num": 1, "total_chunks": 3} -{"filename": "./large.bicep", "content": "middle part...", "chunk_num": 2, "total_chunks": 3} -{"filename": "./large.bicep", "content": "final part...", "chunk_num": 3, "total_chunks": 3} +{"filename": "./large.bicep", "content": "first part...", "chunkNum": 1, "totalChunks": 3} +{"filename": "./large.bicep", "content": "middle part...", "chunkNum": 2, "totalChunks": 3} +{"filename": "./large.bicep", "content": "final part...", "chunkNum": 3, "totalChunks": 3} The input must be formatted as a single line valid JSON string.` } @@ -141,7 +168,7 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { // handleChunkedWrite handles writing files in chunks func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequest) (string, error) { if req.ChunkNum < 1 || req.TotalChunks < 1 || req.ChunkNum > req.TotalChunks { - err := fmt.Errorf("invalid chunk numbers: chunk_num=%d, total_chunks=%d", req.ChunkNum, req.TotalChunks) + err := fmt.Errorf("invalid chunk numbers: chunkNum=%d, totalChunks=%d", req.ChunkNum, req.TotalChunks) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolError(ctx, err) } @@ -165,7 +192,7 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ if req.ChunkNum == 1 { // First chunk - create/overwrite file err = os.WriteFile(filePath, []byte(content), 0644) - operation = fmt.Sprintf("Started writing chunk %d/%d", req.ChunkNum, req.TotalChunks) + operation = "write" } else { // Subsequent chunks - append file, openErr := 
os.OpenFile(filePath, os.O_APPEND|os.O_WRONLY, 0644) @@ -179,11 +206,7 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ defer file.Close() _, err = file.WriteString(content) - if req.ChunkNum == req.TotalChunks { - operation = fmt.Sprintf("Completed writing chunk %d/%d (final)", req.ChunkNum, req.TotalChunks) - } else { - operation = fmt.Sprintf("Wrote chunk %d/%d", req.ChunkNum, req.TotalChunks) - } + operation = "append" } if err != nil { @@ -194,7 +217,7 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ return "", toolErr } - // Get file size + // Get file info fileInfo, err := os.Stat(filePath) if err != nil { toolErr := fmt.Errorf("failed to verify file %s: %w", filePath, err) @@ -204,13 +227,43 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ return "", toolErr } - output := fmt.Sprintf("%s to %s. Chunk size: %d bytes, Total file size: %d bytes", - operation, filePath, len(content), fileInfo.Size()) + // Create JSON response + response := WriteFileResponse{ + Success: true, + Operation: operation, + FilePath: filePath, + BytesWritten: len(content), + IsChunked: true, + ChunkInfo: &ChunkInfo{ + ChunkNumber: req.ChunkNum, + TotalChunks: req.TotalChunks, + IsComplete: req.ChunkNum == req.TotalChunks, + }, + FileInfo: FileInfoDetails{ + Size: fileInfo.Size(), + ModifiedTime: fileInfo.ModTime(), + Permissions: fileInfo.Mode().String(), + }, + } if req.ChunkNum == req.TotalChunks { - output += "\n✅ File writing completed successfully!" + response.Message = "File writing completed successfully" + } else { + response.Message = fmt.Sprintf("Chunk %d/%d written successfully", req.ChunkNum, req.TotalChunks) + } + + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr } + output := string(jsonData) + if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) } @@ -287,7 +340,32 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ return "", toolErr } - output := fmt.Sprintf("%s %d bytes to %s successfully", operation, fileInfo.Size(), filePath) + // Create JSON response + response := WriteFileResponse{ + Success: true, + Operation: operation, + FilePath: filePath, + BytesWritten: len(content), + IsChunked: false, + FileInfo: FileInfoDetails{ + Size: fileInfo.Size(), + ModifiedTime: fileInfo.ModTime(), + Permissions: fileInfo.Mode().String(), + }, + Message: fmt.Sprintf("File %s successfully", strings.ToLower(operation)), + } + + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, toolErr) + } + return "", toolErr + } + + output := string(jsonData) if t.CallbacksHandler != nil { t.CallbacksHandler.HandleToolEnd(ctx, output) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json index efca4416be8..dd2078ee03e 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json +++ b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json @@ -1,9 +1,19 @@ { "servers": { - "Azure": { + "bestpractices": { "type": "stdio", "command": "azmcp", - "args": ["server", 
"start"] + "args": ["server", "start", "--namespace", "bestpractices"] + }, + "subscription": { + "type": "stdio", + "command": "azmcp", + "args": ["server", "start", "--namespace", "subscription"] + }, + "deploy": { + "type": "stdio", + "command": "azmcp", + "args": ["server", "start", "--namespace", "deploy"] } } } From 9167aa0b4fcc406d3742120f8c5a01ee571f53e9 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Wed, 30 Jul 2025 17:16:50 -0700 Subject: [PATCH 043/116] Adds c2c similar tools --- .../default_agent_format_instructions.txt | 2 +- .../internal/cmd/enhanced_integration.go | 2 +- .../azd.ai.start/internal/logging/logger.go | 60 +- .../tools/azd/azd_iac_generation_rules.go | 28 + .../internal/tools/azd/azd_plan_init.go | 28 + .../internal/tools/azd/azd_yaml_schema.go | 28 + .../azd.ai.start/internal/tools/azd/loader.go | 25 + .../azd/prompts/azd_iac_generation_rules.md | 195 ++ .../tools/azd/prompts/azd_plan_init.md | 267 +++ .../tools/azd/prompts/azd_yaml_schema.md | 18 + .../tools/azd/prompts/azure.yaml.json | 1819 +++++++++++++++++ .../internal/tools/azd/prompts/prompts.go | 14 + .../internal/tools/common/types.go | 7 + .../internal/tools/dev/command_executor.go | 189 +- .../internal/tools/io/copy_file.go | 140 +- .../internal/tools/io/create_directory.go | 4 + .../internal/tools/io/delete_directory.go | 4 + .../internal/tools/io/delete_file.go | 4 + .../internal/tools/io/directory_list.go | 21 +- .../internal/tools/io/file_info.go | 4 + .../internal/tools/io/move_file.go | 3 + .../azd.ai.start/internal/tools/loader.go | 2 + .../azd.ai.start/internal/tools/mcp/mcp.json | 14 +- 23 files changed, 2739 insertions(+), 139 deletions(-) create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_yaml_schema.md create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azure.yaml.json create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/common/types.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt index f7db6dd7e21..d66dcb43d88 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt +++ b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt @@ -34,7 +34,7 @@ Observation: [result] Remember: Always continue taking actions until the user's request is fully satisfied. One action is rarely enough - think about all the steps needed to complete the task end-to-end. -When you are done answering the questions and performing all your tasks you MUST use the following format: +When you are done answering the questions and performing all your tasks you MUST ALWAYS use the following format: Thought: Do I need to use a tool? 
No AI: [briefly summarize your response without all the details from your observations] \ No newline at end of file diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go index dad0371268a..7e436b76e6d 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go @@ -66,7 +66,7 @@ func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) // Process the query with the enhanced agent err := azureAgent.ProcessQuery(ctx, userInput) if err != nil { - return err + continue } } diff --git a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go index d148b9273b2..aa693cc7f6b 100644 --- a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go +++ b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go @@ -6,6 +6,7 @@ package logging import ( "context" "encoding/json" + "fmt" "regexp" "strings" @@ -65,7 +66,9 @@ func (al *ActionLogger) HandleLLMGenerateContentEnd(ctx context.Context, res *ll } // Find all "Thought:" patterns and extract the content that follows - thoughtRegex := regexp.MustCompile(`(?i)thought:\s*(.*)`) + // (?is) flags: i=case insensitive, s=dot matches newlines + // .*? is non-greedy to stop at the first occurrence of next pattern or end + thoughtRegex := regexp.MustCompile(`(?is)thought:\s*(.*?)(?:\n\s*(?:action|final answer|observation|ai):|$)`) matches := thoughtRegex.FindAllStringSubmatch(content, -1) for _, match := range matches { @@ -120,6 +123,14 @@ func (al *ActionLogger) HandleChainError(ctx context.Context, err error) { color.Red("\n%s\n", err.Error()) } +// truncateString truncates a string to maxLen characters and adds "..." if truncated +func truncateString(s string, maxLen int) string { + if len(s) > maxLen { + return s[:maxLen-3] + "..." 
+ } + return s +} + // HandleAgentAction is called when an agent action is planned func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.AgentAction) { // Print "Calling " @@ -132,18 +143,47 @@ func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.Age var toolInput map[string]interface{} if err := json.Unmarshal([]byte(action.ToolInput), &toolInput); err == nil { - // Successfully parsed JSON, check for filename parameter - if filename, ok := toolInput["filename"]; ok { - if filenameStr, ok := filename.(string); ok { - color.Green("\n🤖 Agent: Calling %s %s\n", action.Tool, filenameStr) - return + // Successfully parsed JSON, create comma-delimited key-value pairs + excludedKeys := map[string]bool{"content": true} + var params []string + + for key, value := range toolInput { + if excludedKeys[key] { + continue + } + + var valueStr string + switch v := value.(type) { + case []interface{}: + // Handle arrays by joining with spaces + var strSlice []string + for _, item := range v { + strSlice = append(strSlice, strings.TrimSpace(string(fmt.Sprintf("%v", item)))) + } + valueStr = strings.Join(strSlice, " ") + default: + valueStr = strings.TrimSpace(fmt.Sprintf("%v", v)) + } + + if valueStr != "" { + params = append(params, fmt.Sprintf("%s: %s", key, valueStr)) } } - // JSON parsed but no filename found, use fallback format - color.Green("\n🤖 Agent: Calling %s tool\n", action.Tool) + + var paramStr string + if len(params) > 0 { + paramStr = strings.Join(params, ", ") + } else { + paramStr = "tool" + } + + paramStr = truncateString(paramStr, 100) + output := fmt.Sprintf("\n🤖 Agent: Calling %s tool with %s\n", action.Tool, paramStr) + color.Green(output) } else { - // JSON parsing failed, show the input as text - color.Green("\n🤖 Agent: Calling %s tool with %s\n", action.Tool, action.ToolInput) + // JSON parsing failed, show the input as text with truncation + toolInput := truncateString(action.ToolInput, 100) + color.Green("\n🤖 Agent: Calling %s tool with %s\n", action.Tool, toolInput) } } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go new file mode 100644 index 00000000000..f67c067e820 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go @@ -0,0 +1,28 @@ +package azd + +import ( + "context" + + "azd.ai.start/internal/tools/azd/prompts" + "github.com/tmc/langchaingo/tools" +) + +var _ tools.Tool = &AzdIacGenerationRulesTool{} + +type AzdIacGenerationRulesTool struct { +} + +func (t *AzdIacGenerationRulesTool) Name() string { + return "azd_iac_generation_rules" +} + +func (t *AzdIacGenerationRulesTool) Description() string { + return ` + Gets the infrastructure as code (IaC) rules and best practices and patterns to use when generating bicep files and modules for use within AZD. 
+ Input: empty string + ` +} + +func (t *AzdIacGenerationRulesTool) Call(ctx context.Context, input string) (string, error) { + return prompts.AzdIacRulesPrompt, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go new file mode 100644 index 00000000000..1e648939d2b --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go @@ -0,0 +1,28 @@ +package azd + +import ( + "context" + + "azd.ai.start/internal/tools/azd/prompts" + "github.com/tmc/langchaingo/tools" +) + +var _ tools.Tool = &AzdPlanInitTool{} + +type AzdPlanInitTool struct { +} + +func (t *AzdPlanInitTool) Name() string { + return "azd_plan_init" +} + +func (t *AzdPlanInitTool) Description() string { + return ` + Gets the required workflow steps and best practices and patterns for initializing or migrating an application to use AZD. + Input: empty string + ` +} + +func (t *AzdPlanInitTool) Call(ctx context.Context, input string) (string, error) { + return prompts.AzdPlanInitPrompt, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go new file mode 100644 index 00000000000..db83ddf3d08 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go @@ -0,0 +1,28 @@ +package azd + +import ( + "context" + + "azd.ai.start/internal/tools/azd/prompts" + "github.com/tmc/langchaingo/tools" +) + +var _ tools.Tool = &AzdYamlSchemaTool{} + +type AzdYamlSchemaTool struct { +} + +func (t *AzdYamlSchemaTool) Name() string { + return "azd_yaml_schema" +} + +func (t *AzdYamlSchemaTool) Description() string { + return ` + Gets the Azure YAML JSON schema file specification and structure for azure.yaml configuration files used in AZD. + Input: empty string + ` +} + +func (t *AzdYamlSchemaTool) Call(ctx context.Context, input string) (string, error) { + return prompts.AzdYamlSchemaPrompt, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go new file mode 100644 index 00000000000..648d70bb569 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go @@ -0,0 +1,25 @@ +package azd + +import ( + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/tools" +) + +// AzdToolsLoader loads AZD-related tools +type AzdToolsLoader struct { + callbackHandler callbacks.Handler +} + +func NewAzdToolsLoader(callbackHandler callbacks.Handler) *AzdToolsLoader { + return &AzdToolsLoader{ + callbackHandler: callbackHandler, + } +} + +func (l *AzdToolsLoader) LoadTools() ([]tools.Tool, error) { + return []tools.Tool{ + &AzdPlanInitTool{}, + &AzdIacGenerationRulesTool{}, + &AzdYamlSchemaTool{}, + }, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md new file mode 100644 index 00000000000..dd89e2586b7 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md @@ -0,0 +1,195 @@ +# Infrastructure as Code (IaC) Generation Rules for Azure Developer CLI (AZD) + +This document provides comprehensive rules and guidelines for generating Bicep Infrastructure as Code files and modules for AZD projects. Follow these rules strictly when generating Azure infrastructure code. 
+ +## Core Generation Rules + +### File Structure and Organization + +- **REQUIRED**: Place all IaC files in the `./infra` folder within an AZD project +- **REQUIRED**: Name the main deployment file `main.bicep` - this is the primary deployment target +- **REQUIRED**: The root level `main.bicep` must be a subscription level deployment using `targetScope = 'subscription'` +- **REQUIRED**: The main.bicep file must create a resource group as the primary container for all resources +- **REQUIRED**: Pass the resource group scope to all child modules that deploy resources +- **REQUIRED**: Create modular, reusable Bicep files instead of monolithic templates +- **RECOMMENDED**: Organize modules by resource type or logical grouping + +### Azure Best Practices Compliance + +- **REQUIRED**: Follow Azure Well-Architected Framework principles +- **REQUIRED**: Use Bicep best practices including proper parameter validation and resource dependencies +- **REQUIRED**: Leverage Azure Verified Modules (AVM) when available - always check for existing AVM modules before creating custom ones +- **REQUIRED**: Implement least-privilege access principles + +### Naming Conventions + +- **REQUIRED**: Use consistent naming pattern: `{resourcePrefix}-{name}-{uniqueHash}` +- **REQUIRED**: Generate unique hash using combination of environment name, subscription ID, and resource group name +- **EXAMPLE**: `app-myservice-h3x9k2` where `h3x9k2` is generated from env/subscription/rg +- **FORBIDDEN**: Hard-code tenant IDs, subscription IDs, or resource group names + +### Module Parameters + +- **REQUIRED**: Every module must accept these standard parameters: + - `name` (string): Base name for the resource + - `location` (string): Azure region for deployment + - `tags` (object): Resource tags for governance +- **REQUIRED**: Modules that deploy Azure resources must use `targetScope = 'resourceGroup'` and be called with the resource group scope from main.bicep +- **REQUIRED**: Provide intelligent defaults for optional parameters +- **REQUIRED**: Use parameter decorators for validation (e.g., `@minLength`, `@allowed`) +- **RECOMMENDED**: Group related parameters using objects when appropriate + +### Tagging Strategy + +- **REQUIRED**: Tag resource groups with `azd-env-name: {environment-name}` +- **REQUIRED**: Tag hosting resources with `azd-service-name: {service-name}` +- **RECOMMENDED**: Include additional governance tags (cost center, owner, etc.) + +### Security and Compliance + +- **FORBIDDEN**: Hard-code secrets, connection strings, or sensitive values +- **REQUIRED**: Use Key Vault references for secrets +- **REQUIRED**: Enable diagnostic settings and logging where applicable +- **REQUIRED**: Follow principle of least privilege for managed identities + +### Quality Assurance + +- **REQUIRED**: Validate all generated Bicep code using Bicep CLI +- **REQUIRED**: Address all warnings and errors before considering code complete +- **REQUIRED**: Test deployment in a sandbox environment when possible + +## Supported Azure Services + +### Primary Hosting Resources (Choose One) + +1. **Azure Container Apps** ⭐ **(PREFERRED)** + - Best for containerized applications + - Built-in scaling and networking + - Supports both HTTP and background services + +2. **Azure App Service** + - Best for web applications and APIs + - Supports multiple runtime stacks + - Built-in CI/CD integration + +3. 
**Azure Function Apps** + - Best for serverless and event-driven workloads + - Multiple hosting plans available + - Trigger-based execution model + +4. **Azure Static Web Apps** + - Best for frontend applications + - Built-in GitHub/Azure DevOps integration + - Free tier available + +5. **Azure Kubernetes Service (AKS)** + - Best for complex containerized workloads + - Full Kubernetes capabilities + - Requires advanced configuration + +### Essential Supporting Resources + +**REQUIRED** - Include these resources in most AZD applications: + +- **Log Analytics Workspace** + - Central logging and monitoring + - Required for Application Insights + - Enable diagnostic settings for all resources + +- **Application Insights** + - Application performance monitoring + - Dependency tracking and telemetry + - Link to Log Analytics workspace + +- **Azure Key Vault** + - Secure storage for secrets, keys, and certificates + - Use managed identity for access + - Enable soft delete and purge protection + +**CONDITIONAL** - Include based on application requirements: + +- **Azure Container Registry** (for container-based apps) +- **Azure Service Bus** (for messaging scenarios) +- **Azure Cosmos DB** (for NoSQL data storage) +- **Azure SQL Database** (for relational data storage) +- **Azure Storage Account** (for blob/file storage) +- **Azure Cache for Redis** (for caching scenarios) + +## Code Generation Examples + +### Main.bicep Structure Template + +```bicep +targetScope = 'subscription' + +@description('Name of the environment') +param environmentName string + +@description('Location for all resources') +param location string + +@description('Tags to apply to all resources') +param tags object = {} + +// Generate unique suffix for resource names +var resourceSuffix = take(uniqueString(subscription().id, environmentName, location), 6) +var resourceGroupName = 'rg-${environmentName}-${resourceSuffix}' + +// Create the resource group +resource resourceGroup 'Microsoft.Resources/resourceGroups@2021-04-01' = { + name: resourceGroupName + location: location + tags: union(tags, { + 'azd-env-name': environmentName + }) +} + +// Example module deployment with resource group scope +module appService 'modules/app-service.bicep' = { + name: 'app-service' + scope: resourceGroup + params: { + name: 'myapp' + location: location + tags: tags + } +} +``` + +### Child Module Structure Template + +```bicep +targetScope = 'resourceGroup' + +@description('Base name for all resources') +param name string + +@description('Location for all resources') +param location string = resourceGroup().location + +@description('Tags to apply to all resources') +param tags object = {} + +// Generate unique suffix for resource names +var resourceSuffix = take(uniqueString(subscription().id, resourceGroup().name, name), 6) +var resourceName = '${name}-${resourceSuffix}' + +// Resource definitions here... 
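+// Illustrative example only (an assumption, not a required resource):
+// a Log Analytics workspace showing how a child module can apply the shared
+// naming convention, location, and tags defined above. Replace this with the
+// actual resources this module is responsible for.
+resource logAnalytics 'Microsoft.OperationalInsights/workspaces@2023-09-01' = {
+  name: 'log-${resourceName}'
+  location: location
+  tags: tags
+  properties: {
+    retentionInDays: 30
+  }
+}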
+``` + +## Validation Checklist + +Before completing code generation, verify: + +- [ ] All files are in `./infra` folder +- [ ] `main.bicep` exists as primary deployment file with subscription scope +- [ ] Resource group is created in `main.bicep` and properly tagged +- [ ] All child modules use `targetScope = 'resourceGroup'` and receive resource group scope +- [ ] All resources use consistent naming convention +- [ ] Required tags are applied correctly +- [ ] No hard-coded secrets or identifiers +- [ ] Parameters have appropriate validation +- [ ] Bicep CLI validation passes without errors +- [ ] AVM modules are used where available +- [ ] Supporting resources are included as needed +- [ ] Security best practices are followed diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md new file mode 100644 index 00000000000..d8f7a391f68 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md @@ -0,0 +1,267 @@ +# AZD Application Initialization and Migration Plan + +This document provides a comprehensive, step-by-step plan for initializing or migrating applications to use Azure Developer CLI (AZD). Follow these steps sequentially to ensure successful AZD adoption. + +## Executive Summary + +Transform any application into an AZD-compatible project by: + +1. Analyzing the current codebase and architecture +2. Identifying all application components and dependencies +3. Generating required configuration and infrastructure files +4. Establishing the AZD environment structure + +## Phase 1: Discovery and Analysis + +### Step 1: Deep File System Analysis + +**REQUIRED ACTIONS:** + +- Scan all files in the current working directory recursively +- Document file structure, programming languages, and frameworks detected +- Identify configuration files (package.json, requirements.txt, pom.xml, etc.) 
+- Locate any existing Docker files, docker-compose files, or containerization configs +- Find database configuration files and connection strings +- Identify API endpoints, service definitions, and application entry points + +**OUTPUT:** Complete inventory of all discoverable application artifacts + +### Step 2: Component Classification + +**REQUIRED ACTIONS:** + +- Categorize each discovered component into one of these types: + - **Web Applications** (frontend, SPA, static sites) + - **API Services** (REST APIs, GraphQL, gRPC services) + - **Background Services** (workers, processors, scheduled jobs) + - **Databases** (relational, NoSQL, caching) + - **Messaging Systems** (queues, topics, event streams) + - **AI/ML Components** (models, inference endpoints, training jobs) + - **Supporting Services** (authentication, logging, monitoring) + +**OUTPUT:** Structured component inventory with classifications + +### Step 3: Dependency Mapping + +**REQUIRED ACTIONS:** + +- Map inter-component dependencies and communication patterns +- Identify external service dependencies (third-party APIs, SaaS services) +- Document data flow between components +- Identify shared resources and configuration + +**OUTPUT:** Component dependency graph and communication matrix + +## Phase 2: Architecture Planning and Azure Service Selection + +### Application Component Planning + +For each identified application component, execute the following analysis: + +**REQUIRED ANALYSIS:** + +- **Hosting Platform Selection:** + - **Azure Container Apps** (PREFERRED for microservices and containerized apps) + - **Azure App Service** (for web apps and APIs with specific runtime requirements) + - **Azure Functions** (for serverless and event-driven components) + - **Azure Static Web Apps** (for frontend applications and SPAs) + - **Azure Kubernetes Service** (for complex orchestration requirements) + +- **Containerization Assessment:** + - Determine if component can run in Docker container + - If Dockerfile doesn't exist, plan Docker container strategy + - Identify base images and runtime requirements + - Document port mappings and environment variables + +- **Configuration Requirements:** + - Identify environment-specific settings + - Map secrets and sensitive configuration + - Document connection strings and service endpoints + - Plan configuration injection strategy + +**OUTPUT:** Hosting strategy and containerization plan for each component + +### Database Component Planning + +For components using persistent data storage: + +**REQUIRED ANALYSIS:** + +- **Azure Database Service Selection:** + - **Azure SQL Database** (for relational data with SQL Server compatibility) + - **Azure Database for PostgreSQL** (for PostgreSQL workloads) + - **Azure Database for MySQL** (for MySQL workloads) + - **Azure Cosmos DB** (for NoSQL, multi-model data) + - **Azure Cache for Redis** (for caching and session storage) + +- **Migration Strategy:** + - Assess current database schema and data + - Plan data migration approach + - Identify backup and recovery requirements + - Document connection string patterns + +**OUTPUT:** Database hosting plan and migration strategy + +### Messaging Component Planning + +For components using asynchronous communication: + +**REQUIRED ANALYSIS:** + +- **Azure Messaging Service Selection:** + - **Azure Service Bus** (for reliable enterprise messaging) + - **Azure Event Hubs** (for high-throughput event streaming) + - **Azure Event Grid** (for event-driven architectures) + - **Azure Storage Queues** (for simple 
queue scenarios) + +- **Integration Planning:** + - Map message flows and routing + - Identify message schemas and formats + - Plan dead letter handling and error scenarios + - Document scaling and throughput requirements + +**OUTPUT:** Messaging architecture and integration plan + +### AI Component Planning + +For components using artificial intelligence or machine learning: + +**REQUIRED ANALYSIS:** + +- **Azure AI Service Selection:** + - **Azure OpenAI Service** (for GPT models and cognitive services) + - **Azure AI Services** (for vision, speech, language processing) + - **Azure Machine Learning** (for custom ML models and training) + - **Azure Cognitive Search** (for intelligent search capabilities) + +- **Model and Data Requirements:** + - Identify required AI models and versions + - Document input/output data formats + - Plan model deployment and scaling strategy + - Assess training data and pipeline requirements + +**OUTPUT:** AI service architecture and deployment plan + +## Phase 3: File Generation and Configuration + +### Step 1: Generate azure.yaml Configuration + +**REQUIRED ACTIONS:** + +- Create `azure.yaml` file in the root directory +- Define all services with appropriate hosting configurations +- Specify build and deployment instructions for each service +- Configure environment variable mappings +- Reference infrastructure templates correctly + +**TEMPLATE STRUCTURE:** + +```yaml +name: {project-name} +services: + {service-name}: + project: ./path/to/service + host: {hosting-type} + # Additional service-specific configuration +``` + +### Step 2: Generate Infrastructure as Code Files + +**REQUIRED ACTIONS:** + +- Create `./infra` directory structure +- Generate `main.bicep` as primary deployment template +- Create modular Bicep files for each resource type +- **CRITICAL:** Follow all rules from AZD IaC Generation Rules document +- Implement proper naming conventions and tagging strategies +- Include supporting resources (Log Analytics, Application Insights, Key Vault) + +### Step 3: Generate Container Configurations + +**REQUIRED ACTIONS:** + +- Create Dockerfile for each containerizable component +- Use appropriate base images for detected programming languages +- Configure health checks and startup commands +- Set proper working directories and file permissions +- Optimize for production deployment + +### Step 4: Generate Architecture Documentation + +**REQUIRED ACTIONS:** + +- Create `azd-arch-plan.md` with comprehensive analysis +- Document all discovered components and their relationships +- Include architecture diagrams (text-based or mermaid) +- Explain Azure service selections and rationale +- Provide deployment and operational guidance + +**DOCUMENT STRUCTURE:** + +- Executive Summary +- Application Architecture Overview +- Component Analysis +- Azure Service Mapping +- Infrastructure Design +- Deployment Strategy +- Operational Considerations + +## Phase 4: Environment Initialization + +### Step 1: Create AZD Environment + +**REQUIRED ACTIONS:** + +- Execute: `azd env new {directory-name}-dev` +- Use current working directory name as environment name base +- Configure environment-specific settings +- Validate environment configuration + +### Step 2: Validation and Testing + +**REQUIRED ACTIONS:** + +- Run `azd package` to validate service configurations +- Execute `azd provision --dry-run` to test infrastructure templates +- Verify all Bicep files compile without errors +- Check all referenced files and paths exist +- Validate environment variable 
configurations + +## Success Criteria + +The migration is successful when: + +- [ ] All application components are identified and classified +- [ ] `azure.yaml` file is valid and complete +- [ ] All infrastructure files are generated and error-free +- [ ] Required Dockerfiles are created for containerizable components +- [ ] `azd-arch-plan.md` provides comprehensive documentation +- [ ] AZD environment is initialized and validated +- [ ] `azd package` completes without errors +- [ ] `azd provision --dry-run` validates successfully + +## Common Patterns and Best Practices + +### For Multi-Service Applications + +- Use Azure Container Apps for microservices architecture +- Implement shared infrastructure (networking, monitoring) +- Configure service-to-service communication properly + +### For Data-Intensive Applications + +- Co-locate compute and data services in same region +- Implement proper connection pooling and caching +- Configure backup and disaster recovery + +### For AI-Enabled Applications + +- Separate AI services from main application logic +- Implement proper error handling for AI service calls +- Plan for model updates and versioning + +### For High-Availability Applications + +- Configure multiple availability zones +- Implement health checks and auto-scaling +- Plan for disaster recovery scenarios diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_yaml_schema.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_yaml_schema.md new file mode 100644 index 00000000000..9701dd9c5ac --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_yaml_schema.md @@ -0,0 +1,18 @@ +# Azure YAML Schema + +This document contains the JSON schema specification for the azure.yaml configuration file used in Azure Developer CLI (AZD) projects. + +## Schema Content + + + +The azure.yaml file is the main configuration file for AZD projects and defines: + +- Project metadata +- Services configuration +- Infrastructure settings +- Hooks and workflows +- Environment variables +- And other project-specific settings + +This schema helps validate and provide IntelliSense support for azure.yaml files in various editors and tools. diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azure.yaml.json b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azure.yaml.json new file mode 100644 index 00000000000..747fd7fa649 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azure.yaml.json @@ -0,0 +1,1819 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://raw.githubusercontent.com/Azure/azure-dev/main/schemas/v1.0/azure.yaml.json", + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "minLength": 2, + "title": "Name of the application", + "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$", + "description": "The application name. Only lowercase letters, numbers, and hyphens (-) are allowed. The name must start and end with a letter or number." + }, + "resourceGroup": { + "type": "string", + "minLength": 3, + "maxLength": 64, + "title": "Name of the Azure resource group", + "description": "When specified will override the resource group name used for infrastructure provisioning. Supports environment variable substitution." 
+ }, + "metadata": { + "type": "object", + "properties": { + "template": { + "type": "string", + "title": "Identifier of the template from which the application was created. Optional.", + "examples": [ + "todo-nodejs-mongo@0.0.1-beta" + ] + } + } + }, + "infra": { + "type": "object", + "title": "The infrastructure configuration used for the application", + "description": "Optional. Provides additional configuration for Azure infrastructure provisioning.", + "additionalProperties": true, + "properties": { + "provider": { + "type": "string", + "title": "Type of infrastructure provisioning provider", + "description": "Optional. The infrastructure provisioning provider used to provision the Azure resources for the application. (Default: bicep)", + "enum": [ + "bicep", + "terraform" + ] + }, + "path": { + "type": "string", + "title": "Path to the location that contains Azure provisioning templates", + "description": "Optional. The relative folder path to the Azure provisioning templates for the specified provider. (Default: infra)" + }, + "module": { + "type": "string", + "title": "Name of the default module within the Azure provisioning templates", + "description": "Optional. The name of the Azure provisioning module used when provisioning resources. (Default: main)" + } + } + }, + "services": { + "type": "object", + "title": "Definition of services that comprise the application", + "minProperties": 1, + "additionalProperties": { + "type": "object", + "additionalProperties": false, + "required": [ + "host" + ], + "properties": { + "apiVersion": { + "type": "string", + "title": "Resource provider API version for deployments", + "description": "Optional. The resource provider API version to use for the service. If not specified, the default SDK API version is used. Only valid when host is 'containerapp'." + }, + "resourceGroup": { + "type": "string", + "title": "Name of the Azure resource group that contains the resource", + "description": "By default, the CLI will discover the Azure resource within the default resource group. When specified, the CLI will instead find the Azure resource within the specified resource group. Supports environment variable substitution." + }, + "resourceName": { + "type": "string", + "title": "Name of the Azure resource that implements the service", + "description": "By default, the CLI will discover the Azure resource with tag 'azd-service-name' set to the current service's name. When specified, the CLI will instead find the Azure resource with the matching resource name. Supports environment variable substitution." + }, + "project": { + "type": "string", + "title": "Path to the service source code directory" + }, + "image": { + "type": "string", + "title": "Optional. The source image to be used for the container image instead of building from source. Supports environment variable substitution.", + "description": "If omitted, container image will be built from source specified in the 'project' property. Setting both 'project' and 'image' is invalid." + }, + "host": { + "type": "string", + "title": "Required. 
The type of Azure resource used for service implementation", + "description": "The Azure service that will be used as the target for deployment operations for the service.", + "enum": [ + "appservice", + "containerapp", + "function", + "springapp", + "staticwebapp", + "aks", + "ai.endpoint" + ] + }, + "language": { + "type": "string", + "title": "Service implementation language", + "enum": [ + "dotnet", + "csharp", + "fsharp", + "py", + "python", + "js", + "ts", + "java", + "docker" + ] + }, + "module": { + "type": "string", + "title": "(DEPRECATED) Path of the infrastructure module used to deploy the service relative to the root infra folder", + "description": "If omitted, the CLI will assume the module name is the same as the service name. This property will be deprecated in a future release." + }, + "dist": { + "type": "string", + "title": "Relative path to service deployment artifacts" + }, + "docker": { + "$ref": "#/definitions/docker" + }, + "k8s": { + "$ref": "#/definitions/aksOptions" + }, + "config": { + "type": "object", + "additionalProperties": true + }, + "hooks": { + "type": "object", + "title": "Service level hooks", + "description": "Hooks should match `service` event names prefixed with `pre` or `post` depending on when the script should execute. When specifying paths they should be relative to the service path.", + "additionalProperties": false, + "properties": { + "predeploy": { + "title": "pre deploy hook", + "description": "Runs before the service is deployed to Azure", + "$ref": "#/definitions/hooks" + }, + "postdeploy": { + "title": "post deploy hook", + "description": "Runs after the service is deployed to Azure", + "$ref": "#/definitions/hooks" + }, + "prerestore": { + "title": "pre restore hook", + "description": "Runs before the service dependencies are restored", + "$ref": "#/definitions/hooks" + }, + "postrestore": { + "title": "post restore hook", + "description": "Runs after the service dependencies are restored", + "$ref": "#/definitions/hooks" + }, + "prebuild": { + "title": "pre build hook", + "description": "Runs before the service is built", + "$ref": "#/definitions/hooks" + }, + "postbuild": { + "title": "post build hook", + "description": "Runs after the service is built", + "$ref": "#/definitions/hooks" + }, + "prepackage": { + "title": "pre package hook", + "description": "Runs before the service is deployment package is created", + "$ref": "#/definitions/hooks" + }, + "postpackage": { + "title": "post package hook", + "description": "Runs after the service is deployment package is created", + "$ref": "#/definitions/hooks" + } + } + } + }, + "allOf": [ + { + "if": { + "properties": { + "host": { + "const": "containerapp" + } + } + }, + "then": { + "anyOf": [ + { + "required": [ + "image" + ], + "properties": { + "language": false + }, + "not": { + "required": [ + "project" + ] + } + }, + { + "required": [ + "project" + ], + "not": { + "required": [ + "image" + ] + } + } + ] + } + }, + { + "if": { + "not": { + "properties": { + "host": { + "const": "containerapp" + } + } + } + }, + "then": { + "properties": { + "image": false + } + } + }, + { + "if": { + "not": { + "properties": { + "host": { + "enum": [ + "containerapp", + "aks", + "ai.endpoint" + ] + } + } + } + }, + "then": { + "required": [ + "project", + "language" + ], + "properties": { + "docker": false + } + } + }, + { + "if": { + "properties": { + "host": { + "const": "ai.endpoint" + } + } + }, + "then": { + "required": [ + "config" + ], + "properties": { + "config": { + "$ref": 
"#/definitions/aiEndpointConfig", + "title": "The Azure AI endpoint configuration.", + "description": "Required. Provides additional configuration for Azure AI online endpoint deployment." + } + } + } + }, + { + "if": { + "not": { + "properties": { + "host": { + "enum": [ + "aks" + ] + } + } + } + }, + "then": { + "properties": { + "k8s": false + } + } + }, + { + "if": { + "properties": { + "language": { + "const": "java" + } + } + }, + "then": { + "properties": { + "dist": { + "type": "string", + "description": "Optional. The path to the directory containing a single Java archive file (.jar/.ear/.war), or the path to the specific Java archive file to be included in the deployment artifact. If omitted, the CLI will detect the output directory based on the build system in-use. For maven, the default output directory 'target' is assumed." + } + } + } + }, + { + "if": { + "not": { + "properties": { + "host": { + "const": "containerapp" + } + } + } + }, + "then": { + "properties": { + "apiVersion": false + } + } + }, + { + "properties": { + "dist": { + "type": "string", + "description": "Optional. The CLI will use files under this path to create the deployment artifact (ZIP file). If omitted, all files under service project directory will be included." + } + } + } + ] + } + }, + "resources": { + "type": "object", + "additionalProperties": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "title": "Type of resource", + "description": "The type of resource to be created. (Example: db.postgres)", + "enum": [ + "db.postgres", + "db.mysql", + "db.redis", + "db.mongo", + "db.cosmos", + "ai.openai.model", + "ai.project", + "ai.search", + "host.containerapp", + "host.appservice", + "messaging.eventhubs", + "messaging.servicebus", + "storage", + "keyvault" + ] + }, + "uses": { + "type": "array", + "title": "Other resources that this resource uses", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "existing": { + "type": "boolean", + "title": "An existing resource for referencing purposes", + "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. 
(Default: false)", + "default": false + } + }, + "allOf": [ + { + "if": { + "properties": { + "type": { + "const": "host.appservice" + } + } + }, + "then": { + "$ref": "#/definitions/appServiceResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "host.containerapp" + } + } + }, + "then": { + "$ref": "#/definitions/containerAppResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "ai.openai.model" + } + } + }, + "then": { + "$ref": "#/definitions/aiModelResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "ai.project" + } + } + }, + "then": { + "$ref": "#/definitions/aiProjectResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "ai.search" + } + } + }, + "then": { + "$ref": "#/definitions/aiSearchResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "db.postgres" + } + } + }, + "then": { + "$ref": "#/definitions/genericDbResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "db.mysql" + } + } + }, + "then": { + "$ref": "#/definitions/genericDbResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "db.redis" + } + } + }, + "then": { + "$ref": "#/definitions/genericDbResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "db.mongo" + } + } + }, + "then": { + "$ref": "#/definitions/genericDbResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "db.cosmos" + } + } + }, + "then": { + "$ref": "#/definitions/cosmosDbResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "messaging.eventhubs" + } + } + }, + "then": { + "$ref": "#/definitions/eventHubsResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "messaging.servicebus" + } + } + }, + "then": { + "$ref": "#/definitions/serviceBusResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "storage" + } + } + }, + "then": { + "$ref": "#/definitions/storageAccountResource" + } + }, + { + "if": { + "properties": { + "type": { + "const": "keyvault" + } + } + }, + "then": { + "$ref": "#/definitions/keyVaultResource" + } + } + ] + } + }, + "pipeline": { + "type": "object", + "title": "Definition of continuous integration pipeline", + "properties": { + "provider": { + "type": "string", + "title": "Type of pipeline provider", + "description": "Optional. The pipeline provider to be used for continuous integration. (Default: github)", + "enum": [ + "github", + "azdo" + ] + }, + "variables": { + "type": "array", + "title": "Optional. List of azd environment variables to be used in the pipeline as variables.", + "description": "If variable is found on azd environment, it is set as a variable for the pipeline.", + "items": { + "type": "string" + } + }, + "secrets": { + "type": "array", + "title": "Optional. List of azd environment variables to be used in the pipeline as secrets.", + "description": "If variable is found on azd environment, it is set as a secret for the pipeline.", + "items": { + "type": "string" + } + } + } + }, + "hooks": { + "type": "object", + "title": "Command level hooks", + "description": "Hooks should match `azd` command names prefixed with `pre` or `post` depending on when the script should execute. 
When specifying paths they should be relative to the project path.", + "additionalProperties": false, + "properties": { + "preprovision": { + "title": "pre provision hook", + "description": "Runs before the `provision` command", + "$ref": "#/definitions/hooks" + }, + "postprovision": { + "title": "post provision hook", + "description": "Runs after the `provision` command", + "$ref": "#/definitions/hooks" + }, + "preinfracreate": { + "title": "pre infra create hook", + "description": "Runs before the `infra create` or `provision` commands", + "$ref": "#/definitions/hooks" + }, + "postinfracreate": { + "title": "post infra create hook", + "description": "Runs after the `infra create` or `provision` commands", + "$ref": "#/definitions/hooks" + }, + "preinfradelete": { + "title": "pre infra delete hook", + "description": "Runs before the `infra delete` or `down` commands", + "$ref": "#/definitions/hooks" + }, + "postinfradelete": { + "title": "post infra delete hook", + "description": "Runs after the `infra delete` or `down` commands", + "$ref": "#/definitions/hooks" + }, + "predown": { + "title": "pre down hook", + "description": "Runs before the `infra delete` or `down` commands", + "$ref": "#/definitions/hooks" + }, + "postdown": { + "title": "post down hook", + "description": "Runs after the `infra delete` or `down` commands", + "$ref": "#/definitions/hooks" + }, + "preup": { + "title": "pre up hook", + "description": "Runs before the `up` command", + "$ref": "#/definitions/hooks" + }, + "postup": { + "title": "post up hook", + "description": "Runs after the `up` command", + "$ref": "#/definitions/hooks" + }, + "prepackage": { + "title": "pre package hook", + "description": "Runs before the `package` command", + "$ref": "#/definitions/hooks" + }, + "postpackage": { + "title": "post package hook", + "description": "Runs after the `package` command", + "$ref": "#/definitions/hooks" + }, + "predeploy": { + "title": "pre deploy hook", + "description": "Runs before the `deploy` command", + "$ref": "#/definitions/hooks" + }, + "postdeploy": { + "title": "post deploy hook", + "description": "Runs after the `deploy` command", + "$ref": "#/definitions/hooks" + }, + "prerestore": { + "title": "pre restore hook", + "description": "Runs before the `restore` command", + "$ref": "#/definitions/hooks" + }, + "postrestore": { + "title": "post restore hook", + "description": "Runs after the `restore` command", + "$ref": "#/definitions/hooks" + } + } + }, + "requiredVersions": { + "type": "object", + "additionalProperties": false, + "properties": { + "azd": { + "type": "string", + "title": "A range of supported versions of `azd` for this project", + "description": "A range of supported versions of `azd` for this project. If the version of `azd` is outside this range, the project will fail to load. Optional (allows all versions if absent).", + "examples": [ + ">= 0.6.0-beta.3" + ] + } + } + }, + "state": { + "type": "object", + "title": "The state configuration used for the project.", + "description": "Optional. Provides additional configuration for state management.", + "additionalProperties": false, + "properties": { + "remote": { + "type": "object", + "additionalProperties": false, + "title": "The remote state configuration.", + "description": "Optional. Provides additional configuration for remote state management such as Azure Blob Storage.", + "required": [ + "backend" + ], + "properties": { + "backend": { + "type": "string", + "title": "The remote state backend type.", + "description": "Optional. 
The remote state backend type. (Default: AzureBlobStorage)", + "default": "AzureBlobStorage", + "enum": [ + "AzureBlobStorage" + ] + }, + "config": { + "type": "object", + "additionalProperties": true + } + }, + "allOf": [ + { + "if": { + "properties": { + "backend": { + "const": "AzureBlobStorage" + } + } + }, + "then": { + "required": [ + "config" + ], + "properties": { + "config": { + "$ref": "#/definitions/azureBlobStorageConfig" + } + } + } + } + ] + } + } + }, + "platform": { + "type": "object", + "title": "The platform configuration used for the project.", + "description": "Optional. Provides additional configuration for platform specific features such as Azure Dev Center.", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "title": "The platform type.", + "description": "Required. The platform type. (Example: devcenter)", + "enum": [ + "devcenter" + ] + }, + "config": { + "type": "object", + "additionalProperties": true + } + }, + "allOf": [ + { + "if": { + "properties": { + "type": { + "const": "devcenter" + } + } + }, + "then": { + "properties": { + "config": { + "$ref": "#/definitions/azureDevCenterConfig" + } + } + } + } + ] + }, + "workflows": { + "type": "object", + "title": "The workflows configuration used for the project.", + "description": "Optional. Provides additional configuration for workflows such as override azd up behavior.", + "additionalProperties": false, + "properties": { + "up": { + "title": "The up workflow configuration", + "description": "When specified will override the default behavior for the azd up workflow. Common use cases include changing the order of the provision, package and deploy commands.", + "$ref": "#/definitions/workflow" + } + } + }, + "cloud": { + "type": "object", + "title": "The cloud configuration used for the project.", + "description": "Optional. Provides additional configuration for deploying to sovereign clouds such as Azure Government. The default cloud is AzureCloud.", + "additionalProperties": false, + "properties": { + "name": { + "enum": [ + "AzureCloud", + "AzureChinaCloud", + "AzureUSGovernment" + ] + } + } + } + }, + "definitions": { + "hooks": { + "anyOf": [ + { + "$ref": "#/definitions/hook" + }, + { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/hook" + } + } + ] + }, + "hook": { + "type": "object", + "additionalProperties": false, + "properties": { + "shell": { + "type": "string", + "title": "Type of shell to execute scripts", + "description": "Optional. The type of shell to use for the hook. (Default: sh)", + "enum": [ + "sh", + "pwsh" + ], + "default": "sh" + }, + "run": { + "type": "string", + "title": "Required. The inline script or relative path of your scripts from the project or service path", + "description": "When specifying an inline script you also must specify the `shell` to use. This is automatically inferred when using paths." + }, + "continueOnError": { + "type": "boolean", + "default": false, + "title": "Whether or not a script error will halt the azd command", + "description": "Optional. When set to true will continue to run the command even after a script error has occurred. (Default: false)" + }, + "interactive": { + "type": "boolean", + "default": false, + "title": "Whether the script will run in interactive mode", + "description": "Optional. When set to true will bind the script to stdin, stdout & stderr of the running console. 
(Default: false)" + }, + "windows": { + "title": "The hook configuration used for Windows environments", + "description": "When specified overrides the hook configuration when executed in Windows environments", + "default": null, + "$ref": "#/definitions/hook" + }, + "posix": { + "title": "The hook configuration used for POSIX (Linux & MacOS) environments", + "description": "When specified overrides the hook configuration when executed in POSIX environments", + "default": null, + "$ref": "#/definitions/hook" + }, + "secrets": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "Optional. Map of azd environment variables to hook secrets.", + "description": "If variable was set as a secret in the environment, the secret value will be passed to the hook.", + "examples": [ + { + "WITH_SECRET_VALUE": "ENV_VAR_WITH_SECRET" + } + ] + } + }, + "allOf": [ + { + "if": { + "allOf": [ + { + "required": [ + "windows" + ] + }, + { + "required": [ + "posix" + ] + } + ] + }, + "then": { + "properties": { + "run": false, + "shell": false, + "interactive": false, + "continueOnError": false, + "secrets": false + } + } + }, + { + "if": { + "anyOf": [ + { + "required": [ + "interactive" + ] + }, + { + "required": [ + "continueOnError" + ] + }, + { + "required": [ + "secrets" + ] + }, + { + "required": [ + "shell" + ] + } + ] + }, + "then": { + "required": [ + "run" + ] + } + } + ] + }, + "docker": { + "type": "object", + "description": "This is only applicable when `host` is `containerapp` or `aks`", + "additionalProperties": false, + "properties": { + "path": { + "type": "string", + "title": "The path to the Dockerfile", + "description": "Path to the Dockerfile is relative to your service", + "default": "./Dockerfile" + }, + "context": { + "type": "string", + "title": "The docker build context", + "description": "When specified overrides the default context", + "default": "." + }, + "platform": { + "type": "string", + "title": "The platform target", + "default": "amd64" + }, + "registry": { + "type": "string", + "title": "Optional. The container registry to push the image to.", + "description": "If omitted, will default to value of AZURE_CONTAINER_REGISTRY_ENDPOINT environment variable. Supports environment variable substitution." + }, + "image": { + "type": "string", + "title": "Optional. The name that will be applied to the built container image.", + "description": "If omitted, will default to the '{appName}/{serviceName}-{environmentName}'. Supports environment variable substitution." + }, + "tag": { + "type": "string", + "title": "The tag that will be applied to the built container image.", + "description": "If omitted, will default to 'azd-deploy-{unix time (seconds)}'. Supports environment variable substitution. For example, to generate unique tags for a given release: myapp/myimage:${DOCKER_IMAGE_TAG}" + }, + "buildArgs": { + "type": "array", + "title": "Optional. Build arguments to pass to the docker build command", + "description": "Build arguments to pass to the docker build command.", + "items": { + "type": "string" + } + }, + "remoteBuild": { + "type": "boolean", + "title": "Optional. Whether to build the image remotely", + "description": "If set to true, the image will be built remotely using the Azure Container Registry remote build feature. If set to false, the image will be built locally using Docker." + } + } + }, + "aksOptions": { + "type": "object", + "title": "Optional. 
The Azure Kubernetes Service (AKS) configuration options", + "additionalProperties": false, + "properties": { + "deploymentPath": { + "type": "string", + "title": "Optional. The relative path from the service path to the k8s deployment manifests. (Default: manifests)", + "description": "When set it will override the default deployment path location for k8s deployment manifests.", + "default": "manifests" + }, + "namespace": { + "type": "string", + "title": "Optional. The k8s namespace of the deployed resources. (Default: Project name)", + "description": "When specified a new k8s namespace will be created if it does not already exist" + }, + "deployment": { + "type": "object", + "title": "Optional. The k8s deployment configuration", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "title": "Optional. The name of the k8s deployment resource to use during deployment. (Default: Service name)", + "description": "Used during deployment to ensure if the k8s deployment rollout has been completed. If not set will search for a deployment resource in the same namespace that contains the service name." + } + } + }, + "service": { + "type": "object", + "title": "Optional. The k8s service configuration", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "title": "Optional. The name of the k8s service resource to use as the default service endpoint. (Default: Service name)", + "description": "Used when determining endpoints for the default service resource. If not set will search for a deployment resource in the same namespace that contains the service name." + } + } + }, + "ingress": { + "type": "object", + "title": "Optional. The k8s ingress configuration", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "title": "Optional. The name of the k8s ingress resource to use as the default service endpoint. (Default: Service name)", + "description": "Used when determining endpoints for the default ingress resource. If not set will search for a deployment resource in the same namespace that contains the service name." + }, + "relativePath": { + "type": "string", + "title": "Optional. The relative path to the service from the root of your ingress controller.", + "description": "When set will be appended to the root of your ingress resource path." + } + } + }, + "helm": { + "type": "object", + "title": "Optional. The helm configuration", + "additionalProperties": false, + "properties": { + "repositories": { + "type": "array", + "title": "Optional. The helm repositories to add", + "description": "When set will add the helm repositories to the helm client.", + "minItems": 1, + "items": { + "type": "object", + "additionalProperties": false, + "required": [ + "name", + "url" + ], + "properties": { + "name": { + "type": "string", + "title": "The name of the helm repository", + "description": "The name of the helm repository to add." + }, + "url": { + "type": "string", + "title": "The url of the helm repository", + "description": "The url of the helm repository to add." + } + } + } + }, + "releases": { + "type": "array", + "title": "Optional. 
The helm releases to install", + "description": "When set will install the helm releases to the k8s cluster.", + "minItems": 1, + "items": { + "type": "object", + "additionalProperties": false, + "required": [ + "name", + "chart" + ], + "properties": { + "name": { + "type": "string", + "title": "The name of the helm release", + "description": "The name of the helm release to install." + }, + "chart": { + "type": "string", + "title": "The name of the helm chart", + "description": "The name of the helm chart to install." + }, + "version": { + "type": "string", + "title": "The version of the helm chart", + "description": "The version of the helm chart to install." + }, + "namespace": { + "type": "string", + "title": "Optional. The k8s namespace to install the helm chart", + "description": "When set will install the helm chart to the specified namespace. Defaults to the service namespace." + }, + "values": { + "type": "string", + "title": "Optional. Relative path from service to a values.yaml to pass to the helm chart", + "description": "When set will pass the values to the helm chart." + } + } + } + } + } + }, + "kustomize": { + "type": "object", + "title": "Optional. The kustomize configuration", + "additionalProperties": false, + "properties": { + "dir": { + "type": "string", + "title": "Optional. The relative path to the kustomize directory.", + "description": "When set will use the kustomize directory to deploy to the k8s cluster. Supports environment variable substitution." + }, + "edits": { + "type": "array", + "title": "Optional. The kustomize edits to apply before deployment.", + "description": "When set will apply the edits to the kustomize directory before deployment. Supports environment variable substitution.", + "items": { + "type": "string" + } + }, + "env": { + "type": "object", + "title": "Optional. The environment key/value pairs used to generate a .env file.", + "description": "When set will generate a .env file in the kustomize directory. Values support environment variable substitution.", + "additionalProperties": { + "type": [ + "string", + "boolean", + "number" + ] + } + } + } + } + } + }, + "azureBlobStorageConfig": { + "type": "object", + "title": "The Azure Blob Storage remote state backend configuration.", + "description": "Optional. Provides additional configuration for remote state management such as Azure Blob Storage.", + "additionalProperties": false, + "required": [ + "accountName" + ], + "properties": { + "accountName": { + "type": "string", + "title": "The Azure Storage account name.", + "description": "Required. The Azure Storage account name." + }, + "containerName": { + "type": "string", + "title": "The Azure Storage container name.", + "description": "Optional. The Azure Storage container name. Defaults to project name if not specified." + }, + "endpoint": { + "type": "string", + "title": "The Azure Storage endpoint.", + "description": "Optional. The Azure Storage endpoint. (Default: blob.core.windows.net)" + } + } + }, + "azureDevCenterConfig": { + "type": "object", + "title": "The dev center configuration used for the project.", + "description": "Optional. Provides additional project configuration for Azure Dev Center integration.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "title": "The name of the Azure Dev Center", + "description": "Optional. Used as the default dev center for this project." 
+ }, + "project": { + "type": "string", + "title": "The name of the Azure Dev Center project.", + "description": "Optional. Used as the default dev center project for this project." + }, + "catalog": { + "type": "string", + "title": "The name of the Azure Dev Center catalog.", + "description": "Optional. Used as the default dev center catalog for this project." + }, + "environmentDefinition": { + "type": "string", + "title": "The name of the Dev Center catalog environment definition.", + "description": "Optional. Used as the default dev center environment definition for this project." + }, + "environmentType": { + "type": "string", + "title": "The Dev Center project environment type used for the deployment environment.", + "description": "Optional. Used as the default environment type for this project." + } + } + }, + "workflow": { + "anyOf": [ + { + "type": "object", + "additionalProperties": false, + "required": [ + "steps" + ], + "properties": { + "steps": { + "type": "array", + "title": "The steps to execute in the workflow", + "description": "The steps to execute in the workflow. (Example: provision, package, deploy)", + "minItems": 1, + "items": { + "type": "object", + "$ref": "#/definitions/workflowStep" + } + } + } + }, + { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/workflowStep" + } + } + ] + }, + "workflowStep": { + "properties": { + "azd": { + "title": "The azd command configuration", + "description": "The azd command configuration to execute. (Example: up)", + "$ref": "#/definitions/azdCommand" + } + } + }, + "azdCommand": { + "anyOf": [ + { + "type": "string", + "title": "The azd command to execute", + "description": "The name and args of the azd command to execute. (Example: deploy --all)" + }, + { + "type": "object", + "additionalProperties": false, + "required": [ + "args" + ], + "properties": { + "args": { + "type": "array", + "title": "The arguments or flags to pass to the azd command", + "description": "The arguments to pass to the azd command. (Example: --all)", + "minItems": 1 + } + } + } + ] + }, + "aiComponentConfig": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name of the AI component.", + "description": "Optional. When omitted AZD will generate a name based on the component type and the service name. Supports environment variable substitution." + }, + "path": { + "type": "string", + "title": "Path to the AI component configuration file or path.", + "description": "Required. The path to the AI component configuration file or path to the AI component source code." + }, + "overrides": { + "type": "object", + "title": "A map of key value pairs used to override the AI component configuration.", + "description": "Optional. Supports environment variable substitution.", + "additionalProperties": { + "type": "string" + } + } + }, + "required": [ + "path" + ] + }, + "aiDeploymentConfig": { + "allOf": [ + { + "$ref": "#/definitions/aiComponentConfig" + }, + { + "type": "object", + "properties": { + "environment": { + "type": "object", + "title": "A map of key/value pairs to set as environment variables for the deployment.", + "description": "Optional. 
Values support OS & AZD environment variable substitution.", + "additionalProperties": { + "type": "string" + } + } + } + } + ] + }, + "aiEndpointConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "workspace": { + "type": "string", + "title": "The name of the AI Studio project workspace.", + "description": "Optional. When omitted AZD will use the value specified in the 'AZUREAI_PROJECT_NAME' environment variable. Supports environment variable substitution." + }, + "flow": { + "$ref": "#/definitions/aiComponentConfig", + "title": "The Azure AI Studio Prompt Flow configuration.", + "description": "Optional. When omitted a prompt flow will not be created." + }, + "environment": { + "$ref": "#/definitions/aiComponentConfig", + "title": "The Azure AI Studio custom environment configuration.", + "description": "Optional. When omitted a custom environment will not be created." + }, + "model": { + "$ref": "#/definitions/aiComponentConfig", + "title": "The Azure AI Studio model configuration.", + "description": "Optional. When omitted a model will not be created." + }, + "deployment": { + "$ref": "#/definitions/aiDeploymentConfig", + "title": "The Azure AI Studio online endpoint deployment configuration.", + "description": "Required. A new online endpoint deployment will be created and traffic will automatically be shifted to the new deployment upon successful completion." + } + }, + "required": [ + "deployment" + ] + }, + "appServiceResource": { + "type": "object", + "description": "An Azure App Service web app.", + "additionalProperties": false, + "required": [ + "port", + "runtime" + ], + "properties": { + "type": { + "type": "string", + "const": "host.appservice" + }, + "uses": { + "type": "array", + "title": "Other resources that this resource uses", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "port": { + "type": "integer", + "title": "Port that the web app listens on", + "description": "Optional. The port that the web app listens on. (Default: 80)" + }, + "env": { + "type": "array", + "title": "Environment variables to set for the web app", + "items": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "title": "Name of the environment variable" + }, + "value": { + "type": "string", + "title": "Value of the environment variable. Supports environment variable substitution." + }, + "secret": { + "type": "string", + "title": "Secret value of the environment variable. Supports environment variable substitution." + } + } + } + }, + "runtime": { + "type": "object", + "title": "Runtime stack configuration", + "description": "Required. The language runtime configuration for the App Service web app.", + "required": [ + "stack", + "version" + ], + "properties": { + "stack": { + "type": "string", + "title": "Language runtime stack", + "description": "Required. The language runtime stack.", + "enum": [ + "node", + "python" + ] + }, + "version": { + "type": "string", + "title": "Runtime stack version", + "description": "Required. The language runtime version. Format varies by stack. (Example: '22-lts' for Node, '3.13' for Python)" + } + } + }, + "startupCommand": { + "type": "string", + "title": "Startup command", + "description": "Optional. Startup command that will be run as part of web app startup." 
+ } + } + }, + "containerAppResource": { + "type": "object", + "description": "A Docker-based container app.", + "additionalProperties": false, + "required": [ + "port" + ], + "properties": { + "type": { + "type": "string", + "const": "host.containerapp" + }, + "uses": { + "type": "array", + "title": "Other resources that this resource uses", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "port": { + "type": "integer", + "title": "Port that the container app listens on", + "description": "Optional. The port that the container app listens on. (Default: 80)" + }, + "env": { + "type": "array", + "title": "Environment variables to set for the container app", + "items": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "title": "Name of the environment variable" + }, + "value": { + "type": "string", + "title": "Value of the environment variable. Supports environment variable substitution." + }, + "secret": { + "type": "string", + "title": "Secret value of the environment variable. Supports environment variable substitution." + } + } + } + } + } + }, + "aiModelResource": { + "type": "object", + "description": "A deployed, ready-to-use AI model.", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "const": "ai.openai.model" + }, + "existing": { + "type": "boolean", + "title": "An existing resource for referencing purposes", + "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", + "default": false + }, + "model": { + "type": "object", + "description": "The underlying AI model.", + "additionalProperties": false, + "required": [ + "name", + "version" + ], + "properties": { + "name": { + "type": "string", + "title": "The name of the AI model.", + "description": "Required. The name of the AI model." + }, + "version": { + "type": "string", + "title": "The version of the AI model.", + "description": "Required. The version of the AI model." + } + } + } + }, + "allOf": [ + { + "if": { + "properties": { + "existing": { + "const": false + } + } + }, + "then": { + "required": [ + "model" + ] + } + } + ] + }, + "aiProjectResource": { + "type": "object", + "description": "An Azure AI Foundry project with models.", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "const": "ai.project" + }, + "existing": { + "type": "boolean", + "title": "An existing resource for referencing purposes", + "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", + "default": false + }, + "models": { + "type": "array", + "title": "AI models to deploy", + "description": "Optional. The AI models to be deployed as part of the AI project.", + "minItems": 1, + "items": { + "type": "object", + "additionalProperties": false, + "required": [ + "name", + "version", + "format", + "sku" + ], + "properties": { + "name": { + "type": "string", + "title": "The name of the AI model.", + "description": "Required. The name of the AI model." + }, + "version": { + "type": "string", + "title": "The version of the AI model.", + "description": "Required. The version of the AI model." + }, + "format": { + "type": "string", + "title": "The format of the AI model.", + "description": "Required. The format of the AI model. 
(Example: Microsoft, OpenAI)" + }, + "sku": { + "type": "object", + "title": "The SKU configuration for the AI model.", + "description": "Required. The SKU details for the AI model.", + "additionalProperties": false, + "required": [ + "name", + "usageName", + "capacity" + ], + "properties": { + "name": { + "type": "string", + "title": "The name of the SKU.", + "description": "Required. The name of the SKU. (Example: GlobalStandard)" + }, + "usageName": { + "type": "string", + "title": "The usage name of the SKU.", + "description": "Required. The usage name of the SKU for billing purposes. (Example: AIServices.GlobalStandard.MaaS, OpenAI.GlobalStandard.gpt-4o-mini)" + }, + "capacity": { + "type": "integer", + "title": "The capacity of the SKU.", + "description": "Required. The capacity of the SKU." + } + } + } + } + } + } + } + }, + "aiSearchResource": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "const": "ai.search" + }, + "existing": { + "type": "boolean", + "title": "An existing resource for referencing purposes", + "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", + "default": false + } + } + }, + "genericDbResource": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "title": "Type of resource", + "description": "The type of resource to be created. (Example: db.postgres)", + "enum": [ + "db.postgres", + "db.redis", + "db.mysql", + "db.mongo" + ] + } + } + }, + "cosmosDbResource": { + "type": "object", + "description": "A deployed, ready-to-use Azure Cosmos DB for NoSQL database.", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "const": "db.cosmos" + }, + "containers": { + "type": "array", + "title": "Containers", + "description": "Containers to be created to store data. Each container stores a collection of items.", + "items": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "title": "Container name.", + "description": "Required. The name of the container." + }, + "partitionKeys": { + "type": "array", + "title": "Partition keys.", + "description": "Required. The partition key(s) used to distribute data across partitions. The ordering of keys matters. By default, a single partition key '/id' is naturally a great choice for most applications.", + "minLength": 1, + "maxLength": 3, + "items": { + "type": "string" + } + } + } + } + } + } + }, + "eventHubsResource": { + "type": "object", + "description": "An Azure Event Hubs namespace.", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "const": "messaging.eventhubs" + }, + "existing": { + "type": "boolean", + "title": "An existing resource for referencing purposes", + "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. 
(Default: false)", + "default": false + }, + "hubs": { + "type": "array", + "title": "Hubs to create in the Event Hubs namespace", + "additionalProperties": false, + "items": { + "type": "string" + }, + "uniqueItems": true + } + } + }, + "serviceBusResource": { + "type": "object", + "description": "An Azure Service Bus namespace.", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "const": "messaging.servicebus" + }, + "existing": { + "type": "boolean", + "title": "An existing resource for referencing purposes", + "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", + "default": false + }, + "queues": { + "type": "array", + "title": "Queues to create in the Service Bus namespace", + "additionalProperties": false, + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "topics": { + "type": "array", + "title": "Topics to create in the Service Bus namespace", + "additionalProperties": false, + "items": { + "type": "string" + }, + "uniqueItems": true + } + } + }, + "storageAccountResource": { + "type": "object", + "description": "A deployed, ready-to-use Azure Storage Account.", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "const": "storage" + }, + "existing": { + "type": "boolean", + "title": "An existing resource for referencing purposes", + "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", + "default": false + }, + "containers": { + "type": "array", + "title": "Azure Storage Account container names.", + "description": "The container names of Azure Storage Account.", + "items": { + "type": "string", + "title": "Azure Storage Account container name", + "description": "The container name of Azure Storage Account." + } + } + } + }, + "keyVaultResource": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "const": "keyvault" + }, + "existing": { + "type": "boolean", + "title": "An existing resource for referencing purposes", + "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. 
(Default: false)", + "default": false + } + } + } + } +} \ No newline at end of file diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go new file mode 100644 index 00000000000..05a9bc1619f --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go @@ -0,0 +1,14 @@ +package prompts + +import ( + _ "embed" +) + +//go:embed azd_plan_init.md +var AzdPlanInitPrompt string + +//go:embed azd_iac_generation_rules.md +var AzdIacRulesPrompt string + +//go:embed azure.yaml.json +var AzdYamlSchemaPrompt string diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/common/types.go b/cli/azd/extensions/azd.ai.start/internal/tools/common/types.go new file mode 100644 index 00000000000..47f14eea64e --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/common/types.go @@ -0,0 +1,7 @@ +package common + +// ErrorResponse represents a JSON error response structure that can be reused across all tools +type ErrorResponse struct { + Error bool `json:"error"` + Message string `json:"message"` +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go b/cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go index 410db725505..e1fc36a05f3 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go @@ -4,9 +4,12 @@ import ( "context" "encoding/json" "fmt" + "os" "os/exec" + "runtime" "strings" + "azd.ai.start/internal/tools/common" "github.com/tmc/langchaingo/callbacks" ) @@ -20,7 +23,7 @@ func (t CommandExecutorTool) Name() string { } func (t CommandExecutorTool) Description() string { - return `Execute any command with arguments. Simple command execution without inference. + return `Execute any command with arguments through the system shell for better compatibility. 
Input should be a JSON object with these fields: { @@ -34,10 +37,19 @@ Required fields: Optional fields: - args: Array of arguments to pass (default: []) +Returns a JSON response with execution details: +- Success responses include: command, fullCommand, exitCode, success, stdout, stderr +- Error responses include: error (true), message + +The tool automatically uses the appropriate shell: +- Windows: cmd.exe /C for built-in commands and proper path resolution +- Unix/Linux/macOS: sh -c for POSIX compatibility + Examples: - {"command": "git", "args": ["status"]} - {"command": "npm", "args": ["install"]} -- {"command": "bash", "args": ["./build.sh", "--env", "prod"]} +- {"command": "dir"} (Windows built-in command) +- {"command": "ls", "args": ["-la"]} (Unix command) - {"command": "powershell", "args": ["-ExecutionPolicy", "Bypass", "-File", "deploy.ps1"]} - {"command": "python", "args": ["main.py", "--debug"]} - {"command": "node", "args": ["server.js", "--port", "3000"]} @@ -51,6 +63,15 @@ type CommandRequest struct { Args []string `json:"args,omitempty"` } +type CommandResponse struct { + Command string `json:"command"` + FullCommand string `json:"fullCommand"` + ExitCode int `json:"exitCode"` + Success bool `json:"success"` + Stdout string `json:"stdout,omitempty"` + Stderr string `json:"stderr,omitempty"` +} + func (t CommandExecutorTool) Call(ctx context.Context, input string) (string, error) { // Invoke callback for tool start if t.CallbacksHandler != nil { @@ -58,30 +79,42 @@ func (t CommandExecutorTool) Call(ctx context.Context, input string) (string, er } if input == "" { - err := fmt.Errorf("command execution request is required") + errorResponse := common.ErrorResponse{ + Error: true, + Message: "command execution request is required", + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("command execution request is required")) } - return "", err + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } // Parse the JSON request var req CommandRequest if err := json.Unmarshal([]byte(input), &req); err != nil { - toolErr := fmt.Errorf("failed to parse command request: %w", err) + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("failed to parse command request: %s", err.Error()), + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to parse command request: %w", err)) } - return "", toolErr + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } // Validate required fields if req.Command == "" { - err := fmt.Errorf("command is required") + errorResponse := common.ErrorResponse{ + Error: true, + Message: "command is required", + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("command is required")) } - return "", err + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } // Set defaults @@ -92,28 +125,66 @@ func (t CommandExecutorTool) Call(ctx context.Context, input string) (string, er // Execute the command (runs in current working directory) result, err := t.executeCommand(ctx, req.Command, req.Args) if err != nil { - toolErr := fmt.Errorf("execution failed: %w", err) + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("execution failed: %s", err.Error()), + 
} if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("execution failed: %w", err)) } - return "", toolErr + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } - // Format the output - output := t.formatOutput(req.Command, req.Args, result) + // Create the success response (even if command had non-zero exit code) + response := t.createSuccessResponse(req.Command, req.Args, result) + + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("failed to marshal JSON response: %s", err.Error()), + } + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to marshal JSON response: %w", err)) + } + errorJsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(errorJsonData), nil + } // Invoke callback for tool end if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + t.CallbacksHandler.HandleToolEnd(ctx, string(jsonData)) } - return output, nil + return string(jsonData), nil } func (t CommandExecutorTool) executeCommand(ctx context.Context, command string, args []string) (*executionResult, error) { - cmd := exec.CommandContext(ctx, command, args...) - // cmd.Dir is not set, so it uses the current working directory - // cmd.Env is not set, so it inherits the current environment + // Handle shell-specific command execution for better compatibility + var cmd *exec.Cmd + + if runtime.GOOS == "windows" { + // On Windows, use cmd.exe to handle built-in commands and path resolution + allArgs := append([]string{"/C", command}, args...) + cmd = exec.CommandContext(ctx, "cmd", allArgs...) + } else { + // On Unix-like systems, use sh for better command resolution + fullCommand := command + if len(args) > 0 { + fullCommand += " " + strings.Join(args, " ") + } + cmd = exec.CommandContext(ctx, "sh", "-c", fullCommand) + } + + // Set working directory explicitly to current directory + if wd, err := os.Getwd(); err == nil { + cmd.Dir = wd + } + + // Inherit environment variables + cmd.Env = os.Environ() var stdout, stderr strings.Builder @@ -122,11 +193,18 @@ func (t CommandExecutorTool) executeCommand(ctx context.Context, command string, cmd.Stderr = &stderr err := cmd.Run() - // Get exit code + // Get exit code and determine if this is a system error vs command error exitCode := 0 + var cmdError error + if err != nil { if exitError, ok := err.(*exec.ExitError); ok { + // Command ran but exited with non-zero code - this is normal exitCode = exitError.ExitCode() + cmdError = nil // Don't treat non-zero exit as a system error + } else { + // System error (command not found, permission denied, etc.) 
+ cmdError = err } } @@ -134,60 +212,41 @@ func (t CommandExecutorTool) executeCommand(ctx context.Context, command string, ExitCode: exitCode, Stdout: stdout.String(), Stderr: stderr.String(), - Error: err, - }, nil + Error: cmdError, // Only system errors, not command exit codes + }, cmdError // Return system errors to caller } -type executionResult struct { - ExitCode int - Stdout string - Stderr string - Error error -} - -func (t CommandExecutorTool) formatOutput(command string, args []string, result *executionResult) string { - var output strings.Builder - - // Show the full command that was executed +func (t CommandExecutorTool) createSuccessResponse(command string, args []string, result *executionResult) CommandResponse { + // Create full command string fullCommand := command if len(args) > 0 { fullCommand += " " + strings.Join(args, " ") } - output.WriteString(fmt.Sprintf("Executed: %s\n", fullCommand)) - output.WriteString(fmt.Sprintf("Exit code: %d\n", result.ExitCode)) - - if result.ExitCode == 0 { - output.WriteString("Status: ✅ Success\n") - } else { - output.WriteString("Status: ❌ Failed\n") + // Limit output to prevent overwhelming the response + stdout := result.Stdout + if len(stdout) > 2000 { + stdout = stdout[:2000] + "\n... (output truncated)" } - if result.Stdout != "" { - output.WriteString("\n--- Standard Output ---\n") - // Limit output to prevent overwhelming the LLM - stdout := result.Stdout - if len(stdout) > 2000 { - stdout = stdout[:2000] + "\n... (output truncated)" - } - output.WriteString(stdout) - output.WriteString("\n") + stderr := result.Stderr + if len(stderr) > 1000 { + stderr = stderr[:1000] + "\n... (error output truncated)" } - if result.Stderr != "" { - output.WriteString("\n--- Standard Error ---\n") - // Limit error output - stderr := result.Stderr - if len(stderr) > 1000 { - stderr = stderr[:1000] + "\n... (error output truncated)" - } - output.WriteString(stderr) - output.WriteString("\n") - } - - if result.Error != nil && result.ExitCode != 0 { - output.WriteString(fmt.Sprintf("\nError details: %s\n", result.Error.Error())) + return CommandResponse{ + Command: command, + FullCommand: fullCommand, + ExitCode: result.ExitCode, + Success: result.ExitCode == 0, + Stdout: stdout, + Stderr: stderr, } +} - return output.String() +type executionResult struct { + ExitCode int + Stdout string + Stderr string + Error error } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go index 2db2eae1c1c..ba48734dd51 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go @@ -2,11 +2,13 @@ package io import ( "context" + "encoding/json" "fmt" "io" "os" "strings" + "azd.ai.start/internal/tools/common" "github.com/tmc/langchaingo/callbacks" ) @@ -20,96 +22,160 @@ func (t CopyFileTool) Name() string { } func (t CopyFileTool) Description() string { - return "Copy a file to a new location. Input format: 'source|destination' (e.g., 'file.txt|backup.txt' or './docs/readme.md|./backup/readme.md')" + return `Copy a file to a new location. +Input: JSON object with required 'source' and 'destination' fields: {"source": "file.txt", "destination": "backup.txt"} +Returns: JSON with copy operation details or error information. 
+The input must be formatted as a single line valid JSON string.` } func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("copy_file: %s", input)) + // Parse JSON input + type InputParams struct { + Source string `json:"source"` + Destination string `json:"destination"` } - if input == "" { - err := fmt.Errorf("input is required in format 'source|destination'") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + var params InputParams + + // Clean the input first + cleanInput := strings.TrimSpace(input) + + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("copy_file: %s", cleanInput)) } - // Split on first occurrence of '|' to separate source from destination - parts := strings.SplitN(input, "|", 2) - if len(parts) != 2 { - err := fmt.Errorf("invalid input format. Use 'source|destination'") + // Parse as JSON - this is now required + if err := json.Unmarshal([]byte(cleanInput), &params); err != nil { + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"source\": \"file.txt\", \"destination\": \"backup.txt\"}", err.Error()), + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to parse JSON input: %w", err)) } - return "", err + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } - source := strings.TrimSpace(parts[0]) - destination := strings.TrimSpace(parts[1]) + source := strings.TrimSpace(params.Source) + destination := strings.TrimSpace(params.Destination) if source == "" || destination == "" { - err := fmt.Errorf("both source and destination paths are required") + errorResponse := common.ErrorResponse{ + Error: true, + Message: "Both source and destination paths are required", + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("both source and destination paths are required")) } - return "", err + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } // Check if source file exists sourceInfo, err := os.Stat(source) if err != nil { - toolErr := fmt.Errorf("source file %s does not exist: %w", source, err) + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Source file %s does not exist: %s", source, err.Error()), + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("source file %s does not exist: %w", source, err)) } - return "", toolErr + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } if sourceInfo.IsDir() { - err := fmt.Errorf("source %s is a directory. Use copy_directory for directories", source) + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Source %s is a directory. Use copy_directory for directories", source), + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("source %s is a directory. 
Use copy_directory for directories", source)) } - return "", err + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } // Open source file sourceFile, err := os.Open(source) if err != nil { - toolErr := fmt.Errorf("failed to open source file %s: %w", source, err) + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Failed to open source file %s: %s", source, err.Error()), + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to open source file %s: %w", source, err)) } - return "", toolErr + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } defer sourceFile.Close() // Create destination file destFile, err := os.Create(destination) if err != nil { - toolErr := fmt.Errorf("failed to create destination file %s: %w", destination, err) + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Failed to create destination file %s: %s", destination, err.Error()), + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to create destination file %s: %w", destination, err)) } - return "", toolErr + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil } defer destFile.Close() // Copy contents bytesWritten, err := io.Copy(destFile, sourceFile) if err != nil { - toolErr := fmt.Errorf("failed to copy file: %w", err) + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Failed to copy file: %s", err.Error()), + } + if t.CallbacksHandler != nil { + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to copy file: %w", err)) + } + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil + } + + // Prepare JSON response structure + type CopyResponse struct { + Success bool `json:"success"` + Source string `json:"source"` + Destination string `json:"destination"` + BytesCopied int64 `json:"bytesCopied"` + Message string `json:"message"` + } + + response := CopyResponse{ + Success: true, + Source: source, + Destination: destination, + BytesCopied: bytesWritten, + Message: fmt.Sprintf("Successfully copied %s to %s (%d bytes)", source, destination, bytesWritten), + } + + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + errorResponse := common.ErrorResponse{ + Error: true, + Message: fmt.Sprintf("Failed to marshal JSON response: %s", err.Error()), + } if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to marshal JSON response: %w", err)) } - return "", toolErr + errorJsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(errorJsonData), nil } - output := fmt.Sprintf("Copied %s to %s (%d bytes)\n", source, destination, bytesWritten) + output := string(jsonData) if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + t.CallbacksHandler.HandleToolEnd(ctx, fmt.Sprintf("Copied %s to %s (%d bytes)", source, destination, bytesWritten)) } return output, nil diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/create_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/create_directory.go index d100e7aa834..c79ac8d46c6 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/create_directory.go +++ 
b/cli/azd/extensions/azd.ai.start/internal/tools/io/create_directory.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "strings" "github.com/tmc/langchaingo/callbacks" ) @@ -27,6 +28,9 @@ func (t CreateDirectoryTool) Call(ctx context.Context, input string) (string, er t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("create_directory: %s", input)) } + input = strings.TrimPrefix(input, `"`) + input = strings.TrimSuffix(input, `"`) + if input == "" { err := fmt.Errorf("directory path is required") if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_directory.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_directory.go index 72714f379b9..7afb090f868 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_directory.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_directory.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "strings" "github.com/tmc/langchaingo/callbacks" ) @@ -27,6 +28,9 @@ func (t DeleteDirectoryTool) Call(ctx context.Context, input string) (string, er t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("delete_directory: %s", input)) } + input = strings.TrimPrefix(input, `"`) + input = strings.TrimSuffix(input, `"`) + if input == "" { err := fmt.Errorf("directory path is required") if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_file.go index b893bb1ee29..57c51b415de 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_file.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "strings" "github.com/tmc/langchaingo/callbacks" ) @@ -26,6 +27,9 @@ func (t DeleteFileTool) Call(ctx context.Context, input string) (string, error) t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("delete_file: %s", input)) } + input = strings.TrimPrefix(input, `"`) + input = strings.TrimSuffix(input, `"`) + if input == "" { err := fmt.Errorf("file path is required") if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go index f02e8e8df5e..c0b4e09ee2e 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go @@ -8,6 +8,7 @@ import ( "path/filepath" "strings" + "azd.ai.start/internal/tools/common" "github.com/tmc/langchaingo/callbacks" ) @@ -16,12 +17,6 @@ type DirectoryListTool struct { CallbacksHandler callbacks.Handler } -// ErrorResponse represents a JSON error response structure that can be reused across all tools -type ErrorResponse struct { - Error bool `json:"error"` - Message string `json:"message"` -} - func (t DirectoryListTool) Name() string { return "list_directory" } @@ -47,7 +42,7 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro // Parse as JSON - this is now required if err := json.Unmarshal([]byte(cleanInput), &params); err != nil { - errorResponse := ErrorResponse{ + errorResponse := common.ErrorResponse{ Error: true, Message: fmt.Sprintf("Invalid JSON input: %s. 
Expected format: {\"path\": \".\", \"include_hidden\": false}", err.Error()), } @@ -78,7 +73,7 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro // Explicitly get current working directory instead of relying on filepath.Abs(".") absPath, err = os.Getwd() if err != nil { - errorResponse := ErrorResponse{ + errorResponse := common.ErrorResponse{ Error: true, Message: fmt.Sprintf("Failed to get current working directory: %s", err.Error()), } @@ -91,7 +86,7 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro } else { absPath, err = filepath.Abs(path) if err != nil { - errorResponse := ErrorResponse{ + errorResponse := common.ErrorResponse{ Error: true, Message: fmt.Sprintf("Failed to get absolute path for %s: %s", path, err.Error()), } @@ -122,7 +117,7 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro message = fmt.Sprintf("Failed to access %s: %s (original input: '%s', cleaned path: '%s')", absPath, err.Error(), input, path) } - errorResponse := ErrorResponse{ + errorResponse := common.ErrorResponse{ Error: true, Message: message, } @@ -134,7 +129,7 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro } if !info.IsDir() { - errorResponse := ErrorResponse{ + errorResponse := common.ErrorResponse{ Error: true, Message: fmt.Sprintf("Path is not a directory: %s", absPath), } @@ -148,7 +143,7 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro // List directory contents files, err := os.ReadDir(absPath) if err != nil { - errorResponse := ErrorResponse{ + errorResponse := common.ErrorResponse{ Error: true, Message: fmt.Sprintf("Failed to read directory %s: %s", absPath, err.Error()), } @@ -202,7 +197,7 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro // Convert to JSON jsonData, err := json.MarshalIndent(response, "", " ") if err != nil { - errorResponse := ErrorResponse{ + errorResponse := common.ErrorResponse{ Error: true, Message: fmt.Sprintf("Failed to marshal JSON response: %s", err.Error()), } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go index 98528d50b91..afc5a0aca15 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "os" + "strings" "time" "github.com/tmc/langchaingo/callbacks" @@ -28,6 +29,9 @@ func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("file_info: %s", input)) } + input = strings.TrimPrefix(input, `"`) + input = strings.TrimSuffix(input, `"`) + if input == "" { err := fmt.Errorf("file path is required") if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/move_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/move_file.go index 68db771d144..51c12488774 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/move_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/move_file.go @@ -27,6 +27,9 @@ func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("move_file: %s", input)) } + input = strings.TrimPrefix(input, `"`) + input = strings.TrimSuffix(input, `"`) + if input == "" { err := fmt.Errorf("input is required in format 
'source|destination'") if t.CallbacksHandler != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/loader.go index ae2da0253c1..3a2dab5c83a 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/loader.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/loader.go @@ -4,6 +4,7 @@ import ( "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" + "azd.ai.start/internal/tools/azd" "azd.ai.start/internal/tools/dev" "azd.ai.start/internal/tools/io" ) @@ -21,6 +22,7 @@ type LocalToolsLoader struct { func NewLocalToolsLoader(callbackHandler callbacks.Handler) *LocalToolsLoader { return &LocalToolsLoader{ loaders: []ToolLoader{ + azd.NewAzdToolsLoader(callbackHandler), dev.NewDevToolsLoader(callbackHandler), io.NewIoToolsLoader(callbackHandler), }, diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json index dd2078ee03e..efca4416be8 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json +++ b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json @@ -1,19 +1,9 @@ { "servers": { - "bestpractices": { + "Azure": { "type": "stdio", "command": "azmcp", - "args": ["server", "start", "--namespace", "bestpractices"] - }, - "subscription": { - "type": "stdio", - "command": "azmcp", - "args": ["server", "start", "--namespace", "subscription"] - }, - "deploy": { - "type": "stdio", - "command": "azmcp", - "args": ["server", "start", "--namespace", "deploy"] + "args": ["server", "start"] } } } From e04ed749645c72e7873ec6ad079f44172e3f4da9 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Wed, 30 Jul 2025 17:30:27 -0700 Subject: [PATCH 044/116] Wire up noop sampling handler --- .../azd.ai.start/internal/tools/mcp/loader.go | 35 +++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go index 5aee8932b06..51464e0229d 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go @@ -1,6 +1,7 @@ package mcp import ( + "context" "encoding/json" "fmt" @@ -8,6 +9,8 @@ import ( langchaingo_mcp_adapter "github.com/i2y/langchaingo-mcp-adapter" "github.com/mark3labs/mcp-go/client" + "github.com/mark3labs/mcp-go/client/transport" + "github.com/mark3labs/mcp-go/mcp" "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" ) @@ -28,6 +31,14 @@ type ServerConfig struct { Env []string `json:"env,omitempty"` } +type McpSamplingHandler struct { +} + +func (h *McpSamplingHandler) CreateMessage(ctx context.Context, request mcp.CreateMessageRequest) (*mcp.CreateMessageResult, error) { + // TODO: implement sampling handler + return &mcp.CreateMessageResult{}, nil +} + type McpToolsLoader struct { callbackHandler callbacks.Handler } @@ -50,9 +61,29 @@ func (l *McpToolsLoader) LoadTools() ([]tools.Tool, error) { // Iterate through each server configuration for serverName, serverConfig := range config.Servers { // Create MCP client for the server using stdio - mcpClient, err := client.NewStdioMCPClient(serverConfig.Command, serverConfig.Env, serverConfig.Args...) + samplingHandler := &McpSamplingHandler{} + stdioTransport := transport.NewStdio(serverConfig.Command, serverConfig.Env, serverConfig.Args...) 
+ mcpClient := client.NewClient(stdioTransport, client.WithSamplingHandler(samplingHandler)) + + ctx := context.Background() + + if err := mcpClient.Start(ctx); err != nil { + return nil, err + } + + // Initialize the connection + _, err := mcpClient.Initialize(ctx, mcp.InitializeRequest{ + Params: mcp.InitializeParams{ + ProtocolVersion: mcp.LATEST_PROTOCOL_VERSION, + ClientInfo: mcp.Implementation{ + Name: "azd-agent-host", + Version: "1.0.0", + }, + Capabilities: mcp.ClientCapabilities{}, + }, + }) if err != nil { - return nil, fmt.Errorf("failed to create MCP client for server %s: %w", serverName, err) + return nil, err } // Create the adapter From 0c5e3db659ea1cf7f89e92a98389e078be0e08c6 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Thu, 31 Jul 2025 13:18:11 -0700 Subject: [PATCH 045/116] Adds sampling handler --- .../azd.ai.start/internal/agent/agent.go | 5 +- .../azd.ai.start/internal/logging/logger.go | 37 ++++-- .../azd.ai.start/internal/tools/mcp/loader.go | 31 +---- .../internal/tools/mcp/sampling_handler.go | 111 ++++++++++++++++++ 4 files changed, 149 insertions(+), 35 deletions(-) create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go index a5822a677c2..d3a38403d4b 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go @@ -39,9 +39,12 @@ func NewAzureAIAgent(llm *openai.LLM) (*AzureAIAgent, error) { memory.WithAIPrefix("AI"), ) + // Create sampling handler for MCP + samplingHandler := mcptools.NewMcpSamplingHandler(llm) + toolLoaders := []localtools.ToolLoader{ localtools.NewLocalToolsLoader(llm.CallbacksHandler), - mcptools.NewMcpToolsLoader(llm.CallbacksHandler), + mcptools.NewMcpToolsLoader(llm.CallbacksHandler, samplingHandler), } allTools := []tools.Tool{} diff --git a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go index aa693cc7f6b..ee56ceb3625 100644 --- a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go +++ b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go @@ -155,12 +155,29 @@ func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.Age var valueStr string switch v := value.(type) { case []interface{}: + // Skip empty arrays + if len(v) == 0 { + continue + } // Handle arrays by joining with spaces var strSlice []string for _, item := range v { strSlice = append(strSlice, strings.TrimSpace(string(fmt.Sprintf("%v", item)))) } valueStr = strings.Join(strSlice, " ") + case map[string]interface{}: + // Skip empty maps + if len(v) == 0 { + continue + } + valueStr = strings.TrimSpace(fmt.Sprintf("%v", v)) + case string: + // Skip empty strings + trimmed := strings.TrimSpace(v) + if trimmed == "" { + continue + } + valueStr = trimmed default: valueStr = strings.TrimSpace(fmt.Sprintf("%v", v)) } @@ -173,17 +190,23 @@ func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.Age var paramStr string if len(params) > 0 { paramStr = strings.Join(params, ", ") + paramStr = truncateString(paramStr, 100) + output := fmt.Sprintf("\n🤖 Agent: Calling %s tool with %s\n", action.Tool, paramStr) + color.Green(output) } else { - paramStr = "tool" + output := fmt.Sprintf("\n🤖 Agent: Calling %s tool\n", action.Tool) + color.Green(output) } - - paramStr = truncateString(paramStr, 100) - output := fmt.Sprintf("\n🤖 Agent: 
Calling %s tool with %s\n", action.Tool, paramStr) - color.Green(output) } else { // JSON parsing failed, show the input as text with truncation - toolInput := truncateString(action.ToolInput, 100) - color.Green("\n🤖 Agent: Calling %s tool with %s\n", action.Tool, toolInput) + toolInput := strings.TrimSpace(action.ToolInput) + if toolInput == "" { + output := fmt.Sprintf("\n🤖 Agent: Calling %s tool\n", action.Tool) + color.Green(output) + } else { + toolInput = truncateString(toolInput, 100) + color.Green("\n🤖 Agent: Calling %s tool with %s\n", action.Tool, toolInput) + } } } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go index 51464e0229d..e30c3fb5e0e 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go @@ -10,7 +10,6 @@ import ( langchaingo_mcp_adapter "github.com/i2y/langchaingo-mcp-adapter" "github.com/mark3labs/mcp-go/client" "github.com/mark3labs/mcp-go/client/transport" - "github.com/mark3labs/mcp-go/mcp" "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" ) @@ -31,21 +30,15 @@ type ServerConfig struct { Env []string `json:"env,omitempty"` } -type McpSamplingHandler struct { -} - -func (h *McpSamplingHandler) CreateMessage(ctx context.Context, request mcp.CreateMessageRequest) (*mcp.CreateMessageResult, error) { - // TODO: implement sampling handler - return &mcp.CreateMessageResult{}, nil -} - type McpToolsLoader struct { callbackHandler callbacks.Handler + samplingHandler client.SamplingHandler } -func NewMcpToolsLoader(callbackHandler callbacks.Handler) *McpToolsLoader { +func NewMcpToolsLoader(callbackHandler callbacks.Handler, samplingHandler client.SamplingHandler) *McpToolsLoader { return &McpToolsLoader{ callbackHandler: callbackHandler, + samplingHandler: samplingHandler, } } @@ -61,9 +54,8 @@ func (l *McpToolsLoader) LoadTools() ([]tools.Tool, error) { // Iterate through each server configuration for serverName, serverConfig := range config.Servers { // Create MCP client for the server using stdio - samplingHandler := &McpSamplingHandler{} stdioTransport := transport.NewStdio(serverConfig.Command, serverConfig.Env, serverConfig.Args...) 
- mcpClient := client.NewClient(stdioTransport, client.WithSamplingHandler(samplingHandler)) + mcpClient := client.NewClient(stdioTransport, client.WithSamplingHandler(l.samplingHandler)) ctx := context.Background() @@ -71,21 +63,6 @@ func (l *McpToolsLoader) LoadTools() ([]tools.Tool, error) { return nil, err } - // Initialize the connection - _, err := mcpClient.Initialize(ctx, mcp.InitializeRequest{ - Params: mcp.InitializeParams{ - ProtocolVersion: mcp.LATEST_PROTOCOL_VERSION, - ClientInfo: mcp.Implementation{ - Name: "azd-agent-host", - Version: "1.0.0", - }, - Capabilities: mcp.ClientCapabilities{}, - }, - }) - if err != nil { - return nil, err - } - // Create the adapter adapter, err := langchaingo_mcp_adapter.New(mcpClient) if err != nil { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go new file mode 100644 index 00000000000..d9505abf102 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go @@ -0,0 +1,111 @@ +package mcp + +import ( + "context" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/tmc/langchaingo/llms" +) + +type McpSamplingHandler struct { + llm llms.Model +} + +func NewMcpSamplingHandler(llm llms.Model) *McpSamplingHandler { + return &McpSamplingHandler{ + llm: llm, + } +} + +func (h *McpSamplingHandler) CreateMessage(ctx context.Context, request mcp.CreateMessageRequest) (*mcp.CreateMessageResult, error) { + messages := []llms.MessageContent{} + for _, inputMessage := range request.Messages { + // Map MCP Role to langchaingo ChatMessageType + var chatMessageType llms.ChatMessageType + switch inputMessage.Role { + case mcp.RoleAssistant: + chatMessageType = llms.ChatMessageTypeAI + case mcp.RoleUser: + chatMessageType = llms.ChatMessageTypeHuman + default: + // Fallback for unknown roles + chatMessageType = llms.ChatMessageTypeHuman + } + + // Handle Content field - it's defined as 'any' in MCP SamplingMessage + var parts []llms.ContentPart + switch content := inputMessage.Content.(type) { + case string: + // Simple text content + parts = []llms.ContentPart{ + llms.TextContent{ + Text: content, + }, + } + case []interface{}: + // Array of content parts (could be text, images, etc.) + for _, part := range content { + if textPart, ok := part.(string); ok { + parts = append(parts, llms.TextContent{ + Text: textPart, + }) + } + // Could add support for other content types here (images, etc.) 
+ } + case map[string]interface{}: + // Map content - convert each key/value pair to text content + for key, value := range content { + parts = append(parts, llms.TextContent{ + Text: fmt.Sprintf("%s: %v", key, value), + }) + } + default: + // Fallback: convert to string + parts = []llms.ContentPart{ + llms.TextContent{ + Text: fmt.Sprintf("%v", content), + }, + } + } + + messages = append(messages, llms.MessageContent{ + Role: chatMessageType, + Parts: parts, + }) + } + + res, err := h.llm.GenerateContent(ctx, messages) + if err != nil { + return nil, err + } + + // Transform langchaingo response back to MCP format + // Get model name from hints if available + modelName := "" + if request.ModelPreferences != nil && len(request.ModelPreferences.Hints) > 0 { + modelName = request.ModelPreferences.Hints[0].Name + } + + if len(res.Choices) == 0 { + return &mcp.CreateMessageResult{ + SamplingMessage: mcp.SamplingMessage{ + Role: mcp.RoleAssistant, + Content: "", + }, + Model: modelName, + StopReason: "no_choices", + }, nil + } + + // Use the first choice + choice := res.Choices[0] + + return &mcp.CreateMessageResult{ + SamplingMessage: mcp.SamplingMessage{ + Role: mcp.RoleAssistant, + Content: choice.Content, + }, + Model: modelName, + }, nil +} From 8377ecb58b3d875235cc11cd0a906d2604fd24b4 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Thu, 31 Jul 2025 18:25:42 -0700 Subject: [PATCH 046/116] Fixed sampling --- cli/azd/extensions/azd.ai.start/README.md | 33 ---- cli/azd/extensions/azd.ai.start/USAGE.md | 50 ------ .../azd.ai.start/internal/agent/agent.go | 83 +++++++-- .../internal/cmd/enhanced_integration.go | 17 +- .../azd.ai.start/internal/cmd/root.go | 39 ++++- .../azd.ai.start/internal/logging/logger.go | 2 +- .../internal/tools/mcp/sampling_handler.go | 157 +++++++++++------- .../azd.ai.start/internal/utils/helpers.go | 41 ----- .../internal/validation/parser.go | 93 ----------- .../azd.ai.start/internal/validation/types.go | 21 --- 10 files changed, 200 insertions(+), 336 deletions(-) delete mode 100644 cli/azd/extensions/azd.ai.start/README.md delete mode 100644 cli/azd/extensions/azd.ai.start/USAGE.md delete mode 100644 cli/azd/extensions/azd.ai.start/internal/utils/helpers.go delete mode 100644 cli/azd/extensions/azd.ai.start/internal/validation/parser.go delete mode 100644 cli/azd/extensions/azd.ai.start/internal/validation/types.go diff --git a/cli/azd/extensions/azd.ai.start/README.md b/cli/azd/extensions/azd.ai.start/README.md deleted file mode 100644 index 9ff29633ea4..00000000000 --- a/cli/azd/extensions/azd.ai.start/README.md +++ /dev/null @@ -1,33 +0,0 @@ -# Node.js Express App - -This is a simple Node.js application using Express with a basic routing setup. - -## Project Structure - -``` -. -├── app.js -├── package.json -├── README.md -└── routes - └── index.js -``` - -## Getting Started - -1. Install dependencies: - ```bash - npm install - ``` -2. Start the server: - ```bash - npm start - ``` -3. Visit [http://localhost:3000](http://localhost:3000) in your browser. - -## Features -- Express server setup -- Modular routing - -## License -ISC \ No newline at end of file diff --git a/cli/azd/extensions/azd.ai.start/USAGE.md b/cli/azd/extensions/azd.ai.start/USAGE.md deleted file mode 100644 index 7218badc825..00000000000 --- a/cli/azd/extensions/azd.ai.start/USAGE.md +++ /dev/null @@ -1,50 +0,0 @@ -# Azure AI Agent - Multi-turn Chat Demo - -Your Azure AI Agent now supports two modes: - -## 1. 
Single Query Mode -For one-time questions, pass the query as arguments: -```bash -azd.ai.start.exe "How do I deploy a Node.js app to Azure?" -``` - -## 2. Interactive Chat Mode -For multi-turn conversations, run without arguments: -```bash -azd.ai.start.exe -``` - -In interactive mode, you'll see: -- 🤖 Welcome message with instructions -- 💬 You: prompt for your input -- 🤖 AI Agent: responses with context awareness -- Type 'exit' or 'quit' to end the session -- Maintains conversation history for context - -### Features: -- ✅ **Context Aware**: Remembers previous messages in the conversation -- ✅ **Azure Focused**: Specialized for Azure development tasks -- ✅ **Easy Exit**: Type 'exit', 'quit', or Ctrl+C to quit -- ✅ **Memory Management**: Keeps last 10 exchanges to prevent context overflow -- ✅ **Error Handling**: Gracefully handles errors and continues the conversation - -### Example Interactive Session: -``` -🤖 Azure AI Agent - Interactive Chat Mode -Type 'exit', 'quit', or press Ctrl+C to exit -═══════════════════════════════════════════════ - -💬 You: What is Azure App Service? - -🤖 AI Agent: Azure App Service is a platform-as-a-service (PaaS)... - -💬 You: How do I deploy to it? - -🤖 AI Agent: Based on our previous discussion about App Service... - -💬 You: exit - -👋 Goodbye! Thanks for using Azure AI Agent! -``` - -The agent maintains conversation context, so follow-up questions work naturally! diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go index d3a38403d4b..fc1cd9e73cb 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go @@ -9,11 +9,13 @@ import ( "github.com/tmc/langchaingo/agents" "github.com/tmc/langchaingo/chains" - "github.com/tmc/langchaingo/llms/openai" + "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/memory" "github.com/tmc/langchaingo/tools" + "azd.ai.start/internal/logging" localtools "azd.ai.start/internal/tools" + "azd.ai.start/internal/tools/mcp" mcptools "azd.ai.start/internal/tools/mcp" ) @@ -26,12 +28,42 @@ var _defaultAgentFormatInstructions string //go:embed prompts/default_agent_suffix.txt var _defaultAgentSuffix string -// AzureAIAgent represents an enhanced Azure AI agent with action tracking, intent validation, and conversation memory -type AzureAIAgent struct { - executor *agents.Executor +// AzdAiAgent represents an enhanced AZD Copilot agent with action tracking, intent validation, and conversation memory +type AzdAiAgent struct { + debug bool + defaultModel llms.Model + samplingModel llms.Model + executor *agents.Executor } -func NewAzureAIAgent(llm *openai.LLM) (*AzureAIAgent, error) { +type AgentOption func(*AzdAiAgent) + +func WithDebug(debug bool) AgentOption { + return func(agent *AzdAiAgent) { + agent.debug = debug + } +} + +func WithSamplingModel(model llms.Model) AgentOption { + return func(agent *AzdAiAgent) { + agent.samplingModel = model + } +} + +func NewAzdAiAgent(llm llms.Model, opts ...AgentOption) (*AzdAiAgent, error) { + azdAgent := &AzdAiAgent{ + defaultModel: llm, + samplingModel: llm, + } + + for _, opt := range opts { + opt(azdAgent) + } + + actionLogger := logging.NewActionLogger( + logging.WithDebug(azdAgent.debug), + ) + smartMemory := memory.NewConversationBuffer( memory.WithInputKey("input"), memory.WithOutputKey("output"), @@ -40,48 +72,63 @@ func NewAzureAIAgent(llm *openai.LLM) (*AzureAIAgent, error) { ) // Create sampling handler for MCP - samplingHandler := 
mcptools.NewMcpSamplingHandler(llm) + samplingHandler := mcptools.NewMcpSamplingHandler( + azdAgent.samplingModel, + mcp.WithDebug(azdAgent.debug), + ) toolLoaders := []localtools.ToolLoader{ - localtools.NewLocalToolsLoader(llm.CallbacksHandler), - mcptools.NewMcpToolsLoader(llm.CallbacksHandler, samplingHandler), + localtools.NewLocalToolsLoader(actionLogger), + mcptools.NewMcpToolsLoader(actionLogger, samplingHandler), } allTools := []tools.Tool{} + // Define block list of excluded tools + excludedTools := map[string]bool{ + "extension_az": true, + "extension_azd": true, + // Add more excluded tools here as needed + } + for _, toolLoader := range toolLoaders { categoryTools, err := toolLoader.LoadTools() if err != nil { return nil, err } - allTools = append(allTools, categoryTools...) + + // Filter out excluded tools + for _, tool := range categoryTools { + if !excludedTools[tool.Name()] { + allTools = append(allTools, tool) + } + } } // 4. Create agent with memory directly integrated - agent := agents.NewConversationalAgent(llm, allTools, + conversationAgent := agents.NewConversationalAgent(llm, allTools, agents.WithPromptPrefix(_defaultAgentPrefix), agents.WithPromptSuffix(_defaultAgentSuffix), agents.WithPromptFormatInstructions(_defaultAgentFormatInstructions), agents.WithMemory(smartMemory), - agents.WithCallbacksHandler(llm.CallbacksHandler), + agents.WithCallbacksHandler(actionLogger), agents.WithReturnIntermediateSteps(), ) // 5. Create executor without separate memory configuration since agent already has it - executor := agents.NewExecutor(agent, - agents.WithMaxIterations(100), // Much higher limit for complex multi-step processes + executor := agents.NewExecutor(conversationAgent, + agents.WithMaxIterations(500), // Much higher limit for complex multi-step processes agents.WithMemory(smartMemory), - agents.WithCallbacksHandler(llm.CallbacksHandler), + agents.WithCallbacksHandler(actionLogger), agents.WithReturnIntermediateSteps(), ) - return &AzureAIAgent{ - executor: executor, - }, nil + azdAgent.executor = executor + return azdAgent, nil } // ProcessQuery processes a user query with full action tracking and validation -func (aai *AzureAIAgent) ProcessQuery(ctx context.Context, userInput string) error { +func (aai *AzdAiAgent) ProcessQuery(ctx context.Context, userInput string) error { // Execute with enhanced input - agent should automatically handle memory _, err := chains.Run(ctx, aai.executor, userInput, chains.WithMaxTokens(800), diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go index 7e436b76e6d..3a27dc4643c 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go @@ -11,20 +11,13 @@ import ( "strings" "github.com/fatih/color" - "github.com/tmc/langchaingo/llms/openai" "azd.ai.start/internal/agent" ) -// RunEnhancedAzureAgent runs the enhanced Azure AI agent with full capabilities -func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) error { - // Create the enhanced agent - azureAgent, err := agent.NewAzureAIAgent(llm) - if err != nil { - return err - } - - fmt.Println("🤖 Enhanced Azure AI Agent - Interactive Mode") +// RunEnhancedAgentLoop runs the enhanced AZD Copilot agent with full capabilities +func RunEnhancedAgentLoop(ctx context.Context, agent *agent.AzdAiAgent, args []string) error { + fmt.Println("🤖 AZD Copilot - Interactive Mode") 
fmt.Println("═══════════════════════════════════════════════════════════") // Handle initial query if provided @@ -59,12 +52,12 @@ func RunEnhancedAzureAgent(ctx context.Context, llm *openai.LLM, args []string) } if strings.ToLower(userInput) == "exit" || strings.ToLower(userInput) == "quit" { - fmt.Println("👋 Goodbye! Thanks for using the Enhanced Azure AI Agent!") + fmt.Println("👋 Goodbye! Thanks for using AZD Copilot!") break } // Process the query with the enhanced agent - err := azureAgent.ProcessQuery(ctx, userInput) + err := agent.ProcessQuery(ctx, userInput) if err != nil { continue } diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go index d57df89a2ea..4031d40b7f5 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go @@ -9,6 +9,7 @@ import ( "fmt" "os" + "azd.ai.start/internal/agent" "azd.ai.start/internal/logging" "github.com/azure/azure-dev/cli/azd/pkg/azdext" "github.com/spf13/cobra" @@ -83,18 +84,17 @@ func runAIAgent(ctx context.Context, args []string, debug bool) error { // Common deployment names to try azureAPIVersion := "2024-02-15-preview" - var llm *openai.LLM + var defaultModel *openai.LLM + var samplingModel *openai.LLM + + actionLogger := logging.NewActionLogger(logging.WithDebug(debug)) // Try different deployment names if aiConfig.Endpoint != "" && aiConfig.ApiKey != "" { // Use Azure OpenAI with proper configuration fmt.Printf("🔵 Trying Azure OpenAI with deployment: %s\n", aiConfig.DeploymentName) - actionLogger := logging.NewActionLogger( - logging.WithDebug(debug), - ) - - llm, err = openai.New( + defaultModel, err = openai.New( openai.WithToken(aiConfig.ApiKey), openai.WithBaseURL(aiConfig.Endpoint+"/"), openai.WithAPIType(openai.APITypeAzure), @@ -108,12 +108,33 @@ func runAIAgent(ctx context.Context, args []string, debug bool) error { } else { fmt.Printf("❌ Failed with deployment %s: %v\n", aiConfig.DeploymentName, err) } + + samplingModel, err = openai.New( + openai.WithToken(aiConfig.ApiKey), + openai.WithBaseURL(aiConfig.Endpoint+"/"), + openai.WithAPIType(openai.APITypeAzure), + openai.WithAPIVersion(azureAPIVersion), + openai.WithModel(aiConfig.DeploymentName), + ) + + if err != nil { + return err + } + } + + // Create the enhanced agent + azdAgent, err := agent.NewAzdAiAgent(defaultModel, + agent.WithSamplingModel(samplingModel), + agent.WithDebug(debug), + ) + if err != nil { + return err } - if llm == nil { + if defaultModel == nil { return fmt.Errorf("failed to connect to any Azure OpenAI deployment") } - // Use the enhanced Azure AI agent with full capabilities - return RunEnhancedAzureAgent(ctx, llm, args) + // Use the enhanced AZD Copilot agent with full capabilities + return RunEnhancedAgentLoop(ctx, azdAgent, args) } diff --git a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go index ee56ceb3625..e3f9b64e0e4 100644 --- a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go +++ b/cli/azd/extensions/azd.ai.start/internal/logging/logger.go @@ -68,7 +68,7 @@ func (al *ActionLogger) HandleLLMGenerateContentEnd(ctx context.Context, res *ll // Find all "Thought:" patterns and extract the content that follows // (?is) flags: i=case insensitive, s=dot matches newlines // .*? 
is non-greedy to stop at the first occurrence of next pattern or end - thoughtRegex := regexp.MustCompile(`(?is)thought:\s*(.*?)(?:\n\s*(?:action|final answer|observation|ai):|$)`) + thoughtRegex := regexp.MustCompile(`(?is)thought:\s*(.*?)(?:\n\s*(?:action|final answer|observation|ai|thought):|$)`) matches := thoughtRegex.FindAllStringSubmatch(content, -1) for _, match := range matches { diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go index d9505abf102..ffd948429c6 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go @@ -2,110 +2,151 @@ package mcp import ( "context" + "encoding/json" "fmt" + "strings" + "github.com/fatih/color" "github.com/mark3labs/mcp-go/mcp" "github.com/tmc/langchaingo/llms" ) type McpSamplingHandler struct { - llm llms.Model + llm llms.Model + debug bool } -func NewMcpSamplingHandler(llm llms.Model) *McpSamplingHandler { - return &McpSamplingHandler{ +type SamplingHandlerOption func(*McpSamplingHandler) + +func WithDebug(debug bool) SamplingHandlerOption { + return func(h *McpSamplingHandler) { + h.debug = debug + } +} + +func NewMcpSamplingHandler(llm llms.Model, opts ...SamplingHandlerOption) *McpSamplingHandler { + handler := &McpSamplingHandler{ llm: llm, } + + for _, opt := range opts { + opt(handler) + } + + return handler +} + +// cleanContent converts literal line break escape sequences to actual line break characters +func (h *McpSamplingHandler) cleanContent(content string) string { + // Replace literal escape sequences with actual control characters + // Handle Windows-style \r\n first (most common), then individual ones + content = strings.ReplaceAll(content, "\\r\\n", "\r\n") + content = strings.ReplaceAll(content, "\\n", "\n") + content = strings.ReplaceAll(content, "\\r", "\r") + return content } func (h *McpSamplingHandler) CreateMessage(ctx context.Context, request mcp.CreateMessageRequest) (*mcp.CreateMessageResult, error) { - messages := []llms.MessageContent{} - for _, inputMessage := range request.Messages { - // Map MCP Role to langchaingo ChatMessageType - var chatMessageType llms.ChatMessageType - switch inputMessage.Role { - case mcp.RoleAssistant: - chatMessageType = llms.ChatMessageTypeAI - case mcp.RoleUser: - chatMessageType = llms.ChatMessageTypeHuman - default: - // Fallback for unknown roles - chatMessageType = llms.ChatMessageTypeHuman + if h.debug { + requestJson, err := json.MarshalIndent(request, "", " ") + if err != nil { + return nil, err } - // Handle Content field - it's defined as 'any' in MCP SamplingMessage + color.HiBlack("\nSamplingStart\n%s\n", requestJson) + } + + messages := []llms.MessageContent{} + for _, msg := range request.Messages { var parts []llms.ContentPart - switch content := inputMessage.Content.(type) { + + switch content := msg.Content.(type) { + case mcp.TextContent: + parts = append(parts, llms.TextPart(h.cleanContent(content.Text))) case string: // Simple text content - parts = []llms.ContentPart{ - llms.TextContent{ - Text: content, - }, + parts = append(parts, llms.TextPart(h.cleanContent(content))) + case map[string]interface{}: + // Map content - convert each key/value pair to text content + for key, value := range content { + if key == "text" { + parts = append(parts, llms.TextPart(h.cleanContent(fmt.Sprintf("%v", value)))) + break + } } case []interface{}: // Array of content parts 
(could be text, images, etc.) for _, part := range content { if textPart, ok := part.(string); ok { - parts = append(parts, llms.TextContent{ - Text: textPart, - }) + parts = append(parts, llms.TextPart(h.cleanContent(textPart))) } - // Could add support for other content types here (images, etc.) - } - case map[string]interface{}: - // Map content - convert each key/value pair to text content - for key, value := range content { - parts = append(parts, llms.TextContent{ - Text: fmt.Sprintf("%s: %v", key, value), - }) } + default: // Fallback: convert to string - parts = []llms.ContentPart{ - llms.TextContent{ - Text: fmt.Sprintf("%v", content), - }, - } + parts = append(parts, llms.TextPart(h.cleanContent(fmt.Sprintf("%v", content)))) } messages = append(messages, llms.MessageContent{ - Role: chatMessageType, + Role: llms.ChatMessageTypeAI, Parts: parts, }) } + if h.debug { + inputJson, err := json.MarshalIndent(messages, "", " ") + if err != nil { + return nil, err + } + + color.HiBlack("\nSamplingLLMContent\n%s\n", inputJson) + } + res, err := h.llm.GenerateContent(ctx, messages) if err != nil { - return nil, err + return &mcp.CreateMessageResult{ + SamplingMessage: mcp.SamplingMessage{ + Role: mcp.RoleAssistant, + Content: llms.TextPart(err.Error()), + }, + Model: "llm-delegated", + StopReason: "error", + }, nil } - // Transform langchaingo response back to MCP format - // Get model name from hints if available - modelName := "" - if request.ModelPreferences != nil && len(request.ModelPreferences.Hints) > 0 { - modelName = request.ModelPreferences.Hints[0].Name - } + var samplingResponse *mcp.CreateMessageResult if len(res.Choices) == 0 { - return &mcp.CreateMessageResult{ + samplingResponse = &mcp.CreateMessageResult{ SamplingMessage: mcp.SamplingMessage{ Role: mcp.RoleAssistant, - Content: "", + Content: llms.TextPart(""), }, - Model: modelName, + Model: "llm-delegated", StopReason: "no_choices", - }, nil + } + } else { + // Use the first choice + choice := res.Choices[0] + + samplingResponse = &mcp.CreateMessageResult{ + SamplingMessage: mcp.SamplingMessage{ + Role: mcp.RoleAssistant, + Content: llms.TextPart(choice.Content), + }, + Model: "llm-delegated", + StopReason: "endTurn", + } } - // Use the first choice - choice := res.Choices[0] + if h.debug { + responseJson, err := json.MarshalIndent(samplingResponse, "", " ") + if err != nil { + return nil, err + } + + color.HiBlack("\nSamplingEnd\n%s\n", responseJson) + } - return &mcp.CreateMessageResult{ - SamplingMessage: mcp.SamplingMessage{ - Role: mcp.RoleAssistant, - Content: choice.Content, - }, - Model: modelName, - }, nil + return samplingResponse, nil } diff --git a/cli/azd/extensions/azd.ai.start/internal/utils/helpers.go b/cli/azd/extensions/azd.ai.start/internal/utils/helpers.go deleted file mode 100644 index 130734eff77..00000000000 --- a/cli/azd/extensions/azd.ai.start/internal/utils/helpers.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package utils - -import ( - "fmt" - "strings" - "time" - - "azd.ai.start/internal/session" -) - -// TruncateString truncates a string to a maximum length -func TruncateString(s string, maxLen int) string { - if len(s) <= maxLen { - return s - } - return s[:maxLen] + "..." 
-} - -// FormatActionsForValidation formats actions for the validation prompt -func FormatActionsForValidation(actions []session.ActionLog) string { - if len(actions) == 0 { - return "No actions executed" - } - - var formatted strings.Builder - for i, action := range actions { - status := "SUCCESS" - if !action.Success { - status = "FAILED" - } - formatted.WriteString(fmt.Sprintf("%d. Tool: %s | Input: %s | Status: %s | Duration: %v\n", - i+1, action.Tool, TruncateString(action.Input, 100), status, action.Duration.Round(time.Millisecond))) - if action.Output != "" { - formatted.WriteString(fmt.Sprintf(" Output: %s\n", TruncateString(action.Output, 200))) - } - } - return formatted.String() -} diff --git a/cli/azd/extensions/azd.ai.start/internal/validation/parser.go b/cli/azd/extensions/azd.ai.start/internal/validation/parser.go deleted file mode 100644 index 2f814546798..00000000000 --- a/cli/azd/extensions/azd.ai.start/internal/validation/parser.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package validation - -import ( - "strings" -) - -// ParseValidationResult parses the validation result from LLM response -func ParseValidationResult(response string) *ValidationResult { - result := &ValidationResult{ - Status: ValidationError, - Explanation: "Failed to parse validation response", - Confidence: 0.0, - } - - lines := strings.Split(response, "\n") - for _, line := range lines { - line = strings.TrimSpace(line) - - if strings.HasPrefix(line, "STATUS:") { - statusStr := strings.TrimSpace(strings.TrimPrefix(line, "STATUS:")) - switch strings.ToUpper(statusStr) { - case "COMPLETE": - result.Status = ValidationComplete - case "PARTIAL": - result.Status = ValidationPartial - case "INCOMPLETE": - result.Status = ValidationIncomplete - case "ERROR": - result.Status = ValidationError - } - } else if strings.HasPrefix(line, "EXPLANATION:") { - result.Explanation = strings.TrimSpace(strings.TrimPrefix(line, "EXPLANATION:")) - } else if strings.HasPrefix(line, "CONFIDENCE:") { - confidenceStr := strings.TrimSpace(strings.TrimPrefix(line, "CONFIDENCE:")) - if conf, err := parseFloat(confidenceStr); err == nil { - result.Confidence = conf - } - } - } - - // If we couldn't parse the status, try to infer from the response content - if result.Status == ValidationError { - responseUpper := strings.ToUpper(response) - if strings.Contains(responseUpper, "COMPLETE") { - result.Status = ValidationComplete - } else if strings.Contains(responseUpper, "PARTIAL") { - result.Status = ValidationPartial - } else if strings.Contains(responseUpper, "INCOMPLETE") { - result.Status = ValidationIncomplete - } - result.Explanation = response - result.Confidence = 0.7 - } - - return result -} - -// parseFloat safely parses a float from string -func parseFloat(s string) (float64, error) { - // Simple float parsing for confidence values - s = strings.TrimSpace(s) - if s == "1" || s == "1.0" { - return 1.0, nil - } else if s == "0" || s == "0.0" { - return 0.0, nil - } else if strings.HasPrefix(s, "0.") { - // Simple decimal parsing for common cases - switch s { - case "0.1": - return 0.1, nil - case "0.2": - return 0.2, nil - case "0.3": - return 0.3, nil - case "0.4": - return 0.4, nil - case "0.5": - return 0.5, nil - case "0.6": - return 0.6, nil - case "0.7": - return 0.7, nil - case "0.8": - return 0.8, nil - case "0.9": - return 0.9, nil - } - } - return 0.5, nil // Default confidence -} diff --git 
a/cli/azd/extensions/azd.ai.start/internal/validation/types.go b/cli/azd/extensions/azd.ai.start/internal/validation/types.go deleted file mode 100644 index 4b0ebcd25bc..00000000000 --- a/cli/azd/extensions/azd.ai.start/internal/validation/types.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package validation - -// ValidationResult represents the result of intent validation -type ValidationResult struct { - Status ValidationStatus - Explanation string - Confidence float64 -} - -// ValidationStatus represents the completion status of the original intent -type ValidationStatus string - -const ( - ValidationComplete ValidationStatus = "COMPLETE" - ValidationPartial ValidationStatus = "PARTIAL" - ValidationIncomplete ValidationStatus = "INCOMPLETE" - ValidationError ValidationStatus = "ERROR" -) From 063780917860b3a83ad736739e949a69221f0867 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Fri, 1 Aug 2025 14:30:46 -0700 Subject: [PATCH 047/116] Adds azd helper tools --- .../default_agent_format_instructions.txt | 2 +- .../tools/azd/azd_architecture_planning.go | 31 ++ .../tools/azd/azd_azure_yaml_generation.go | 31 ++ .../tools/azd/azd_discovery_analysis.go | 31 ++ .../tools/azd/azd_docker_generation.go | 31 ++ .../tools/azd/azd_iac_generation_rules.go | 3 +- .../azd/azd_infrastructure_generation.go | 31 ++ .../internal/tools/azd/azd_plan_init.go | 3 +- .../tools/azd/azd_project_validation.go | 37 +++ .../internal/tools/azd/azd_yaml_schema.go | 3 +- .../azd.ai.start/internal/tools/azd/loader.go | 15 + .../internal/tools/azd/prompts/README.md | 199 ++++++++++++ .../azd/prompts/azd_architecture_planning.md | 165 ++++++++++ .../azd/prompts/azd_azure_yaml_generation.md | 200 ++++++++++++ .../azd/prompts/azd_discovery_analysis.md | 200 ++++++++++++ .../azd/prompts/azd_docker_generation.md | 174 +++++++++++ .../azd/prompts/azd_iac_generation_rules.md | 22 ++ .../prompts/azd_infrastructure_generation.md | 159 ++++++++++ .../tools/azd/prompts/azd_plan_init.md | 291 +++++------------- .../azd/prompts/azd_project_validation.md | 181 +++++++++++ .../internal/tools/azd/prompts/prompts.go | 15 + .../azd.ai.start/internal/tools/io/loader.go | 2 +- .../internal/tools/io/write_file.go | 150 +++------ 23 files changed, 1646 insertions(+), 330 deletions(-) create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_architecture_planning.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_azure_yaml_generation.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_discovery_analysis.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_docker_generation.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_infrastructure_generation.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_project_validation.go create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/README.md create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_architecture_planning.md create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_azure_yaml_generation.md create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_discovery_analysis.md create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_docker_generation.md create mode 100644 
cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_infrastructure_generation.md create mode 100644 cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_project_validation.md diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt index d66dcb43d88..4ff35663ba8 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt +++ b/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt @@ -34,7 +34,7 @@ Observation: [result] Remember: Always continue taking actions until the user's request is fully satisfied. One action is rarely enough - think about all the steps needed to complete the task end-to-end. -When you are done answering the questions and performing all your tasks you MUST ALWAYS use the following format: +When you are done or handing control back to the user you MUST ALWAYS use the following format: Thought: Do I need to use a tool? No AI: [briefly summarize your response without all the details from your observations] \ No newline at end of file diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_architecture_planning.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_architecture_planning.go new file mode 100644 index 00000000000..44894270ea0 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_architecture_planning.go @@ -0,0 +1,31 @@ +package azd + +import ( + "context" + + "azd.ai.start/internal/tools/azd/prompts" + "github.com/tmc/langchaingo/tools" +) + +var _ tools.Tool = &AzdArchitecturePlanningTool{} + +type AzdArchitecturePlanningTool struct { +} + +func (t *AzdArchitecturePlanningTool) Name() string { + return "azd_architecture_planning" +} + +func (t *AzdArchitecturePlanningTool) Description() string { + return ` + Performs Azure service selection and architecture planning for applications preparing for Azure Developer CLI (AZD) initialization. + This is Phase 2 of the AZD migration process that maps components to Azure services, plans hosting strategies, + and designs infrastructure architecture based on discovery results. + + Input: "./azd-arch-plan.md" + ` +} + +func (t *AzdArchitecturePlanningTool) Call(ctx context.Context, input string) (string, error) { + return prompts.AzdArchitecturePlanningPrompt, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_azure_yaml_generation.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_azure_yaml_generation.go new file mode 100644 index 00000000000..ea2ae2e26f2 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_azure_yaml_generation.go @@ -0,0 +1,31 @@ +package azd + +import ( + "context" + + "azd.ai.start/internal/tools/azd/prompts" + "github.com/tmc/langchaingo/tools" +) + +var _ tools.Tool = &AzdAzureYamlGenerationTool{} + +type AzdAzureYamlGenerationTool struct { +} + +func (t *AzdAzureYamlGenerationTool) Name() string { + return "azd_azure_yaml_generation" +} + +func (t *AzdAzureYamlGenerationTool) Description() string { + return ` + Generates the azure.yaml configuration file for Azure Developer CLI (AZD) projects. + This specialized tool focuses on creating service definitions, hosting configurations, + and deployment instructions. Can be used independently for service configuration updates. 
+ + Input: "./azd-arch-plan.md" + ` +} + +func (t *AzdAzureYamlGenerationTool) Call(ctx context.Context, input string) (string, error) { + return prompts.AzdAzureYamlGenerationPrompt, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_discovery_analysis.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_discovery_analysis.go new file mode 100644 index 00000000000..db865d67398 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_discovery_analysis.go @@ -0,0 +1,31 @@ +package azd + +import ( + "context" + + "azd.ai.start/internal/tools/azd/prompts" + "github.com/tmc/langchaingo/tools" +) + +var _ tools.Tool = &AzdDiscoveryAnalysisTool{} + +type AzdDiscoveryAnalysisTool struct { +} + +func (t *AzdDiscoveryAnalysisTool) Name() string { + return "azd_discovery_analysis" +} + +func (t *AzdDiscoveryAnalysisTool) Description() string { + return ` + Performs comprehensive discovery and analysis of applications to prepare them for Azure Developer CLI (AZD) initialization. + This is Phase 1 of the AZD migration process that analyzes codebase, identifies components and dependencies, + and creates a foundation for architecture planning. + + Input: "./azd-arch-plan.md" + ` +} + +func (t *AzdDiscoveryAnalysisTool) Call(ctx context.Context, input string) (string, error) { + return prompts.AzdDiscoveryAnalysisPrompt, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_docker_generation.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_docker_generation.go new file mode 100644 index 00000000000..89ddea37bae --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_docker_generation.go @@ -0,0 +1,31 @@ +package azd + +import ( + "context" + + "azd.ai.start/internal/tools/azd/prompts" + "github.com/tmc/langchaingo/tools" +) + +var _ tools.Tool = &AzdDockerGenerationTool{} + +type AzdDockerGenerationTool struct { +} + +func (t *AzdDockerGenerationTool) Name() string { + return "azd_docker_generation" +} + +func (t *AzdDockerGenerationTool) Description() string { + return ` + Generates Dockerfiles and container configurations for Azure Developer CLI (AZD) projects. + This specialized tool focuses on containerization requirements, creating optimized Dockerfiles + for different programming languages, and configuring container-specific settings for Azure hosting. + + Input: "./azd-arch-plan.md" + ` +} + +func (t *AzdDockerGenerationTool) Call(ctx context.Context, input string) (string, error) { + return prompts.AzdDockerGenerationPrompt, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go index f67c067e820..47e2c5a738e 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go @@ -19,7 +19,8 @@ func (t *AzdIacGenerationRulesTool) Name() string { func (t *AzdIacGenerationRulesTool) Description() string { return ` Gets the infrastructure as code (IaC) rules and best practices and patterns to use when generating bicep files and modules for use within AZD. 
- Input: empty string + + Input: "./azd-arch-plan.md" ` } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_infrastructure_generation.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_infrastructure_generation.go new file mode 100644 index 00000000000..b93c5960369 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_infrastructure_generation.go @@ -0,0 +1,31 @@ +package azd + +import ( + "context" + + "azd.ai.start/internal/tools/azd/prompts" + "github.com/tmc/langchaingo/tools" +) + +var _ tools.Tool = &AzdInfrastructureGenerationTool{} + +type AzdInfrastructureGenerationTool struct { +} + +func (t *AzdInfrastructureGenerationTool) Name() string { + return "azd_infrastructure_generation" +} + +func (t *AzdInfrastructureGenerationTool) Description() string { + return ` + Generates Bicep infrastructure templates for Azure Developer CLI (AZD) projects. + This specialized tool focuses on creating modular Bicep templates, parameter files, + and implementing Azure security and operational best practices for infrastructure as code. + + Input: "./azd-arch-plan.md" + ` +} + +func (t *AzdInfrastructureGenerationTool) Call(ctx context.Context, input string) (string, error) { + return prompts.AzdInfrastructureGenerationPrompt, nil +} diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go index 1e648939d2b..005ebafb441 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go @@ -19,7 +19,8 @@ func (t *AzdPlanInitTool) Name() string { func (t *AzdPlanInitTool) Description() string { return ` Gets the required workflow steps and best practices and patterns for initializing or migrating an application to use AZD. - Input: empty string + + Input: "./azd-arch-plan.md" ` } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_project_validation.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_project_validation.go new file mode 100644 index 00000000000..de7639839a5 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_project_validation.go @@ -0,0 +1,37 @@ +package azd + +import ( + "context" + _ "embed" + + "github.com/tmc/langchaingo/tools" +) + +//go:embed prompts/azd_project_validation.md +var azdProjectValidationPrompt string + +// AzdProjectValidationTool validates an AZD project by running comprehensive checks on all components +// including azure.yaml schema validation, Bicep template validation, environment setup, packaging, +// and deployment preview. +type AzdProjectValidationTool struct{} + +// Name returns the name of the tool. +func (t *AzdProjectValidationTool) Name() string { + return "azd_project_validation" +} + +// Description returns the description of the tool. +func (t *AzdProjectValidationTool) Description() string { + return ` + Validates an AZD project by running comprehensive checks on all components including azure.yaml schema validation, Bicep template validation, environment setup, packaging, and deployment preview. + + Input: "./azd-arch-plan.md"` +} + +// Call executes the tool with the given arguments. +func (t *AzdProjectValidationTool) Call(ctx context.Context, args string) (string, error) { + return azdProjectValidationPrompt, nil +} + +// Ensure AzdProjectValidationTool implements the Tool interface. 
+var _ tools.Tool = (*AzdProjectValidationTool)(nil) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go index db83ddf3d08..850091db4ea 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go @@ -19,7 +19,8 @@ func (t *AzdYamlSchemaTool) Name() string { func (t *AzdYamlSchemaTool) Description() string { return ` Gets the Azure YAML JSON schema file specification and structure for azure.yaml configuration files used in AZD. - Input: empty string + + Input: ` } diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go index 648d70bb569..b4ac9a4de31 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go @@ -18,7 +18,22 @@ func NewAzdToolsLoader(callbackHandler callbacks.Handler) *AzdToolsLoader { func (l *AzdToolsLoader) LoadTools() ([]tools.Tool, error) { return []tools.Tool{ + // Original orchestrating tool &AzdPlanInitTool{}, + + // Core workflow tools (use in sequence) + &AzdDiscoveryAnalysisTool{}, + &AzdArchitecturePlanningTool{}, + + // Focused file generation tools (use as needed) + &AzdAzureYamlGenerationTool{}, + &AzdInfrastructureGenerationTool{}, + &AzdDockerGenerationTool{}, + + // Validation tool (final step) + &AzdProjectValidationTool{}, + + // Supporting tools &AzdIacGenerationRulesTool{}, &AzdYamlSchemaTool{}, }, nil diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/README.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/README.md new file mode 100644 index 00000000000..01c5a3ab0dd --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/README.md @@ -0,0 +1,199 @@ +# AZD Modular Tools Overview + +This document provides an overview of the modular AZD initialization tools that replace the monolithic `azd_plan_init` tool. Each tool is designed to be used independently or as part of a complete AZD migration workflow. + +## Tool Structure + +The AZD initialization process has been broken down into focused, modular tools: + +### 1. Discovery and Analysis Tool (`azd_discovery_analysis`) + +**Purpose:** Analyze applications and identify components and dependencies +**Use When:** Starting a new AZD migration or need to understand an existing codebase +**Output:** Component inventory and dependency mapping in `azd-arch-plan.md` + +### 2. Architecture Planning Tool (`azd_architecture_planning`) + +**Purpose:** Select Azure services and plan hosting strategies +**Use When:** You have discovered components and need to plan Azure service mapping +**Prerequisites:** Completed discovery and analysis +**Output:** Architecture decisions and service selections in `azd-arch-plan.md` + +### 3. Azure.yaml Generation Tool (`azd_azure_yaml_generation`) + +**Purpose:** Generate azure.yaml service configuration file +**Use When:** You need to create or update just the service definitions +**Prerequisites:** Understanding of application services and hosting requirements +**Output:** Valid `azure.yaml` file + +### 4. 
Infrastructure Generation Tool (`azd_infrastructure_generation`) + +**Purpose:** Generate Bicep infrastructure templates +**Use When:** You need to create or update just the infrastructure components +**Prerequisites:** Architecture decisions about Azure services +**Output:** Complete Bicep template structure + +### 5. Docker Generation Tool (`azd_docker_generation`) + +**Purpose:** Generate Dockerfiles and container configurations +**Use When:** You need containerization for your services +**Prerequisites:** Understanding of application services and containerization needs +**Output:** Optimized Dockerfiles and .dockerignore files + +### 6. Project Validation Tool (`azd_project_validation`) + +**Purpose:** Validate the complete AZD project setup and configuration +**Use When:** All files are generated and you need to validate the setup +**Prerequisites:** All configuration files generated +**Output:** Validation report and ready-to-deploy confirmation + +## Complete Workflow + +For a full AZD migration, use the tools in this sequence: + +``` +1. azd_discovery_analysis + ↓ +2. azd_architecture_planning + ↓ +3a. azd_azure_yaml_generation +3b. azd_infrastructure_generation +3c. azd_docker_generation (if containerization needed) + ↓ +4. azd_project_validation +``` + +## Selective Usage + +You can also use individual tools for specific tasks: + +**Generate only azure.yaml:** +``` +azd_discovery_analysis → azd_azure_yaml_generation +``` + +**Generate only infrastructure:** +``` +azd_architecture_planning → azd_infrastructure_generation +``` + +**Add containerization:** +``` +azd_docker_generation (based on existing analysis) +``` + +**Validate existing project:** +``` +azd_project_validation (for validation and testing) +``` + +## Central Planning Document + +All tools use `azd-arch-plan.md` as the central planning document: + +- **Created by:** Discovery and Analysis tool +- **Updated by:** All subsequent tools +- **Purpose:** Track progress, document decisions, and maintain project state +- **Location:** Current working directory + +## Key Features + +### Modular Design +- Each tool has a specific responsibility +- Tools can be used independently or together +- Clear prerequisites and outputs +- Consistent documentation patterns + +### Azure Best Practices +- All tools implement Azure best practices +- Security-first approach +- Cost optimization considerations +- Operational excellence patterns + +### LLM Optimized +- Clear, actionable instructions +- Structured output formats +- Comprehensive validation steps +- Troubleshooting guidance + +### Progress Tracking +- Checkboxes for completed actions +- Clear success criteria +- Validation requirements +- Next step guidance + +## Tool Selection Guide + +**Use the Discovery Tool when:** +- Starting a new AZD migration +- Don't understand the application structure +- Need to document existing architecture +- Want to identify all components and dependencies + +**Use the Architecture Planning Tool when:** +- Have component inventory +- Need to select Azure services +- Planning hosting strategies +- Designing infrastructure architecture + +**Use the File Generation Tool when:** +- Have architecture decisions +- Need to create all AZD files +- Want complete project setup +- Ready to implement infrastructure + +**Use the Environment Initialization Tool when:** +- All files are generated +- Ready to create AZD environment +- Need to validate complete setup +- Preparing for deployment + +**Use the Azure.yaml Generation Tool when:** +- Only need service 
configuration +- Updating existing azure.yaml +- Working with known service requirements +- Quick service definition setup + +**Use the Infrastructure Generation Tool when:** +- Only need Bicep templates +- Updating existing infrastructure +- Working with specific Azure service requirements +- Advanced infrastructure customization + +## Benefits of Modular Approach + +### For Users +- **Faster iterations:** Update only what you need +- **Better understanding:** Focus on one aspect at a time +- **Reduced complexity:** Smaller, focused tasks +- **Flexible workflow:** Use tools in different orders based on needs + +### For LLMs +- **Clearer context:** Each tool has specific scope +- **Better accuracy:** Focused instructions reduce errors +- **Improved validation:** Tool-specific validation steps +- **Enhanced troubleshooting:** Targeted problem resolution + +### For Maintenance +- **Easier updates:** Modify individual tools without affecting others +- **Better testing:** Test each tool independently +- **Clearer documentation:** Each tool is self-contained +- **Improved reusability:** Tools can be repurposed for different scenarios + +## Migration from Original Tool + +If you were using the original `azd_plan_init` tool, here's how to migrate: + +**Original Phase 1 (Discovery and Analysis):** +Use `azd_discovery_analysis` tool + +**Original Phase 2 (Architecture Planning):** +Use `azd_architecture_planning` tool + +**Original Phase 3 (File Generation):** +Use `azd_azure_yaml_generation` + `azd_infrastructure_generation` + `azd_docker_generation` for focused file generation + +**Original Phase 4 (Project Validation):** +Use `azd_project_validation` tool for final validation and setup verification + +The modular tools provide the same functionality with improved focus and flexibility. diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_architecture_planning.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_architecture_planning.md new file mode 100644 index 00000000000..b85778ecb24 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_architecture_planning.md @@ -0,0 +1,165 @@ +# AZD Architecture Planning Tool + +This tool performs Azure service selection and architecture planning for Azure Developer CLI (AZD) initialization. This is Phase 2 of the AZD migration process. + +## Overview + +Use discovery results to select appropriate Azure services, plan hosting strategies, and design infrastructure architecture. + +**IMPORTANT:** Before starting, review the `azd-arch-plan.md` file in your current working directory to understand discovered components and dependencies from the discovery phase. 
+ +## Success Criteria + +- [ ] Azure service selections made for all components +- [ ] Hosting strategies defined for each service +- [ ] Containerization plans documented +- [ ] Infrastructure architecture designed +- [ ] Ready to proceed to file generation phase + +## Azure Service Selection + +**REQUIRED ANALYSIS:** + +For each discovered application component, select the most appropriate Azure hosting platform: + +### Azure Container Apps (PREFERRED) + +**Use for:** Microservices, containerized applications, event-driven workloads +**Benefits:** Auto-scaling, managed Kubernetes, simplified deployment +**Consider when:** Component can be containerized, needs elastic scaling + +### Azure App Service + +**Use for:** Web applications, REST APIs with specific runtime needs +**Benefits:** Managed platform, built-in CI/CD, easy SSL/custom domains +**Consider when:** Need specific runtime versions, Windows-specific features + +### Azure Functions + +**Use for:** Event processing, scheduled tasks, lightweight APIs +**Benefits:** Serverless, automatic scaling, pay-per-execution +**Consider when:** Event-driven processing, stateless operations + +### Azure Static Web Apps + +**Use for:** Frontend SPAs, static sites, JAMstack applications +**Benefits:** Global CDN, built-in authentication, API integration +**Consider when:** Static content, minimal backend requirements + +## Selection Criteria + +**REQUIRED ANALYSIS:** + +For each discovered component, consider: + +- Scalability requirements and traffic patterns +- Runtime and platform needs +- Operational complexity preferences +- Cost considerations +- Team expertise and preferences + +## Containerization Planning + +**REQUIRED ASSESSMENT:** + +For each component, determine: + +- **Containerization Feasibility:** Can it run in Docker? Windows-specific dependencies? 
+- **Docker Strategy:** Base image selection, port mappings, environment variables +- **Resource Requirements:** CPU, memory, storage needs +- **Health Check Strategy:** Endpoint patterns for monitoring + +## Data Storage Planning + +**REQUIRED ANALYSIS:** + +Select appropriate Azure database services: + +### Azure SQL Database + +**Use for:** SQL Server compatibility, complex queries, ACID compliance +**Consider when:** Relational data model, existing SQL Server applications + +### Azure Database for PostgreSQL/MySQL + +**Use for:** PostgreSQL/MySQL workloads, web applications +**Consider when:** Specific database engine compatibility required + +### Azure Cosmos DB + +**Use for:** NoSQL requirements, global scale, flexible schemas +**Consider when:** Multiple data models, global distribution needed + +### Azure Cache for Redis + +**Use for:** Application caching, session storage, real-time analytics +**Consider when:** Performance optimization, session management + +## Messaging and Integration Planning + +**REQUIRED ANALYSIS:** + +Select messaging services based on patterns: + +### Azure Service Bus + +**Use for:** Enterprise messaging, guaranteed delivery, complex routing +**Consider when:** Reliable messaging, enterprise scenarios + +### Azure Event Hubs + +**Use for:** High-throughput event streaming, telemetry ingestion +**Consider when:** Big data scenarios, real-time analytics + +### Azure Event Grid + +**Use for:** Event-driven architectures, reactive programming +**Consider when:** Decoupled systems, serverless architectures + +## Update Architecture Documentation + +**REQUIRED ACTIONS:** + +Update `azd-arch-plan.md` with: + +### Azure Service Mapping Table + +```markdown +| Component | Current Tech | Azure Service | Rationale | +|-----------|-------------|---------------|-----------| +| Web App | React | Static Web Apps | Frontend SPA | +| API Service | Node.js | Container Apps | Microservice architecture | +| Database | PostgreSQL | Azure Database for PostgreSQL | Existing dependency | +``` + +### Hosting Strategy Summary + +- Document hosting decisions for each component +- Include containerization plans where applicable +- Note resource requirements and scaling strategies + +### Infrastructure Architecture + +- Resource group organization strategy +- Networking and security design approach +- Monitoring and logging strategy +- Integration patterns between services + +### Next Steps Checklist + +- [ ] Azure service selected for each component with rationale +- [ ] Hosting strategies defined +- [ ] Containerization plans documented +- [ ] Data storage strategies planned +- [ ] Ready to proceed to file generation phase + +## Next Phase + +After completing architecture planning, proceed to the appropriate file generation tool: + +- Use `azd_azure_yaml_generation` tool for azure.yaml configuration +- Use `azd_infrastructure_generation` tool for Bicep templates +- Use `azd_docker_generation` tool for container configurations +- Use `azd_project_validation` tool for final project validation + +**IMPORTANT:** Keep `azd-arch-plan.md` updated as the central reference for all architecture decisions. This document guides subsequent phases and serves as implementation documentation. 
diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_azure_yaml_generation.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_azure_yaml_generation.md new file mode 100644 index 00000000000..84a7618ea0a --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_azure_yaml_generation.md @@ -0,0 +1,200 @@ +# AZD Azure.yaml Generation Tool + +This specialized tool generates the `azure.yaml` configuration file for Azure Developer CLI (AZD) projects. + +## Overview + +Generate a valid `azure.yaml` configuration file with proper service hosting, build, and deployment settings. + +**IMPORTANT:** Before starting, check if `azd-arch-plan.md` exists in your current working directory. If it exists, review it to understand previous analysis and architecture decisions. Use the existing `azd_yaml_schema` tool for schema validation. + +## Success Criteria + +- [ ] Valid `azure.yaml` file created in root directory +- [ ] All application services properly configured +- [ ] Service hosting configurations match Azure service selections +- [ ] Build and deployment instructions complete +- [ ] File validates against AZD schema (use `azd_yaml_schema` tool) + +## Service Analysis Requirements + +**REQUIRED ACTIONS:** + +1. **Identify Application Services:** + - Frontend applications (React, Angular, Vue.js, static sites) + - Backend services (REST APIs, microservices, GraphQL, gRPC) + - Function-based services (Azure Functions) + - Background services and workers + +2. **Determine Hosting Requirements:** + - **Container Apps:** Microservices, APIs, containerized web apps + - **App Service:** Traditional web applications, APIs + - **Static Web Apps:** Frontend SPAs, static sites + - **Functions:** Event-driven, serverless workloads + +3. **Analyze Build Requirements:** + - Programming language and framework + - Package manager (npm, pip, dotnet, maven) + - Build commands and output directories + - Dependency management needs + +## Azure.yaml Configuration Requirements + +**REQUIRED ACTIONS:** + +Create a complete `azure.yaml` file in the root directory following these patterns: + +### Basic Structure Requirements + +**IMPORTANT:** Use the `azd_yaml_schema` tool for complete schema definition, structure requirements, and validation rules. 
+ +Basic structure: + +```yaml +name: [project-name] +services: + # Service configurations +infra: + provider: bicep + path: infra +``` + +### Service Configuration Patterns + +**Azure Container Apps (for microservices, APIs, containerized apps):** + +```yaml +services: + api: + project: ./src/api + language: js + host: containerapp + docker: + path: ./src/api/Dockerfile +``` + +**Azure App Service (for traditional web apps):** + +```yaml +services: + webapp: + project: ./src/webapp + language: js + host: appservice +``` + +**Azure Functions (for serverless workloads):** + +```yaml +services: + functions: + project: ./src/functions + language: js + host: function +``` + +**Azure Static Web Apps (for SPAs, static sites):** + +```yaml +services: + frontend: + project: ./src/frontend + language: js + host: staticwebapp + dist: build +``` + +### Advanced Configuration Options + +**Environment Variables:** + +```yaml +services: + api: + env: + - name: NODE_ENV + value: production + - name: DATABASE_URL + value: "{{ .Env.DATABASE_URL }}" +``` + +**Custom Build Commands:** + +```yaml +services: + frontend: + hooks: + prebuild: + posix: npm install + build: + posix: npm run build +``` + +## Configuration Requirements + +**CRITICAL REQUIREMENTS:** + +- Service names must be valid Azure resource names (alphanumeric, hyphens only) +- All `project` paths must point to existing directories +- All `docker.path` references must point to existing Dockerfiles +- Host types must be: `containerapp`, `appservice`, `function`, or `staticwebapp` +- Language must match detected programming language +- `dist` paths must match build output directories + +## Validation Requirements + +**VALIDATION STEPS:** + +1. **Schema Validation:** Use `azd_yaml_schema` tool for authoritative schema validation +2. **Path Validation:** Ensure all referenced paths exist +3. **Configuration Testing:** Run `azd show` to test service discovery + +**Validation Commands:** + +```bash +# Validate configuration +azd config show + +# Test service discovery +azd show +``` + +## Common Patterns + +**Multi-Service Microservices:** + +- Frontend: Static Web App +- APIs: Container Apps with Dockerfiles +- Background Services: Container Apps or Functions + +**Full-Stack Application:** + +- Frontend: Static Web App +- Backend: Container App or App Service + +**Serverless Application:** + +- Frontend: Static Web App +- APIs: Azure Functions + +## Update Documentation + +**REQUIRED ACTIONS:** + +Update `azd-arch-plan.md` with: + +- Generated azure.yaml location and schema version +- Service configuration table (service, type, host, language, path) +- Hosting strategy summary by Azure service type +- Build and deployment configuration decisions +- Docker configuration details +- Validation results + +## Next Steps + +After azure.yaml generation is complete: + +1. Validate configuration using `azd_yaml_schema` tool +2. Test service discovery with `azd show` + +**IMPORTANT:** Reference existing tools for specific functionality. Use `azd_yaml_schema` for schema validation. 
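+## Example: Combined Service Configuration
+
+As an end-to-end sketch of the patterns above, a hypothetical two-service project (a React frontend plus a containerized Node.js API) might produce an azure.yaml like the one below. The project name, directory layout (`./src/frontend`, `./src/api`), and Dockerfile path are illustrative assumptions; derive the real values from discovery results and validate the result with the `azd_yaml_schema` tool.
+
+```yaml
+# Illustrative sketch -- names and paths must match the actual repository
+name: contoso-shop
+services:
+  frontend:
+    project: ./src/frontend
+    language: js
+    host: staticwebapp
+    dist: build
+  api:
+    project: ./src/api
+    language: js
+    host: containerapp
+    docker:
+      path: ./src/api/Dockerfile
+infra:
+  provider: bicep
+  path: infra
+```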
diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_discovery_analysis.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_discovery_analysis.md new file mode 100644 index 00000000000..10ff9e4e49c --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_discovery_analysis.md @@ -0,0 +1,200 @@ +# AZD Application Discovery and Analysis Tool + +This tool performs comprehensive discovery and analysis of applications to prepare them for Azure Developer CLI (AZD) initialization. This is Phase 1 of the AZD migration process. + +Always use Azure best practices with intelligent defaults. + +## Overview + +This tool analyzes your current codebase and architecture to: +1. Identify all application components and dependencies +2. Classify components by type and hosting requirements +3. Map dependencies and communication patterns +4. Provide foundation for architecture planning + +**IMPORTANT:** Before starting, check if `azd-arch-plan.md` exists in your current working directory. If it exists, review it to understand what analysis has already been completed and build upon that work. + +## Success Criteria + +The discovery and analysis is successful when: + +- [ ] Complete file system inventory is documented +- [ ] All application components are identified and classified +- [ ] Component dependencies are mapped +- [ ] Results are documented in `azd-arch-plan.md` +- [ ] Ready to proceed to architecture planning phase + +## Step 1: Deep File System Analysis + +**REQUIRED ACTIONS:** + +- Scan all files in the current working directory recursively +- Document file structure, programming languages, and frameworks detected +- Identify configuration files (package.json, requirements.txt, pom.xml, etc.) +- Locate any existing Docker files, docker-compose files, or containerization configs +- Find database configuration files and connection strings +- Identify API endpoints, service definitions, and application entry points +- Look for existing CI/CD pipeline files (.github/workflows, azure-pipelines.yml, etc.) +- Identify documentation files (README.md, API docs, architecture docs) + +**ANALYSIS QUESTIONS TO ANSWER:** + +- What programming languages and frameworks are used? +- What build systems and package managers are in use? +- Are there existing containerization configurations? +- What ports and endpoints are exposed? +- What external dependencies are required? +- Are there existing deployment or infrastructure configurations? 
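+To help answer these questions quickly, a rough inventory pass such as the sketch below can be used; the file patterns are assumptions and should be extended for the languages actually present in the repository.
+
+```bash
+# Rough inventory sketch -- extend the patterns for the detected stack
+# Likely build/package manifests
+find . -type f \( -name "package.json" -o -name "requirements.txt" -o -name "pom.xml" -o -name "*.csproj" -o -name "go.mod" \) -not -path "*/node_modules/*"
+
+# Containerization and CI/CD configuration
+find . -type f \( -name "Dockerfile*" -o -name "docker-compose*.yml" -o -name "azure-pipelines.yml" \) -not -path "*/node_modules/*"
+ls .github/workflows 2>/dev/null
+```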
+ +**OUTPUT:** Complete inventory of all discoverable application artifacts + +## Step 2: Component Classification + +**REQUIRED ACTIONS:** + +Categorize each discovered component into one of these types: + +- **Web Applications** (frontend, SPA, static sites) + - React, Angular, Vue.js applications + - Static HTML/CSS/JavaScript sites + - Server-rendered web applications + +- **API Services** (REST APIs, GraphQL, gRPC services) + - RESTful web APIs + - GraphQL endpoints + - gRPC services + - Microservices + +- **Background Services** (workers, processors, scheduled jobs) + - Message queue processors + - Scheduled task runners + - Data processing pipelines + - Event handlers + +- **Databases** (relational, NoSQL, caching) + - SQL Server, PostgreSQL, MySQL databases + - NoSQL databases (MongoDB, CosmosDB) + - Caching layers (Redis, Memcached) + - Database migration scripts + +- **Messaging Systems** (queues, topics, event streams) + - Message queues + - Event streaming platforms + - Pub/sub systems + +- **AI/ML Components** (models, inference endpoints, training jobs) + - Machine learning models + - AI inference endpoints + - Training pipelines + - Data preprocessing services + +- **Supporting Services** (authentication, logging, monitoring) + - Authentication services + - Logging aggregators + - Monitoring and metrics + - Configuration services + +**CLASSIFICATION CRITERIA:** + +For each component, determine: +- Primary function and responsibility +- Runtime requirements +- Scalability needs +- Security considerations +- Integration points + +**OUTPUT:** Structured component inventory with classifications + +## Step 3: Dependency Mapping + +**REQUIRED ACTIONS:** + +- Map inter-component dependencies and communication patterns +- Identify external service dependencies (third-party APIs, SaaS services) +- Document data flow between components +- Identify shared resources and configuration +- Analyze network communication requirements +- Document authentication and authorization flows + +**DEPENDENCY ANALYSIS:** + +- **Internal Dependencies:** How components communicate with each other +- **External Dependencies:** Third-party services, APIs, databases +- **Data Dependencies:** Shared databases, file systems, caches +- **Configuration Dependencies:** Shared settings, secrets, environment variables +- **Runtime Dependencies:** Required services for startup and operation + +**COMMUNICATION PATTERNS TO IDENTIFY:** + +- Synchronous HTTP/HTTPS calls +- Asynchronous messaging +- Database connections +- File system access +- Caching patterns +- Authentication flows + +**OUTPUT:** Component dependency graph and communication matrix + +## Step 4: Generate Discovery Report + +**REQUIRED ACTIONS:** + +Create or update `azd-arch-plan.md` with the following sections: + +```markdown +# AZD Architecture Plan + +## Discovery and Analysis Results + +### Application Overview +- [Summary of application type and purpose] +- [Key technologies and frameworks identified] +- [Overall architecture pattern (monolith, microservices, etc.)] + +### Component Inventory +[For each component discovered:] +- **Component Name:** [name] +- **Type:** [classification] +- **Technology:** [language/framework] +- **Location:** [file path/directory] +- **Purpose:** [brief description] +- **Entry Points:** [how component is accessed] +- **Configuration:** [key config files] + +### Dependency Map +[Visual or text representation of dependencies] +- **Component A** → **Component B** (HTTP API) +- **Component B** → **Database** (SQL 
connection) +- **Component A** → **External API** (REST calls) + +### External Dependencies +- [List of third-party services] +- [Required environment variables] +- [External configuration requirements] + +### Next Steps +- [ ] Review discovery results +- [ ] Proceed to architecture planning phase +- [ ] Use `azd_architecture_planning` tool +``` + +## Validation and Next Steps + +**VALIDATION CHECKLIST:** + +- [ ] All major application components identified +- [ ] Component types and technologies documented +- [ ] Dependencies mapped and understood +- [ ] External services and APIs catalogued +- [ ] `azd-arch-plan.md` created or updated with findings + +**NEXT PHASE:** + +After completing this discovery phase, proceed to the **Architecture Planning** phase using the `azd_architecture_planning` tool. This next phase will use your discovery results to: + +- Select appropriate Azure services for each component +- Plan hosting strategies and containerization +- Design infrastructure architecture +- Prepare for configuration file generation + +**IMPORTANT:** Keep the `azd-arch-plan.md` file updated throughout the process as it serves as the central planning document for your AZD migration. diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_docker_generation.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_docker_generation.md new file mode 100644 index 00000000000..38091d58d9a --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_docker_generation.md @@ -0,0 +1,174 @@ +# AZD Docker Generation Tool + +This specialized tool generates Dockerfiles and container configurations for Azure Developer CLI (AZD) projects. + +## Overview + +Generate optimized Dockerfiles for different programming languages and frameworks with Azure Container Apps best practices. + +**IMPORTANT:** Before starting, check if `azd-arch-plan.md` exists in your current working directory. If it exists, review it to understand discovered services and containerization requirements. + +## Success Criteria + +- [ ] Dockerfiles created for all containerizable services +- [ ] .dockerignore files generated for build optimization +- [ ] Health checks and security configurations implemented +- [ ] Multi-stage builds used where appropriate +- [ ] Azure Container Apps best practices followed + +## Containerization Requirements Analysis + +**REQUIRED ACTIONS:** + +1. **Identify Containerization Candidates:** + - Microservices and APIs (REST, GraphQL, gRPC) + - Web applications needing runtime flexibility + - Background services and workers + - Custom applications with specific runtime requirements + +2. **Services That Don't Need Containerization:** + - Static websites (use Azure Static Web Apps) + - Azure Functions (serverless, managed runtime) + - Database services (use managed Azure databases) + +3. **Language and Framework Detection:** + - Programming language (Node.js, Python, .NET, Java, Go, etc.) + - Framework type (Express, FastAPI, ASP.NET Core, Spring Boot) + - Build requirements (npm, pip, dotnet, maven, gradle) + - Runtime dependencies and port configurations +- **Programming language** (Node.js, Python, .NET, Java, Go, etc.) 
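+Once a service is confirmed as a containerization candidate, the detailed requirements in the next section describe what its Dockerfile must contain. As a hedged preview for a Node.js service, the end result typically resembles the sketch below; the build output directory (`dist`), port `3000`, `/health` endpoint, and `npm start` script are assumptions that must match the actual application.
+
+```dockerfile
+# Illustrative multi-stage sketch -- adjust paths, port, and commands to the real service
+FROM node:18-alpine AS build
+WORKDIR /app
+COPY package*.json ./
+RUN npm ci
+COPY . .
+RUN npm run build
+
+FROM node:18-alpine
+WORKDIR /app
+ENV NODE_ENV=production
+COPY package*.json ./
+RUN npm ci --only=production
+COPY --from=build /app/dist ./dist
+RUN addgroup -S nodejs && adduser -S nodejs -G nodejs
+USER nodejs
+EXPOSE 3000
+HEALTHCHECK --interval=30s --timeout=3s CMD wget -qO- http://localhost:3000/health || exit 1
+CMD ["npm", "start"]
+```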
+ +## Dockerfile Generation Requirements + +**REQUIRED ACTIONS:** + +For each containerizable service, generate optimized Dockerfiles following these patterns: + +### Language-Specific Requirements + +**Node.js Applications:** +- Use `node:18-alpine` base image +- Implement multi-stage build (build + runtime) +- Copy package*.json first for layer caching +- Use `npm ci --only=production` +- Create non-root user (`nodejs`) +- Expose appropriate port (typically 3000) +- Include health check endpoint +- Use `CMD ["npm", "start"]` + +**Python Applications:** +- Use `python:3.11-slim` base image +- Set environment variables: `PYTHONDONTWRITEBYTECODE=1`, `PYTHONUNBUFFERED=1` +- Copy requirements.txt first for caching +- Use `pip install --no-cache-dir` +- Create non-root user (`appuser`) +- Expose appropriate port (typically 8000) +- Include health check endpoint +- Use appropriate startup command (uvicorn, gunicorn, etc.) + +**.NET Applications:** +- Use `mcr.microsoft.com/dotnet/sdk:8.0` for build stage +- Use `mcr.microsoft.com/dotnet/aspnet:8.0` for runtime +- Multi-stage build: restore → build → publish → runtime +- Copy .csproj first for layer caching +- Create non-root user (`appuser`) +- Expose port 8080 (standard for .NET in containers) +- Include health check endpoint +- Use `ENTRYPOINT ["dotnet", "AppName.dll"]` + +**Java/Spring Boot Applications:** +- Use `openjdk:17-jdk-slim` for build, `openjdk:17-jre-slim` for runtime +- Copy pom.xml/build.gradle first for dependency caching +- Multi-stage build pattern +- Create non-root user (`appuser`) +- Expose port 8080 +- Include actuator health check +- Use `CMD ["java", "-jar", "app.jar"]` + +## Security and Best Practices + +**CRITICAL REQUIREMENTS:** + +- **Always use non-root users** in production stage +- **Use minimal base images** (alpine, slim variants) +- **Implement multi-stage builds** to reduce image size +- **Include health check endpoints** for Container Apps +- **Set proper working directories** and file permissions +- **Use layer caching** by copying dependency files first +- **Never include secrets** in container images + +## .dockerignore Requirements + +**REQUIRED ACTIONS:** + +Create .dockerignore files with these patterns: + +**Universal Exclusions:** +- Version control: `.git`, `.gitignore` +- Documentation: `README.md`, `*.md` +- IDE files: `.vscode/`, `.idea/`, `*.swp` +- OS files: `.DS_Store`, `Thumbs.db` +- Docker files: `Dockerfile*`, `.dockerignore`, `docker-compose*.yml` +- Build artifacts and logs + +**Language-Specific Exclusions:** +- **Node.js:** `node_modules/`, `npm-debug.log*`, `coverage/`, `dist/` +- **Python:** `__pycache__/`, `*.pyc`, `venv/`, `.pytest_cache/`, `dist/` +- **.NET:** `bin/`, `obj/`, `*.user`, `packages/`, `.vs/` +- **Java:** `target/`, `*.class`, `.mvn/repository` + +## Health Check Implementation + +**REQUIRED ACTIONS:** + +Each containerized service must include a health check endpoint: + +- **Endpoint:** `/health` (standard convention) +- **Response:** JSON with status and timestamp +- **HTTP Status:** 200 for healthy, 503 for unhealthy +- **Timeout:** 3 seconds maximum response time +- **Content:** `{"status": "healthy", "timestamp": "ISO-8601"}` + +## Container Optimization + +**REQUIRED OPTIMIZATIONS:** + +- Use multi-stage builds to exclude build tools from production images +- Copy package/dependency files before source code for better caching +- Combine RUN commands to reduce layers +- Clean package manager caches in same RUN command +- Use specific versions for base images 
(avoid `latest`) +- Set resource limits appropriate for Azure Container Apps + +## Validation and Testing + +**VALIDATION REQUIREMENTS:** + +- All Dockerfiles must build successfully: `docker build -t test-image .` +- Containers must run with non-root users +- Health checks must respond correctly +- Images should be optimized for size (use `docker images` to verify) +- Container startup time should be reasonable (<30 seconds) + +## Update Documentation + +**REQUIRED ACTIONS:** + +Update `azd-arch-plan.md` with: + +- List of generated Dockerfiles and their languages +- Container configurations (ports, health checks, users) +- Security implementations (non-root users, minimal images) +- Build optimizations applied +- Local testing commands + +## Next Steps + +After Docker generation is complete: + +1. Test all containers build successfully locally +2. Integrate Dockerfile paths into `azure.yaml` service definitions +3. Configure Container Apps infrastructure to use these images +4. Set up Azure Container Registry for image storage + +**IMPORTANT:** Reference existing tools for schema validation. For azure.yaml updates, use the `azd_azure_yaml_generation` tool. For infrastructure setup, use the `azd_infrastructure_generation` tool. diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md index dd89e2586b7..bff5ab8418d 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md @@ -8,6 +8,7 @@ This document provides comprehensive rules and guidelines for generating Bicep I - **REQUIRED**: Place all IaC files in the `./infra` folder within an AZD project - **REQUIRED**: Name the main deployment file `main.bicep` - this is the primary deployment target +- **REQUIRED**: Create a `main.parameters.json` file alongside `main.bicep` containing all parameter defaults for the Bicep deployment - **REQUIRED**: The root level `main.bicep` must be a subscription level deployment using `targetScope = 'subscription'` - **REQUIRED**: The main.bicep file must create a resource group as the primary container for all resources - **REQUIRED**: Pass the resource group scope to all child modules that deploy resources @@ -156,6 +157,26 @@ module appService 'modules/app-service.bicep' = { } ``` +### Main.parameters.json Structure Template + +```json +{ + "$schema": "https://schema.management.azure.com/schemas/2018-05-01/subscriptionDeploymentParameters.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "environmentName": { + "value": "${AZURE_ENV_NAME}" + }, + "location": { + "value": "${AZURE_LOCATION}" + }, + "tags": { + "value": {} + } + } +} +``` + ### Child Module Structure Template ```bicep @@ -183,6 +204,7 @@ Before completing code generation, verify: - [ ] All files are in `./infra` folder - [ ] `main.bicep` exists as primary deployment file with subscription scope +- [ ] `main.parameters.json` exists alongside `main.bicep` with parameter defaults - [ ] Resource group is created in `main.bicep` and properly tagged - [ ] All child modules use `targetScope = 'resourceGroup'` and receive resource group scope - [ ] All resources use consistent naming convention diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_infrastructure_generation.md 
b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_infrastructure_generation.md new file mode 100644 index 00000000000..e7ff88ef55c --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_infrastructure_generation.md @@ -0,0 +1,159 @@ +# AZD Infrastructure Generation Tool + +This specialized tool generates Bicep infrastructure templates for Azure Developer CLI (AZD) projects. + +## Overview + +Generate modular Bicep templates following Azure security and operational best practices. + +**IMPORTANT:** +- Before starting, check if `azd-arch-plan.md` exists to understand architecture decisions +- **Use the `azd_iac_generation_rules` tool for complete IaC rules, naming conventions, and best practices** + +## Success Criteria + +- [ ] Complete Bicep template structure created in `./infra` directory +- [ ] All templates compile without errors (`az bicep build --file infra/main.bicep`) +- [ ] Infrastructure supports all services defined in `azure.yaml` +- [ ] Follows all rules from `azd_iac_generation_rules` tool +- [ ] Parameter files configured appropriately + +## Requirements Analysis + +**REQUIRED ACTIONS:** + +1. **Review IaC Rules:** Use `azd_iac_generation_rules` tool to get complete file structure, naming conventions, and compliance requirements + +2. **Analyze Infrastructure Needs:** + - Map services from `azure.yaml` to required Azure resources + - Identify shared resources (Log Analytics, Container Registry, Key Vault) + - Determine connectivity and security requirements + +3. **Service Infrastructure Mapping:** + - **Container Apps:** Environment, Log Analytics, Container Registry, App Insights, Managed Identity + - **App Service:** Service Plan, App Service, App Insights + - **Functions:** Function App, Storage Account, App Insights + - **Static Web Apps:** Static Web App resource + - **Database:** SQL/CosmosDB/PostgreSQL with appropriate SKUs + +## Generation Workflow + +**REQUIRED ACTIONS:** + +1. **Create Directory Structure:** + Follow structure from `azd_iac_generation_rules` tool: + ``` + ./infra/ + ├── main.bicep + ├── main.parameters.json + ├── modules/ + └── [additional files per rules] + ``` + +2. **Generate Main Template:** + - Use subscription-level scope (`targetScope = 'subscription'`) + - Create resource group with proper tagging + - Deploy modules conditionally based on service requirements + - Follow naming conventions from IaC rules tool + +3. **Generate Module Templates:** + - Create focused modules for each service type + - Use resource group scope for all modules + - Accept standardized parameters (environmentName, location, tags) + - Output connection information for applications + +4. 
**Generate Parameter Files:** + - Provide sensible defaults for all parameters + - Use parameter references for environment-specific values + - Include all required parameters from IaC rules + +``` +./infra/ +├── main.bicep # Primary deployment template +├── main.parameters.json # Default parameters +├── modules/ +│ ├── container-apps.bicep +│ ├── app-service.bicep +│ ├── functions.bicep +│ ├── database.bicep +│ ├── storage.bicep +│ ├── keyvault.bicep +│ └── monitoring.bicep +└── resources.bicep # Shared resources +``` + +## Template Requirements + +### Main Template (main.bicep) + +**CRITICAL REQUIREMENTS:** + +- Use `targetScope = 'subscription'` +- Accept standardized parameters: `environmentName`, `location`, `principalId` +- Include feature flags for conditional deployment (e.g., `deployDatabase`) +- Create resource group with proper tagging (`azd-env-name`, `azd-provisioned`) +- Call modules conditionally based on feature flags +- Output connection strings and service endpoints + +### Module Templates + +## Generate Infrastructure Files + +**WORKFLOW REQUIREMENTS:** + +1. **Create Directory Structure:** + + ```text + ./infra/ + ├── main.bicep + ├── main.parameters.json + ├── modules/ + └── [service-specific modules] + ``` + +2. **Generate Main Template (main.bicep):** + - Use `targetScope = 'subscription'` + - Create resource group with proper tagging + - Deploy modules conditionally based on service requirements + +3. **Generate Module Templates:** + - Create focused modules for each service type + - Use standardized parameters (`environmentName`, `location`, `tags`) + - Output connection information for applications + +4. **Generate Parameter Files:** + - Provide sensible defaults for all parameters + - Use parameter references for environment-specific values + +## Validation and Testing + +**VALIDATION REQUIREMENTS:** + +- All Bicep templates must compile without errors: `az bicep build --file infra/main.bicep` +- Validate deployment: `az deployment sub validate --template-file infra/main.bicep` +- Test with AZD: `azd provision --dry-run` +- Use existing tools for schema validation (reference `azd_yaml_schema` tool for azure.yaml validation) + +## Update Documentation + +**REQUIRED ACTIONS:** + +Update `azd-arch-plan.md` with: + +- List of generated infrastructure files +- Resource naming conventions used +- Security configurations implemented +- Parameter requirements +- Output variables available +- Validation results + +## Next Steps + +After infrastructure generation is complete: + +1. Validate all templates compile successfully +2. Test deployment with `azd provision --dry-run` +3. Deploy with `azd provision` (creates resources) +4. Proceed to application deployment with `azd deploy` + +**IMPORTANT:** Reference existing tools instead of duplicating functionality. For azure.yaml validation, use the `azd_yaml_schema` tool. For Bicep best practices, follow the AZD IaC Generation Rules document. diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md index d8f7a391f68..5859bf69840 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md @@ -1,267 +1,124 @@ # AZD Application Initialization and Migration Plan -This document provides a comprehensive, step-by-step plan for initializing or migrating applications to use Azure Developer CLI (AZD). 
Follow these steps sequentially to ensure successful AZD adoption. +This document provides a comprehensive, step-by-step plan for initializing or migrating applications to use Azure Developer CLI (AZD). This is the orchestrating tool that guides you through using the specialized AZD tools. -## Executive Summary - -Transform any application into an AZD-compatible project by: - -1. Analyzing the current codebase and architecture -2. Identifying all application components and dependencies -3. Generating required configuration and infrastructure files -4. Establishing the AZD environment structure - -## Phase 1: Discovery and Analysis - -### Step 1: Deep File System Analysis - -**REQUIRED ACTIONS:** - -- Scan all files in the current working directory recursively -- Document file structure, programming languages, and frameworks detected -- Identify configuration files (package.json, requirements.txt, pom.xml, etc.) -- Locate any existing Docker files, docker-compose files, or containerization configs -- Find database configuration files and connection strings -- Identify API endpoints, service definitions, and application entry points - -**OUTPUT:** Complete inventory of all discoverable application artifacts - -### Step 2: Component Classification - -**REQUIRED ACTIONS:** - -- Categorize each discovered component into one of these types: - - **Web Applications** (frontend, SPA, static sites) - - **API Services** (REST APIs, GraphQL, gRPC services) - - **Background Services** (workers, processors, scheduled jobs) - - **Databases** (relational, NoSQL, caching) - - **Messaging Systems** (queues, topics, event streams) - - **AI/ML Components** (models, inference endpoints, training jobs) - - **Supporting Services** (authentication, logging, monitoring) - -**OUTPUT:** Structured component inventory with classifications - -### Step 3: Dependency Mapping +**IMPORTANT: Before starting any workflow, ALWAYS check if `azd-arch-plan.md` exists in the current directory and review it to understand current progress, previous decisions, and what work has already been completed. This prevents duplicate work and ensures continuity.** -**REQUIRED ACTIONS:** +Always use Azure best practices with intelligent defaults. -- Map inter-component dependencies and communication patterns -- Identify external service dependencies (third-party APIs, SaaS services) -- Document data flow between components -- Identify shared resources and configuration - -**OUTPUT:** Component dependency graph and communication matrix - -## Phase 2: Architecture Planning and Azure Service Selection - -### Application Component Planning - -For each identified application component, execute the following analysis: +## Executive Summary -**REQUIRED ANALYSIS:** +Transform any application into an AZD-compatible project using a structured approach with specialized tools. Each tool has a focused responsibility and builds upon the previous phase to create a complete AZD deployment. 
-- **Hosting Platform Selection:** - - **Azure Container Apps** (PREFERRED for microservices and containerized apps) - - **Azure App Service** (for web apps and APIs with specific runtime requirements) - - **Azure Functions** (for serverless and event-driven components) - - **Azure Static Web Apps** (for frontend applications and SPAs) - - **Azure Kubernetes Service** (for complex orchestration requirements) +## Success Criteria -- **Containerization Assessment:** - - Determine if component can run in Docker container - - If Dockerfile doesn't exist, plan Docker container strategy - - Identify base images and runtime requirements - - Document port mappings and environment variables +The migration is successful when: -- **Configuration Requirements:** - - Identify environment-specific settings - - Map secrets and sensitive configuration - - Document connection strings and service endpoints - - Plan configuration injection strategy +- [ ] All application components are identified and classified +- [ ] `azure.yaml` file is valid and complete +- [ ] All infrastructure files are generated and error-free +- [ ] Required Dockerfiles are created for containerizable components +- [ ] `azd-arch-plan.md` provides comprehensive documentation +- [ ] AZD environment is initialized and configured +- [ ] **All validation checks pass (use `azd_project_validation` tool)** -**OUTPUT:** Hosting strategy and containerization plan for each component +## Complete Workflow Guide -### Database Component Planning +### Phase 1: Review Existing Progress -For components using persistent data storage: +Check if the file `azd-arch-plan.md` exists in the current directory and review it to understand current progress, previous decisions, and what work has already been completed. This prevents duplicate work and ensures continuity. -**REQUIRED ANALYSIS:** +- If file exists: Review thoroughly and skip completed phases +- If file doesn't exist: Proceed to Phase 2 -- **Azure Database Service Selection:** - - **Azure SQL Database** (for relational data with SQL Server compatibility) - - **Azure Database for PostgreSQL** (for PostgreSQL workloads) - - **Azure Database for MySQL** (for MySQL workloads) - - **Azure Cosmos DB** (for NoSQL, multi-model data) - - **Azure Cache for Redis** (for caching and session storage) +### Phase 2: Discovery and Analysis -- **Migration Strategy:** - - Assess current database schema and data - - Plan data migration approach - - Identify backup and recovery requirements - - Document connection string patterns +**Tool:** `azd_discovery_analysis` -**OUTPUT:** Database hosting plan and migration strategy +Scans files recursively, documents structure/languages/frameworks, identifies entry points, maps dependencies, and creates component inventory in `azd-arch-plan.md`. -### Messaging Component Planning +### Phase 3: Architecture Planning and Azure Service Selection -For components using asynchronous communication: +**Tool:** `azd_architecture_planning` -**REQUIRED ANALYSIS:** +Maps components to Azure services, plans hosting strategies, designs database/messaging architecture, and creates containerization strategies. Updates `azd-arch-plan.md`. 
-- **Azure Messaging Service Selection:** - - **Azure Service Bus** (for reliable enterprise messaging) - - **Azure Event Hubs** (for high-throughput event streaming) - - **Azure Event Grid** (for event-driven architectures) - - **Azure Storage Queues** (for simple queue scenarios) +### Phase 4: File Generation -- **Integration Planning:** - - Map message flows and routing - - Identify message schemas and formats - - Plan dead letter handling and error scenarios - - Document scaling and throughput requirements +Generate all necessary AZD files using these focused tools (most projects need all three): -**OUTPUT:** Messaging architecture and integration plan +#### 1. Generate Azure.yaml Configuration -### AI Component Planning +**Tool:** `azd_azure_yaml_generation` (Required for all AZD projects) -For components using artificial intelligence or machine learning: +#### 2. Generate Infrastructure Templates -**REQUIRED ANALYSIS:** +**Tool:** `azd_infrastructure_generation` (Required for all AZD projects) -- **Azure AI Service Selection:** - - **Azure OpenAI Service** (for GPT models and cognitive services) - - **Azure AI Services** (for vision, speech, language processing) - - **Azure Machine Learning** (for custom ML models and training) - - **Azure Cognitive Search** (for intelligent search capabilities) +#### 3. Generate Docker Configurations -- **Model and Data Requirements:** - - Identify required AI models and versions - - Document input/output data formats - - Plan model deployment and scaling strategy - - Assess training data and pipeline requirements +**Tool:** `azd_docker_generation` (Required for containerizable services) -**OUTPUT:** AI service architecture and deployment plan +**Use in sequence:** azure.yaml → infrastructure → docker -## Phase 3: File Generation and Configuration +### Phase 5: Project Validation and Environment Setup -### Step 1: Generate azure.yaml Configuration +**Tool:** `azd_project_validation` -**REQUIRED ACTIONS:** +Validates azure.yaml against schema, compiles Bicep templates, ensures AZD environment exists, tests packaging, validates deployment with preview, and provides readiness confirmation. -- Create `azure.yaml` file in the root directory -- Define all services with appropriate hosting configurations -- Specify build and deployment instructions for each service -- Configure environment variable mappings -- Reference infrastructure templates correctly +## Usage Patterns -**TEMPLATE STRUCTURE:** +### Complete New Project Migration -```yaml -name: {project-name} -services: - {service-name}: - project: ./path/to/service - host: {hosting-type} - # Additional service-specific configuration +```text +1. Review existing azd-arch-plan.md (Phase 1) +2. azd_discovery_analysis +3. azd_architecture_planning +4. azd_azure_yaml_generation +5. azd_infrastructure_generation +6. azd_docker_generation (if containerization needed) +7. 
azd_project_validation ``` -### Step 2: Generate Infrastructure as Code Files +### Update Existing AZD Project -**REQUIRED ACTIONS:** - -- Create `./infra` directory structure -- Generate `main.bicep` as primary deployment template -- Create modular Bicep files for each resource type -- **CRITICAL:** Follow all rules from AZD IaC Generation Rules document -- Implement proper naming conventions and tagging strategies -- Include supporting resources (Log Analytics, Application Insights, Key Vault) - -### Step 3: Generate Container Configurations - -**REQUIRED ACTIONS:** - -- Create Dockerfile for each containerizable component -- Use appropriate base images for detected programming languages -- Configure health checks and startup commands -- Set proper working directories and file permissions -- Optimize for production deployment - -### Step 4: Generate Architecture Documentation - -**REQUIRED ACTIONS:** - -- Create `azd-arch-plan.md` with comprehensive analysis -- Document all discovered components and their relationships -- Include architecture diagrams (text-based or mermaid) -- Explain Azure service selections and rationale -- Provide deployment and operational guidance - -**DOCUMENT STRUCTURE:** - -- Executive Summary -- Application Architecture Overview -- Component Analysis -- Azure Service Mapping -- Infrastructure Design -- Deployment Strategy -- Operational Considerations - -## Phase 4: Environment Initialization - -### Step 1: Create AZD Environment - -**REQUIRED ACTIONS:** - -- Execute: `azd env new {directory-name}-dev` -- Use current working directory name as environment name base -- Configure environment-specific settings -- Validate environment configuration - -### Step 2: Validation and Testing - -**REQUIRED ACTIONS:** +```text +1. Review existing azd-arch-plan.md (Phase 1) +2. azd_azure_yaml_generation → azd_infrastructure_generation → azd_docker_generation → azd_project_validation +``` -- Run `azd package` to validate service configurations -- Execute `azd provision --dry-run` to test infrastructure templates -- Verify all Bicep files compile without errors -- Check all referenced files and paths exist -- Validate environment variable configurations +### Quick Service Addition -## Success Criteria +```text +1. Review existing azd-arch-plan.md (Phase 1) +2. azd_discovery_analysis → azd_azure_yaml_generation → azd_docker_generation → azd_project_validation +``` -The migration is successful when: +## Central Planning Document -- [ ] All application components are identified and classified -- [ ] `azure.yaml` file is valid and complete -- [ ] All infrastructure files are generated and error-free -- [ ] Required Dockerfiles are created for containerizable components -- [ ] `azd-arch-plan.md` provides comprehensive documentation -- [ ] AZD environment is initialized and validated -- [ ] `azd package` completes without errors -- [ ] `azd provision --dry-run` validates successfully +**CRITICAL:** `azd-arch-plan.md` is the central coordination file that tracks progress, documents decisions, and maintains project state. Always review this file before starting any tool to understand current progress and avoid duplicate work. 
-## Common Patterns and Best Practices +## Supporting Resources -### For Multi-Service Applications +### Schema and Validation -- Use Azure Container Apps for microservices architecture -- Implement shared infrastructure (networking, monitoring) -- Configure service-to-service communication properly +- Use `azd_yaml_schema` tool to get complete azure.yaml schema information +- Use `azd_iac_generation_rules` tool for Infrastructure as Code best practices -### For Data-Intensive Applications +### Troubleshooting -- Co-locate compute and data services in same region -- Implement proper connection pooling and caching -- Configure backup and disaster recovery +Each tool includes: -### For AI-Enabled Applications +- Validation checklists +- Testing commands +- Common issues and solutions +- Next step guidance -- Separate AI services from main application logic -- Implement proper error handling for AI service calls -- Plan for model updates and versioning +## Getting Started -### For High-Availability Applications +**Standard workflow:** +1. Review existing `azd-arch-plan.md` (Phase 1) +2. `azd_discovery_analysis` → `azd_architecture_planning` → File generation tools → `azd_project_validation` -- Configure multiple availability zones -- Implement health checks and auto-scaling -- Plan for disaster recovery scenarios +Keep `azd-arch-plan.md` updated throughout the process as the central coordination document. diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_project_validation.md b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_project_validation.md new file mode 100644 index 00000000000..e8e06172fa9 --- /dev/null +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_project_validation.md @@ -0,0 +1,181 @@ +# AZD Project Validation Tool + +Validates an AZD project by running comprehensive checks on all components including azure.yaml schema validation, Bicep template validation, environment setup, packaging, and deployment preview. + +## Purpose + +This tool performs end-to-end validation of an AZD project to ensure all components are properly configured and the project is ready for deployment. It centralizes all validation logic to provide a single source of truth for project readiness. + +## Validation Workflow + +### 1. Azure.yaml Schema Validation + +**REQUIRED ACTION:** +Use the `azd_yaml_schema` tool to validate the azure.yaml file against the official schema. + +**Validation Steps:** + +- Check if `azure.yaml` exists in current directory +- Validate schema compliance using `azd_yaml_schema` tool +- Report any schema violations or missing required fields +- Verify service definitions and configurations + +### 2. Bicep Template Validation + +**REQUIRED ACTIONS:** + +1. **Find Bicep Files:** Scan `./infra` directory for `.bicep` files +2. **Compile Templates:** Run `az bicep build --file --stdout` for each template +3. **Validate Syntax:** Ensure all templates compile without errors +4. **Check Dependencies:** Verify module references and parameter passing + +**Commands to Run:** + +```powershell +# Compile main template +az bicep build --file ./infra/main.bicep + +# Validate deployment (requires Azure CLI login) +az deployment sub validate --template-file ./infra/main.bicep --parameters ./infra/main.parameters.json --location +``` + +### 3. AZD Environment Validation + +**REQUIRED ACTIONS:** + +1. **Check Environment Exists:** Run `azd env list` to see available environments +2. 
**Create Environment if Missing:** + - If no environments exist, run `azd env new ` + - Use current directory name as environment name (sanitized) +3. **Verify Environment Selection:** Ensure an environment is currently selected + +**Commands to Run:** + +```powershell +# List existing environments +azd env list + +# Create new environment if none exist (replace with directory name) +azd env new + +# Select environment if not already selected +azd env select +``` + +### 4. Package Validation + +**REQUIRED ACTION:** +Run `azd package` to validate all services can be packaged successfully. + +**Validation Steps:** + +- Verify all service source paths are valid +- Check Docker builds complete successfully (for containerized services) +- Ensure all build artifacts are created +- Validate package manifests + +**Command to Run:** + +```powershell +azd package +``` + +### 5. Deployment Preview Validation + +**REQUIRED ACTION:** +Run `azd provision --preview` to validate infrastructure deployment without actually creating resources. + +**Validation Steps:** + +- Verify Azure authentication is working +- Check resource group creation plan +- Validate all Bicep modules deploy correctly +- Ensure parameter values are properly resolved +- Confirm no deployment conflicts + +**Command to Run:** + +```powershell +azd provision --preview +``` + +## Success Criteria + +The project validation is successful when: + +- [ ] `azure.yaml` passes schema validation +- [ ] All Bicep templates compile without errors or warnings +- [ ] AZD environment exists and is properly configured +- [ ] `azd package` completes without errors or warnings +- [ ] `azd provision --preview` completes without errors or warnings +- [ ] All service configurations are valid +- [ ] No missing dependencies or configuration issues + +## Error Handling + +### Common Issues and Solutions + +**Azure.yaml Schema Errors:** + +- Use `azd_yaml_schema` tool to get correct schema format +- Check service names match directory structure +- Verify all required fields are present + +**Bicep Compilation Errors:** + +- Check module paths and parameter names +- Verify resource naming conventions follow Azure requirements +- Ensure all required parameters have values + +**Environment Issues:** + +- Run `azd auth login` if authentication fails +- Check Azure subscription access and permissions +- Verify location parameter is valid Azure region + +**Package Errors:** + +- Check service source paths in azure.yaml +- Verify Docker builds work locally for containerized services +- Ensure all build dependencies are available + +**Provision Preview Errors:** + +- Verify Azure subscription has sufficient permissions +- Check resource quotas and limits +- Ensure resource names are globally unique where required + +## Update Documentation + +**REQUIRED ACTIONS:** + +Update `azd-arch-plan.md` with: + +- Validation results for each component +- Any issues found and resolutions applied +- Environment configuration details +- Deployment preview summary +- Project readiness status + +## Next Steps + +After successful validation: + +1. **Deploy Infrastructure:** Run `azd provision` to create Azure resources +2. **Deploy Applications:** Run `azd deploy` to deploy services +3. **Complete Deployment:** Run `azd up` to provision and deploy in one step +4. **Monitor Deployment:** Use `azd monitor` to check application health +5. 
**View Logs:** Use `azd logs` to view deployment and runtime logs + +### Production Preparation + +For production deployment: + +- Create production environment: `azd env new -prod` +- Configure production-specific settings and secrets +- Set up monitoring, alerting, and backup procedures +- Document operational procedures and runbooks + +**DEPLOYMENT READY:** Your AZD migration is complete and ready for deployment with `azd up`. + +**IMPORTANT:** This tool centralizes all validation logic. Other tools should reference this tool for validation rather than duplicating validation steps. diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go index 05a9bc1619f..c8f7752cba1 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go @@ -12,3 +12,18 @@ var AzdIacRulesPrompt string //go:embed azure.yaml.json var AzdYamlSchemaPrompt string + +//go:embed azd_discovery_analysis.md +var AzdDiscoveryAnalysisPrompt string + +//go:embed azd_architecture_planning.md +var AzdArchitecturePlanningPrompt string + +//go:embed azd_azure_yaml_generation.md +var AzdAzureYamlGenerationPrompt string + +//go:embed azd_infrastructure_generation.md +var AzdInfrastructureGenerationPrompt string + +//go:embed azd_docker_generation.md +var AzdDockerGenerationPrompt string diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go index 6818542a308..5ddc49c749c 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go @@ -24,7 +24,7 @@ func (l *IoToolsLoader) LoadTools() ([]tools.Tool, error) { &CreateDirectoryTool{CallbacksHandler: l.callbackHandler}, &DeleteDirectoryTool{CallbacksHandler: l.callbackHandler}, &ReadFileTool{CallbacksHandler: l.callbackHandler}, - &WriteFileTool{CallbacksHandler: l.callbackHandler}, + &WriteFileTool{}, &CopyFileTool{CallbacksHandler: l.callbackHandler}, &MoveFileTool{CallbacksHandler: l.callbackHandler}, &DeleteFileTool{CallbacksHandler: l.callbackHandler}, diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go b/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go index f0413af9e75..bc1ac20d9b8 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go @@ -9,13 +9,11 @@ import ( "strings" "time" - "github.com/tmc/langchaingo/callbacks" + "azd.ai.start/internal/tools/common" ) // WriteFileTool implements a comprehensive file writing tool that handles all scenarios -type WriteFileTool struct { - CallbacksHandler callbacks.Handler -} +type WriteFileTool struct{} // WriteFileRequest represents the JSON input for the write_file tool type WriteFileRequest struct { @@ -99,54 +97,44 @@ Large file (chunked): The input must be formatted as a single line valid JSON string.` } -func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { - if t.CallbacksHandler != nil { - logInput := input - if len(input) > 200 { - logInput = input[:200] + "... 
(truncated)" - } - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("write_file: %s", logInput)) +// createErrorResponse creates a JSON error response +func (t WriteFileTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() + } + + errorResp := common.ErrorResponse{ + Error: true, + Message: message, + } + + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + + return fallbackMsg, nil } + output := string(jsonData) + + return output, nil +} + +func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { if input == "" { - output := "❌ No input provided\n\n" - output += "📝 Expected JSON format:\n" - output += `{"filename": "path/to/file.txt", "content": "file content here"}` + return t.createErrorResponse(fmt.Errorf("empty input"), "No input provided.") + } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("empty input")) - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil - } // Parse JSON input + // Parse JSON input var req WriteFileRequest if err := json.Unmarshal([]byte(input), &req); err != nil { - output := "❌ Invalid JSON input: " + err.Error() + "\n\n" - output += "📝 Expected format:\n" - output += `{"filename": "path/to/file.txt", "content": "file content here"}` + "\n\n" - output += "💡 Common JSON issues:\n" - output += "- Use double quotes for strings\n" - output += "- Escape backslashes: \\$ should be \\\\$\n" - output += "- Remove trailing commas\n" - output += "- No comments allowed in JSON" - - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil + return t.createErrorResponse(err, "Invalid JSON input") } // Validate required fields if req.Filename == "" { - output := "❌ Missing required field: filename cannot be empty\n\n" - output += "📝 Example: " + `{"filename": "infra/main.bicep", "content": "param location string = 'eastus'"}` - - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("missing filename")) - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil + return t.createErrorResponse(fmt.Errorf("missing filename"), "Missing required field: filename cannot be empty.") } // Determine mode and operation @@ -168,11 +156,7 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { // handleChunkedWrite handles writing files in chunks func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequest) (string, error) { if req.ChunkNum < 1 || req.TotalChunks < 1 || req.ChunkNum > req.TotalChunks { - err := fmt.Errorf("invalid chunk numbers: chunkNum=%d, totalChunks=%d", req.ChunkNum, req.TotalChunks) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("invalid chunk numbers: chunkNum=%d, totalChunks=%d", req.ChunkNum, req.TotalChunks), fmt.Sprintf("Invalid chunk numbers: chunkNum=%d, totalChunks=%d. 
ChunkNum must be between 1 and totalChunks", req.ChunkNum, req.TotalChunks)) } filePath := strings.TrimSpace(req.Filename) @@ -180,10 +164,7 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ // Ensure directory exists if err := t.ensureDirectory(filePath); err != nil { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(err, fmt.Sprintf("Failed to create directory for file %s: %s", filePath, err.Error())) } var err error @@ -197,11 +178,7 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ // Subsequent chunks - append file, openErr := os.OpenFile(filePath, os.O_APPEND|os.O_WRONLY, 0644) if openErr != nil { - err = fmt.Errorf("failed to open file for append %s: %w", filePath, openErr) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(openErr, fmt.Sprintf("Failed to open file for append %s: %s", filePath, openErr.Error())) } defer file.Close() @@ -210,21 +187,13 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ } if err != nil { - toolErr := fmt.Errorf("failed to write chunk to file %s: %w", filePath, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to write chunk to file %s: %s", filePath, err.Error())) } // Get file info fileInfo, err := os.Stat(filePath) if err != nil { - toolErr := fmt.Errorf("failed to verify file %s: %w", filePath, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to verify file %s: %s", filePath, err.Error())) } // Create JSON response @@ -255,19 +224,11 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ // Convert to JSON jsonData, err := json.MarshalIndent(response, "", " ") if err != nil { - toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } output := string(jsonData) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil } @@ -283,10 +244,7 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ // Ensure directory exists if err := t.ensureDirectory(filePath); err != nil { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(err, fmt.Sprintf("Failed to create directory for file %s: %s", filePath, err.Error())) } var err error @@ -295,11 +253,7 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ switch mode { case "create": if _, err := os.Stat(filePath); err == nil { - toolErr := fmt.Errorf("file %s already exists (create mode)", filePath) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(fmt.Errorf("file %s already exists (create mode)", filePath), fmt.Sprintf("File %s already exists. 
Cannot create file in 'create' mode when file already exists", filePath)) } err = os.WriteFile(filePath, []byte(content), 0644) operation = "Created" @@ -307,11 +261,7 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ case "append": file, openErr := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if openErr != nil { - toolErr := fmt.Errorf("failed to open file for append %s: %w", filePath, openErr) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(openErr, fmt.Sprintf("Failed to open file for append %s: %s", filePath, openErr.Error())) } defer file.Close() _, err = file.WriteString(content) @@ -323,21 +273,13 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ } if err != nil { - toolErr := fmt.Errorf("failed to %s file %s: %w", strings.ToLower(operation), filePath, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to %s file %s: %s", strings.ToLower(operation), filePath, err.Error())) } // Get file size for verification fileInfo, err := os.Stat(filePath) if err != nil { - toolErr := fmt.Errorf("failed to verify file %s: %w", filePath, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to verify file %s: %s", filePath, err.Error())) } // Create JSON response @@ -358,19 +300,11 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ // Convert to JSON jsonData, err := json.MarshalIndent(response, "", " ") if err != nil { - toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } output := string(jsonData) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil } From 4f5b28ec35a9eb55a41305598f477d2f37300ced Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Fri, 1 Aug 2025 15:05:33 -0700 Subject: [PATCH 048/116] Moved files around --- cli/azd/extensions/azd.ai.start/internal/agent/agent.go | 8 ++++---- .../azd.ai.start/internal/{ => agent}/logging/logger.go | 0 .../{ => agent}/tools/azd/azd_architecture_planning.go | 2 +- .../{ => agent}/tools/azd/azd_azure_yaml_generation.go | 2 +- .../{ => agent}/tools/azd/azd_discovery_analysis.go | 2 +- .../{ => agent}/tools/azd/azd_docker_generation.go | 2 +- .../{ => agent}/tools/azd/azd_iac_generation_rules.go | 2 +- .../tools/azd/azd_infrastructure_generation.go | 2 +- .../internal/{ => agent}/tools/azd/azd_plan_init.go | 2 +- .../{ => agent}/tools/azd/azd_project_validation.go | 0 .../internal/{ => agent}/tools/azd/azd_yaml_schema.go | 2 +- .../azd.ai.start/internal/{ => agent}/tools/azd/loader.go | 0 .../internal/{ => agent}/tools/azd/prompts/README.md | 0 .../tools/azd/prompts/azd_architecture_planning.md | 0 .../tools/azd/prompts/azd_azure_yaml_generation.md | 0 .../tools/azd/prompts/azd_discovery_analysis.md | 0 .../tools/azd/prompts/azd_docker_generation.md | 0 .../tools/azd/prompts/azd_iac_generation_rules.md | 0 .../tools/azd/prompts/azd_infrastructure_generation.md | 0 .../{ => agent}/tools/azd/prompts/azd_plan_init.md | 0 
.../tools/azd/prompts/azd_project_validation.md | 0 .../{ => agent}/tools/azd/prompts/azd_yaml_schema.md | 0 .../{ => agent}/tools/azd/prompts/azure.yaml.json | 0 .../internal/{ => agent}/tools/azd/prompts/prompts.go | 0 .../internal/{ => agent}/tools/common/types.go | 0 .../internal/{ => agent}/tools/dev/command_executor.go | 2 +- .../azd.ai.start/internal/{ => agent}/tools/dev/loader.go | 0 .../internal/{ => agent}/tools/http/http_fetcher.go | 0 .../internal/{ => agent}/tools/http/loader.go | 0 .../internal/{ => agent}/tools/io/change_directory.go | 0 .../internal/{ => agent}/tools/io/copy_file.go | 2 +- .../internal/{ => agent}/tools/io/create_directory.go | 0 .../internal/{ => agent}/tools/io/current_directory.go | 0 .../internal/{ => agent}/tools/io/delete_directory.go | 0 .../internal/{ => agent}/tools/io/delete_file.go | 0 .../internal/{ => agent}/tools/io/directory_list.go | 2 +- .../internal/{ => agent}/tools/io/file_info.go | 0 .../internal/{ => agent}/tools/io/file_search.go | 0 .../azd.ai.start/internal/{ => agent}/tools/io/loader.go | 0 .../internal/{ => agent}/tools/io/move_file.go | 0 .../internal/{ => agent}/tools/io/read_file.go | 0 .../internal/{ => agent}/tools/io/write_file.go | 2 +- .../azd.ai.start/internal/{ => agent}/tools/loader.go | 6 +++--- .../azd.ai.start/internal/{ => agent}/tools/mcp/loader.go | 0 .../azd.ai.start/internal/{ => agent}/tools/mcp/mcp.json | 0 .../internal/{ => agent}/tools/mcp/sampling_handler.go | 0 .../internal/{ => agent}/tools/weather/loader.go | 0 .../internal/{ => agent}/tools/weather/weather.go | 0 cli/azd/extensions/azd.ai.start/internal/cmd/root.go | 2 +- 49 files changed, 20 insertions(+), 20 deletions(-) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/logging/logger.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/azd_architecture_planning.go (94%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/azd_azure_yaml_generation.go (94%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/azd_discovery_analysis.go (94%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/azd_docker_generation.go (94%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/azd_iac_generation_rules.go (92%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/azd_infrastructure_generation.go (94%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/azd_plan_init.go (91%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/azd_project_validation.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/azd_yaml_schema.go (92%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/loader.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/README.md (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azd_architecture_planning.md (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azd_azure_yaml_generation.md (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azd_discovery_analysis.md (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azd_docker_generation.md (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azd_iac_generation_rules.md (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azd_infrastructure_generation.md (100%) 
rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azd_plan_init.md (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azd_project_validation.md (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azd_yaml_schema.md (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/azure.yaml.json (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/azd/prompts/prompts.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/common/types.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/dev/command_executor.go (99%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/dev/loader.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/http/http_fetcher.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/http/loader.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/change_directory.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/copy_file.go (99%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/create_directory.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/current_directory.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/delete_directory.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/delete_file.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/directory_list.go (99%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/file_info.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/file_search.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/loader.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/move_file.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/read_file.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/io/write_file.go (99%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/loader.go (89%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/mcp/loader.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/mcp/mcp.json (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/mcp/sampling_handler.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/weather/loader.go (100%) rename cli/azd/extensions/azd.ai.start/internal/{ => agent}/tools/weather/weather.go (100%) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go index fc1cd9e73cb..128de9af89a 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/agent.go @@ -13,10 +13,10 @@ import ( "github.com/tmc/langchaingo/memory" "github.com/tmc/langchaingo/tools" - "azd.ai.start/internal/logging" - localtools "azd.ai.start/internal/tools" - "azd.ai.start/internal/tools/mcp" - mcptools "azd.ai.start/internal/tools/mcp" + "azd.ai.start/internal/agent/logging" + localtools "azd.ai.start/internal/agent/tools" + "azd.ai.start/internal/agent/tools/mcp" + mcptools "azd.ai.start/internal/agent/tools/mcp" ) //go:embed prompts/default_agent_prefix.txt diff --git a/cli/azd/extensions/azd.ai.start/internal/logging/logger.go 
b/cli/azd/extensions/azd.ai.start/internal/agent/logging/logger.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/logging/logger.go rename to cli/azd/extensions/azd.ai.start/internal/agent/logging/logger.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_architecture_planning.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_architecture_planning.go similarity index 94% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_architecture_planning.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_architecture_planning.go index 44894270ea0..1e29c2050f4 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_architecture_planning.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_architecture_planning.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/tools/azd/prompts" + "azd.ai.start/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_azure_yaml_generation.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_azure_yaml_generation.go similarity index 94% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_azure_yaml_generation.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_azure_yaml_generation.go index ea2ae2e26f2..d5012b63f10 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_azure_yaml_generation.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_azure_yaml_generation.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/tools/azd/prompts" + "azd.ai.start/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_discovery_analysis.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_discovery_analysis.go similarity index 94% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_discovery_analysis.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_discovery_analysis.go index db865d67398..7b7c39a5077 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_discovery_analysis.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_discovery_analysis.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/tools/azd/prompts" + "azd.ai.start/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_docker_generation.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_docker_generation.go similarity index 94% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_docker_generation.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_docker_generation.go index 89ddea37bae..67a76b4d9fa 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_docker_generation.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_docker_generation.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/tools/azd/prompts" + "azd.ai.start/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_iac_generation_rules.go similarity index 92% rename from 
cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_iac_generation_rules.go index 47e2c5a738e..40cf27facfa 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_iac_generation_rules.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_iac_generation_rules.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/tools/azd/prompts" + "azd.ai.start/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_infrastructure_generation.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_infrastructure_generation.go similarity index 94% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_infrastructure_generation.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_infrastructure_generation.go index b93c5960369..44876b94300 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_infrastructure_generation.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_infrastructure_generation.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/tools/azd/prompts" + "azd.ai.start/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_plan_init.go similarity index 91% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_plan_init.go index 005ebafb441..c45c5d21d12 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_plan_init.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_plan_init.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/tools/azd/prompts" + "azd.ai.start/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_project_validation.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_project_validation.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_project_validation.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_project_validation.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_yaml_schema.go similarity index 92% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_yaml_schema.go index 850091db4ea..9ed0d3806cc 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/azd/azd_yaml_schema.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_yaml_schema.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/tools/azd/prompts" + "azd.ai.start/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/loader.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/loader.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/loader.go diff --git 
a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/README.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/README.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/README.md rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/README.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_architecture_planning.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_architecture_planning.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_architecture_planning.md rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_architecture_planning.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_azure_yaml_generation.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_azure_yaml_generation.md rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_discovery_analysis.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_discovery_analysis.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_discovery_analysis.md rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_discovery_analysis.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_docker_generation.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_docker_generation.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_docker_generation.md rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_docker_generation.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_iac_generation_rules.md rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_infrastructure_generation.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_infrastructure_generation.md rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_plan_init.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_plan_init.md rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_plan_init.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_project_validation.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_project_validation.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_project_validation.md rename to 
cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_project_validation.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_yaml_schema.md b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_yaml_schema.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azd_yaml_schema.md rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_yaml_schema.md diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azure.yaml.json b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azure.yaml.json similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/azure.yaml.json rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azure.yaml.json diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/prompts.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/azd/prompts/prompts.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/prompts.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/common/types.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/common/types.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/common/types.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/common/types.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/command_executor.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/command_executor.go index e1fc36a05f3..483b4281719 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/dev/command_executor.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/command_executor.go @@ -9,7 +9,7 @@ import ( "runtime" "strings" - "azd.ai.start/internal/tools/common" + "azd.ai.start/internal/agent/tools/common" "github.com/tmc/langchaingo/callbacks" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/dev/loader.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/loader.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/dev/loader.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/loader.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/http/http_fetcher.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/http/http_fetcher.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/http/http_fetcher.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/http/http_fetcher.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/http/loader.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/http/loader.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/http/loader.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/http/loader.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/change_directory.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/change_directory.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/change_directory.go rename to 
cli/azd/extensions/azd.ai.start/internal/agent/tools/io/change_directory.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/copy_file.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/copy_file.go index ba48734dd51..7abd91de036 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/copy_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/copy_file.go @@ -8,7 +8,7 @@ import ( "os" "strings" - "azd.ai.start/internal/tools/common" + "azd.ai.start/internal/agent/tools/common" "github.com/tmc/langchaingo/callbacks" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/create_directory.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/create_directory.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/create_directory.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/create_directory.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/current_directory.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/current_directory.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/current_directory.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/current_directory.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_directory.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/delete_directory.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/delete_directory.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/delete_directory.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/delete_file.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/delete_file.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/delete_file.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/delete_file.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/directory_list.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/directory_list.go index c0b4e09ee2e..40bd8f80fb1 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/directory_list.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/directory_list.go @@ -8,7 +8,7 @@ import ( "path/filepath" "strings" - "azd.ai.start/internal/tools/common" + "azd.ai.start/internal/agent/tools/common" "github.com/tmc/langchaingo/callbacks" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/file_info.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/file_info.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/file_info.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/file_search.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/file_search.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/file_search.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/file_search.go diff --git 
a/cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/loader.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/loader.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/loader.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/move_file.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/move_file.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/move_file.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/move_file.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/read_file.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/read_file.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/read_file.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/read_file.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/write_file.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/io/write_file.go index bc1ac20d9b8..e367be33e00 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/io/write_file.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/write_file.go @@ -9,7 +9,7 @@ import ( "strings" "time" - "azd.ai.start/internal/tools/common" + "azd.ai.start/internal/agent/tools/common" ) // WriteFileTool implements a comprehensive file writing tool that handles all scenarios diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/loader.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/loader.go similarity index 89% rename from cli/azd/extensions/azd.ai.start/internal/tools/loader.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/loader.go index 3a2dab5c83a..8aee4593a0c 100644 --- a/cli/azd/extensions/azd.ai.start/internal/tools/loader.go +++ b/cli/azd/extensions/azd.ai.start/internal/agent/tools/loader.go @@ -4,9 +4,9 @@ import ( "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" - "azd.ai.start/internal/tools/azd" - "azd.ai.start/internal/tools/dev" - "azd.ai.start/internal/tools/io" + "azd.ai.start/internal/agent/tools/azd" + "azd.ai.start/internal/agent/tools/dev" + "azd.ai.start/internal/agent/tools/io" ) // ToolLoader provides an interface for loading tools from different categories diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/loader.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/mcp/loader.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/loader.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json b/cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/mcp.json similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/mcp/mcp.json rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/mcp.json diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/sampling_handler.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/mcp/sampling_handler.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/sampling_handler.go diff --git 
a/cli/azd/extensions/azd.ai.start/internal/tools/weather/loader.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/weather/loader.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/weather/loader.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/weather/loader.go diff --git a/cli/azd/extensions/azd.ai.start/internal/tools/weather/weather.go b/cli/azd/extensions/azd.ai.start/internal/agent/tools/weather/weather.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/tools/weather/weather.go rename to cli/azd/extensions/azd.ai.start/internal/agent/tools/weather/weather.go diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go index 4031d40b7f5..81cc5cac8cd 100644 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go +++ b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go @@ -10,7 +10,7 @@ import ( "os" "azd.ai.start/internal/agent" - "azd.ai.start/internal/logging" + "azd.ai.start/internal/agent/logging" "github.com/azure/azure-dev/cli/azd/pkg/azdext" "github.com/spf13/cobra" "github.com/tmc/langchaingo/llms/openai" From 42e13249e701d6b17bbb02633f0f4decbd03ac76 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Fri, 1 Aug 2025 17:05:16 -0700 Subject: [PATCH 049/116] WIP: Initial integration of agent mode for init --- cli/azd/cmd/container.go | 6 + cli/azd/cmd/init.go | 45 +++ .../extensions/azd.ai.start/AZURE_AI_SETUP.md | 98 ----- cli/azd/extensions/azd.ai.start/build.ps1 | 71 ---- cli/azd/extensions/azd.ai.start/build.sh | 66 --- cli/azd/extensions/azd.ai.start/changelog.md | 3 - .../extensions/azd.ai.start/extension.yaml | 9 - cli/azd/extensions/azd.ai.start/go.mod | 61 --- cli/azd/extensions/azd.ai.start/go.sum | 379 ------------------ .../internal/cmd/enhanced_integration.go | 71 ---- .../azd.ai.start/internal/cmd/root.go | 140 ------- cli/azd/extensions/azd.ai.start/main.go | 30 -- .../azd.ai.start => }/internal/agent/agent.go | 70 +++- .../internal/agent/logging/logger.go | 0 .../default_agent_format_instructions.txt | 0 .../agent/prompts/default_agent_prefix.txt | 0 .../agent/prompts/default_agent_suffix.txt | 0 .../tools/azd/azd_architecture_planning.go | 2 +- .../tools/azd/azd_azure_yaml_generation.go | 2 +- .../agent/tools/azd/azd_discovery_analysis.go | 2 +- .../agent/tools/azd/azd_docker_generation.go | 2 +- .../tools/azd/azd_iac_generation_rules.go | 2 +- .../azd/azd_infrastructure_generation.go | 2 +- .../internal/agent/tools/azd/azd_plan_init.go | 2 +- .../agent/tools/azd/azd_project_validation.go | 0 .../agent/tools/azd/azd_yaml_schema.go | 2 +- .../internal/agent/tools/azd/loader.go | 0 .../agent/tools/azd/prompts/README.md | 0 .../azd/prompts/azd_architecture_planning.md | 0 .../azd/prompts/azd_azure_yaml_generation.md | 0 .../azd/prompts/azd_discovery_analysis.md | 0 .../azd/prompts/azd_docker_generation.md | 0 .../azd/prompts/azd_iac_generation_rules.md | 0 .../prompts/azd_infrastructure_generation.md | 0 .../agent/tools/azd/prompts/azd_plan_init.md | 0 .../azd/prompts/azd_project_validation.md | 0 .../tools/azd/prompts/azd_yaml_schema.md | 0 .../agent/tools/azd/prompts/azure.yaml.json | 0 .../agent/tools/azd/prompts/prompts.go | 0 .../internal/agent/tools/common/types.go | 0 .../agent/tools/dev/command_executor.go | 2 +- .../internal/agent/tools/dev/loader.go | 0 .../internal/agent/tools/http/http_fetcher.go | 0 .../internal/agent/tools/http/loader.go | 0 .../agent/tools/io/change_directory.go | 0 
.../internal/agent/tools/io/copy_file.go | 2 +- .../agent/tools/io/create_directory.go | 0 .../agent/tools/io/current_directory.go | 0 .../agent/tools/io/delete_directory.go | 0 .../internal/agent/tools/io/delete_file.go | 0 .../internal/agent/tools/io/directory_list.go | 2 +- .../internal/agent/tools/io/file_info.go | 0 .../internal/agent/tools/io/file_search.go | 0 .../internal/agent/tools/io/loader.go | 0 .../internal/agent/tools/io/move_file.go | 0 .../internal/agent/tools/io/read_file.go | 0 .../internal/agent/tools/io/write_file.go | 2 +- .../internal/agent/tools/loader.go | 6 +- .../internal/agent/tools/mcp/loader.go | 0 .../internal/agent/tools/mcp/mcp.json | 0 .../agent/tools/mcp/sampling_handler.go | 0 .../internal/agent/tools/weather/loader.go | 0 .../internal/agent/tools/weather/weather.go | 0 cli/azd/pkg/llm/azure_openai.go | 91 ++--- cli/azd/pkg/llm/client.go | 13 - cli/azd/pkg/llm/manager.go | 169 +++----- cli/azd/pkg/llm/manager_test.go | 121 ------ cli/azd/pkg/llm/model_factory.go | 28 ++ cli/azd/pkg/llm/ollama.go | 61 ++- 69 files changed, 295 insertions(+), 1267 deletions(-) delete mode 100644 cli/azd/extensions/azd.ai.start/AZURE_AI_SETUP.md delete mode 100644 cli/azd/extensions/azd.ai.start/build.ps1 delete mode 100644 cli/azd/extensions/azd.ai.start/build.sh delete mode 100644 cli/azd/extensions/azd.ai.start/changelog.md delete mode 100644 cli/azd/extensions/azd.ai.start/extension.yaml delete mode 100644 cli/azd/extensions/azd.ai.start/go.mod delete mode 100644 cli/azd/extensions/azd.ai.start/go.sum delete mode 100644 cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go delete mode 100644 cli/azd/extensions/azd.ai.start/internal/cmd/root.go delete mode 100644 cli/azd/extensions/azd.ai.start/main.go rename cli/azd/{extensions/azd.ai.start => }/internal/agent/agent.go (64%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/logging/logger.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/prompts/default_agent_format_instructions.txt (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/prompts/default_agent_prefix.txt (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/prompts/default_agent_suffix.txt (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/azd_architecture_planning.go (92%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/azd_azure_yaml_generation.go (91%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/azd_discovery_analysis.go (91%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/azd_docker_generation.go (91%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/azd_iac_generation_rules.go (90%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/azd_infrastructure_generation.go (91%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/azd_plan_init.go (88%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/azd_project_validation.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/azd_yaml_schema.go (88%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/loader.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/README.md (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/azd_architecture_planning.md (100%) rename cli/azd/{extensions/azd.ai.start => 
}/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/azd_discovery_analysis.md (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/azd_docker_generation.md (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/azd_plan_init.md (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/azd_project_validation.md (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/azd_yaml_schema.md (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/azure.yaml.json (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/azd/prompts/prompts.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/common/types.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/dev/command_executor.go (99%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/dev/loader.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/http/http_fetcher.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/http/loader.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/change_directory.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/copy_file.go (98%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/create_directory.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/current_directory.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/delete_directory.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/delete_file.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/directory_list.go (98%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/file_info.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/file_search.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/loader.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/move_file.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/read_file.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/io/write_file.go (99%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/loader.go (84%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/mcp/loader.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/mcp/mcp.json (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/mcp/sampling_handler.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/weather/loader.go (100%) rename cli/azd/{extensions/azd.ai.start => }/internal/agent/tools/weather/weather.go (100%) delete mode 100644 cli/azd/pkg/llm/client.go create mode 100644 cli/azd/pkg/llm/model_factory.go diff --git a/cli/azd/cmd/container.go b/cli/azd/cmd/container.go index be53dcf16fb..34b1f7df3b8 100644 --- a/cli/azd/cmd/container.go +++ b/cli/azd/cmd/container.go @@ -546,7 +546,13 @@ func registerCommonDependencies(container 
*ioc.NestedContainer) { return serviceManager, err }) }) + + // AI & LLM components container.MustRegisterSingleton(llm.NewManager) + container.MustRegisterSingleton(llm.NewModelFactory) + container.MustRegisterNamedSingleton("ollama", llm.NewOllamaModelProvider) + container.MustRegisterNamedSingleton("azure", llm.NewAzureOpenAiModelProvider) + container.MustRegisterSingleton(repository.NewInitializer) container.MustRegisterSingleton(alpha.NewFeaturesManager) container.MustRegisterSingleton(config.NewUserConfigManager) diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go index 9bd5832209f..3395047e7c1 100644 --- a/cli/azd/cmd/init.go +++ b/cli/azd/cmd/init.go @@ -14,6 +14,8 @@ import ( "github.com/MakeNowJust/heredoc/v2" "github.com/azure/azure-dev/cli/azd/cmd/actions" "github.com/azure/azure-dev/cli/azd/internal" + "github.com/azure/azure-dev/cli/azd/internal/agent" + "github.com/azure/azure-dev/cli/azd/internal/agent/logging" "github.com/azure/azure-dev/cli/azd/internal/repository" "github.com/azure/azure-dev/cli/azd/internal/tracing" "github.com/azure/azure-dev/cli/azd/internal/tracing/fields" @@ -24,6 +26,7 @@ import ( "github.com/azure/azure-dev/cli/azd/pkg/extensions" "github.com/azure/azure-dev/cli/azd/pkg/input" "github.com/azure/azure-dev/cli/azd/pkg/lazy" + "github.com/azure/azure-dev/cli/azd/pkg/llm" "github.com/azure/azure-dev/cli/azd/pkg/output" "github.com/azure/azure-dev/cli/azd/pkg/output/ux" "github.com/azure/azure-dev/cli/azd/pkg/project" @@ -131,6 +134,7 @@ type initAction struct { featuresManager *alpha.FeatureManager extensionsManager *extensions.Manager azd workflow.AzdCommandRunner + llmManager *llm.Manager } func newInitAction( @@ -145,6 +149,7 @@ func newInitAction( featuresManager *alpha.FeatureManager, extensionsManager *extensions.Manager, azd workflow.AzdCommandRunner, + llmManager *llm.Manager, ) actions.Action { return &initAction{ lazyAzdCtx: lazyAzdCtx, @@ -158,6 +163,7 @@ func newInitAction( featuresManager: featuresManager, extensionsManager: extensionsManager, azd: azd, + llmManager: llmManager, } } @@ -344,6 +350,10 @@ func (i *initAction) Run(ctx context.Context) (*actions.ActionResult, error) { header = fmt.Sprintf("Initialized environment %s.", env.Name()) followUp = "" + case initWithCopilot: + if err := i.initAppWithCopilot(ctx); err != nil { + return nil, err + } default: panic("unhandled init type") } @@ -360,6 +370,37 @@ func (i *initAction) Run(ctx context.Context) (*actions.ActionResult, error) { }, nil } +func (i *initAction) initAppWithCopilot(ctx context.Context) error { + defaultModelContainer, err := i.llmManager.GetDefaultModel() + if err != nil { + return err + } + + actionLogger := logging.NewActionLogger() + samplingModelContainer, err := i.llmManager.GetDefaultModel(llm.WithLogger(actionLogger)) + + azdAgent, err := agent.NewAzdAiAgent( + defaultModelContainer.Model, + agent.WithSamplingModel(samplingModelContainer.Model), + ) + if err != nil { + return err + } + + initPrompt := `Goal: Initialize or migrate the AZD project from the current working directory. 
+ +Read and review the 'azd-arch-plan.md' file if it exists to get current status +Run the 'azd_plan_init' tool and follow the steps +Finally - run the 'azd_project_validation' tool to ensure the process is fully completed + ` + + if err := azdAgent.RunConversationLoop(ctx, []string{initPrompt}); err != nil { + return err + } + + return nil +} + type initType int const ( @@ -367,6 +408,7 @@ const ( initFromApp initAppTemplate initEnvironment + initWithCopilot ) func promptInitType(console input.Console, ctx context.Context) (initType, error) { @@ -375,6 +417,7 @@ func promptInitType(console input.Console, ctx context.Context) (initType, error Options: []string{ "Scan current directory", // This now covers minimal project creation too "Select a template", + "AZD Copilot", }, }) if err != nil { @@ -386,6 +429,8 @@ func promptInitType(console input.Console, ctx context.Context) (initType, error return initFromApp, nil case 1: return initAppTemplate, nil + case 2: + return initWithCopilot, nil default: panic("unhandled selection") } diff --git a/cli/azd/extensions/azd.ai.start/AZURE_AI_SETUP.md b/cli/azd/extensions/azd.ai.start/AZURE_AI_SETUP.md deleted file mode 100644 index 9d985f2f36e..00000000000 --- a/cli/azd/extensions/azd.ai.start/AZURE_AI_SETUP.md +++ /dev/null @@ -1,98 +0,0 @@ -# Azure AI Integration Setup - -This AI agent can work with both OpenAI and Azure OpenAI Service. Here's how to configure each: - -## Option 1: Azure OpenAI Service (Recommended for Azure users) - -Azure OpenAI provides the same models as OpenAI but hosted on Azure infrastructure with enterprise security and compliance. - -### Prerequisites -1. Azure subscription -2. Azure OpenAI resource created in Azure portal -3. GPT model deployed (e.g., GPT-3.5-turbo or GPT-4) - -### Environment Variables -```bash -# Set these environment variables for Azure OpenAI -export AZURE_OPENAI_ENDPOINT="https://your-resource-name.openai.azure.com" -export AZURE_OPENAI_API_KEY="your-azure-openai-api-key" -export AZURE_OPENAI_DEPLOYMENT_NAME="your-gpt-deployment-name" -``` - -### PowerShell (Windows) -```powershell -$env:AZURE_OPENAI_ENDPOINT="https://your-resource-name.openai.azure.com" -$env:AZURE_OPENAI_API_KEY="your-azure-openai-api-key" -$env:AZURE_OPENAI_DEPLOYMENT_NAME="your-gpt-deployment-name" -``` - -## Option 2: OpenAI API (Direct) - -### Environment Variables -```bash -export OPENAI_API_KEY="your-openai-api-key" -``` - -### PowerShell (Windows) -```powershell -$env:OPENAI_API_KEY="your-openai-api-key" -``` - -## Usage Examples - -```bash -# Interactive mode -azd ai.chat - -# Direct query -azd ai.chat "How do I deploy a Node.js app to Azure Container Apps?" - -# Azure-specific queries -azd ai.chat "What's the best way to set up CI/CD with Azure DevOps for my web app?" -azd ai.chat "How do I configure Azure Key Vault for my application secrets?" -``` - -## Azure OpenAI Advantages - -- **Enterprise Security**: Your data stays within your Azure tenant -- **Compliance**: Meets enterprise compliance requirements -- **Integration**: Better integration with other Azure services -- **Cost Control**: Better cost management and billing integration -- **Regional Deployment**: Deploy closer to your users for lower latency - -## Setup Steps for Azure OpenAI - -1. **Create Azure OpenAI Resource**: - ```bash - az cognitiveservices account create \ - --name myopenai \ - --resource-group myresourcegroup \ - --location eastus \ - --kind OpenAI \ - --sku s0 - ``` - -2. 
**Deploy a Model**: - - Go to Azure OpenAI Studio - - Navigate to "Deployments" - - Create a new deployment with your chosen model (e.g., gpt-35-turbo) - - Note the deployment name for the environment variable - -3. **Get API Key**: - ```bash - az cognitiveservices account keys list \ - --name myopenai \ - --resource-group myresourcegroup - ``` - -4. **Set Environment Variables** as shown above - -## Model Compatibility - -The agent supports various GPT models available in Azure OpenAI: -- GPT-3.5-turbo -- GPT-4 -- GPT-4-turbo -- And newer models as they become available - -Just make sure your deployment name matches the model you want to use. diff --git a/cli/azd/extensions/azd.ai.start/build.ps1 b/cli/azd/extensions/azd.ai.start/build.ps1 deleted file mode 100644 index 8cdd4ae9281..00000000000 --- a/cli/azd/extensions/azd.ai.start/build.ps1 +++ /dev/null @@ -1,71 +0,0 @@ -# Get the directory of the script -$EXTENSION_DIR = Split-Path -Parent $MyInvocation.MyCommand.Path - -# Change to the script directory -Set-Location -Path $EXTENSION_DIR - -# Create a safe version of EXTENSION_ID replacing dots with dashes -$EXTENSION_ID_SAFE = $env:EXTENSION_ID -replace '\.', '-' - -# Define output directory -$OUTPUT_DIR = if ($env:OUTPUT_DIR) { $env:OUTPUT_DIR } else { Join-Path $EXTENSION_DIR "bin" } - -# Create output directory if it doesn't exist -if (-not (Test-Path -Path $OUTPUT_DIR)) { - New-Item -ItemType Directory -Path $OUTPUT_DIR | Out-Null -} - -# Get Git commit hash and build date -$COMMIT = git rev-parse HEAD -$BUILD_DATE = (Get-Date -Format "yyyy-MM-ddTHH:mm:ssZ") - -# List of OS and architecture combinations -if ($env:EXTENSION_PLATFORM) { - $PLATFORMS = @($env:EXTENSION_PLATFORM) -} -else { - $PLATFORMS = @( - "windows/amd64", - "windows/arm64", - "darwin/amd64", - "darwin/arm64", - "linux/amd64", - "linux/arm64" - ) -} - -$APP_PATH = "$env:EXTENSION_ID/internal/cmd" - -# Loop through platforms and build -foreach ($PLATFORM in $PLATFORMS) { - $OS, $ARCH = $PLATFORM -split '/' - - $OUTPUT_NAME = Join-Path $OUTPUT_DIR "$EXTENSION_ID_SAFE-$OS-$ARCH" - - if ($OS -eq "windows") { - $OUTPUT_NAME += ".exe" - } - - Write-Host "Building for $OS/$ARCH..." - - # Delete the output file if it already exists - if (Test-Path -Path $OUTPUT_NAME) { - Remove-Item -Path $OUTPUT_NAME -Force - } - - # Set environment variables for Go build - $env:GOOS = $OS - $env:GOARCH = $ARCH - - go build ` - -ldflags="-X '$APP_PATH.Version=$env:EXTENSION_VERSION' -X '$APP_PATH.Commit=$COMMIT' -X '$APP_PATH.BuildDate=$BUILD_DATE'" ` - -o $OUTPUT_NAME - - if ($LASTEXITCODE -ne 0) { - Write-Host "An error occurred while building for $OS/$ARCH" - exit 1 - } -} - -Write-Host "Build completed successfully!" -Write-Host "Binaries are located in the $OUTPUT_DIR directory." 
diff --git a/cli/azd/extensions/azd.ai.start/build.sh b/cli/azd/extensions/azd.ai.start/build.sh deleted file mode 100644 index f1a995ec5e9..00000000000 --- a/cli/azd/extensions/azd.ai.start/build.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/bin/bash - -# Get the directory of the script -EXTENSION_DIR="$(cd "$(dirname "$0")" && pwd)" - -# Change to the script directory -cd "$EXTENSION_DIR" || exit - -# Create a safe version of EXTENSION_ID replacing dots with dashes -EXTENSION_ID_SAFE="${EXTENSION_ID//./-}" - -# Define output directory -OUTPUT_DIR="${OUTPUT_DIR:-$EXTENSION_DIR/bin}" - -# Create output and target directories if they don't exist -mkdir -p "$OUTPUT_DIR" - -# Get Git commit hash and build date -COMMIT=$(git rev-parse HEAD) -BUILD_DATE=$(date -u +%Y-%m-%dT%H:%M:%SZ) - -# List of OS and architecture combinations -if [ -n "$EXTENSION_PLATFORM" ]; then - PLATFORMS=("$EXTENSION_PLATFORM") -else - PLATFORMS=( - "windows/amd64" - "windows/arm64" - "darwin/amd64" - "darwin/arm64" - "linux/amd64" - "linux/arm64" - ) -fi - -APP_PATH="$EXTENSION_ID/internal/cmd" - -# Loop through platforms and build -for PLATFORM in "${PLATFORMS[@]}"; do - OS=$(echo "$PLATFORM" | cut -d'/' -f1) - ARCH=$(echo "$PLATFORM" | cut -d'/' -f2) - - OUTPUT_NAME="$OUTPUT_DIR/$EXTENSION_ID_SAFE-$OS-$ARCH" - - if [ "$OS" = "windows" ]; then - OUTPUT_NAME+='.exe' - fi - - echo "Building for $OS/$ARCH..." - - # Delete the output file if it already exists - [ -f "$OUTPUT_NAME" ] && rm -f "$OUTPUT_NAME" - - # Set environment variables for Go build - GOOS=$OS GOARCH=$ARCH go build \ - -ldflags="-X '$APP_PATH.Version=$EXTENSION_VERSION' -X '$APP_PATH.Commit=$COMMIT' -X '$APP_PATH.BuildDate=$BUILD_DATE'" \ - -o "$OUTPUT_NAME" - - if [ $? -ne 0 ]; then - echo "An error occurred while building for $OS/$ARCH" - exit 1 - fi -done - -echo "Build completed successfully!" -echo "Binaries are located in the $OUTPUT_DIR directory." 
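Both build scripts deleted above do the same job: loop over GOOS/GOARCH pairs and stamp version metadata into each binary with `go build -ldflags "-X ..."`. A minimal illustration of that link-time injection follows (variable and package names here are placeholders; the scripts targeted `$EXTENSION_ID/internal/cmd.Version` and friends):

```go
// Sketch of the -X link-time variable injection used by the removed build
// scripts. Build with, for example:
//
//   GOOS=linux GOARCH=amd64 go build \
//     -ldflags="-X 'main.Version=0.0.1' -X 'main.Commit=abc1234' -X 'main.BuildDate=2025-08-01T00:00:00Z'" \
//     -o bin/example-linux-amd64
//
// -X can only set package-level string variables, and the path on the left of
// the '=' must match the package in which the variable is declared.
package main

import "fmt"

// Overridden at link time via -ldflags "-X"; the defaults apply to plain
// `go build` or `go run` invocations.
var (
	Version   = "dev"
	Commit    = "none"
	BuildDate = "unknown"
)

func main() {
	fmt.Printf("version %s (commit %s, built %s)\n", Version, Commit, BuildDate)
}
```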
diff --git a/cli/azd/extensions/azd.ai.start/changelog.md b/cli/azd/extensions/azd.ai.start/changelog.md deleted file mode 100644 index b88d613cce0..00000000000 --- a/cli/azd/extensions/azd.ai.start/changelog.md +++ /dev/null @@ -1,3 +0,0 @@ -# Release History - -## 0.0.1 - Initial Version \ No newline at end of file diff --git a/cli/azd/extensions/azd.ai.start/extension.yaml b/cli/azd/extensions/azd.ai.start/extension.yaml deleted file mode 100644 index 2c645db27b3..00000000000 --- a/cli/azd/extensions/azd.ai.start/extension.yaml +++ /dev/null @@ -1,9 +0,0 @@ -capabilities: - - custom-commands -description: Enables interactive AI agent through AZD -displayName: AZD AI Agent -id: azd.ai.start -language: go -namespace: ai.chat -usage: azd ai.chat [options] -version: 0.0.1 diff --git a/cli/azd/extensions/azd.ai.start/go.mod b/cli/azd/extensions/azd.ai.start/go.mod deleted file mode 100644 index 892e4868991..00000000000 --- a/cli/azd/extensions/azd.ai.start/go.mod +++ /dev/null @@ -1,61 +0,0 @@ -module azd.ai.start - -go 1.24.1 - -require ( - github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c - github.com/bmatcuk/doublestar/v4 v4.8.1 - github.com/fatih/color v1.18.0 - github.com/spf13/cobra v1.9.1 - github.com/tmc/langchaingo v0.1.13 -) - -require ( - github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.3.1 // indirect - github.com/Masterminds/sprig/v3 v3.2.3 // indirect - github.com/bahlo/generic-list-go v0.2.0 // indirect - github.com/buger/jsonparser v1.1.1 // indirect - github.com/dlclark/regexp2 v1.10.0 // indirect - github.com/dustin/go-humanize v1.0.1 // indirect - github.com/google/go-cmp v0.7.0 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/goph/emperror v0.17.2 // indirect - github.com/huandu/xstrings v1.3.3 // indirect - github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df // indirect - github.com/imdario/mergo v0.3.13 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/invopop/jsonschema v0.13.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/mailru/easyjson v0.7.7 // indirect - github.com/mark3labs/mcp-go v0.36.0 // indirect - github.com/mattn/go-colorable v0.1.14 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mitchellh/copystructure v1.0.0 // indirect - github.com/mitchellh/reflectwalk v1.0.0 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/nikolalohinski/gonja v1.5.3 // indirect - github.com/pelletier/go-toml/v2 v2.0.9 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/pkoukk/tiktoken-go v0.1.6 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect - github.com/shopspring/decimal v1.2.0 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/cast v1.7.1 // indirect - github.com/spf13/pflag v1.0.6 // indirect - github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect - github.com/yargevad/filepathx v1.0.0 // indirect - github.com/yosida95/uritemplate/v3 v3.0.2 // indirect - go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 // indirect - golang.org/x/crypto v0.37.0 // indirect - golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect - golang.org/x/net v0.39.0 // indirect - golang.org/x/sys v0.32.0 // indirect - golang.org/x/text v0.24.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250407143221-ac9807e6c755 // indirect - 
google.golang.org/grpc v1.71.1 // indirect - google.golang.org/protobuf v1.36.6 // indirect - gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/cli/azd/extensions/azd.ai.start/go.sum b/cli/azd/extensions/azd.ai.start/go.sum deleted file mode 100644 index ebd93aadd8e..00000000000 --- a/cli/azd/extensions/azd.ai.start/go.sum +++ /dev/null @@ -1,379 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.114.0 h1:OIPFAdfrFDFO2ve2U7r/H5SwSbBzEdrBdE7xkgwc+kY= -cloud.google.com/go v0.114.0/go.mod h1:ZV9La5YYxctro1HTPug5lXH/GefROyW8PPD4T8n9J8E= -cloud.google.com/go/ai v0.7.0 h1:P6+b5p4gXlza5E+u7uvcgYlzZ7103ACg70YdZeC6oGE= -cloud.google.com/go/ai v0.7.0/go.mod h1:7ozuEcraovh4ABsPbrec3o4LmFl9HigNI3D5haxYeQo= -cloud.google.com/go/aiplatform v1.68.0 h1:EPPqgHDJpBZKRvv+OsB3cr0jYz3EL2pZ+802rBPcG8U= -cloud.google.com/go/aiplatform v1.68.0/go.mod h1:105MFA3svHjC3Oazl7yjXAmIR89LKhRAeNdnDKJczME= -cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw= -cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s= -cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= -cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= -cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= -cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= -cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= -cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU= -cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng= -cloud.google.com/go/vertexai v0.12.0 h1:zTadEo/CtsoyRXNx3uGCncoWAP1H2HakGqwznt+iMo8= -cloud.google.com/go/vertexai v0.12.0/go.mod h1:8u+d0TsvBfAAd2x5R6GMgbYhsLgo3J7lmP4bR8g2ig8= -github.com/AssemblyAI/assemblyai-go-sdk v1.3.0 h1:AtOVgGxUycvK4P4ypP+1ZupecvFgnfH+Jsum0o5ILoU= -github.com/AssemblyAI/assemblyai-go-sdk v1.3.0/go.mod h1:H0naZbvpIW49cDA5ZZ/gggeXqi7ojSGB1mqshRk6kNE= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= -github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= -github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= -github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= -github.com/PuerkitoBio/goquery v1.8.1 h1:uQxhNlArOIdbrH1tr0UXwdVFgDcZDrZVdcpygAcwmWM= -github.com/PuerkitoBio/goquery v1.8.1/go.mod h1:Q8ICL1kNUJ2sXGoAhPGUdYDJvgQgHzJsnnd3H7Ho5jQ= -github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o= -github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss= -github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= -github.com/aymerick/douceur v0.2.0 
h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= -github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= -github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c h1:pi62a7GwfbxvZDXhV4DfhxeePzpVCoyr9/rZaWH5eow= -github.com/azure/azure-dev v0.0.0-20250725230316-fffc1a6a410c/go.mod h1:mSTaPODklWyhruY0DZgPw1DI97K5cHXfU3afMqGf0IM= -github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= -github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= -github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= -github.com/bmatcuk/doublestar/v4 v4.8.1 h1:54Bopc5c2cAvhLRAzqOGCYHYyhcDHsFF4wWIR5wKP38= -github.com/bmatcuk/doublestar/v4 v4.8.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0= -github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= -github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 
-github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/getzep/zep-go v1.0.4 h1:09o26bPP2RAPKFjWuVWwUWLbtFDF/S8bfbilxzeZAAg= -github.com/getzep/zep-go v1.0.4/go.mod h1:HC1Gz7oiyrzOTvzeKC4dQKUiUy87zpIJl0ZFXXdHuss= -github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= -github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= -github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/generative-ai-go v0.15.1 h1:n8aQUpvhPOlGVuM2DRkJ2jvx04zpp42B778AROJa+pQ= -github.com/google/generative-ai-go v0.15.1/go.mod h1:AAucpWZjXsDKhQYWvCYuP6d0yB1kX998pJlOW1rAesw= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= -github.com/google/go-querystring v1.1.0/go.mod 
h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= -github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= -github.com/goph/emperror v0.17.2 h1:yLapQcmEsO0ipe9p5TaN22djm3OFV/TfM/fcYP0/J18= -github.com/goph/emperror v0.17.2/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic= -github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= -github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= -github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df h1:4lTJXCZw16BF0BCzrQ1LUzlMW4+2OwBkkYj1/bRybhY= -github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df/go.mod h1:oL2JAtsIp/1vnVy4UG4iDzL8SZwkOzqvRL3YR9PGPjs= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E= -github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= -github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= -github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80 h1:6Yzfa6GP0rIo/kULo2bwGEkFvCePZ3qHDDTC3/J9Swo= -github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mark3labs/mcp-go v0.32.0 h1:fgwmbfL2gbd67obg57OfV2Dnrhs1HtSdlY/i5fn7MU8= -github.com/mark3labs/mcp-go v0.32.0/go.mod h1:rXqOudj/djTORU/ThxYx8fqEVj/5pvTuuebQ2RC7uk4= -github.com/mark3labs/mcp-go v0.36.0 h1:rIZaijrRYPeSbJG8/qNDe0hWlGrCJ7FWHNMz2SQpTis= -github.com/mark3labs/mcp-go v0.36.0/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g= -github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= -github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= -github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58= -github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/nikolalohinski/gonja v1.5.3 h1:GsA+EEaZDZPGJ8JtpeGN78jidhOlxeJROpqMT9fTj9c= -github.com/nikolalohinski/gonja v1.5.3/go.mod h1:RmjwxNiXAEqcq1HeK5SSMmqFJvKOfTfXhkJv6YBtPa4= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0= -github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
-github.com/pkoukk/tiktoken-go v0.1.6 h1:JF0TlJzhTbrI30wCvFuiw6FzP2+/bR+FIxUdgEAcUsw= -github.com/pkoukk/tiktoken-go v0.1.6/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= -github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tmc/langchaingo v0.1.13 h1:rcpMWBIi2y3B90XxfE4Ao8dhCQPVDMaNPnN5cGB1CaA= -github.com/tmc/langchaingo v0.1.13/go.mod h1:vpQ5NOIhpzxDfTZK9B6tf2GM/MoaHewPWM5KXXGh7hg= -github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= -github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= -github.com/x-cray/logrus-prefixed-formatter v0.5.2 
h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= -github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= -github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc= -github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA= -github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= -github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181 h1:K+bMSIx9A7mLES1rtG+qKduLIXq40DAzYHtb0XuCukA= -gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181/go.mod h1:dzYhVIwWCtzPAa4QP98wfB9+mzt33MSmM8wsKiMi2ow= -gitlab.com/golang-commonmark/linkify v0.0.0-20191026162114-a0c2df6c8f82 h1:oYrL81N608MLZhma3ruL8qTM4xcpYECGut8KSxRY59g= -gitlab.com/golang-commonmark/linkify v0.0.0-20191026162114-a0c2df6c8f82/go.mod h1:Gn+LZmCrhPECMD3SOKlE+BOHwhOYD9j7WT9NUtkCrC8= -gitlab.com/golang-commonmark/markdown v0.0.0-20211110145824-bf3e522c626a h1:O85GKETcmnCNAfv4Aym9tepU8OE0NmcZNqPlXcsBKBs= -gitlab.com/golang-commonmark/markdown v0.0.0-20211110145824-bf3e522c626a/go.mod h1:LaSIs30YPGs1H5jwGgPhLzc8vkNc/k0rDX/fEZqiU/M= -gitlab.com/golang-commonmark/mdurl v0.0.0-20191124015652-932350d1cb84 h1:qqjvoVXdWIcZCLPMlzgA7P9FZWdPGPvP/l3ef8GzV6o= -gitlab.com/golang-commonmark/mdurl v0.0.0-20191124015652-932350d1cb84/go.mod h1:IJZ+fdMvbW2qW6htJx7sLJ04FEs4Ldl/MDsJtMKywfw= -gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f h1:Wku8eEdeJqIOFHtrfkYUByc4bCaTeA6fL0UJgfEiFMI= -gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f/go.mod h1:Tiuhl+njh/JIg0uS/sOJVYi0x2HEa5rc1OAaVsb5tAs= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 h1:A3SayB3rNyt+1S6qpI9mHPkeHTZbD7XILEqWnYZb2l0= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0/go.mod h1:27iA5uvhuRNmalO+iEUdVn5ZMj2qy10Mm+XRIpRmyuU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= -go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= -go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= -go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace 
v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= -go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 h1:Ss6D3hLXTM0KobyBYEAygXzFfGcjnmfEJOBgSbemCtg= -go.starlark.net v0.0.0-20230302034142-4b1e35fe2254/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= -golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= -golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= -golang.org/x/sync v0.13.0/go.mod 
h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= -golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE= -google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240528184218-531527333157 h1:u7WMYrIrVvs0TF5yaKwKNbcJyySYf+HAIFXxWltJOXE= -google.golang.org/genproto v0.0.0-20240528184218-531527333157/go.mod h1:ubQlAQnzejB8uZzszhrTCU2Fyp6Vi7ZE5nn0c3W8+qQ= -google.golang.org/genproto/googleapis/api v0.0.0-20250407143221-ac9807e6c755 h1:AMLTAunltONNuzWgVPZXrjLWtXpsG6A3yLLPEoJ/IjU= -google.golang.org/genproto/googleapis/api v0.0.0-20250407143221-ac9807e6c755/go.mod h1:2R6XrVC8Oc08GlNh8ujEpc7HkLiEZ16QeY7FxIs20ac= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250407143221-ac9807e6c755 h1:TwXJCGVREgQ/cl18iY0Z4wJCTL/GmW+Um2oSwZiZPnc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250407143221-ac9807e6c755/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI= -google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 
v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= -nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go b/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go deleted file mode 100644 index 3a27dc4643c..00000000000 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/enhanced_integration.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package cmd - -import ( - "bufio" - "context" - "fmt" - "os" - "strings" - - "github.com/fatih/color" - - "azd.ai.start/internal/agent" -) - -// RunEnhancedAgentLoop runs the enhanced AZD Copilot agent with full capabilities -func RunEnhancedAgentLoop(ctx context.Context, agent *agent.AzdAiAgent, args []string) error { - fmt.Println("🤖 AZD Copilot - Interactive Mode") - fmt.Println("═══════════════════════════════════════════════════════════") - - // Handle initial query if provided - var initialQuery string - if len(args) > 0 { - initialQuery = strings.Join(args, " ") - } - - scanner := bufio.NewScanner(os.Stdin) - - for { - var userInput string - - if initialQuery != "" { - userInput = initialQuery - initialQuery = "" // Clear after first use - color.Cyan("💬 You: %s\n", userInput) - } else { - fmt.Print(color.CyanString("\n💬 You: ")) - color.Set(color.FgCyan) // Set blue color for user input - if !scanner.Scan() { - color.Unset() // Reset color - break // EOF or error - } - userInput = strings.TrimSpace(scanner.Text()) - color.Unset() // Reset color after input - } - - // Check for exit commands - if userInput == "" { - continue - } - - if strings.ToLower(userInput) == "exit" || strings.ToLower(userInput) == "quit" { - fmt.Println("👋 Goodbye! Thanks for using AZD Copilot!") - break - } - - // Process the query with the enhanced agent - err := agent.ProcessQuery(ctx, userInput) - if err != nil { - continue - } - } - - if err := scanner.Err(); err != nil { - return fmt.Errorf("error reading input: %w", err) - } - - return nil -} diff --git a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go b/cli/azd/extensions/azd.ai.start/internal/cmd/root.go deleted file mode 100644 index 81cc5cac8cd..00000000000 --- a/cli/azd/extensions/azd.ai.start/internal/cmd/root.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. 
-// Licensed under the MIT License. - -package cmd - -import ( - "context" - "encoding/json" - "fmt" - "os" - - "azd.ai.start/internal/agent" - "azd.ai.start/internal/agent/logging" - "github.com/azure/azure-dev/cli/azd/pkg/azdext" - "github.com/spf13/cobra" - "github.com/tmc/langchaingo/llms/openai" -) - -func NewRootCommand() *cobra.Command { - var debug bool - - rootCmd := &cobra.Command{ - Use: "azd ai.chat [options]", - Short: "Enables interactive AI agent through AZD", - SilenceUsage: true, - SilenceErrors: true, - CompletionOptions: cobra.CompletionOptions{ - DisableDefaultCmd: true, - }, - RunE: func(cmd *cobra.Command, args []string) error { - return runAIAgent(cmd.Context(), args, debug) - }, - } - - rootCmd.Flags().BoolVar(&debug, "debug", false, "Enable debug logging") - - return rootCmd -} - -type AiModelConfig struct { - Endpoint string `json:"endpoint"` - ApiKey string `json:"apiKey"` - DeploymentName string `json:"deploymentName"` -} - -// runAIAgent creates and runs the enhanced AI agent using LangChain Go -func runAIAgent(ctx context.Context, args []string, debug bool) error { - // Create a new context that includes the AZD access token - ctx = azdext.WithAccessToken(ctx) - - // Create a new AZD client - azdClient, err := azdext.NewAzdClient() - if err != nil { - return fmt.Errorf("failed to create azd client: %w", err) - } - - defer azdClient.Close() - - getSectionResponse, err := azdClient. - UserConfig(). - GetSection(ctx, &azdext.GetUserConfigSectionRequest{ - Path: "ai.chat.model", - }) - if err != nil { - return fmt.Errorf("AI model configuration not found, %w", err) - } - - var aiConfig *AiModelConfig - if err := json.Unmarshal(getSectionResponse.Section, &aiConfig); err != nil { - return fmt.Errorf("failed to unmarshal AI model configuration: %w", err) - } - - if debug { - defaultValue := true - - _, _ = azdClient.Prompt().Confirm(ctx, &azdext.ConfirmRequest{ - Options: &azdext.ConfirmOptions{ - Message: fmt.Sprintf("Ready? 
(PID: %d - You can attach a debugger now)", os.Getpid()), - DefaultValue: &defaultValue, - }, - }) - } - - // Common deployment names to try - azureAPIVersion := "2024-02-15-preview" - - var defaultModel *openai.LLM - var samplingModel *openai.LLM - - actionLogger := logging.NewActionLogger(logging.WithDebug(debug)) - - // Try different deployment names - if aiConfig.Endpoint != "" && aiConfig.ApiKey != "" { - // Use Azure OpenAI with proper configuration - fmt.Printf("🔵 Trying Azure OpenAI with deployment: %s\n", aiConfig.DeploymentName) - - defaultModel, err = openai.New( - openai.WithToken(aiConfig.ApiKey), - openai.WithBaseURL(aiConfig.Endpoint+"/"), - openai.WithAPIType(openai.APITypeAzure), - openai.WithAPIVersion(azureAPIVersion), - openai.WithModel(aiConfig.DeploymentName), - openai.WithCallback(actionLogger), - ) - - if err == nil { - fmt.Printf("✅ Successfully connected with deployment: %s\n", aiConfig.DeploymentName) - } else { - fmt.Printf("❌ Failed with deployment %s: %v\n", aiConfig.DeploymentName, err) - } - - samplingModel, err = openai.New( - openai.WithToken(aiConfig.ApiKey), - openai.WithBaseURL(aiConfig.Endpoint+"/"), - openai.WithAPIType(openai.APITypeAzure), - openai.WithAPIVersion(azureAPIVersion), - openai.WithModel(aiConfig.DeploymentName), - ) - - if err != nil { - return err - } - } - - // Create the enhanced agent - azdAgent, err := agent.NewAzdAiAgent(defaultModel, - agent.WithSamplingModel(samplingModel), - agent.WithDebug(debug), - ) - if err != nil { - return err - } - - if defaultModel == nil { - return fmt.Errorf("failed to connect to any Azure OpenAI deployment") - } - - // Use the enhanced AZD Copilot agent with full capabilities - return RunEnhancedAgentLoop(ctx, azdAgent, args) -} diff --git a/cli/azd/extensions/azd.ai.start/main.go b/cli/azd/extensions/azd.ai.start/main.go deleted file mode 100644 index 026e7c944e7..00000000000 --- a/cli/azd/extensions/azd.ai.start/main.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package main - -import ( - "context" - "os" - - "azd.ai.start/internal/cmd" - "github.com/fatih/color" -) - -func init() { - forceColorVal, has := os.LookupEnv("FORCE_COLOR") - if has && forceColorVal == "1" { - color.NoColor = false - } -} - -func main() { - // Execute the root command - ctx := context.Background() - rootCmd := cmd.NewRootCommand() - - if err := rootCmd.ExecuteContext(ctx); err != nil { - color.Red("Error: %v", err) - os.Exit(1) - } -} diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go b/cli/azd/internal/agent/agent.go similarity index 64% rename from cli/azd/extensions/azd.ai.start/internal/agent/agent.go rename to cli/azd/internal/agent/agent.go index 128de9af89a..4e318314718 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/agent.go +++ b/cli/azd/internal/agent/agent.go @@ -4,19 +4,24 @@ package agent import ( + "bufio" "context" _ "embed" + "fmt" + "os" + "strings" + "github.com/fatih/color" "github.com/tmc/langchaingo/agents" "github.com/tmc/langchaingo/chains" "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/memory" "github.com/tmc/langchaingo/tools" - "azd.ai.start/internal/agent/logging" - localtools "azd.ai.start/internal/agent/tools" - "azd.ai.start/internal/agent/tools/mcp" - mcptools "azd.ai.start/internal/agent/tools/mcp" + "github.com/azure/azure-dev/cli/azd/internal/agent/logging" + localtools "github.com/azure/azure-dev/cli/azd/internal/agent/tools" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" + mcptools "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" ) //go:embed prompts/default_agent_prefix.txt @@ -127,8 +132,63 @@ func NewAzdAiAgent(llm llms.Model, opts ...AgentOption) (*AzdAiAgent, error) { return azdAgent, nil } +// RunConversationLoop runs the enhanced AZD Copilot agent with full capabilities +func (aai *AzdAiAgent) RunConversationLoop(ctx context.Context, args []string) error { + fmt.Println("🤖 AZD Copilot - Interactive Mode") + fmt.Println("═══════════════════════════════════════════════════════════") + + // Handle initial query if provided + var initialQuery string + if len(args) > 0 { + initialQuery = strings.Join(args, " ") + } + + scanner := bufio.NewScanner(os.Stdin) + + for { + var userInput string + + if initialQuery != "" { + userInput = initialQuery + initialQuery = "" // Clear after first use + color.Cyan("💬 You: %s\n", userInput) + } else { + fmt.Print(color.CyanString("\n💬 You: ")) + color.Set(color.FgCyan) // Set blue color for user input + if !scanner.Scan() { + color.Unset() // Reset color + break // EOF or error + } + userInput = strings.TrimSpace(scanner.Text()) + color.Unset() // Reset color after input + } + + // Check for exit commands + if userInput == "" { + continue + } + + if strings.ToLower(userInput) == "exit" || strings.ToLower(userInput) == "quit" { + fmt.Println("👋 Goodbye! 
Thanks for using AZD Copilot!") + break + } + + // Process the query with the enhanced agent + err := aai.runChain(ctx, userInput) + if err != nil { + continue + } + } + + if err := scanner.Err(); err != nil { + return fmt.Errorf("error reading input: %w", err) + } + + return nil +} + // ProcessQuery processes a user query with full action tracking and validation -func (aai *AzdAiAgent) ProcessQuery(ctx context.Context, userInput string) error { +func (aai *AzdAiAgent) runChain(ctx context.Context, userInput string) error { // Execute with enhanced input - agent should automatically handle memory _, err := chains.Run(ctx, aai.executor, userInput, chains.WithMaxTokens(800), diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/logging/logger.go b/cli/azd/internal/agent/logging/logger.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/logging/logger.go rename to cli/azd/internal/agent/logging/logger.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt b/cli/azd/internal/agent/prompts/default_agent_format_instructions.txt similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_format_instructions.txt rename to cli/azd/internal/agent/prompts/default_agent_format_instructions.txt diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt b/cli/azd/internal/agent/prompts/default_agent_prefix.txt similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_prefix.txt rename to cli/azd/internal/agent/prompts/default_agent_prefix.txt diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_suffix.txt b/cli/azd/internal/agent/prompts/default_agent_suffix.txt similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/prompts/default_agent_suffix.txt rename to cli/azd/internal/agent/prompts/default_agent_suffix.txt diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_architecture_planning.go b/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go similarity index 92% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_architecture_planning.go rename to cli/azd/internal/agent/tools/azd/azd_architecture_planning.go index 1e29c2050f4..d74111eaa53 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_architecture_planning.go +++ b/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/agent/tools/azd/prompts" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_azure_yaml_generation.go b/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go similarity index 91% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_azure_yaml_generation.go rename to cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go index d5012b63f10..9e5764563f3 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_azure_yaml_generation.go +++ b/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/agent/tools/azd/prompts" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git 
a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_discovery_analysis.go b/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go similarity index 91% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_discovery_analysis.go rename to cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go index 7b7c39a5077..1d1ae810d23 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_discovery_analysis.go +++ b/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/agent/tools/azd/prompts" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_docker_generation.go b/cli/azd/internal/agent/tools/azd/azd_docker_generation.go similarity index 91% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_docker_generation.go rename to cli/azd/internal/agent/tools/azd/azd_docker_generation.go index 67a76b4d9fa..18effd6c9e7 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_docker_generation.go +++ b/cli/azd/internal/agent/tools/azd/azd_docker_generation.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/agent/tools/azd/prompts" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_iac_generation_rules.go b/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go similarity index 90% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_iac_generation_rules.go rename to cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go index 40cf27facfa..38d797365c8 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_iac_generation_rules.go +++ b/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/agent/tools/azd/prompts" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_infrastructure_generation.go b/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go similarity index 91% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_infrastructure_generation.go rename to cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go index 44876b94300..0cc87372e87 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_infrastructure_generation.go +++ b/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/agent/tools/azd/prompts" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_plan_init.go b/cli/azd/internal/agent/tools/azd/azd_plan_init.go similarity index 88% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_plan_init.go rename to cli/azd/internal/agent/tools/azd/azd_plan_init.go index c45c5d21d12..3bddc9dbb31 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_plan_init.go +++ b/cli/azd/internal/agent/tools/azd/azd_plan_init.go @@ -3,7 +3,7 @@ package azd import ( "context" - 
"azd.ai.start/internal/agent/tools/azd/prompts" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_project_validation.go b/cli/azd/internal/agent/tools/azd/azd_project_validation.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_project_validation.go rename to cli/azd/internal/agent/tools/azd/azd_project_validation.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_yaml_schema.go b/cli/azd/internal/agent/tools/azd/azd_yaml_schema.go similarity index 88% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_yaml_schema.go rename to cli/azd/internal/agent/tools/azd/azd_yaml_schema.go index 9ed0d3806cc..d9577f92af8 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/azd_yaml_schema.go +++ b/cli/azd/internal/agent/tools/azd/azd_yaml_schema.go @@ -3,7 +3,7 @@ package azd import ( "context" - "azd.ai.start/internal/agent/tools/azd/prompts" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/loader.go b/cli/azd/internal/agent/tools/azd/loader.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/loader.go rename to cli/azd/internal/agent/tools/azd/loader.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/README.md b/cli/azd/internal/agent/tools/azd/prompts/README.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/README.md rename to cli/azd/internal/agent/tools/azd/prompts/README.md diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_architecture_planning.md b/cli/azd/internal/agent/tools/azd/prompts/azd_architecture_planning.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_architecture_planning.md rename to cli/azd/internal/agent/tools/azd/prompts/azd_architecture_planning.md diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md rename to cli/azd/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_discovery_analysis.md b/cli/azd/internal/agent/tools/azd/prompts/azd_discovery_analysis.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_discovery_analysis.md rename to cli/azd/internal/agent/tools/azd/prompts/azd_discovery_analysis.md diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_docker_generation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_docker_generation.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_docker_generation.md rename to cli/azd/internal/agent/tools/azd/prompts/azd_docker_generation.md diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md b/cli/azd/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md similarity index 100% rename from 
cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md rename to cli/azd/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md rename to cli/azd/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_plan_init.md b/cli/azd/internal/agent/tools/azd/prompts/azd_plan_init.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_plan_init.md rename to cli/azd/internal/agent/tools/azd/prompts/azd_plan_init.md diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_project_validation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_project_validation.md rename to cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_yaml_schema.md b/cli/azd/internal/agent/tools/azd/prompts/azd_yaml_schema.md similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azd_yaml_schema.md rename to cli/azd/internal/agent/tools/azd/prompts/azd_yaml_schema.md diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azure.yaml.json b/cli/azd/internal/agent/tools/azd/prompts/azure.yaml.json similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/azure.yaml.json rename to cli/azd/internal/agent/tools/azd/prompts/azure.yaml.json diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/prompts.go b/cli/azd/internal/agent/tools/azd/prompts/prompts.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/azd/prompts/prompts.go rename to cli/azd/internal/agent/tools/azd/prompts/prompts.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/common/types.go b/cli/azd/internal/agent/tools/common/types.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/common/types.go rename to cli/azd/internal/agent/tools/common/types.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/command_executor.go b/cli/azd/internal/agent/tools/dev/command_executor.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/command_executor.go rename to cli/azd/internal/agent/tools/dev/command_executor.go index 483b4281719..6f0fc33bdaa 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/command_executor.go +++ b/cli/azd/internal/agent/tools/dev/command_executor.go @@ -9,7 +9,7 @@ import ( "runtime" "strings" - "azd.ai.start/internal/agent/tools/common" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" "github.com/tmc/langchaingo/callbacks" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/loader.go b/cli/azd/internal/agent/tools/dev/loader.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/dev/loader.go rename to cli/azd/internal/agent/tools/dev/loader.go diff 
--git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/http/http_fetcher.go b/cli/azd/internal/agent/tools/http/http_fetcher.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/http/http_fetcher.go rename to cli/azd/internal/agent/tools/http/http_fetcher.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/http/loader.go b/cli/azd/internal/agent/tools/http/loader.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/http/loader.go rename to cli/azd/internal/agent/tools/http/loader.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/change_directory.go b/cli/azd/internal/agent/tools/io/change_directory.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/change_directory.go rename to cli/azd/internal/agent/tools/io/change_directory.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/copy_file.go b/cli/azd/internal/agent/tools/io/copy_file.go similarity index 98% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/copy_file.go rename to cli/azd/internal/agent/tools/io/copy_file.go index 7abd91de036..64f91d50b72 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/copy_file.go +++ b/cli/azd/internal/agent/tools/io/copy_file.go @@ -8,7 +8,7 @@ import ( "os" "strings" - "azd.ai.start/internal/agent/tools/common" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" "github.com/tmc/langchaingo/callbacks" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/create_directory.go b/cli/azd/internal/agent/tools/io/create_directory.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/create_directory.go rename to cli/azd/internal/agent/tools/io/create_directory.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/current_directory.go b/cli/azd/internal/agent/tools/io/current_directory.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/current_directory.go rename to cli/azd/internal/agent/tools/io/current_directory.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/delete_directory.go b/cli/azd/internal/agent/tools/io/delete_directory.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/delete_directory.go rename to cli/azd/internal/agent/tools/io/delete_directory.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/delete_file.go b/cli/azd/internal/agent/tools/io/delete_file.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/delete_file.go rename to cli/azd/internal/agent/tools/io/delete_file.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/directory_list.go b/cli/azd/internal/agent/tools/io/directory_list.go similarity index 98% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/directory_list.go rename to cli/azd/internal/agent/tools/io/directory_list.go index 40bd8f80fb1..581bd593da1 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/directory_list.go +++ b/cli/azd/internal/agent/tools/io/directory_list.go @@ -8,7 +8,7 @@ import ( "path/filepath" "strings" - "azd.ai.start/internal/agent/tools/common" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" "github.com/tmc/langchaingo/callbacks" ) diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/file_info.go 
b/cli/azd/internal/agent/tools/io/file_info.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/file_info.go rename to cli/azd/internal/agent/tools/io/file_info.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/file_search.go b/cli/azd/internal/agent/tools/io/file_search.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/file_search.go rename to cli/azd/internal/agent/tools/io/file_search.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/loader.go b/cli/azd/internal/agent/tools/io/loader.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/loader.go rename to cli/azd/internal/agent/tools/io/loader.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/move_file.go b/cli/azd/internal/agent/tools/io/move_file.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/move_file.go rename to cli/azd/internal/agent/tools/io/move_file.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/read_file.go b/cli/azd/internal/agent/tools/io/read_file.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/read_file.go rename to cli/azd/internal/agent/tools/io/read_file.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/write_file.go b/cli/azd/internal/agent/tools/io/write_file.go similarity index 99% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/io/write_file.go rename to cli/azd/internal/agent/tools/io/write_file.go index e367be33e00..8c20367af38 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/io/write_file.go +++ b/cli/azd/internal/agent/tools/io/write_file.go @@ -9,7 +9,7 @@ import ( "strings" "time" - "azd.ai.start/internal/agent/tools/common" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" ) // WriteFileTool implements a comprehensive file writing tool that handles all scenarios diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/loader.go b/cli/azd/internal/agent/tools/loader.go similarity index 84% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/loader.go rename to cli/azd/internal/agent/tools/loader.go index 8aee4593a0c..e4a10ad1f53 100644 --- a/cli/azd/extensions/azd.ai.start/internal/agent/tools/loader.go +++ b/cli/azd/internal/agent/tools/loader.go @@ -4,9 +4,9 @@ import ( "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" - "azd.ai.start/internal/agent/tools/azd" - "azd.ai.start/internal/agent/tools/dev" - "azd.ai.start/internal/agent/tools/io" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/dev" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/io" ) // ToolLoader provides an interface for loading tools from different categories diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/loader.go b/cli/azd/internal/agent/tools/mcp/loader.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/loader.go rename to cli/azd/internal/agent/tools/mcp/loader.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/mcp.json b/cli/azd/internal/agent/tools/mcp/mcp.json similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/mcp.json rename to cli/azd/internal/agent/tools/mcp/mcp.json diff --git 
a/cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/sampling_handler.go b/cli/azd/internal/agent/tools/mcp/sampling_handler.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/mcp/sampling_handler.go rename to cli/azd/internal/agent/tools/mcp/sampling_handler.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/weather/loader.go b/cli/azd/internal/agent/tools/weather/loader.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/weather/loader.go rename to cli/azd/internal/agent/tools/weather/loader.go diff --git a/cli/azd/extensions/azd.ai.start/internal/agent/tools/weather/weather.go b/cli/azd/internal/agent/tools/weather/weather.go similarity index 100% rename from cli/azd/extensions/azd.ai.start/internal/agent/tools/weather/weather.go rename to cli/azd/internal/agent/tools/weather/weather.go diff --git a/cli/azd/pkg/llm/azure_openai.go b/cli/azd/pkg/llm/azure_openai.go index 99b6a6e243b..ea02134c848 100644 --- a/cli/azd/pkg/llm/azure_openai.go +++ b/cli/azd/pkg/llm/azure_openai.go @@ -5,70 +5,67 @@ package llm import ( "fmt" - "maps" - "os" - "github.com/azure/azure-dev/cli/azd/pkg/output/ux" + "github.com/azure/azure-dev/cli/azd/pkg/config" "github.com/tmc/langchaingo/llms/openai" ) -const ( - modelEnvVar = "AZD_AZURE_OPENAI_MODEL" - versionEnvVar = "AZD_AZURE_OPENAI_VERSION" - urlEnvVar = "AZD_AZURE_OPENAI_URL" - keyEnvVar = "OPENAI_API_KEY" -) +type AzureOpenAiModelConfig struct { + Model string `json:"model"` + Version string `json:"version"` + Endpoint string `json:"endpoint"` + Token string `json:"token"` + ApiVersion string `json:"apiVersion"` +} -type requiredEnvVar struct { - name string - value string - isDefined bool +type AzureOpenAiModelProvider struct { + userConfigManager config.UserConfigManager } -var requiredEnvVars = map[string]requiredEnvVar{ - modelEnvVar: {name: modelEnvVar}, - versionEnvVar: {name: versionEnvVar}, - urlEnvVar: {name: urlEnvVar}, - keyEnvVar: {name: keyEnvVar}, +func NewAzureOpenAiModelProvider(userConfigManager config.UserConfigManager) ModelProvider { + return &AzureOpenAiModelProvider{ + userConfigManager: userConfigManager, + } } -func loadAzureOpenAi() (InfoResponse, error) { +func (p *AzureOpenAiModelProvider) CreateModelContainer(opts ...ModelOption) (*ModelContainer, error) { + userConfig, err := p.userConfigManager.Load() + if err != nil { + return nil, err + } - envVars := maps.Clone(requiredEnvVars) - missingEnvVars := []string{} - for name, envVar := range envVars { - value, isDefined := os.LookupEnv(envVar.name) - if !isDefined { - missingEnvVars = append(missingEnvVars, envVar.name) - continue - } + var modelConfig AzureOpenAiModelConfig + if ok, err := userConfig.GetSection("ai.agent.model.azure", &modelConfig); !ok || err != nil { + return nil, err + } - envVar.value = value - envVar.isDefined = true - envVars[name] = envVar + modelContainer := &ModelContainer{ + Type: LlmTypeOpenAIAzure, + IsLocal: false, + Metadata: ModelMetadata{ + Name: modelConfig.Model, + Version: modelConfig.Version, + }, + Url: modelConfig.Endpoint, } - if len(missingEnvVars) > 0 { - return InfoResponse{}, fmt.Errorf( - "missing required environment variable(s): %s", ux.ListAsText(missingEnvVars)) + + for _, opt := range opts { + opt(modelContainer) } - _, err := openai.New( - openai.WithModel(envVars[modelEnvVar].value), + model, err := openai.New( + openai.WithModel(modelConfig.Model), openai.WithAPIType(openai.APITypeAzure), - 
openai.WithAPIVersion(envVars[versionEnvVar].value), - openai.WithBaseURL(envVars[urlEnvVar].value), + openai.WithAPIVersion(modelConfig.ApiVersion), + openai.WithBaseURL(modelConfig.Endpoint), + openai.WithToken(modelConfig.Token), ) if err != nil { - return InfoResponse{}, fmt.Errorf("failed to create LLM: %w", err) + return nil, fmt.Errorf("failed to create LLM: %w", err) } - return InfoResponse{ - Type: LlmTypeOpenAIAzure, - IsLocal: false, - Model: LlmModel{ - Name: envVars[modelEnvVar].value, - Version: envVars[versionEnvVar].value, - }, - Url: envVars[urlEnvVar].value, - }, nil + model.CallbacksHandler = modelContainer.logger + modelContainer.Model = model + + return modelContainer, nil } diff --git a/cli/azd/pkg/llm/client.go b/cli/azd/pkg/llm/client.go deleted file mode 100644 index b11f392f1c7..00000000000 --- a/cli/azd/pkg/llm/client.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package llm - -import ( - "github.com/tmc/langchaingo/llms" -) - -// Client is the AZD representation of a Language Model (LLM) client. -type Client struct { - llms.Model -} diff --git a/cli/azd/pkg/llm/manager.go b/cli/azd/pkg/llm/manager.go index e3ef6abcc75..8c22732c107 100644 --- a/cli/azd/pkg/llm/manager.go +++ b/cli/azd/pkg/llm/manager.go @@ -5,14 +5,11 @@ package llm import ( "fmt" - "io" - "log" - "os" - "strings" "github.com/azure/azure-dev/cli/azd/pkg/alpha" - "github.com/tmc/langchaingo/llms/ollama" - "github.com/tmc/langchaingo/llms/openai" + "github.com/azure/azure-dev/cli/azd/pkg/config" + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/llms" ) var featureLlm = alpha.MustFeatureKey("llm") @@ -30,16 +27,22 @@ func IsLlmFeatureEnabled(alphaManager *alpha.FeatureManager) error { func NewManager( alphaManager *alpha.FeatureManager, -) Manager { - return Manager{ - alphaManager: alphaManager, + userConfigManager config.UserConfigManager, + modelFactory *ModelFactory, +) *Manager { + return &Manager{ + alphaManager: alphaManager, + userConfigManager: userConfigManager, + ModelFactory: modelFactory, } } // Manager provides functionality to manage Language Model (LLM) features and capabilities. // It encapsulates the alpha feature manager to control access to experimental LLM features. type Manager struct { - alphaManager *alpha.FeatureManager + alphaManager *alpha.FeatureManager + userConfigManager config.UserConfigManager + ModelFactory *ModelFactory } type LlmType string @@ -60,22 +63,32 @@ const ( LlmTypeOllama LlmType = "ollama" ) -// LlmModel represents a language model with its name and version information. +// ModelMetadata represents a language model with its name and version information. // Name specifies the identifier of the language model. // Version indicates the specific version or release of the model. -type LlmModel struct { +type ModelMetadata struct { Name string Version string } -// InfoResponse represents the configuration information of a Language Learning Model (LLM). +// ModelContainer represents the configuration information of a Language Learning Model (LLM). // It contains details about the model type, deployment location, model specification, // and endpoint URL for remote models. 
-type InfoResponse struct {
-	Type    LlmType
-	IsLocal bool
-	Model   LlmModel
-	Url     string // For remote models, this is the API endpoint URL
+type ModelContainer struct {
+	Type     LlmType
+	IsLocal  bool
+	Metadata ModelMetadata
+	Model    llms.Model
+	Url      string // For remote models, this is the API endpoint URL
+	logger   callbacks.Handler
+}
+
+type ModelOption func(modelContainer *ModelContainer)
+
+func WithLogger(logger callbacks.Handler) ModelOption {
+	return func(modelContainer *ModelContainer) {
+		modelContainer.logger = logger
+	}
 }
 
 // NotEnabledError represents an error that occurs when LLM functionality is not enabled.
@@ -99,117 +112,27 @@ func (e InvalidLlmConfiguration) Error() string {
 	return "Unable to determine LLM configuration. Please check your environment variables or configuration."
 }
 
-// Info obtains configuration information about the LLM (Large Language Model) feature.
-// If the LLM feature is not enabled through the alpha manager, it returns a NotEnabledError.
-// The function writes output to the provided stdout writer.
-// Returns an InfoResponse containing the LLM configuration and any error that occurred.
-func (m Manager) Info(stdout io.Writer) (InfoResponse, error) {
-	if !m.alphaManager.IsEnabled(featureLlm) {
-		return InfoResponse{}, NotEnabledError{}
+// GetDefaultModel returns the configured model from the global azd user configuration
+func (m Manager) GetDefaultModel(opts ...ModelOption) (*ModelContainer, error) {
+	userConfig, err := m.userConfigManager.Load()
+	if err != nil {
+		return nil, err
 	}
-	return LlmConfig()
-}
-
-var availableLlmTypes = []LlmType{
-	LlmTypeOpenAIAzure,
-	LlmTypeOllama,
-}
-
-// LlmConfig attempts to load and validate LLM (Language Learning Model) configuration.
-// It first determines the default LLM type, which can be overridden by the AZD_LLM_TYPE
-// environment variable. It then tries to load configurations for available LLM types
-// in order, starting with the default type.
-// -// The function supports two LLM types: -// - LlmTypeOpenAIAzure (default) -// - LlmTypeOllama -// -// Returns: -// - InfoResponse: Contains the successfully loaded LLM configuration -// - error: Returns an error if no valid LLM configuration could be loaded or if -// an unknown LLM type is specified in AZD_LLM_TYPE -func LlmConfig() (InfoResponse, error) { - defaultLLm := LlmTypeOpenAIAzure - // Default LLM can be overridden by environment variable AZD_LLM_TYPE - if value, isDefined := os.LookupEnv("AZD_LLM_TYPE"); isDefined { - switch strings.ToLower(value) { - case string(LlmTypeOllama): - defaultLLm = LlmTypeOllama - case string(LlmTypeOpenAIAzure): - defaultLLm = LlmTypeOpenAIAzure - default: - return InfoResponse{}, fmt.Errorf("unknown LLM type: %s", value) - } + defaultModelType, ok := userConfig.GetString("ai.agent.model.type") + if !ok { + return nil, fmt.Errorf("Default model type has not been set") } - // keep default on the top and add the rest in the order they are defined - configOrder := []LlmType{defaultLLm} - for _, llmType := range availableLlmTypes { - if llmType != defaultLLm { - configOrder = append(configOrder, llmType) - } - } - - for _, llmType := range configOrder { - log.Println("Checking LLM configuration for: ", llmType) - info, err := loadLlmConfig(llmType) - if err != nil { - log.Printf("Failed to load LLM configuration for %s: %v\n", llmType, err) - continue // Try the next LLM type - } - return info, nil - } - - return InfoResponse{}, InvalidLlmConfiguration{} + return m.ModelFactory.CreateModelContainer(LlmType(defaultModelType), opts...) } -// loadLlmConfig loads the configuration for the specified LLM type. -// It returns an InfoResponse containing the LLM configuration details and any error encountered. -// -// Parameters: -// - llmType: The type of LLM to load configuration for (LlmTypeOllama or LlmTypeOpenAIAzure) -// -// Returns: -// - InfoResponse: Configuration details for the specified LLM -// - error: InvalidLlmConfiguration error if an unsupported LLM type is provided -func loadLlmConfig(llmType LlmType) (InfoResponse, error) { - switch llmType { - case LlmTypeOllama: - return loadOllama() - case LlmTypeOpenAIAzure: - return loadAzureOpenAi() - default: - return InfoResponse{}, InvalidLlmConfiguration{} - } +// GetModel returns the configured model from the global azd user configuration +func (m Manager) GetModel(modelType LlmType, opts ...ModelOption) (*ModelContainer, error) { + return m.ModelFactory.CreateModelContainer(modelType, opts...) } -// LlmClient creates and returns a new LLM (Language Learning Model) client based on the provided InfoResponse. -// It supports different types of LLM services including Ollama and Azure OpenAI. 
-// -// Parameters: -// - info: InfoResponse containing the configuration details for the LLM service -// -// Returns: -// - Client: A configured LLM client wrapper -// - error: An error if the client creation fails or if the LLM type is unsupported -func LlmClient(info InfoResponse) (Client, error) { - switch info.Type { - case LlmTypeOllama: - c, err := ollama.New(ollama.WithModel(info.Model.Name)) - return Client{ - Model: c, - }, err - case LlmTypeOpenAIAzure: - c, err := openai.New( - openai.WithModel(info.Model.Name), - openai.WithAPIType(openai.APITypeAzure), - openai.WithAPIVersion(info.Model.Version), - openai.WithBaseURL(info.Url), - ) - return Client{ - Model: c, - }, err - default: - return Client{}, fmt.Errorf("unsupported LLM type: %s", info.Type) - } +var availableLlmTypes = []LlmType{ + LlmTypeOpenAIAzure, + LlmTypeOllama, } diff --git a/cli/azd/pkg/llm/manager_test.go b/cli/azd/pkg/llm/manager_test.go index 46d0d6e3874..4c87ec516c9 100644 --- a/cli/azd/pkg/llm/manager_test.go +++ b/cli/azd/pkg/llm/manager_test.go @@ -5,128 +5,7 @@ package llm import ( "testing" - - "github.com/stretchr/testify/require" ) func TestLlmConfig(t *testing.T) { - tests := []struct { - name string - envVars map[string]string - expectedType LlmType - expectErr bool - }{ - { - name: "Default to local Ollama", - envVars: map[string]string{}, - expectedType: LlmTypeOllama, - expectErr: false, - }, - { - name: "Use Ollama when AZD_LLM_TYPE=ollama", - envVars: map[string]string{ - "AZD_LLM_TYPE": "ollama", - }, - expectedType: LlmTypeOllama, - expectErr: false, - }, - { - name: "Use Azure OpenAI when AZD_LLM_TYPE=azure", - envVars: map[string]string{ - "AZD_LLM_TYPE": "azure", - keyEnvVar: "test-key", - urlEnvVar: "https://test.openai.azure.com/", - versionEnvVar: "2023-05-15", - modelEnvVar: "gpt-35-turbo", - }, - expectedType: LlmTypeOpenAIAzure, - expectErr: false, - }, - { - name: "Error on invalid LLM type", - envVars: map[string]string{ - "AZD_LLM_TYPE": "invalid", - }, - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(innerTest *testing.T) { - - for key, value := range tt.envVars { - t.Setenv(key, value) - } - - info, err := LlmConfig() - if tt.expectErr { - require.Error(innerTest, err) - return - } - - require.NoError(innerTest, err) - require.Equal(innerTest, tt.expectedType, info.Type, "Expected LLM type does not match") - }) - } -} - -func TestLlmClient(t *testing.T) { - tests := []struct { - name string - info InfoResponse - expectErr bool - env map[string]string - }{ - { - name: "Create Ollama client", - info: InfoResponse{ - Type: LlmTypeOllama, - Model: LlmModel{ - Name: "llama2", - }, - }, - expectErr: false, - }, - { - name: "Create Azure OpenAI client", - info: InfoResponse{ - Type: LlmTypeOpenAIAzure, - Model: LlmModel{ - Name: "gpt-35-turbo", - Version: "2023-05-15", - }, - Url: "https://test.openai.azure.com/", - }, - expectErr: false, - env: map[string]string{ - keyEnvVar: "test-key", - }, - }, - { - name: "Error on invalid LLM type", - info: InfoResponse{ - Type: "invalid", - }, - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - for key, value := range tt.env { - t.Setenv(key, value) - } - - client, err := LlmClient(tt.info) - if tt.expectErr { - require.Error(t, err) - require.Equal(t, Client{}, client, "Expected empty client on error") - require.Nil(t, client.Model, "Expected nil Model on error") - return - } - require.NoError(t, err) - require.NotNil(t, client) - }) - } } diff --git 
a/cli/azd/pkg/llm/model_factory.go b/cli/azd/pkg/llm/model_factory.go
new file mode 100644
index 00000000000..d228465bd52
--- /dev/null
+++ b/cli/azd/pkg/llm/model_factory.go
@@ -0,0 +1,28 @@
+package llm
+
+import (
+	"github.com/azure/azure-dev/cli/azd/pkg/ioc"
+)
+
+type ModelFactory struct {
+	serviceLocator ioc.ServiceLocator
+}
+
+func NewModelFactory(serviceLocator ioc.ServiceLocator) *ModelFactory {
+	return &ModelFactory{
+		serviceLocator: serviceLocator,
+	}
+}
+
+func (f *ModelFactory) CreateModelContainer(modelType LlmType, opts ...ModelOption) (*ModelContainer, error) {
+	var modelProvider ModelProvider
+	if err := f.serviceLocator.ResolveNamed(string(modelType), &modelProvider); err != nil {
+		return nil, err
+	}
+
+	return modelProvider.CreateModelContainer(opts...)
+}
+
+type ModelProvider interface {
+	CreateModelContainer(opts ...ModelOption) (*ModelContainer, error)
+}
diff --git a/cli/azd/pkg/llm/ollama.go b/cli/azd/pkg/llm/ollama.go
index 46f7187bd15..3c58cd970b8 100644
--- a/cli/azd/pkg/llm/ollama.go
+++ b/cli/azd/pkg/llm/ollama.go
@@ -4,33 +4,64 @@
 package llm
 
 import (
-	"log"
-	"os"
-
+	"github.com/azure/azure-dev/cli/azd/pkg/config"
 	"github.com/tmc/langchaingo/llms/ollama"
 )
 
-func loadOllama() (InfoResponse, error) {
-	defaultLlamaVersion := "llama3"
+type OllamaModelConfig struct {
+	Model string `json:"model"`
+}
 
-	if value, isDefined := os.LookupEnv("AZD_OLLAMA_MODEL"); isDefined {
-		log.Printf("Found AZD_OLLAMA_MODEL with %s. Using this model", value)
-		defaultLlamaVersion = value
+type OllamaModelProvider struct {
+	userConfigManager config.UserConfigManager
+}
+
+func NewOllamaModelProvider(userConfigManager config.UserConfigManager) ModelProvider {
+	return &OllamaModelProvider{
+		userConfigManager: userConfigManager,
 	}
+}
 
-	_, err := ollama.New(
-		ollama.WithModel(defaultLlamaVersion),
-	)
+func (p *OllamaModelProvider) CreateModelContainer(opts ...ModelOption) (*ModelContainer, error) {
+	userConfig, err := p.userConfigManager.Load()
+	if err != nil {
+		return nil, err
+	}
+
+	defaultLlamaVersion := "llama3"
+
+	var modelConfig OllamaModelConfig
+	ok, err := userConfig.GetSection("ai.agent.model.ollama", &modelConfig)
 	if err != nil {
-		return InfoResponse{}, err
+		return nil, err
 	}
 
-	return InfoResponse{
+	if ok {
+		defaultLlamaVersion = modelConfig.Model
+	}
+
+	modelContainer := &ModelContainer{
 		Type:    LlmTypeOllama,
 		IsLocal: true,
-		Model: LlmModel{
+		Metadata: ModelMetadata{
 			Name:    defaultLlamaVersion,
 			Version: "latest",
 		},
-	}, nil
+	}
+
+	for _, opt := range opts {
+		opt(modelContainer)
+	}
+
+	model, err := ollama.New(
+		ollama.WithModel(defaultLlamaVersion),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	model.CallbacksHandler = modelContainer.logger
+	modelContainer.Model = model
+
+	return modelContainer, nil
 }

From 70170afe7456568b238196eac91815669f72638c Mon Sep 17 00:00:00 2001
From: Wallace Breza
Date: Fri, 1 Aug 2025 17:51:04 -0700
Subject: [PATCH 050/116] Updates io tools to remove callback handler

---
 cli/azd/cmd/init.go                           |   8 +-
 cli/azd/internal/agent/agent.go               |   4 +-
 cli/azd/internal/agent/tools/azd/loader.go    |  11 +-
 .../azd/prompts/azd_project_validation.md     | 126 ++++---
 cli/azd/internal/agent/tools/dev/loader.go    |  13 +-
 .../agent/tools/io/change_directory.go        |  88 +++--
 cli/azd/internal/agent/tools/io/copy_file.go  | 117 ++-----
 .../agent/tools/io/create_directory.go        |  78 +++--
 .../agent/tools/io/current_directory.go       |  53 ++-
 .../agent/tools/io/delete_directory.go        |  87 +++--
 .../internal/agent/tools/io/delete_file.go    |  82 +++---
 .../internal/agent/tools/io/directory_list.go | 140 +++-----
 cli/azd/internal/agent/tools/io/file_info.go  |  61 ++--
 .../internal/agent/tools/io/file_search.go    | 177 ++++------
 cli/azd/internal/agent/tools/io/loader.go     |  33 +-
 cli/azd/internal/agent/tools/io/move_file.go  |  96 ++---
 cli/azd/internal/agent/tools/io/read_file.go  | 330 ++++++------------
 cli/azd/internal/agent/tools/loader.go        |  13 +-
 cli/azd/internal/agent/tools/mcp/loader.go    |   5 +-
 .../internal/agent/tools/weather/loader.go    |  13 +-
 cli/azd/pkg/llm/azure_openai.go               |   6 +-
 21 files changed, 686 insertions(+), 855 deletions(-)

diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go
index 3395047e7c1..f51d9d03da2 100644
--- a/cli/azd/cmd/init.go
+++ b/cli/azd/cmd/init.go
@@ -371,13 +371,13 @@ func (i *initAction) Run(ctx context.Context) (*actions.ActionResult, error) {
 }
 
 func (i *initAction) initAppWithCopilot(ctx context.Context) error {
-	defaultModelContainer, err := i.llmManager.GetDefaultModel()
+	actionLogger := logging.NewActionLogger()
+	defaultModelContainer, err := i.llmManager.GetDefaultModel(llm.WithLogger(actionLogger))
 	if err != nil {
 		return err
 	}
 
-	actionLogger := logging.NewActionLogger()
-	samplingModelContainer, err := i.llmManager.GetDefaultModel(llm.WithLogger(actionLogger))
+	samplingModelContainer, err := i.llmManager.GetDefaultModel()
 
 	azdAgent, err := agent.NewAzdAiAgent(
 		defaultModelContainer.Model,
@@ -392,6 +392,8 @@ func (i *initAction) initAppWithCopilot(ctx context.Context) error {
 Read and review the 'azd-arch-plan.md' file if it exists to get current status
 Run the 'azd_plan_init' tool and follow the steps
 Finally - run the 'azd_project_validation' tool to ensure the process is fully completed
+Be very short, terse and to the point during planning and action execution.
+Provide verbose output for the final summary when you are complete.
` if err := azdAgent.RunConversationLoop(ctx, []string{initPrompt}); err != nil { diff --git a/cli/azd/internal/agent/agent.go b/cli/azd/internal/agent/agent.go index 4e318314718..bcf30a609c6 100644 --- a/cli/azd/internal/agent/agent.go +++ b/cli/azd/internal/agent/agent.go @@ -83,8 +83,8 @@ func NewAzdAiAgent(llm llms.Model, opts ...AgentOption) (*AzdAiAgent, error) { ) toolLoaders := []localtools.ToolLoader{ - localtools.NewLocalToolsLoader(actionLogger), - mcptools.NewMcpToolsLoader(actionLogger, samplingHandler), + localtools.NewLocalToolsLoader(), + mcptools.NewMcpToolsLoader(samplingHandler), } allTools := []tools.Tool{} diff --git a/cli/azd/internal/agent/tools/azd/loader.go b/cli/azd/internal/agent/tools/azd/loader.go index b4ac9a4de31..6d81740a6ac 100644 --- a/cli/azd/internal/agent/tools/azd/loader.go +++ b/cli/azd/internal/agent/tools/azd/loader.go @@ -1,19 +1,14 @@ package azd import ( - "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" ) // AzdToolsLoader loads AZD-related tools -type AzdToolsLoader struct { - callbackHandler callbacks.Handler -} +type AzdToolsLoader struct{} -func NewAzdToolsLoader(callbackHandler callbacks.Handler) *AzdToolsLoader { - return &AzdToolsLoader{ - callbackHandler: callbackHandler, - } +func NewAzdToolsLoader() *AzdToolsLoader { + return &AzdToolsLoader{} } func (l *AzdToolsLoader) LoadTools() ([]tools.Tool, error) { diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md index e8e06172fa9..5197edcacbb 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md +++ b/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md @@ -1,35 +1,36 @@ # AZD Project Validation Tool -Validates an AZD project by running comprehensive checks on all components including azure.yaml schema validation, Bicep template validation, environment setup, packaging, and deployment preview. +This tool validates an AZD project by programmatically running comprehensive checks on all components including azure.yaml schema validation, Bicep template validation, environment setup, packaging, and deployment preview. ## Purpose -This tool performs end-to-end validation of an AZD project to ensure all components are properly configured and the project is ready for deployment. It centralizes all validation logic to provide a single source of truth for project readiness. +This tool performs automated end-to-end validation of an AZD project to ensure all components are properly configured and the project is ready for deployment. The LLM should execute all validation steps directly using available tools and terminal commands, not just provide instructions to the user. ## Validation Workflow +The LLM must execute these validation steps programmatically using terminal commands and available tools: + ### 1. Azure.yaml Schema Validation -**REQUIRED ACTION:** -Use the `azd_yaml_schema` tool to validate the azure.yaml file against the official schema. +**EXECUTE:** Use the `azd_yaml_schema` tool to validate the azure.yaml file against the official schema. 
-**Validation Steps:** +**Steps to Execute:** -- Check if `azure.yaml` exists in current directory -- Validate schema compliance using `azd_yaml_schema` tool -- Report any schema violations or missing required fields -- Verify service definitions and configurations +- Check if `azure.yaml` exists in current directory using file system tools +- Run `azd_yaml_schema` tool to validate schema compliance +- Parse and report any schema violations or missing required fields +- Verify service definitions and configurations are correct ### 2. Bicep Template Validation -**REQUIRED ACTIONS:** +**EXECUTE:** Run the following commands to validate Bicep templates: -1. **Find Bicep Files:** Scan `./infra` directory for `.bicep` files -2. **Compile Templates:** Run `az bicep build --file --stdout` for each template +1. **Find Bicep Files:** Use file search to scan `./infra` directory for `.bicep` files +2. **Compile Templates:** Execute `az bicep build --file --stdout` for each template 3. **Validate Syntax:** Ensure all templates compile without errors 4. **Check Dependencies:** Verify module references and parameter passing -**Commands to Run:** +**Commands to Execute:** ```powershell # Compile main template @@ -41,15 +42,15 @@ az deployment sub validate --template-file ./infra/main.bicep --parameters ./inf ### 3. AZD Environment Validation -**REQUIRED ACTIONS:** +**EXECUTE:** Run these commands to validate AZD environment setup: -1. **Check Environment Exists:** Run `azd env list` to see available environments +1. **Check Environment Exists:** Execute `azd env list` to see available environments 2. **Create Environment if Missing:** - - If no environments exist, run `azd env new ` + - If no environments exist, execute `azd env new ` - Use current directory name as environment name (sanitized) 3. **Verify Environment Selection:** Ensure an environment is currently selected -**Commands to Run:** +**Commands to Execute:** ```powershell # List existing environments @@ -64,17 +65,18 @@ azd env select ### 4. Package Validation -**REQUIRED ACTION:** -Run `azd package` to validate all services can be packaged successfully. +**EXECUTE:** Run `azd package` to validate all services can be packaged successfully. -**Validation Steps:** +**Steps to Execute:** +- Execute `azd package` command +- Monitor output for errors or warnings - Verify all service source paths are valid - Check Docker builds complete successfully (for containerized services) - Ensure all build artifacts are created - Validate package manifests -**Command to Run:** +**Command to Execute:** ```powershell azd package @@ -82,18 +84,19 @@ azd package ### 5. Deployment Preview Validation -**REQUIRED ACTION:** -Run `azd provision --preview` to validate infrastructure deployment without actually creating resources. +**EXECUTE:** Run `azd provision --preview` to validate infrastructure deployment without actually creating resources. 
-**Validation Steps:** +**Steps to Execute:** +- Execute `azd provision --preview` command +- Monitor output for errors or warnings - Verify Azure authentication is working - Check resource group creation plan - Validate all Bicep modules deploy correctly - Ensure parameter values are properly resolved - Confirm no deployment conflicts -**Command to Run:** +**Command to Execute:** ```powershell azd provision --preview @@ -101,81 +104,88 @@ azd provision --preview ## Success Criteria -The project validation is successful when: +The LLM must verify that project validation is successful when all of the following are true: -- [ ] `azure.yaml` passes schema validation -- [ ] All Bicep templates compile without errors or warnings -- [ ] AZD environment exists and is properly configured -- [ ] `azd package` completes without errors or warnings +- [ ] `azure.yaml` passes schema validation (executed via `azd_yaml_schema` tool) +- [ ] All Bicep templates compile without errors or warnings (verified via `az bicep build`) +- [ ] AZD environment exists and is properly configured (verified via `azd env list`) +- [ ] `azd package` completes without errors or warnings - [ ] `azd provision --preview` completes without errors or warnings - [ ] All service configurations are valid - [ ] No missing dependencies or configuration issues +The LLM should report the status of each validation step and provide a summary of the overall validation results. + ## Error Handling -### Common Issues and Solutions +The LLM must handle common validation errors by executing appropriate remediation steps: + +### Common Issues and Automated Solutions **Azure.yaml Schema Errors:** -- Use `azd_yaml_schema` tool to get correct schema format -- Check service names match directory structure -- Verify all required fields are present +- Execute `azd_yaml_schema` tool to get correct schema format +- Check service names match directory structure using file system tools +- Verify all required fields are present and report missing fields **Bicep Compilation Errors:** -- Check module paths and parameter names +- Parse compilation error output and identify specific issues +- Check module paths and parameter names programmatically - Verify resource naming conventions follow Azure requirements - Ensure all required parameters have values **Environment Issues:** -- Run `azd auth login` if authentication fails -- Check Azure subscription access and permissions +- Execute `azd auth login` if authentication fails +- Check Azure subscription access and permissions via Azure CLI - Verify location parameter is valid Azure region **Package Errors:** -- Check service source paths in azure.yaml -- Verify Docker builds work locally for containerized services +- Check service source paths in azure.yaml programmatically +- Verify Docker builds work locally for containerized services by executing build commands - Ensure all build dependencies are available **Provision Preview Errors:** -- Verify Azure subscription has sufficient permissions +- Verify Azure subscription has sufficient permissions via Azure CLI - Check resource quotas and limits - Ensure resource names are globally unique where required +The LLM should attempt to resolve issues automatically where possible and provide clear error reporting for issues that require manual intervention. 
+ ## Update Documentation -**REQUIRED ACTIONS:** +**EXECUTE:** The LLM must update `azd-arch-plan.md` with validation results by: -Update `azd-arch-plan.md` with: +- Writing validation results for each component to the documentation +- Recording any issues found and resolutions applied +- Documenting environment configuration details +- Including deployment preview summary +- Updating project readiness status -- Validation results for each component -- Any issues found and resolutions applied -- Environment configuration details -- Deployment preview summary -- Project readiness status +Use file editing tools to update the documentation with the validation results. ## Next Steps -After successful validation: +The LLM should inform the user that after successful validation, they can proceed with: -1. **Deploy Infrastructure:** Run `azd provision` to create Azure resources -2. **Deploy Applications:** Run `azd deploy` to deploy services -3. **Complete Deployment:** Run `azd up` to provision and deploy in one step +1. **Deploy Infrastructure:** Execute `azd provision` to create Azure resources +2. **Deploy Applications:** Execute `azd deploy` to deploy services +3. **Complete Deployment:** Execute `azd up` to provision and deploy in one step 4. **Monitor Deployment:** Use `azd monitor` to check application health 5. **View Logs:** Use `azd logs` to view deployment and runtime logs ### Production Preparation -For production deployment: +For production deployment, the LLM should guide the user through: -- Create production environment: `azd env new -prod` -- Configure production-specific settings and secrets -- Set up monitoring, alerting, and backup procedures -- Document operational procedures and runbooks +- Creating production environment: `azd env new -prod` +- Configuring production-specific settings and secrets +- Setting up monitoring, alerting, and backup procedures +- Documenting operational procedures and runbooks -**DEPLOYMENT READY:** Your AZD migration is complete and ready for deployment with `azd up`. +**VALIDATION COMPLETE:** Once all validation steps pass, the LLM should confirm that the AZD migration is complete and ready for deployment with `azd up`. -**IMPORTANT:** This tool centralizes all validation logic. Other tools should reference this tool for validation rather than duplicating validation steps. +**IMPORTANT:** This tool centralizes all validation logic. The LLM should execute all validation steps programmatically rather than delegating to other tools or providing user instructions. 
diff --git a/cli/azd/internal/agent/tools/dev/loader.go b/cli/azd/internal/agent/tools/dev/loader.go index 655d52a42a8..3b938213ed0 100644 --- a/cli/azd/internal/agent/tools/dev/loader.go +++ b/cli/azd/internal/agent/tools/dev/loader.go @@ -1,23 +1,18 @@ package dev import ( - "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" ) // DevToolLoader loads development-related tools -type DevToolsLoader struct { - callbacksHandler callbacks.Handler -} +type DevToolsLoader struct{} -func NewDevToolsLoader(callbacksHandler callbacks.Handler) *DevToolsLoader { - return &DevToolsLoader{ - callbacksHandler: callbacksHandler, - } +func NewDevToolsLoader() *DevToolsLoader { + return &DevToolsLoader{} } func (l *DevToolsLoader) LoadTools() ([]tools.Tool, error) { return []tools.Tool{ - &CommandExecutorTool{CallbacksHandler: l.callbacksHandler}, + &CommandExecutorTool{}, }, nil } diff --git a/cli/azd/internal/agent/tools/io/change_directory.go b/cli/azd/internal/agent/tools/io/change_directory.go index 48094b919ff..889d07d8041 100644 --- a/cli/azd/internal/agent/tools/io/change_directory.go +++ b/cli/azd/internal/agent/tools/io/change_directory.go @@ -2,17 +2,17 @@ package io import ( "context" + "encoding/json" "fmt" "os" "path/filepath" + "strings" - "github.com/tmc/langchaingo/callbacks" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" ) // ChangeDirectoryTool implements the Tool interface for changing the current working directory -type ChangeDirectoryTool struct { - CallbacksHandler callbacks.Handler -} +type ChangeDirectoryTool struct{} func (t ChangeDirectoryTool) Name() string { return "change_directory" @@ -22,63 +22,75 @@ func (t ChangeDirectoryTool) Description() string { return "Change the current working directory. 
Input: directory path (e.g., '../parent' or './subfolder' or absolute path)" } -func (t ChangeDirectoryTool) Call(ctx context.Context, input string) (string, error) { - // Invoke callback for tool start - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("change_directory: %s", input)) +// createErrorResponse creates a JSON error response +func (t ChangeDirectoryTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() + } + + errorResp := common.ErrorResponse{ + Error: true, + Message: message, } + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + +func (t ChangeDirectoryTool) Call(ctx context.Context, input string) (string, error) { + input = strings.TrimSpace(input) + input = strings.Trim(input, `"`) + if input == "" { - err := fmt.Errorf("directory path is required") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("directory path is required"), "Directory path is required") } // Convert to absolute path absPath, err := filepath.Abs(input) if err != nil { - toolErr := fmt.Errorf("failed to resolve path %s: %w", input, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to resolve path %s: %s", input, err.Error())) } // Check if directory exists info, err := os.Stat(absPath) if err != nil { - toolErr := fmt.Errorf("directory %s does not exist: %w", absPath, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Directory %s does not exist: %s", absPath, err.Error())) } if !info.IsDir() { - toolErr := fmt.Errorf("%s is not a directory", absPath) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(fmt.Errorf("%s is not a directory", absPath), fmt.Sprintf("%s is not a directory", absPath)) } // Change directory err = os.Chdir(absPath) if err != nil { - toolErr := fmt.Errorf("failed to change directory to %s: %w", absPath, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to change directory to %s: %s", absPath, err.Error())) } - output := fmt.Sprintf("Changed directory to %s\n", absPath) + // Create success response + type ChangeDirectoryResponse struct { + Success bool `json:"success"` + OldPath string `json:"oldPath,omitempty"` + NewPath string `json:"newPath"` + Message string `json:"message"` + } - // Invoke callback for tool end - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + response := ChangeDirectoryResponse{ + Success: true, + NewPath: absPath, + Message: fmt.Sprintf("Successfully changed directory to %s", absPath), + } + + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - return output, nil + return string(jsonData), nil } 
diff --git a/cli/azd/internal/agent/tools/io/copy_file.go b/cli/azd/internal/agent/tools/io/copy_file.go index 64f91d50b72..2d0d573295f 100644 --- a/cli/azd/internal/agent/tools/io/copy_file.go +++ b/cli/azd/internal/agent/tools/io/copy_file.go @@ -9,13 +9,10 @@ import ( "strings" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" - "github.com/tmc/langchaingo/callbacks" ) // CopyFileTool implements the Tool interface for copying files -type CopyFileTool struct { - CallbacksHandler callbacks.Handler -} +type CopyFileTool struct{} func (t CopyFileTool) Name() string { return "copy_file" @@ -28,6 +25,27 @@ Returns: JSON with copy operation details or error information. The input must be formatted as a single line valid JSON string.` } +// createErrorResponse creates a JSON error response +func (t CopyFileTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() + } + + errorResp := common.ErrorResponse{ + Error: true, + Message: message, + } + + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { // Parse JSON input type InputParams struct { @@ -40,106 +58,46 @@ func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { // Clean the input first cleanInput := strings.TrimSpace(input) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("copy_file: %s", cleanInput)) - } - // Parse as JSON - this is now required if err := json.Unmarshal([]byte(cleanInput), ¶ms); err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"source\": \"file.txt\", \"destination\": \"backup.txt\"}", err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to parse JSON input: %w", err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input: %s. 
Expected format: {\"source\": \"file.txt\", \"destination\": \"backup.txt\"}", err.Error())) } source := strings.TrimSpace(params.Source) destination := strings.TrimSpace(params.Destination) if source == "" || destination == "" { - errorResponse := common.ErrorResponse{ - Error: true, - Message: "Both source and destination paths are required", - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("both source and destination paths are required")) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(fmt.Errorf("both source and destination paths are required"), "Both source and destination paths are required") } // Check if source file exists sourceInfo, err := os.Stat(source) if err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Source file %s does not exist: %s", source, err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("source file %s does not exist: %w", source, err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Source file %s does not exist: %s", source, err.Error())) } if sourceInfo.IsDir() { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Source %s is a directory. Use copy_directory for directories", source), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("source %s is a directory. Use copy_directory for directories", source)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(fmt.Errorf("source %s is a directory", source), fmt.Sprintf("Source %s is a directory. 
Use copy_directory for directories", source)) } // Open source file sourceFile, err := os.Open(source) if err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Failed to open source file %s: %s", source, err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to open source file %s: %w", source, err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Failed to open source file %s: %s", source, err.Error())) } defer sourceFile.Close() // Create destination file destFile, err := os.Create(destination) if err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Failed to create destination file %s: %s", destination, err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to create destination file %s: %w", destination, err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Failed to create destination file %s: %s", destination, err.Error())) } defer destFile.Close() // Copy contents bytesWritten, err := io.Copy(destFile, sourceFile) if err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Failed to copy file: %s", err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to copy file: %w", err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Failed to copy file: %s", err.Error())) } // Prepare JSON response structure @@ -162,21 +120,8 @@ func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { // Convert to JSON jsonData, err := json.MarshalIndent(response, "", " ") if err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Failed to marshal JSON response: %s", err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to marshal JSON response: %w", err)) - } - errorJsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(errorJsonData), nil - } - - output := string(jsonData) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, fmt.Sprintf("Copied %s to %s (%d bytes)", source, destination, bytesWritten)) + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - return output, nil + return string(jsonData), nil } diff --git a/cli/azd/internal/agent/tools/io/create_directory.go b/cli/azd/internal/agent/tools/io/create_directory.go index c79ac8d46c6..79db58865cb 100644 --- a/cli/azd/internal/agent/tools/io/create_directory.go +++ b/cli/azd/internal/agent/tools/io/create_directory.go @@ -2,17 +2,16 @@ package io import ( "context" + "encoding/json" "fmt" "os" "strings" - "github.com/tmc/langchaingo/callbacks" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" ) // CreateDirectoryTool implements the Tool interface for creating directories -type CreateDirectoryTool struct { - CallbacksHandler callbacks.Handler -} +type CreateDirectoryTool struct{} func (t CreateDirectoryTool) Name() string { return "create_directory" @@ -22,56 +21,69 @@ func (t CreateDirectoryTool) Description() string { return "Create a directory (and any necessary 
parent directories). Input: directory path (e.g., 'docs' or './src/components')" } -func (t CreateDirectoryTool) Call(ctx context.Context, input string) (string, error) { - // Invoke callback for tool start - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("create_directory: %s", input)) +// createErrorResponse creates a JSON error response +func (t CreateDirectoryTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() + } + + errorResp := common.ErrorResponse{ + Error: true, + Message: message, } + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + +func (t CreateDirectoryTool) Call(ctx context.Context, input string) (string, error) { input = strings.TrimPrefix(input, `"`) input = strings.TrimSuffix(input, `"`) + input = strings.TrimSpace(input) if input == "" { - err := fmt.Errorf("directory path is required") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("directory path is required"), "Directory path is required") } err := os.MkdirAll(input, 0755) if err != nil { - toolErr := fmt.Errorf("failed to create directory %s: %w", input, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to create directory %s: %s", input, err.Error())) } // Check if directory already existed or was newly created info, err := os.Stat(input) if err != nil { - toolErr := fmt.Errorf("failed to verify directory creation: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to verify directory creation: %s", err.Error())) } if !info.IsDir() { - toolErr := fmt.Errorf("%s exists but is not a directory", input) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(fmt.Errorf("%s exists but is not a directory", input), fmt.Sprintf("%s exists but is not a directory", input)) + } + + // Create success response + type CreateDirectoryResponse struct { + Success bool `json:"success"` + Path string `json:"path"` + Message string `json:"message"` } - output := fmt.Sprintf("Created directory: %s\n", input) + response := CreateDirectoryResponse{ + Success: true, + Path: input, + Message: fmt.Sprintf("Successfully created directory: %s", input), + } - // Invoke callback for tool end - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - return output, nil + return string(jsonData), nil } diff --git a/cli/azd/internal/agent/tools/io/current_directory.go b/cli/azd/internal/agent/tools/io/current_directory.go index 59169eb24e9..56256b3ea56 100644 --- a/cli/azd/internal/agent/tools/io/current_directory.go +++ b/cli/azd/internal/agent/tools/io/current_directory.go @@ -2,16 +2,15 @@ package io import ( "context" + "encoding/json" "fmt" 
"os" - "github.com/tmc/langchaingo/callbacks" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" ) // CurrentDirectoryTool implements the Tool interface for getting current directory -type CurrentDirectoryTool struct { - CallbacksHandler callbacks.Handler -} +type CurrentDirectoryTool struct{} func (t CurrentDirectoryTool) Name() string { return "cwd" @@ -21,21 +20,51 @@ func (t CurrentDirectoryTool) Description() string { return "Get the current working directory to understand the project context. Input: use 'current' or '.' (any input works)" } -func (t CurrentDirectoryTool) Call(ctx context.Context, input string) (string, error) { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, input) +// createErrorResponse creates a JSON error response +func (t CurrentDirectoryTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() + } + + errorResp := common.ErrorResponse{ + Error: true, + Message: message, } + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + +func (t CurrentDirectoryTool) Call(ctx context.Context, input string) (string, error) { dir, err := os.Getwd() if err != nil { - return "", fmt.Errorf("failed to get current directory: %w", err) + return t.createErrorResponse(err, fmt.Sprintf("Failed to get current directory: %s", err.Error())) + } + + // Create success response + type CurrentDirectoryResponse struct { + Success bool `json:"success"` + CurrentDirectory string `json:"currentDirectory"` + Message string `json:"message"` } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, dir) + response := CurrentDirectoryResponse{ + Success: true, + CurrentDirectory: dir, + Message: fmt.Sprintf("Current directory is %s", dir), } - output := fmt.Sprintf("Current directory is %s\n", dir) + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) + } - return output, nil + return string(jsonData), nil } diff --git a/cli/azd/internal/agent/tools/io/delete_directory.go b/cli/azd/internal/agent/tools/io/delete_directory.go index 7afb090f868..3066cd2d403 100644 --- a/cli/azd/internal/agent/tools/io/delete_directory.go +++ b/cli/azd/internal/agent/tools/io/delete_directory.go @@ -2,17 +2,16 @@ package io import ( "context" + "encoding/json" "fmt" "os" "strings" - "github.com/tmc/langchaingo/callbacks" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" ) // DeleteDirectoryTool implements the Tool interface for deleting directories -type DeleteDirectoryTool struct { - CallbacksHandler callbacks.Handler -} +type DeleteDirectoryTool struct{} func (t DeleteDirectoryTool) Name() string { return "delete_directory" @@ -22,40 +21,48 @@ func (t DeleteDirectoryTool) Description() string { return "Delete a directory and all its contents. 
Input: directory path (e.g., 'temp-folder' or './old-docs')" } -func (t DeleteDirectoryTool) Call(ctx context.Context, input string) (string, error) { - // Invoke callback for tool start - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("delete_directory: %s", input)) +// createErrorResponse creates a JSON error response +func (t DeleteDirectoryTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() } + errorResp := common.ErrorResponse{ + Error: true, + Message: message, + } + + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + +func (t DeleteDirectoryTool) Call(ctx context.Context, input string) (string, error) { input = strings.TrimPrefix(input, `"`) input = strings.TrimSuffix(input, `"`) + input = strings.TrimSpace(input) if input == "" { - err := fmt.Errorf("directory path is required") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("directory path is required"), "Directory path is required") } // Check if directory exists info, err := os.Stat(input) if err != nil { - toolErr := fmt.Errorf("directory %s does not exist: %w", input, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + if os.IsNotExist(err) { + return t.createErrorResponse(err, fmt.Sprintf("Directory %s does not exist", input)) } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Cannot access directory %s: %s", input, err.Error())) } // Make sure it's a directory, not a file if !info.IsDir() { - toolErr := fmt.Errorf("%s is a file, not a directory. Use delete_file to remove files", input) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(fmt.Errorf("%s is a file, not a directory", input), fmt.Sprintf("%s is a file, not a directory. 
Use delete_file to remove files", input)) } // Count contents before deletion for reporting @@ -68,24 +75,36 @@ func (t DeleteDirectoryTool) Call(ctx context.Context, input string) (string, er // Delete the directory and all contents err = os.RemoveAll(input) if err != nil { - toolErr := fmt.Errorf("failed to delete directory %s: %w", input, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to delete directory %s: %s", input, err.Error())) + } + + // Create success response + type DeleteDirectoryResponse struct { + Success bool `json:"success"` + Path string `json:"path"` + ItemsDeleted int `json:"itemsDeleted"` + Message string `json:"message"` } - var output string + var message string if fileCount > 0 { - output = fmt.Sprintf("Deleted directory: %s (contained %d items)", input, fileCount) + message = fmt.Sprintf("Successfully deleted directory %s (contained %d items)", input, fileCount) } else { - output = fmt.Sprintf("Deleted empty directory: %s", input) + message = fmt.Sprintf("Successfully deleted empty directory %s", input) + } + + response := DeleteDirectoryResponse{ + Success: true, + Path: input, + ItemsDeleted: fileCount, + Message: message, } - // Invoke callback for tool end - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - return output, nil + return string(jsonData), nil } diff --git a/cli/azd/internal/agent/tools/io/delete_file.go b/cli/azd/internal/agent/tools/io/delete_file.go index 57c51b415de..e5333526286 100644 --- a/cli/azd/internal/agent/tools/io/delete_file.go +++ b/cli/azd/internal/agent/tools/io/delete_file.go @@ -2,17 +2,16 @@ package io import ( "context" + "encoding/json" "fmt" "os" "strings" - "github.com/tmc/langchaingo/callbacks" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" ) // DeleteFileTool implements the Tool interface for deleting files -type DeleteFileTool struct { - CallbacksHandler callbacks.Handler -} +type DeleteFileTool struct{} func (t DeleteFileTool) Name() string { return "delete_file" @@ -22,55 +21,78 @@ func (t DeleteFileTool) Description() string { return "Delete a file. 
Input: file path (e.g., 'temp.txt' or './docs/old-file.md')" } -func (t DeleteFileTool) Call(ctx context.Context, input string) (string, error) { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("delete_file: %s", input)) +// createErrorResponse creates a JSON error response +func (t DeleteFileTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() } + errorResp := common.ErrorResponse{ + Error: true, + Message: message, + } + + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + +func (t DeleteFileTool) Call(ctx context.Context, input string) (string, error) { input = strings.TrimPrefix(input, `"`) input = strings.TrimSuffix(input, `"`) + input = strings.TrimSpace(input) if input == "" { - err := fmt.Errorf("file path is required") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("file path is required"), "File path is required") } // Check if file exists and get info info, err := os.Stat(input) if err != nil { - toolErr := fmt.Errorf("file %s does not exist: %w", input, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + if os.IsNotExist(err) { + return t.createErrorResponse(err, fmt.Sprintf("File %s does not exist", input)) } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Cannot access file %s: %s", input, err.Error())) } // Make sure it's a file, not a directory if info.IsDir() { - err := fmt.Errorf("%s is a directory, not a file. Use delete_directory to remove directories", input) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("%s is a directory, not a file", input), fmt.Sprintf("%s is a directory, not a file. 
Use delete_directory to remove directories", input)) } + fileSize := info.Size() + // Delete the file err = os.Remove(input) if err != nil { - toolErr := fmt.Errorf("failed to delete file %s: %w", input, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to delete file %s: %s", input, err.Error())) } - output := fmt.Sprintf("Deleted file %s (%d bytes)", input, info.Size()) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + // Create success response + type DeleteFileResponse struct { + Success bool `json:"success"` + FilePath string `json:"filePath"` + SizeDeleted int64 `json:"sizeDeleted"` + Message string `json:"message"` + } + + response := DeleteFileResponse{ + Success: true, + FilePath: input, + SizeDeleted: fileSize, + Message: fmt.Sprintf("Successfully deleted file %s (%d bytes)", input, fileSize), + } + + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - return output, nil + return string(jsonData), nil } diff --git a/cli/azd/internal/agent/tools/io/directory_list.go b/cli/azd/internal/agent/tools/io/directory_list.go index 581bd593da1..a5f6b92d089 100644 --- a/cli/azd/internal/agent/tools/io/directory_list.go +++ b/cli/azd/internal/agent/tools/io/directory_list.go @@ -9,13 +9,10 @@ import ( "strings" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" - "github.com/tmc/langchaingo/callbacks" ) // DirectoryListTool implements the Tool interface for listing directory contents -type DirectoryListTool struct { - CallbacksHandler callbacks.Handler -} +type DirectoryListTool struct{} func (t DirectoryListTool) Name() string { return "list_directory" @@ -28,6 +25,27 @@ Returns: JSON with directory contents including file names, types, and sizes. The input must be formatted as a single line valid JSON string.` } +// createErrorResponse creates a JSON error response +func (t DirectoryListTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() + } + + errorResp := common.ErrorResponse{ + Error: true, + Message: message, + } + + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + func (t DirectoryListTool) Call(ctx context.Context, input string) (string, error) { // Parse JSON input type InputParams struct { @@ -42,15 +60,7 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro // Parse as JSON - this is now required if err := json.Unmarshal([]byte(cleanInput), ¶ms); err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"path\": \".\", \"include_hidden\": false}", err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to parse JSON input: %w", err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input: %s. 
Expected format: {\"path\": \".\", \"includeHidden\": false}", err.Error())) } // Validate required path field @@ -60,11 +70,6 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro path := strings.TrimSpace(params.Path) - // Add debug logging - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("Processing JSON input: path='%s', include_hidden=%v", path, params.IncludeHidden)) - } - // Get absolute path for clarity - handle "." explicitly to avoid potential issues var absPath string var err error @@ -73,85 +78,32 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro // Explicitly get current working directory instead of relying on filepath.Abs(".") absPath, err = os.Getwd() if err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Failed to get current working directory: %s", err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to get current working directory: %w", err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Failed to get current working directory: %s", err.Error())) } } else { absPath, err = filepath.Abs(path) if err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Failed to get absolute path for %s: %s", path, err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to get absolute path for %s: %w", path, err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Failed to get absolute path for %s: %s", path, err.Error())) } } - // Invoke callback for tool execution start - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("Reading directory %s (absolute: %s)", path, absPath)) - } - - // Check if directory exists - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("Checking if directory exists: '%s'", absPath)) - } - + // Check if directory exists and is accessible info, err := os.Stat(absPath) if err != nil { - var message string if os.IsNotExist(err) { - message = fmt.Sprintf("Directory does not exist: %s", absPath) - } else { - message = fmt.Sprintf("Failed to access %s: %s (original input: '%s', cleaned path: '%s')", absPath, err.Error(), input, path) + return t.createErrorResponse(err, fmt.Sprintf("Directory %s does not exist", absPath)) } - - errorResponse := common.ErrorResponse{ - Error: true, - Message: message, - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to access %s: %w", absPath, err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Failed to access %s: %s", absPath, err.Error())) } if !info.IsDir() { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Path is not a directory: %s", absPath), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("%s is not a directory", absPath)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(fmt.Errorf("%s is not a directory", absPath), fmt.Sprintf("%s is not a directory", absPath)) } - // List directory 
contents + // Read directory contents files, err := os.ReadDir(absPath) if err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Failed to read directory %s: %s", absPath, err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to read directory %s: %w", absPath, err)) - } - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil + return t.createErrorResponse(err, fmt.Sprintf("Failed to read directory %s: %s", absPath, err.Error())) } // Prepare JSON response structure @@ -163,14 +115,21 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro } type DirectoryResponse struct { + Success bool `json:"success"` Path string `json:"path"` TotalItems int `json:"totalItems"` Items []FileInfo `json:"items"` + Message string `json:"message"` } var items []FileInfo for _, file := range files { + // Skip hidden files if not requested + if !params.IncludeHidden && strings.HasPrefix(file.Name(), ".") { + continue + } + fileInfo := FileInfo{ Name: file.Name(), IsDir: file.IsDir(), @@ -189,31 +148,18 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro } response := DirectoryResponse{ + Success: true, Path: absPath, - TotalItems: len(files), + TotalItems: len(items), Items: items, + Message: fmt.Sprintf("Successfully listed %d items in directory %s", len(items), absPath), } // Convert to JSON jsonData, err := json.MarshalIndent(response, "", " ") if err != nil { - errorResponse := common.ErrorResponse{ - Error: true, - Message: fmt.Sprintf("Failed to marshal JSON response: %s", err.Error()), - } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to marshal JSON response: %w", err)) - } - errorJsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(errorJsonData), nil - } - - output := string(jsonData) - - // Invoke callback for tool end - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, "") + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - return output, nil + return string(jsonData), nil } diff --git a/cli/azd/internal/agent/tools/io/file_info.go b/cli/azd/internal/agent/tools/io/file_info.go index afc5a0aca15..57d53ddb906 100644 --- a/cli/azd/internal/agent/tools/io/file_info.go +++ b/cli/azd/internal/agent/tools/io/file_info.go @@ -8,13 +8,11 @@ import ( "strings" "time" - "github.com/tmc/langchaingo/callbacks" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" ) // FileInfoTool implements the Tool interface for getting file information -type FileInfoTool struct { - CallbacksHandler callbacks.Handler -} +type FileInfoTool struct{} func (t FileInfoTool) Name() string { return "file_info" @@ -24,33 +22,47 @@ func (t FileInfoTool) Description() string { return "Get information about a file (size, modification time, permissions). Input: file path (e.g., 'data.txt' or './docs/readme.md'). Returns JSON with file information." 
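// Hypothetical caller-side sketch (not part of this patch): because every tool now returns
// either an {"error": true, ...} envelope or a success payload carrying "success": true, a
// consumer can probe those shared fields before decoding tool-specific data. All names below
// are assumptions for illustration only.

package main

import (
	"encoding/json"
	"fmt"
)

// envelope captures only the fields shared by the success and error responses.
type envelope struct {
	Error   bool   `json:"error"`
	Success bool   `json:"success"`
	Message string `json:"message"`
}

// classifyToolOutput reports whether a tool call produced an error envelope and
// returns the human-readable message either way.
func classifyToolOutput(raw string) (failed bool, message string, err error) {
	var env envelope
	if err := json.Unmarshal([]byte(raw), &env); err != nil {
		return false, "", fmt.Errorf("tool output is not JSON: %w", err)
	}
	return env.Error, env.Message, nil
}

func main() {
	failed, msg, err := classifyToolOutput(`{"error": true, "message": "File data.txt does not exist"}`)
	if err != nil {
		panic(err)
	}
	fmt.Printf("failed=%v message=%q\n", failed, msg)
}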
} -func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("file_info: %s", input)) +// createErrorResponse creates a JSON error response +func (t FileInfoTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() + } + + errorResp := common.ErrorResponse{ + Error: true, + Message: message, } + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + +func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { input = strings.TrimPrefix(input, `"`) input = strings.TrimSuffix(input, `"`) + input = strings.TrimSpace(input) if input == "" { - err := fmt.Errorf("file path is required") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("file path is required"), "File path is required") } info, err := os.Stat(input) if err != nil { - toolErr := fmt.Errorf("failed to get info for %s: %w", input, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + if os.IsNotExist(err) { + return t.createErrorResponse(err, fmt.Sprintf("File or directory %s does not exist", input)) } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to get info for %s: %s", input, err.Error())) } // Prepare JSON response structure type FileInfoResponse struct { + Success bool `json:"success"` Path string `json:"path"` Name string `json:"name"` Type string `json:"type"` @@ -58,6 +70,7 @@ func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { Size int64 `json:"size"` ModifiedTime time.Time `json:"modifiedTime"` Permissions string `json:"permissions"` + Message string `json:"message"` } var fileType string @@ -68,6 +81,7 @@ func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { } response := FileInfoResponse{ + Success: true, Path: input, Name: info.Name(), Type: fileType, @@ -75,23 +89,14 @@ func (t FileInfoTool) Call(ctx context.Context, input string) (string, error) { Size: info.Size(), ModifiedTime: info.ModTime(), Permissions: info.Mode().String(), + Message: fmt.Sprintf("Successfully retrieved information for %s", input), } // Convert to JSON jsonData, err := json.MarshalIndent(response, "", " ") if err != nil { - toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr - } - - output := string(jsonData) - - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - return output, nil + return string(jsonData), nil } diff --git a/cli/azd/internal/agent/tools/io/file_search.go b/cli/azd/internal/agent/tools/io/file_search.go index dd7a7a0de9c..84ea580c93e 100644 --- a/cli/azd/internal/agent/tools/io/file_search.go +++ b/cli/azd/internal/agent/tools/io/file_search.go @@ -4,18 +4,14 @@ import ( "context" "encoding/json" "fmt" - "os" - "path/filepath" "sort" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" 
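// Illustrative sketch of the doublestar matching that FileSearchTool.Call relies on below;
// the pattern and result cap are example values, not taken from this patch.

package main

import (
	"fmt"
	"sort"

	"github.com/bmatcuk/doublestar/v4"
)

func main() {
	const maxResults = 100 // default cap mirroring the tool's behaviour

	// "**" makes the glob recursive, so this finds Go files at any depth
	// relative to the current working directory.
	matches, err := doublestar.FilepathGlob("**/*.go")
	if err != nil {
		fmt.Println("invalid glob pattern:", err)
		return
	}

	sort.Strings(matches) // deterministic ordering, as in the tool
	if len(matches) > maxResults {
		matches = matches[:maxResults]
	}

	fmt.Printf("found %d files\n", len(matches))
	for _, m := range matches {
		fmt.Println(m)
	}
}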
"github.com/bmatcuk/doublestar/v4" - "github.com/tmc/langchaingo/callbacks" ) // FileSearchTool implements a tool for searching files using glob patterns -type FileSearchTool struct { - CallbacksHandler callbacks.Handler -} +type FileSearchTool struct{} // FileSearchRequest represents the JSON payload for file search requests type FileSearchRequest struct { @@ -77,147 +73,100 @@ Returns a sorted list of matching file paths relative to the current working dir The input must be formatted as a single line valid JSON string.` } -func (t FileSearchTool) Call(ctx context.Context, input string) (string, error) { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("file_search: %s", input)) +// createErrorResponse creates a JSON error response +func (t FileSearchTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() + } + + errorResp := common.ErrorResponse{ + Error: true, + Message: message, } + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + +func (t FileSearchTool) Call(ctx context.Context, input string) (string, error) { if input == "" { - err := fmt.Errorf("input is required") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("input is required"), "Input is required. Expected JSON format: {\"pattern\": \"*.go\"}") } // Parse JSON input var req FileSearchRequest if err := json.Unmarshal([]byte(input), &req); err != nil { - toolErr := fmt.Errorf("invalid JSON input: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input: %s. 
Expected format: {\"pattern\": \"*.go\", \"maxResults\": 50}", err.Error())) } // Validate required fields if req.Pattern == "" { - err := fmt.Errorf("pattern is required") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err - } - - // Set defaults - if req.MaxResults == 0 { - req.MaxResults = 100 - } - - // Get current working directory - searchPath, err := os.Getwd() - if err != nil { - toolErr := fmt.Errorf("failed to get current working directory: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(fmt.Errorf("pattern is required"), "Pattern is required in the JSON input") } - // Perform the search - matches, err := t.searchFiles(searchPath, req.Pattern, req.MaxResults) - if err != nil { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + // Set default max results + maxResults := req.MaxResults + if maxResults <= 0 { + maxResults = 100 } - // Format output as JSON - output, err := t.formatResults(searchPath, req.Pattern, matches, req.MaxResults) + // Use doublestar to find matching files + matches, err := doublestar.FilepathGlob(req.Pattern) if err != nil { - toolErr := fmt.Errorf("failed to format results: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr - } - - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + return t.createErrorResponse(err, fmt.Sprintf("Invalid glob pattern '%s': %s", req.Pattern, err.Error())) } - return output, nil -} - -// searchFiles performs the actual file search using doublestar for comprehensive glob matching -func (t FileSearchTool) searchFiles(searchPath, pattern string, maxResults int) ([]string, error) { - var matches []string - searchPath = filepath.Clean(searchPath) - - // Use doublestar.Glob which handles all advanced patterns including recursion via ** - globPattern := filepath.Join(searchPath, pattern) - // Convert to forward slashes for cross-platform compatibility - globPattern = filepath.ToSlash(globPattern) + // Sort results for consistent output + sort.Strings(matches) - globMatches, err := doublestar.FilepathGlob(globPattern) - if err != nil { - return nil, fmt.Errorf("error in glob pattern matching: %w", err) + // Limit results if needed + if len(matches) > maxResults { + matches = matches[:maxResults] } - // Convert to relative paths and limit results - for _, match := range globMatches { - if len(matches) >= maxResults { - break - } - - // Check if it's a file (not directory) - info, err := os.Stat(match) - if err != nil || info.IsDir() { - continue - } - - relPath, err := filepath.Rel(searchPath, match) - if err != nil { - continue // Skip files we can't get relative path for - } - - // Convert to forward slashes for consistent output - relPath = filepath.ToSlash(relPath) - matches = append(matches, relPath) + // Create response structure + type FileSearchResponse struct { + Success bool `json:"success"` + Pattern string `json:"pattern"` + TotalFound int `json:"totalFound"` + Returned int `json:"returned"` + MaxResults int `json:"maxResults"` + Files []string `json:"files"` + Message string `json:"message"` } - // Sort the results for consistent output - sort.Strings(matches) + totalFound := len(matches) + returned := len(matches) - return matches, nil -} - -// formatResults formats the search results into a JSON response -func (t 
FileSearchTool) formatResults(searchPath, pattern string, matches []string, maxResults int) (string, error) { - // Prepare JSON response structure - type FileSearchResponse struct { - CurrentDirectory string `json:"currentDirectory"` - Pattern string `json:"pattern"` - TotalFound int `json:"totalFound"` - MaxResults int `json:"maxResults"` - ResultsLimited bool `json:"resultsLimited"` - Matches []string `json:"matches"` + var message string + if totalFound == 0 { + message = fmt.Sprintf("No files found matching pattern '%s'", req.Pattern) + } else if totalFound == returned { + message = fmt.Sprintf("Found %d files matching pattern '%s'", totalFound, req.Pattern) + } else { + message = fmt.Sprintf("Found %d files matching pattern '%s', returning first %d", totalFound, req.Pattern, returned) } response := FileSearchResponse{ - CurrentDirectory: searchPath, - Pattern: pattern, - TotalFound: len(matches), - MaxResults: maxResults, - ResultsLimited: len(matches) >= maxResults, - Matches: matches, + Success: true, + Pattern: req.Pattern, + TotalFound: totalFound, + Returned: returned, + MaxResults: maxResults, + Files: matches, + Message: message, } // Convert to JSON jsonData, err := json.MarshalIndent(response, "", " ") if err != nil { - return "", fmt.Errorf("failed to marshal JSON response: %w", err) + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } return string(jsonData), nil diff --git a/cli/azd/internal/agent/tools/io/loader.go b/cli/azd/internal/agent/tools/io/loader.go index 5ddc49c749c..bf5e95a9f3f 100644 --- a/cli/azd/internal/agent/tools/io/loader.go +++ b/cli/azd/internal/agent/tools/io/loader.go @@ -1,34 +1,29 @@ package io import ( - "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" ) // IoToolsLoader loads IO-related tools -type IoToolsLoader struct { - callbackHandler callbacks.Handler -} +type IoToolsLoader struct{} -func NewIoToolsLoader(callbackHandler callbacks.Handler) *IoToolsLoader { - return &IoToolsLoader{ - callbackHandler: callbackHandler, - } +func NewIoToolsLoader() *IoToolsLoader { + return &IoToolsLoader{} } func (l *IoToolsLoader) LoadTools() ([]tools.Tool, error) { return []tools.Tool{ - &CurrentDirectoryTool{CallbacksHandler: l.callbackHandler}, - &ChangeDirectoryTool{CallbacksHandler: l.callbackHandler}, - &DirectoryListTool{CallbacksHandler: l.callbackHandler}, - &CreateDirectoryTool{CallbacksHandler: l.callbackHandler}, - &DeleteDirectoryTool{CallbacksHandler: l.callbackHandler}, - &ReadFileTool{CallbacksHandler: l.callbackHandler}, + &CurrentDirectoryTool{}, + &ChangeDirectoryTool{}, + &DirectoryListTool{}, + &CreateDirectoryTool{}, + &DeleteDirectoryTool{}, + &ReadFileTool{}, &WriteFileTool{}, - &CopyFileTool{CallbacksHandler: l.callbackHandler}, - &MoveFileTool{CallbacksHandler: l.callbackHandler}, - &DeleteFileTool{CallbacksHandler: l.callbackHandler}, - &FileInfoTool{CallbacksHandler: l.callbackHandler}, - &FileSearchTool{CallbacksHandler: l.callbackHandler}, + &CopyFileTool{}, + &MoveFileTool{}, + &DeleteFileTool{}, + &FileInfoTool{}, + &FileSearchTool{}, }, nil } diff --git a/cli/azd/internal/agent/tools/io/move_file.go b/cli/azd/internal/agent/tools/io/move_file.go index 51c12488774..98d77ac6cc3 100644 --- a/cli/azd/internal/agent/tools/io/move_file.go +++ b/cli/azd/internal/agent/tools/io/move_file.go @@ -2,17 +2,16 @@ package io import ( "context" + "encoding/json" "fmt" "os" "strings" - "github.com/tmc/langchaingo/callbacks" + 
"github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" ) // MoveFileTool implements the Tool interface for moving/renaming files -type MoveFileTool struct { - CallbacksHandler callbacks.Handler -} +type MoveFileTool struct{} func (t MoveFileTool) Name() string { return "move_file" @@ -22,70 +21,77 @@ func (t MoveFileTool) Description() string { return "Move or rename a file. Input format: 'source|destination' (e.g., 'old.txt|new.txt' or './file.txt|./folder/file.txt')" } -func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("move_file: %s", input)) +// createErrorResponse creates a JSON error response +func (t MoveFileTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() + } + + errorResp := common.ErrorResponse{ + Error: true, + Message: message, + } + + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil } + return string(jsonData), nil +} + +func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { input = strings.TrimPrefix(input, `"`) input = strings.TrimSuffix(input, `"`) + input = strings.TrimSpace(input) if input == "" { - err := fmt.Errorf("input is required in format 'source|destination'") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("input is required in format 'source|destination'"), "Input is required in format 'source|destination'") } // Split on first occurrence of '|' to separate source from destination parts := strings.SplitN(input, "|", 2) if len(parts) != 2 { - err := fmt.Errorf("invalid input format. Use 'source|destination'") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("invalid input format"), "Invalid input format. 
Use 'source|destination'") } source := strings.TrimSpace(parts[0]) destination := strings.TrimSpace(parts[1]) if source == "" || destination == "" { - err := fmt.Errorf("both source and destination paths are required") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("both source and destination paths are required"), "Both source and destination paths are required") } // Check if source exists sourceInfo, err := os.Stat(source) if err != nil { - toolErr := fmt.Errorf("source %s does not exist: %w", source, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + if os.IsNotExist(err) { + return t.createErrorResponse(err, fmt.Sprintf("Source %s does not exist", source)) } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Cannot access source %s: %s", source, err.Error())) } // Check if destination already exists if _, err := os.Stat(destination); err == nil { - err := fmt.Errorf("destination %s already exists", destination) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return t.createErrorResponse(fmt.Errorf("destination %s already exists", destination), fmt.Sprintf("Destination %s already exists", destination)) } // Move/rename the file err = os.Rename(source, destination) if err != nil { - toolErr := fmt.Errorf("failed to move %s to %s: %w", source, destination, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return t.createErrorResponse(err, fmt.Sprintf("Failed to move %s to %s: %s", source, destination, err.Error())) + } + + // Create success response + type MoveFileResponse struct { + Success bool `json:"success"` + Source string `json:"source"` + Destination string `json:"destination"` + Type string `json:"type"` + Size int64 `json:"size"` + Message string `json:"message"` } fileType := "file" @@ -93,10 +99,20 @@ func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { fileType = "directory" } - output := fmt.Sprintf("Successfully moved %s from %s to %s (%d bytes)\n", fileType, source, destination, sourceInfo.Size()) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + response := MoveFileResponse{ + Success: true, + Source: source, + Destination: destination, + Type: fileType, + Size: sourceInfo.Size(), + Message: fmt.Sprintf("Successfully moved %s from %s to %s (%d bytes)", fileType, source, destination, sourceInfo.Size()), + } + + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - return output, nil + return string(jsonData), nil } diff --git a/cli/azd/internal/agent/tools/io/read_file.go b/cli/azd/internal/agent/tools/io/read_file.go index 0890e127e76..9547d62054f 100644 --- a/cli/azd/internal/agent/tools/io/read_file.go +++ b/cli/azd/internal/agent/tools/io/read_file.go @@ -1,6 +1,7 @@ package io import ( + "bufio" "context" "encoding/json" "fmt" @@ -8,13 +9,11 @@ import ( "strings" "time" - "github.com/tmc/langchaingo/callbacks" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" ) // ReadFileTool implements the Tool interface for reading file contents -type ReadFileTool struct { - CallbacksHandler callbacks.Handler -} +type ReadFileTool struct{} // ReadFileRequest represents the JSON payload for 
file read requests type ReadFileRequest struct { @@ -80,279 +79,170 @@ Examples: 5. Read single line: {"filePath": "package.json", "startLine": 42, "endLine": 42} -Files larger than 10KB are automatically truncated. Files over 1MB show size info only unless specific line range is requested. +Files larger than 100KB are automatically truncated. Files over 1MB show size info only unless specific line range is requested. The input must be formatted as a single line valid JSON string.` } -func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("read_file: %s", input)) +// createErrorResponse creates a JSON error response +func (t ReadFileTool) createErrorResponse(err error, message string) (string, error) { + if message == "" { + message = err.Error() } - if input == "" { - output := "❌ No input provided\n\n" - output += "📝 Expected JSON format:\n" - output += `{"filePath": "path/to/file.txt"}` + errorResp := common.ErrorResponse{ + Error: true, + Message: message, + } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("empty input")) - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil + jsonData, jsonErr := json.MarshalIndent(errorResp, "", " ") + if jsonErr != nil { + // Fallback to simple error message if JSON marshalling fails + fallbackMsg := fmt.Sprintf(`{"error": true, "message": "JSON marshalling failed: %s"}`, jsonErr.Error()) + return fallbackMsg, nil + } + + return string(jsonData), nil +} + +func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { + if input == "" { + return t.createErrorResponse(fmt.Errorf("empty input"), "No input provided. Expected JSON format: {\"filePath\": \"path/to/file.txt\"}") } // Parse JSON input var req ReadFileRequest if err := json.Unmarshal([]byte(input), &req); err != nil { - output := fmt.Sprintf("❌ Invalid JSON input: %s\n\n", err.Error()) - output += "📝 Expected format:\n" - output += `{"filePath": "path/to/file.txt", "startLine": 1, "endLine": 50}` - output += "\n\n💡 Tips:\n" - output += "- Use double quotes for strings\n" - output += "- Remove any trailing commas\n" - output += "- Escape backslashes: use \\\\ instead of \\" - - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil + return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"filePath\": \"path/to/file.txt\", \"startLine\": 1, \"endLine\": 50}", err.Error())) } // Validate required fields if req.FilePath == "" { - output := "❌ Missing required field: filePath cannot be empty\n\n" - output += "📝 Example: " + `{"filePath": "README.md"}` - - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("missing filePath")) - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil + return t.createErrorResponse(fmt.Errorf("missing filePath"), "Missing required field: filePath cannot be empty") } // Get file info first to check size fileInfo, err := os.Stat(req.FilePath) if err != nil { - output := fmt.Sprintf("❌ Cannot access file: %s\n\n", req.FilePath) if os.IsNotExist(err) { - output += "📁 File does not exist. 
Please check:\n" - output += "- File path spelling and case sensitivity\n" - output += "- File location relative to current directory\n" - output += "- File permissions\n" - } else { - output += fmt.Sprintf("Error details: %s\n", err.Error()) + return t.createErrorResponse(err, fmt.Sprintf("File does not exist: %s. Please check file path spelling and location", req.FilePath)) } - - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil + return t.createErrorResponse(err, fmt.Sprintf("Cannot access file %s: %s", req.FilePath, err.Error())) } - fileSize := fileInfo.Size() - - // Handle very large files differently (unless specific line range requested) - if fileSize > 1024*1024 && req.StartLine == 0 && req.EndLine == 0 { // 1MB+ - response := ReadFileResponse{ - Success: false, - FilePath: req.FilePath, - Content: "", - IsTruncated: false, - IsPartial: false, - FileInfo: ReadFileInfo{ - Size: fileSize, - ModifiedTime: fileInfo.ModTime(), - Permissions: fileInfo.Mode().String(), - }, - Message: fmt.Sprintf("File is very large (%.2f MB). Use startLine and endLine parameters for specific sections.", float64(fileSize)/(1024*1024)), - } - - jsonData, err := json.MarshalIndent(response, "", " ") - if err != nil { - toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr - } + if fileInfo.IsDir() { + return t.createErrorResponse(fmt.Errorf("path is a directory"), fmt.Sprintf("%s is a directory, not a file. Use directory_list tool for directories", req.FilePath)) + } - output := string(jsonData) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil + // Handle very large files (>1MB) - require line range + const maxFileSize = 1024 * 1024 // 1MB + if fileInfo.Size() > maxFileSize && req.StartLine == 0 && req.EndLine == 0 { + return t.createErrorResponse(fmt.Errorf("file too large"), fmt.Sprintf("File %s is too large (%d bytes). 
Please specify startLine and endLine to read specific sections", req.FilePath, fileInfo.Size())) } - content, err := os.ReadFile(req.FilePath) + // Read file content + file, err := os.Open(req.FilePath) if err != nil { - output := fmt.Sprintf("❌ Cannot read file: %s\n", req.FilePath) - output += fmt.Sprintf("Error: %s\n\n", err.Error()) - output += "💡 This might be due to:\n" - output += "- Insufficient permissions\n" - output += "- File is locked by another process\n" - output += "- File is binary or corrupted\n" - - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil + return t.createErrorResponse(err, fmt.Sprintf("Failed to open file %s: %s", req.FilePath, err.Error())) } + defer file.Close() - lines := strings.Split(string(content), "\n") - totalLines := len(lines) + // Read lines + var lines []string + scanner := bufio.NewScanner(file) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } - // Handle partial reads based on line range - if req.StartLine > 0 || req.EndLine > 0 { - return t.handlePartialRead(ctx, req.FilePath, lines, req.StartLine, req.EndLine, totalLines, fileInfo) + if err := scanner.Err(); err != nil { + return t.createErrorResponse(err, fmt.Sprintf("Error reading file %s: %s", req.FilePath, err.Error())) } - var finalContent string + totalLines := len(lines) + var content string + var isPartial bool var isTruncated bool - var message string - - // Improved truncation with better limits for full file reads - if len(content) > 10000 { // 10KB limit - // Show first 50 lines and last 10 lines - preview := strings.Join(lines[:50], "\n") - if totalLines > 60 { - preview += fmt.Sprintf("\n\n... [%d lines omitted] ...\n\n", totalLines-60) - preview += strings.Join(lines[totalLines-10:], "\n") - } - finalContent = preview - isTruncated = true - message = "Large file truncated - showing first 50 and last 10 lines" - } else { - finalContent = string(content) - isTruncated = false - message = "File read successfully" - } + var lineRange *LineRange - response := ReadFileResponse{ - Success: true, - FilePath: req.FilePath, - Content: finalContent, - IsTruncated: isTruncated, - IsPartial: false, - FileInfo: ReadFileInfo{ - Size: fileSize, - ModifiedTime: fileInfo.ModTime(), - Permissions: fileInfo.Mode().String(), - }, - Message: message, - } + // Determine what to read + if req.StartLine > 0 || req.EndLine > 0 { + // Reading specific line range + startLine := req.StartLine + endLine := req.EndLine - jsonData, err := json.MarshalIndent(response, "", " ") - if err != nil { - toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) + if startLine == 0 { + startLine = 1 } - return "", toolErr - } - - output := string(jsonData) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - - return output, nil -} - -// handlePartialRead handles reading specific line ranges from a file -func (t ReadFileTool) handlePartialRead(ctx context.Context, filePath string, lines []string, startLine, endLine, totalLines int, fileInfo os.FileInfo) (string, error) { - // Validate and adjust line numbers (1-based to 0-based) - if startLine == 0 { - startLine = 1 // Default to start of file - } - if endLine == 0 { - endLine = totalLines // Default to end of file - } - - // Validate line numbers - if startLine < 1 { - startLine = 1 - } - if endLine > totalLines { - endLine = 
totalLines - } - if startLine > endLine { - response := ReadFileResponse{ - Success: false, - FilePath: filePath, - Content: "", - IsTruncated: false, - IsPartial: false, - FileInfo: ReadFileInfo{ - Size: fileInfo.Size(), - ModifiedTime: fileInfo.ModTime(), - Permissions: fileInfo.Mode().String(), - }, - Message: fmt.Sprintf("Invalid line range: start line (%d) cannot be greater than end line (%d)", startLine, endLine), + if endLine == 0 { + endLine = totalLines } - jsonData, err := json.MarshalIndent(response, "", " ") - if err != nil { - toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + // Validate line range + if startLine > totalLines { + return t.createErrorResponse(fmt.Errorf("start line out of range"), fmt.Sprintf("Start line %d is greater than total lines %d in file", startLine, totalLines)) } - - output := string(jsonData) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("invalid line range: start %d > end %d", startLine, endLine)) - t.CallbacksHandler.HandleToolEnd(ctx, output) + if startLine > endLine { + return t.createErrorResponse(fmt.Errorf("invalid line range"), fmt.Sprintf("Start line %d is greater than end line %d", startLine, endLine)) } - return output, nil - } - // Convert to 0-based indexing - startIdx := startLine - 1 - endIdx := endLine + // Adjust endLine if it exceeds total lines + if endLine > totalLines { + endLine = totalLines + } - // Extract the requested lines - selectedLines := lines[startIdx:endIdx] - content := strings.Join(selectedLines, "\n") + // Convert to 0-based indexing and extract lines + startIdx := startLine - 1 + endIdx := endLine + selectedLines := lines[startIdx:endIdx] + content = strings.Join(selectedLines, "\n") + isPartial = true - linesRead := endLine - startLine + 1 + lineRange = &LineRange{ + StartLine: startLine, + EndLine: endLine, + TotalLines: totalLines, + LinesRead: endLine - startLine + 1, + } + } else { + // Reading entire file + content = strings.Join(lines, "\n") + + // Truncate if content is too large (>100KB) + const maxContentSize = 100 * 1024 // 100KB + if len(content) > maxContentSize { + content = content[:maxContentSize] + "\n... 
[content truncated]" + isTruncated = true + } + } + // Create success response response := ReadFileResponse{ Success: true, - FilePath: filePath, + FilePath: req.FilePath, Content: content, - IsTruncated: false, - IsPartial: true, - LineRange: &LineRange{ - StartLine: startLine, - EndLine: endLine, - TotalLines: totalLines, - LinesRead: linesRead, - }, + IsTruncated: isTruncated, + IsPartial: isPartial, + LineRange: lineRange, FileInfo: ReadFileInfo{ Size: fileInfo.Size(), ModifiedTime: fileInfo.ModTime(), Permissions: fileInfo.Mode().String(), }, - Message: fmt.Sprintf("Successfully read %d lines (%d-%d) from file", linesRead, startLine, endLine), } - jsonData, err := json.MarshalIndent(response, "", " ") - if err != nil { - toolErr := fmt.Errorf("failed to marshal JSON response: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + // Set appropriate message + if isPartial && lineRange != nil { + response.Message = fmt.Sprintf("Successfully read %d lines (%d-%d) from file", lineRange.LinesRead, lineRange.StartLine, lineRange.EndLine) + } else if isTruncated { + response.Message = fmt.Sprintf("Successfully read file (content truncated due to size)") + } else { + response.Message = fmt.Sprintf("Successfully read entire file (%d lines)", totalLines) } - output := string(jsonData) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) + // Convert to JSON + jsonData, err := json.MarshalIndent(response, "", " ") + if err != nil { + return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - return output, nil + return string(jsonData), nil } diff --git a/cli/azd/internal/agent/tools/loader.go b/cli/azd/internal/agent/tools/loader.go index e4a10ad1f53..75be6baefe6 100644 --- a/cli/azd/internal/agent/tools/loader.go +++ b/cli/azd/internal/agent/tools/loader.go @@ -1,7 +1,6 @@ package tools import ( - "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd" @@ -15,18 +14,16 @@ type ToolLoader interface { } type LocalToolsLoader struct { - loaders []ToolLoader - callbackHandler callbacks.Handler + loaders []ToolLoader } -func NewLocalToolsLoader(callbackHandler callbacks.Handler) *LocalToolsLoader { +func NewLocalToolsLoader() *LocalToolsLoader { return &LocalToolsLoader{ loaders: []ToolLoader{ - azd.NewAzdToolsLoader(callbackHandler), - dev.NewDevToolsLoader(callbackHandler), - io.NewIoToolsLoader(callbackHandler), + azd.NewAzdToolsLoader(), + dev.NewDevToolsLoader(), + io.NewIoToolsLoader(), }, - callbackHandler: callbackHandler, } } diff --git a/cli/azd/internal/agent/tools/mcp/loader.go b/cli/azd/internal/agent/tools/mcp/loader.go index e30c3fb5e0e..7ad5fdc1bd1 100644 --- a/cli/azd/internal/agent/tools/mcp/loader.go +++ b/cli/azd/internal/agent/tools/mcp/loader.go @@ -10,7 +10,6 @@ import ( langchaingo_mcp_adapter "github.com/i2y/langchaingo-mcp-adapter" "github.com/mark3labs/mcp-go/client" "github.com/mark3labs/mcp-go/client/transport" - "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" ) @@ -31,13 +30,11 @@ type ServerConfig struct { } type McpToolsLoader struct { - callbackHandler callbacks.Handler samplingHandler client.SamplingHandler } -func NewMcpToolsLoader(callbackHandler callbacks.Handler, samplingHandler client.SamplingHandler) *McpToolsLoader { +func NewMcpToolsLoader(samplingHandler client.SamplingHandler) *McpToolsLoader { return 
&McpToolsLoader{ - callbackHandler: callbackHandler, samplingHandler: samplingHandler, } } diff --git a/cli/azd/internal/agent/tools/weather/loader.go b/cli/azd/internal/agent/tools/weather/loader.go index ce283e18fb3..afdf7894f68 100644 --- a/cli/azd/internal/agent/tools/weather/loader.go +++ b/cli/azd/internal/agent/tools/weather/loader.go @@ -1,23 +1,18 @@ package weather import ( - "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/tools" ) // WeatherToolsLoader loads weather-related tools -type WeatherToolsLoader struct { - callbackHandler callbacks.Handler -} +type WeatherToolsLoader struct{} -func NewWeatherToolsLoader(callbackHandler callbacks.Handler) *WeatherToolsLoader { - return &WeatherToolsLoader{ - callbackHandler: callbackHandler, - } +func NewWeatherToolsLoader() *WeatherToolsLoader { + return &WeatherToolsLoader{} } func (l *WeatherToolsLoader) LoadTools() ([]tools.Tool, error) { return []tools.Tool{ - &WeatherTool{CallbacksHandler: l.callbackHandler}, + &WeatherTool{}, }, nil } diff --git a/cli/azd/pkg/llm/azure_openai.go b/cli/azd/pkg/llm/azure_openai.go index ea02134c848..dc05b78ec86 100644 --- a/cli/azd/pkg/llm/azure_openai.go +++ b/cli/azd/pkg/llm/azure_openai.go @@ -54,11 +54,11 @@ func (p *AzureOpenAiModelProvider) CreateModelContainer(opts ...ModelOption) (*M } model, err := openai.New( - openai.WithModel(modelConfig.Model), + openai.WithToken(modelConfig.Token), + openai.WithBaseURL(modelConfig.Endpoint), openai.WithAPIType(openai.APITypeAzure), openai.WithAPIVersion(modelConfig.ApiVersion), - openai.WithBaseURL(modelConfig.Endpoint), - openai.WithToken(modelConfig.Token), + openai.WithModel(modelConfig.Model), ) if err != nil { return nil, fmt.Errorf("failed to create LLM: %w", err) From 1cbdb1e6e2755f011b797e839a645bdf3381dc8c Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Wed, 6 Aug 2025 18:19:54 -0700 Subject: [PATCH 051/116] Adds feature flag usage --- cli/azd/cmd/init.go | 187 ++++++- cli/azd/internal/agent/agent.go | 201 ++----- .../internal/agent/conversational_agent.go | 182 +++++++ cli/azd/internal/agent/logging/file_logger.go | 201 +++++++ cli/azd/internal/agent/logging/logger.go | 6 + cli/azd/internal/agent/one_shot_agent.go | 117 +++++ .../internal/agent/prompts/conversational.txt | 88 ++++ .../default_agent_format_instructions.txt | 40 -- .../agent/prompts/default_agent_prefix.txt | 17 - .../agent/prompts/default_agent_suffix.txt | 8 - cli/azd/internal/agent/prompts/one_shot.txt | 78 +++ .../tools/azd/azd_architecture_planning.go | 14 +- .../tools/azd/azd_azure_yaml_generation.go | 14 +- .../agent/tools/azd/azd_discovery_analysis.go | 14 +- .../agent/tools/azd/azd_docker_generation.go | 14 +- .../tools/azd/azd_iac_generation_rules.go | 12 +- .../azd/azd_infrastructure_generation.go | 13 +- .../internal/agent/tools/azd/azd_plan_init.go | 12 +- .../agent/tools/azd/azd_project_validation.go | 17 +- .../agent/tools/azd/prompts/README.md | 199 ------- .../azd/prompts/azd_architecture_planning.md | 222 ++++---- .../azd/prompts/azd_azure_yaml_generation.md | 185 ++----- .../azd/prompts/azd_discovery_analysis.md | 238 ++------- .../azd/prompts/azd_docker_generation.md | 245 ++++----- .../azd/prompts/azd_iac_generation_rules.md | 224 +++----- .../prompts/azd_infrastructure_generation.md | 268 +++++----- .../agent/tools/azd/prompts/azd_plan_init.md | 153 +++--- .../azd/prompts/azd_project_validation.md | 233 +++------ .../tools/azd/prompts/azd_yaml_schema.md | 18 - .../agent/tools/azd/prompts/prompts.go | 3 + 
cli/azd/internal/agent/tools/io/write_file.go | 237 ++++++--- .../agent/tools/io/write_file_test.go | 495 ++++++++++++++++++ cli/azd/pkg/llm/azure_openai.go | 28 +- cli/azd/pkg/llm/manager.go | 4 +- cli/azd/pkg/llm/model.go | 35 ++ cli/azd/pkg/llm/model_factory.go | 8 +- cli/azd/pkg/output/colors.go | 56 +- go.mod | 20 +- go.sum | 39 ++ 39 files changed, 2414 insertions(+), 1731 deletions(-) create mode 100644 cli/azd/internal/agent/conversational_agent.go create mode 100644 cli/azd/internal/agent/logging/file_logger.go create mode 100644 cli/azd/internal/agent/one_shot_agent.go create mode 100644 cli/azd/internal/agent/prompts/conversational.txt delete mode 100644 cli/azd/internal/agent/prompts/default_agent_format_instructions.txt delete mode 100644 cli/azd/internal/agent/prompts/default_agent_prefix.txt delete mode 100644 cli/azd/internal/agent/prompts/default_agent_suffix.txt create mode 100644 cli/azd/internal/agent/prompts/one_shot.txt delete mode 100644 cli/azd/internal/agent/tools/azd/prompts/README.md delete mode 100644 cli/azd/internal/agent/tools/azd/prompts/azd_yaml_schema.md create mode 100644 cli/azd/internal/agent/tools/io/write_file_test.go create mode 100644 cli/azd/pkg/llm/model.go diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go index f51d9d03da2..3c128cb5c53 100644 --- a/cli/azd/cmd/init.go +++ b/cli/azd/cmd/init.go @@ -34,6 +34,7 @@ import ( "github.com/azure/azure-dev/cli/azd/pkg/tools" "github.com/azure/azure-dev/cli/azd/pkg/tools/git" "github.com/azure/azure-dev/cli/azd/pkg/workflow" + "github.com/fatih/color" "github.com/joho/godotenv" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -244,7 +245,7 @@ func (i *initAction) Run(ctx context.Context) (*actions.ActionResult, error) { initTypeSelect = initEnvironment } else { // Prompt for init type for new projects - initTypeSelect, err = promptInitType(i.console, ctx) + initTypeSelect, err = promptInitType(i.console, ctx, i.featuresManager) if err != nil { return nil, err } @@ -371,38 +372,176 @@ func (i *initAction) Run(ctx context.Context) (*actions.ActionResult, error) { } func (i *initAction) initAppWithCopilot(ctx context.Context) error { - actionLogger := logging.NewActionLogger() - defaultModelContainer, err := i.llmManager.GetDefaultModel(llm.WithLogger(actionLogger)) + // Warn user that this is an alpha feature + i.console.WarnForFeature(ctx, llm.FeatureLlm) + + fileLogger, cleanup, err := logging.NewFileLoggerDefault() + if err != nil { + return err + } + defer cleanup() + + defaultModelContainer, err := i.llmManager.GetDefaultModel(llm.WithLogger(fileLogger)) if err != nil { return err } samplingModelContainer, err := i.llmManager.GetDefaultModel() - azdAgent, err := agent.NewAzdAiAgent( + azdAgent, err := agent.NewConversationalAzdAiAgent( defaultModelContainer.Model, agent.WithSamplingModel(samplingModelContainer.Model), + agent.WithDebug(i.flags.global.EnableDebugLogging), ) if err != nil { return err } - initPrompt := `Goal: Initialize or migrate the AZD project from the current working directory. + type initStep struct { + Name string + Description string + } + + taskInput := `Your task: %s + +Break this task down into smaller steps if needed. +If new information reveals more work to be done, pursue it. +Do not stop until all tasks are complete and fully resolved. +` + + initSteps := []initStep{ + { + Name: "Running Discovery & Analysis", + Description: "Run a deep discovery and analysis on the current working directory. 
Provide a detailed summary of work performed.", + }, + { + Name: "Generating Architecture Plan", + Description: "Create a high-level architecture plan for the application. Provide a detailed summary of work performed.", + }, + { + Name: "Generating Dockerfile(s)", + Description: "Generate a Dockerfile for the application components as needed. Provide a detailed summary of work performed.", + }, + { + Name: "Generating infrastructure", + Description: "Generate infrastructure as code (IaC) for the application. Provide a detailed summary of work performed.", + }, + { + Name: "Generating azure.yaml file", + Description: "Generate an azure.yaml file for the application. Provide a detailed summary of work performed.", + }, + { + Name: "Validating project", + Description: "Validate the project structure and configuration. Provide a detailed summary of work performed.", + }, + } + + for idx, step := range initSteps { + // Collect and apply feedback for next steps + if idx > 0 { + if err := i.collectAndApplyFeedback(ctx, azdAgent, "Any feedback before continuing to the next step?"); err != nil { + return err + } + } + + // Run Step + i.console.ShowSpinner(ctx, step.Name, input.Step) + fullTaskInput := fmt.Sprintf(taskInput, step.Description) + agentOutput, err := azdAgent.SendMessage(ctx, fullTaskInput) + if err != nil { + i.console.StopSpinner(ctx, fmt.Sprintf("%s (With errors)", step.Name), input.StepWarning) + if agentOutput != "" { + i.console.Message(ctx, output.WithMarkdown(agentOutput)) + } + + return err + } + + i.console.StopSpinner(ctx, step.Name, input.StepDone) + i.console.Message(ctx, "") + finalOutput := fmt.Sprintf("%s %s", color.MagentaString("🤖 AZD Copilot:"), output.WithMarkdown(agentOutput)) + i.console.Message(ctx, finalOutput) + i.console.Message(ctx, "") + } + + // Post-completion feedback loop + if err := i.postCompletionFeedbackLoop(ctx, azdAgent); err != nil { + return err + } -Read and review the 'azd-arch-plan.md' file if it exists to get current status -Run the 'azd_plan_init' tool and follow the steps -Finally - run the 'azd_project_validation' tool to ensure the process is fully completed -Be very short, terse and to the point during planning and action execution. -Provide verbose output for the final summary when you are complete. 
- ` + return nil +} - if err := azdAgent.RunConversationLoop(ctx, []string{initPrompt}); err != nil { +// collectAndApplyFeedback prompts for user feedback and applies it using the agent in a loop +func (i *initAction) collectAndApplyFeedback(ctx context.Context, azdAgent *agent.ConversationalAzdAiAgent, promptMessage string) error { + hasFeedback, err := i.console.Confirm(ctx, input.ConsoleOptions{ + Message: promptMessage, + DefaultValue: false, + }) + if err != nil { return err } + if !hasFeedback { + i.console.Message(ctx, "") + return nil + } + + // Loop to allow multiple rounds of feedback + for { + userInput, err := i.console.Prompt(ctx, input.ConsoleOptions{ + Message: "💭 You:", + DefaultValue: "", + Help: "Additional context will be provided to AZD Copilot", + }) + if err != nil { + return fmt.Errorf("error collecting feedback during azd init, %w", err) + } + + if userInput != "" { + i.console.ShowSpinner(ctx, "Submitting feedback", input.Step) + feedbackOutput, err := azdAgent.SendMessage(ctx, userInput) + if err != nil { + i.console.StopSpinner(ctx, "Submitting feedback (With errors)", input.StepWarning) + if feedbackOutput != "" { + i.console.Message(ctx, output.WithMarkdown(feedbackOutput)) + } + return err + } + + i.console.StopSpinner(ctx, "Submitting feedback", input.StepDone) + i.console.Message(ctx, "") + agentOutput := fmt.Sprintf("%s %s", color.MagentaString("🤖 AZD Copilot:"), output.WithMarkdown(feedbackOutput)) + i.console.Message(ctx, agentOutput) + i.console.Message(ctx, "") + } + + // Check if user wants to provide more feedback + moreFeedback, err := i.console.Confirm(ctx, input.ConsoleOptions{ + Message: "Do you have any more feedback or changes?", + DefaultValue: false, + }) + if err != nil { + return err + } + + if !moreFeedback { + break + } + } + return nil } +// postCompletionFeedbackLoop provides a final opportunity for feedback after all steps complete +func (i *initAction) postCompletionFeedbackLoop(ctx context.Context, azdAgent *agent.ConversationalAzdAiAgent) error { + i.console.Message(ctx, "") + i.console.Message(ctx, "🎉 All initialization steps completed!") + i.console.Message(ctx, "") + + return i.collectAndApplyFeedback(ctx, azdAgent, "Any additional feedback or changes you'd like to make?") +} + type initType int const ( @@ -413,14 +552,20 @@ const ( initWithCopilot ) -func promptInitType(console input.Console, ctx context.Context) (initType, error) { +func promptInitType(console input.Console, ctx context.Context, featuresManager *alpha.FeatureManager) (initType, error) { + options := []string{ + "Scan current directory", // This now covers minimal project creation too + "Select a template", + } + + // Only include AZD Copilot option if the LLM feature is enabled + if featuresManager.IsEnabled(llm.FeatureLlm) { + options = append(options, fmt.Sprintf("AZD Copilot %s", color.YellowString("(Alpha)"))) + } + selection, err := console.Select(ctx, input.ConsoleOptions{ Message: "How do you want to initialize your app?", - Options: []string{ - "Scan current directory", // This now covers minimal project creation too - "Select a template", - "AZD Copilot", - }, + Options: options, }) if err != nil { return initUnknown, err @@ -432,7 +577,11 @@ func promptInitType(console input.Console, ctx context.Context) (initType, error case 1: return initAppTemplate, nil case 2: - return initWithCopilot, nil + // Only return initWithCopilot if the LLM feature is enabled and we have 3 options + if featuresManager.IsEnabled(llm.FeatureLlm) { + return initWithCopilot, 
nil + } + fallthrough default: panic("unhandled selection") } diff --git a/cli/azd/internal/agent/agent.go b/cli/azd/internal/agent/agent.go index bcf30a609c6..dc3f434e3d3 100644 --- a/cli/azd/internal/agent/agent.go +++ b/cli/azd/internal/agent/agent.go @@ -1,202 +1,73 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - package agent import ( - "bufio" - "context" - _ "embed" "fmt" - "os" "strings" - "github.com/fatih/color" "github.com/tmc/langchaingo/agents" - "github.com/tmc/langchaingo/chains" + "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/llms" - "github.com/tmc/langchaingo/memory" "github.com/tmc/langchaingo/tools" - - "github.com/azure/azure-dev/cli/azd/internal/agent/logging" - localtools "github.com/azure/azure-dev/cli/azd/internal/agent/tools" - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" - mcptools "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" ) -//go:embed prompts/default_agent_prefix.txt -var _defaultAgentPrefix string - -//go:embed prompts/default_agent_format_instructions.txt -var _defaultAgentFormatInstructions string - -//go:embed prompts/default_agent_suffix.txt -var _defaultAgentSuffix string - -// AzdAiAgent represents an enhanced AZD Copilot agent with action tracking, intent validation, and conversation memory -type AzdAiAgent struct { - debug bool - defaultModel llms.Model - samplingModel llms.Model - executor *agents.Executor +type Agent struct { + debug bool + defaultModel llms.Model + samplingModel llms.Model + executor *agents.Executor + tools []tools.Tool + callbacksHandler callbacks.Handler } -type AgentOption func(*AzdAiAgent) +type AgentOption func(*Agent) func WithDebug(debug bool) AgentOption { - return func(agent *AzdAiAgent) { + return func(agent *Agent) { agent.debug = debug } } -func WithSamplingModel(model llms.Model) AgentOption { - return func(agent *AzdAiAgent) { - agent.samplingModel = model +func WithDefaultModel(model llms.Model) AgentOption { + return func(agent *Agent) { + agent.defaultModel = model } } -func NewAzdAiAgent(llm llms.Model, opts ...AgentOption) (*AzdAiAgent, error) { - azdAgent := &AzdAiAgent{ - defaultModel: llm, - samplingModel: llm, - } - - for _, opt := range opts { - opt(azdAgent) - } - - actionLogger := logging.NewActionLogger( - logging.WithDebug(azdAgent.debug), - ) - - smartMemory := memory.NewConversationBuffer( - memory.WithInputKey("input"), - memory.WithOutputKey("output"), - memory.WithHumanPrefix("Human"), - memory.WithAIPrefix("AI"), - ) - - // Create sampling handler for MCP - samplingHandler := mcptools.NewMcpSamplingHandler( - azdAgent.samplingModel, - mcp.WithDebug(azdAgent.debug), - ) - - toolLoaders := []localtools.ToolLoader{ - localtools.NewLocalToolsLoader(), - mcptools.NewMcpToolsLoader(samplingHandler), - } - - allTools := []tools.Tool{} - - // Define block list of excluded tools - excludedTools := map[string]bool{ - "extension_az": true, - "extension_azd": true, - // Add more excluded tools here as needed +func WithSamplingModel(model llms.Model) AgentOption { + return func(agent *Agent) { + agent.samplingModel = model } +} - for _, toolLoader := range toolLoaders { - categoryTools, err := toolLoader.LoadTools() - if err != nil { - return nil, err - } - - // Filter out excluded tools - for _, tool := range categoryTools { - if !excludedTools[tool.Name()] { - allTools = append(allTools, tool) - } - } +func WithTools(tools ...tools.Tool) AgentOption { + return func(agent *Agent) { + agent.tools = 
tools } - - // 4. Create agent with memory directly integrated - conversationAgent := agents.NewConversationalAgent(llm, allTools, - agents.WithPromptPrefix(_defaultAgentPrefix), - agents.WithPromptSuffix(_defaultAgentSuffix), - agents.WithPromptFormatInstructions(_defaultAgentFormatInstructions), - agents.WithMemory(smartMemory), - agents.WithCallbacksHandler(actionLogger), - agents.WithReturnIntermediateSteps(), - ) - - // 5. Create executor without separate memory configuration since agent already has it - executor := agents.NewExecutor(conversationAgent, - agents.WithMaxIterations(500), // Much higher limit for complex multi-step processes - agents.WithMemory(smartMemory), - agents.WithCallbacksHandler(actionLogger), - agents.WithReturnIntermediateSteps(), - ) - - azdAgent.executor = executor - return azdAgent, nil } -// RunConversationLoop runs the enhanced AZD Copilot agent with full capabilities -func (aai *AzdAiAgent) RunConversationLoop(ctx context.Context, args []string) error { - fmt.Println("🤖 AZD Copilot - Interactive Mode") - fmt.Println("═══════════════════════════════════════════════════════════") - - // Handle initial query if provided - var initialQuery string - if len(args) > 0 { - initialQuery = strings.Join(args, " ") +func WithCallbacksHandler(handler callbacks.Handler) AgentOption { + return func(agent *Agent) { + agent.callbacksHandler = handler } +} - scanner := bufio.NewScanner(os.Stdin) - - for { - var userInput string - - if initialQuery != "" { - userInput = initialQuery - initialQuery = "" // Clear after first use - color.Cyan("💬 You: %s\n", userInput) - } else { - fmt.Print(color.CyanString("\n💬 You: ")) - color.Set(color.FgCyan) // Set blue color for user input - if !scanner.Scan() { - color.Unset() // Reset color - break // EOF or error - } - userInput = strings.TrimSpace(scanner.Text()) - color.Unset() // Reset color after input - } - - // Check for exit commands - if userInput == "" { - continue - } - - if strings.ToLower(userInput) == "exit" || strings.ToLower(userInput) == "quit" { - fmt.Println("👋 Goodbye! Thanks for using AZD Copilot!") - break - } - - // Process the query with the enhanced agent - err := aai.runChain(ctx, userInput) - if err != nil { - continue +func toolNames(tools []tools.Tool) string { + var tn strings.Builder + for i, tool := range tools { + if i > 0 { + tn.WriteString(", ") } + tn.WriteString(tool.Name()) } - if err := scanner.Err(); err != nil { - return fmt.Errorf("error reading input: %w", err) - } - - return nil + return tn.String() } -// ProcessQuery processes a user query with full action tracking and validation -func (aai *AzdAiAgent) runChain(ctx context.Context, userInput string) error { - // Execute with enhanced input - agent should automatically handle memory - _, err := chains.Run(ctx, aai.executor, userInput, - chains.WithMaxTokens(800), - chains.WithTemperature(0.3), - ) - if err != nil { - return err +func toolDescriptions(tools []tools.Tool) string { + var ts strings.Builder + for _, tool := range tools { + ts.WriteString(fmt.Sprintf("- %s: %s\n", tool.Name(), tool.Description())) } - return nil + return ts.String() } diff --git a/cli/azd/internal/agent/conversational_agent.go b/cli/azd/internal/agent/conversational_agent.go new file mode 100644 index 00000000000..49b6c20e850 --- /dev/null +++ b/cli/azd/internal/agent/conversational_agent.go @@ -0,0 +1,182 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package agent + +import ( + "bufio" + "context" + _ "embed" + "fmt" + "os" + "strings" + + "github.com/fatih/color" + "github.com/tmc/langchaingo/agents" + "github.com/tmc/langchaingo/chains" + "github.com/tmc/langchaingo/llms" + "github.com/tmc/langchaingo/memory" + "github.com/tmc/langchaingo/prompts" + "github.com/tmc/langchaingo/tools" + + localtools "github.com/azure/azure-dev/cli/azd/internal/agent/tools" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" + mcptools "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" +) + +//go:embed prompts/conversational.txt +var conversational_prompt_template string + +// ConversationalAzdAiAgent represents an enhanced AZD Copilot agent with action tracking, intent validation, and conversation memory +type ConversationalAzdAiAgent struct { + *Agent +} + +func NewConversationalAzdAiAgent(llm llms.Model, opts ...AgentOption) (*ConversationalAzdAiAgent, error) { + azdAgent := &ConversationalAzdAiAgent{ + Agent: &Agent{ + defaultModel: llm, + samplingModel: llm, + tools: []tools.Tool{}, + }, + } + + for _, opt := range opts { + opt(azdAgent.Agent) + } + + smartMemory := memory.NewConversationBuffer( + memory.WithInputKey("input"), + memory.WithOutputKey("output"), + memory.WithHumanPrefix("Human"), + memory.WithAIPrefix("AI"), + ) + + // Create sampling handler for MCP + samplingHandler := mcptools.NewMcpSamplingHandler( + azdAgent.samplingModel, + mcp.WithDebug(azdAgent.debug), + ) + + toolLoaders := []localtools.ToolLoader{ + localtools.NewLocalToolsLoader(), + mcptools.NewMcpToolsLoader(samplingHandler), + } + + // Define block list of excluded tools + excludedTools := map[string]bool{ + "extension_az": true, + "extension_azd": true, + // Add more excluded tools here as needed + } + + for _, toolLoader := range toolLoaders { + categoryTools, err := toolLoader.LoadTools() + if err != nil { + return nil, err + } + + // Filter out excluded tools + for _, tool := range categoryTools { + if !excludedTools[tool.Name()] { + azdAgent.tools = append(azdAgent.tools, tool) + } + } + } + + promptTemplate := prompts.PromptTemplate{ + Template: conversational_prompt_template, + TemplateFormat: prompts.TemplateFormatGoTemplate, + InputVariables: []string{"input", "agent_scratchpad"}, + PartialVariables: map[string]any{ + "tool_names": toolNames(azdAgent.tools), + "tool_descriptions": toolDescriptions(azdAgent.tools), + "history": "", + }, + } + + // 4. Create agent with memory directly integrated + conversationAgent := agents.NewConversationalAgent(llm, azdAgent.tools, + agents.WithPrompt(promptTemplate), + agents.WithMemory(smartMemory), + agents.WithCallbacksHandler(azdAgent.callbacksHandler), + agents.WithReturnIntermediateSteps(), + ) + + // 5. 
Create executor without separate memory configuration since agent already has it + executor := agents.NewExecutor(conversationAgent, + agents.WithMaxIterations(500), // Much higher limit for complex multi-step processes + agents.WithMemory(smartMemory), + agents.WithCallbacksHandler(azdAgent.callbacksHandler), + agents.WithReturnIntermediateSteps(), + ) + + azdAgent.executor = executor + return azdAgent, nil +} + +func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, args ...string) (string, error) { + return aai.runChain(ctx, strings.Join(args, "\n")) +} + +// RunConversationLoop runs the enhanced AZD Copilot agent with full capabilities +func (aai *ConversationalAzdAiAgent) StartConversation(ctx context.Context, args ...string) (string, error) { + fmt.Println("🤖 AZD Copilot - Interactive Mode") + fmt.Println("═══════════════════════════════════════════════════════════") + + // Handle initial query if provided + var initialQuery string + if len(args) > 0 { + initialQuery = strings.Join(args, " ") + } + + scanner := bufio.NewScanner(os.Stdin) + + for { + var userInput string + + if initialQuery != "" { + userInput = initialQuery + initialQuery = "" // Clear after first use + color.Cyan("💬 You: %s\n", userInput) + } else { + fmt.Print(color.CyanString("\n💬 You: ")) + color.Set(color.FgCyan) // Set blue color for user input + if !scanner.Scan() { + color.Unset() // Reset color + break // EOF or error + } + userInput = strings.TrimSpace(scanner.Text()) + color.Unset() // Reset color after input + } + + // Check for exit commands + if userInput == "" { + continue + } + + if strings.ToLower(userInput) == "exit" || strings.ToLower(userInput) == "quit" { + fmt.Println("👋 Goodbye! Thanks for using AZD Copilot!") + break + } + + // Process the query with the enhanced agent + return aai.runChain(ctx, userInput) + } + + if err := scanner.Err(); err != nil { + return "", fmt.Errorf("error reading input: %w", err) + } + + return "", nil +} + +// ProcessQuery processes a user query with full action tracking and validation +func (aai *ConversationalAzdAiAgent) runChain(ctx context.Context, userInput string) (string, error) { + // Execute with enhanced input - agent should automatically handle memory + output, err := chains.Run(ctx, aai.executor, userInput) + if err != nil { + return "", err + } + return output, nil +} diff --git a/cli/azd/internal/agent/logging/file_logger.go b/cli/azd/internal/agent/logging/file_logger.go new file mode 100644 index 00000000000..07222bbe781 --- /dev/null +++ b/cli/azd/internal/agent/logging/file_logger.go @@ -0,0 +1,201 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package logging + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "io" + "os" + "time" + + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/llms" + "github.com/tmc/langchaingo/schema" +) + +// Compile-time check to ensure FileLogger implements callbacks.Handler +var _ callbacks.Handler = &FileLogger{} + +// FlushWriter is an interface for writers that support flushing +type FlushWriter interface { + io.Writer + Flush() error +} + +// FileLogger logs all agent actions to a file with automatic flushing +type FileLogger struct { + writer FlushWriter + file *os.File // Keep reference to close file when needed +} + +// FileLoggerOption represents an option for configuring FileLogger +type FileLoggerOption func(*FileLogger) + +// NewFileLogger creates a new file logger that writes to the provided FlushWriter +func NewFileLogger(writer FlushWriter, opts ...FileLoggerOption) *FileLogger { + fl := &FileLogger{ + writer: writer, + } + + for _, opt := range opts { + opt(fl) + } + + return fl +} + +// NewFileLoggerDefault creates a new file logger with default settings. +// Opens or creates "azd-agent-{date}.log" in the current working directory. +// Returns the logger and a cleanup function that should be called to close the file. +func NewFileLoggerDefault(opts ...FileLoggerOption) (*FileLogger, func() error, error) { + // Create dated filename: azd-agent-2025-08-05.log + dateStr := time.Now().Format("2006-01-02") + filename := fmt.Sprintf("azd-agent-%s.log", dateStr) + + file, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644) + if err != nil { + return nil, nil, fmt.Errorf("failed to open log file: %w", err) + } + + bufferedWriter := bufio.NewWriter(file) + + // Create a flush writer that flushes both the buffer and the file + flushWriter := &fileFlushWriter{ + writer: bufferedWriter, + file: file, + } + + fl := &FileLogger{ + writer: flushWriter, + file: file, + } + + for _, opt := range opts { + opt(fl) + } + + cleanup := func() error { + if err := bufferedWriter.Flush(); err != nil { + file.Close() + return err + } + return file.Close() + } + + return fl, cleanup, nil +} + +// fileFlushWriter wraps a buffered writer and ensures both buffer and file are flushed +type fileFlushWriter struct { + writer *bufio.Writer + file *os.File +} + +func (fw *fileFlushWriter) Write(p []byte) (int, error) { + return fw.writer.Write(p) +} + +func (fw *fileFlushWriter) Flush() error { + if err := fw.writer.Flush(); err != nil { + return err + } + return fw.file.Sync() +} + +// writeAndFlush writes a message to the file and flushes immediately +func (fl *FileLogger) writeAndFlush(format string, args ...interface{}) { + timestamp := time.Now().UTC().Format(time.RFC3339) + message := fmt.Sprintf("[%s] %s\n", timestamp, fmt.Sprintf(format, args...)) + + if _, err := fl.writer.Write([]byte(message)); err == nil { + fl.writer.Flush() + } +} + +// HandleText is called when text is processed +func (fl *FileLogger) HandleText(ctx context.Context, text string) { + fl.writeAndFlush("TEXT: %s", text) +} + +// HandleLLMGenerateContentStart is called when LLM content generation starts +func (fl *FileLogger) HandleLLMGenerateContentStart(ctx context.Context, ms []llms.MessageContent) { + fl.writeAndFlush("LLM_GENERATE_START: %d messages", len(ms)) +} + +// HandleLLMGenerateContentEnd is called when LLM content generation ends +func (fl *FileLogger) HandleLLMGenerateContentEnd(ctx context.Context, res *llms.ContentResponse) { + for i, choice := range res.Choices { + 
fl.writeAndFlush("LLM_GENERATE_END[%d]: %s", i, choice.Content) + } +} + +// HandleRetrieverStart is called when retrieval starts +func (fl *FileLogger) HandleRetrieverStart(ctx context.Context, query string) { + fl.writeAndFlush("RETRIEVER_START: %s", query) +} + +// HandleRetrieverEnd is called when retrieval ends +func (fl *FileLogger) HandleRetrieverEnd(ctx context.Context, query string, documents []schema.Document) { + fl.writeAndFlush("RETRIEVER_END: query=%s, documents=%d", query, len(documents)) +} + +// HandleToolStart is called when a tool execution starts +func (fl *FileLogger) HandleToolStart(ctx context.Context, input string) { + fl.writeAndFlush("TOOL_START: %s", input) +} + +// HandleToolEnd is called when a tool execution ends +func (fl *FileLogger) HandleToolEnd(ctx context.Context, output string) { + fl.writeAndFlush("TOOL_END: %s", output) +} + +// HandleToolError is called when a tool execution fails +func (fl *FileLogger) HandleToolError(ctx context.Context, err error) { + fl.writeAndFlush("TOOL_ERROR: %s", err.Error()) +} + +// HandleLLMStart is called when LLM call starts +func (fl *FileLogger) HandleLLMStart(ctx context.Context, prompts []string) { + fl.writeAndFlush("LLM_START: %d prompts", len(prompts)) +} + +// HandleChainStart is called when chain execution starts +func (fl *FileLogger) HandleChainStart(ctx context.Context, inputs map[string]any) { + inputsJson, _ := json.Marshal(inputs) + fl.writeAndFlush("CHAIN_START: %s", string(inputsJson)) +} + +// HandleChainEnd is called when chain execution ends +func (fl *FileLogger) HandleChainEnd(ctx context.Context, outputs map[string]any) { + outputsJson, _ := json.Marshal(outputs) + fl.writeAndFlush("CHAIN_END: %s", string(outputsJson)) +} + +// HandleChainError is called when chain execution fails +func (fl *FileLogger) HandleChainError(ctx context.Context, err error) { + fl.writeAndFlush("CHAIN_ERROR: %s", err.Error()) +} + +// HandleAgentAction is called when an agent action is planned +func (fl *FileLogger) HandleAgentAction(ctx context.Context, action schema.AgentAction) { + fl.writeAndFlush("AGENT_ACTION: tool=%s, input=%s", action.Tool, action.ToolInput) +} + +// HandleAgentFinish is called when the agent finishes +func (fl *FileLogger) HandleAgentFinish(ctx context.Context, finish schema.AgentFinish) { + fl.writeAndFlush("AGENT_FINISH: %s", finish.Log) +} + +// HandleLLMError is called when LLM call fails +func (fl *FileLogger) HandleLLMError(ctx context.Context, err error) { + fl.writeAndFlush("LLM_ERROR: %s", err.Error()) +} + +// HandleStreamingFunc handles streaming responses +func (fl *FileLogger) HandleStreamingFunc(ctx context.Context, chunk []byte) { + fl.writeAndFlush("STREAMING: %s", string(chunk)) +} diff --git a/cli/azd/internal/agent/logging/logger.go b/cli/azd/internal/agent/logging/logger.go index e3f9b64e0e4..94a36192ffa 100644 --- a/cli/azd/internal/agent/logging/logger.go +++ b/cli/azd/internal/agent/logging/logger.go @@ -95,10 +95,16 @@ func (al *ActionLogger) HandleRetrieverEnd(ctx context.Context, query string, do // HandleToolStart is called when a tool execution starts func (al *ActionLogger) HandleToolStart(ctx context.Context, input string) { + if al.debugEnabled { + color.HiBlack("\nHandleToolStart\n%s\n", input) + } } // HandleToolEnd is called when a tool execution ends func (al *ActionLogger) HandleToolEnd(ctx context.Context, output string) { + if al.debugEnabled { + color.HiBlack("\nHandleToolEnd\n%s\n", output) + } } // HandleToolError is called when a tool execution fails 
diff --git a/cli/azd/internal/agent/one_shot_agent.go b/cli/azd/internal/agent/one_shot_agent.go new file mode 100644 index 00000000000..e6b5adf427f --- /dev/null +++ b/cli/azd/internal/agent/one_shot_agent.go @@ -0,0 +1,117 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package agent + +import ( + "context" + _ "embed" + "strings" + + "github.com/tmc/langchaingo/agents" + "github.com/tmc/langchaingo/chains" + "github.com/tmc/langchaingo/llms" + "github.com/tmc/langchaingo/prompts" + "github.com/tmc/langchaingo/tools" + + localtools "github.com/azure/azure-dev/cli/azd/internal/agent/tools" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" + mcptools "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" +) + +// OneShotAzdAiAgent represents an enhanced AZD Copilot agent with action tracking, intent validation, and conversation memory +type OneShotAzdAiAgent struct { + *Agent +} + +//go:embed prompts/one_shot.txt +var one_shot_prompt_template string + +func NewOneShotAzdAiAgent(llm llms.Model, opts ...AgentOption) (*OneShotAzdAiAgent, error) { + azdAgent := &OneShotAzdAiAgent{ + Agent: &Agent{ + defaultModel: llm, + samplingModel: llm, + tools: []tools.Tool{}, + }, + } + + for _, opt := range opts { + opt(azdAgent.Agent) + } + + // Create sampling handler for MCP + samplingHandler := mcptools.NewMcpSamplingHandler( + azdAgent.samplingModel, + mcp.WithDebug(azdAgent.debug), + ) + + toolLoaders := []localtools.ToolLoader{ + localtools.NewLocalToolsLoader(), + mcptools.NewMcpToolsLoader(samplingHandler), + } + + // Define block list of excluded tools + excludedTools := map[string]bool{ + "extension_az": true, + "extension_azd": true, + // Add more excluded tools here as needed + } + + for _, toolLoader := range toolLoaders { + categoryTools, err := toolLoader.LoadTools() + if err != nil { + return nil, err + } + + // Filter out excluded tools + for _, tool := range categoryTools { + if !excludedTools[tool.Name()] { + azdAgent.tools = append(azdAgent.tools, tool) + } + } + } + + promptTemplate := prompts.PromptTemplate{ + Template: one_shot_prompt_template, + InputVariables: []string{"input", "agent_scratchpad"}, + TemplateFormat: prompts.TemplateFormatGoTemplate, + PartialVariables: map[string]any{ + "tool_names": toolNames(azdAgent.tools), + "tool_descriptions": toolDescriptions(azdAgent.tools), + }, + } + + // 4. Create agent with memory directly integrated + oneShotAgent := agents.NewOneShotAgent(llm, azdAgent.tools, + agents.WithPrompt(promptTemplate), + agents.WithCallbacksHandler(azdAgent.callbacksHandler), + agents.WithReturnIntermediateSteps(), + ) + + // 5. 
Create executor without separate memory configuration since agent already has it + executor := agents.NewExecutor(oneShotAgent, + agents.WithMaxIterations(500), // Much higher limit for complex multi-step processes + agents.WithCallbacksHandler(azdAgent.callbacksHandler), + agents.WithReturnIntermediateSteps(), + ) + + azdAgent.executor = executor + return azdAgent, nil +} + +// RunConversationLoop runs the enhanced AZD Copilot agent with full capabilities +func (aai *OneShotAzdAiAgent) SendMessage(ctx context.Context, args ...string) (string, error) { + return aai.runChain(ctx, strings.Join(args, "\n")) +} + +// ProcessQuery processes a user query with full action tracking and validation +func (aai *OneShotAzdAiAgent) runChain(ctx context.Context, userInput string) (string, error) { + // Execute with enhanced input - agent should automatically handle memory + output, err := chains.Run(ctx, aai.executor, userInput) + if err != nil { + return "", err + } + + return output, nil +} diff --git a/cli/azd/internal/agent/prompts/conversational.txt b/cli/azd/internal/agent/prompts/conversational.txt new file mode 100644 index 00000000000..66878fc4eca --- /dev/null +++ b/cli/azd/internal/agent/prompts/conversational.txt @@ -0,0 +1,88 @@ +You are an Azure Developer CLI (AZD) agent. +You are an expert in building, provisioning, and deploying Azure applications. +Always use Azure best practices and automation wherever possible. + +--- + +## Pre-Task Expectations + +Before beginning your work: + +* Review all available tools. +* If a tool exists for best practices or required inputs, you MUST invoke it before taking further steps. +* Integrate any learned knowledge from tools into your workflow. + +When generating code, infrastructure, or configurations: + +* You MUST ALWAYS save the content to files using the `write_file` tool. +* If no filename is provided, generate a meaningful and descriptive name. + +--- + +## Efficiency and Token Usage Guidelines + +To minimize cost and maximize speed: + +* DO NOT list or read full directories unless absolutely necessary. +* Prefer targeted exploration: + * Top-level file listings (1–2 levels deep) + * Common files: `README.md`, `package.json`, `*.csproj`, etc. + * Specific file extensions or known filenames +* Read files incrementally and only go deeper if prior steps justify it. +* **Favor breadth over depth**, and always limit the number and size of file reads per action. + +--- + +You have access to the following tools: +{{.tool_descriptions}} + +--- + +## REQUIRED RESPONSE FORMAT — DO NOT DEVIATE + +You MUST follow the ReAct pattern below for every task, without exception. + +This pattern consists of repeating the following sequence: + +``` +Thought: [Analyze the current situation and what needs to be done] +Thought: Do I need to use a tool? [Yes/No] +Action: [the action to take, should be one of [{{.tool_names}}]] +Action Input: [the input to the action] +Observation: [the result of the action] +``` + +After each Observation, you MUST continue the ReAct loop: + +* Reflect on the outcome. +* Determine if further actions are required. +* If yes, perform the next tool call using the same format. +* If an error occurred, debug and retry using alternative tool inputs (up to 3 retries). + +Only when ALL subtasks are completed and no further tool use is needed, you may finish with: + +``` +Thought: Do I need to use a tool? No +AI: [your full, final answer] +``` + +--- + +## Additional Behavior Requirements + +* Never skip the ReAct format. 
No direct answers, summaries, or conclusions are allowed outside of the full ReAct loop. +* Every Observation must trigger another Thought. +* You must NEVER exit early unless all actions are truly completed. +* If tool output reveals new required work, continue acting until all related tasks are complete. +* Be exhaustive and explicit in your reasoning. + +--- + +Previous conversation history: +{{.history}} + +User Question: +{{.input}} + +Thought: +{{.agent_scratchpad}} diff --git a/cli/azd/internal/agent/prompts/default_agent_format_instructions.txt b/cli/azd/internal/agent/prompts/default_agent_format_instructions.txt deleted file mode 100644 index 4ff35663ba8..00000000000 --- a/cli/azd/internal/agent/prompts/default_agent_format_instructions.txt +++ /dev/null @@ -1,40 +0,0 @@ -Answer the following questions or perform tasks as best you can. You have access to the following tools: - -IMPORTANT: Continue taking actions recursively until the task is completely finished. Do not stop after a single action if more work is needed to accomplish the user's goal. - -Follow this format exactly: - -Thought: [Analyze the current situation and what needs to be done] - -Thought: Do I need to use a tool? [Yes/No] -Action: [the action to take, should be one of [{{.tool_names}}]] -Action Input: [the input to the action] -Observation: [the result of the action] - -After each Observation, you MUST continue the cycle: - -Thought: [Evaluate the result and determine if the task is complete or if more actions are needed] - -If the task is NOT complete: - -Thought: Do I need to use a tool? Yes -Action: [next action to take] -Action Input: [input for the next action] -Observation: [result of the next action] -... (continue this cycle until the task is fully complete) - -If there are errors: - -Thought: [Analyze the error and determine how to fix it] -Thought: Do I need to use a tool? Yes -Action: [corrective action] -Action Input: [corrected input] -Observation: [result] -... (retry up to 3 times with different approaches if needed) - -Remember: Always continue taking actions until the user's request is fully satisfied. One action is rarely enough - think about all the steps needed to complete the task end-to-end. - -When you are done or handing control back to the user you MUST ALWAYS use the following format: - -Thought: Do I need to use a tool? No -AI: [briefly summarize your response without all the details from your observations] \ No newline at end of file diff --git a/cli/azd/internal/agent/prompts/default_agent_prefix.txt b/cli/azd/internal/agent/prompts/default_agent_prefix.txt deleted file mode 100644 index 0102a07432f..00000000000 --- a/cli/azd/internal/agent/prompts/default_agent_prefix.txt +++ /dev/null @@ -1,17 +0,0 @@ -You are an Azure Developer CLI (AZD) agent. -You are an expert is building, provisioning and deploying Azure applications. -Always use Azure best patterns and practices. - -Before starting your task initial task review available tools. -If any tools exist for best practices invoke the tool to learn more. -Incorporate learned best practices in your work. - -When any code generation is performed ALWAYS save content to files. -When filenames are not explicitly specified generate new files with meaningful names. 
- -TOOLS: ------- - -Agent has access to the following tools: - -{{.tool_descriptions}} diff --git a/cli/azd/internal/agent/prompts/default_agent_suffix.txt b/cli/azd/internal/agent/prompts/default_agent_suffix.txt deleted file mode 100644 index c469d53ce8e..00000000000 --- a/cli/azd/internal/agent/prompts/default_agent_suffix.txt +++ /dev/null @@ -1,8 +0,0 @@ -Begin! - -Previous conversation history: -{{.history}} - -Question: {{.input}} - -Thought:{{.agent_scratchpad}} \ No newline at end of file diff --git a/cli/azd/internal/agent/prompts/one_shot.txt b/cli/azd/internal/agent/prompts/one_shot.txt new file mode 100644 index 00000000000..890569e2bc5 --- /dev/null +++ b/cli/azd/internal/agent/prompts/one_shot.txt @@ -0,0 +1,78 @@ +You are an Azure Developer CLI (AZD) agent. +You are an expert in generating, building, provisioning, and deploying Azure applications. +Always follow Azure best patterns and practices. +Always automate as many tasks as possible. + +Before starting your initial task, review the available tools. +If any tools exist for best practices, invoke those tools to gather information. +Incorporate any learned best practices into your work. + +When generating code or configuration, ALWAYS save the output to a file. +If a filename is not explicitly provided, generate a meaningful and appropriate name automatically. + +--- + +**Efficiency and Token Usage Guidelines:** + +- Always minimize token usage when interacting with file-related tools. +- Do **not** request large directory globs like `**/*` or attempt to read full directories unless absolutely required. +- Instead, start with: + - High-level file listings (e.g., 1–2 levels deep) + - Only common project root files or config files + - Specific files by name or extension (.csproj, package.json, README.md) +- When reading files, limit the number of files and prefer smaller ones. +- Never request entire folders to be read in a single call. +- If you need to scan deeper, do so **incrementally** and **only if earlier reads indicate it's necessary.** +- When in doubt, prioritize **breadth first, then depth**. + +Failing to follow these heuristics may result in tool failures, token overuse, or excessive latency. + +--- + +You have access to the following tools: + +{{.tool_descriptions}} + +When responding, always use the following format: + +Question: [the input question you must answer] +Thought: [you should always think about what to do] +Action: [the action to take, must be one of [ {{.tool_names}} ]] +Action Input: [the input to the action] +Observation: [the result of the action] +... (this Thought → Action → Action Input → Observation sequence can repeat N times) +Thought: [I now know the final answer] +Final Answer: [the final answer to the original input question] + +--- + +**Important Behavioral Guidelines:** + +- After every Observation, reflect on whether it reveals additional work that must be done. New tasks may emerge from tool outputs — you must identify and complete them before finishing. +- Do **not** assume a task is complete after a single tool call unless you have verified that **all necessary work is complete**. +- Never skip steps or return a Final Answer prematurely. +- Always continue until all identified and implied tasks have been completed using the tools available. +- If the Observation hints at other subtasks, pursue them fully before concluding. 
+ +**Strict Output Format Rules (Do Not Violate):** + +You MUST follow this exact output structure for each tool invocation: + +Thought: [your thought] +Action: [tool name] +Action Input: [input to the tool] +Observation: [result from the tool] + +**Every** Action MUST be followed by an Observation — even if the result is empty, obvious, or a no-op. +Do NOT omit, reorder, or skip any part of this pattern. +Do NOT substitute summaries or explanations for an Observation. + +Only after completing all actions and observations may you finish with: + +Thought: I now know the final answer +Final Answer: [your full, final answer] + +Begin! + +Question: {{.input}} +{{.agent_scratchpad}} diff --git a/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go b/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go index d74111eaa53..0368400fabd 100644 --- a/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go +++ b/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go @@ -17,13 +17,15 @@ func (t *AzdArchitecturePlanningTool) Name() string { } func (t *AzdArchitecturePlanningTool) Description() string { - return ` - Performs Azure service selection and architecture planning for applications preparing for Azure Developer CLI (AZD) initialization. - This is Phase 2 of the AZD migration process that maps components to Azure services, plans hosting strategies, - and designs infrastructure architecture based on discovery results. + return `Returns instructions for selecting appropriate Azure services for discovered application components and designing infrastructure architecture. The LLM agent should execute these instructions using available tools. - Input: "./azd-arch-plan.md" - ` +Use this tool when: +- Discovery analysis has been completed and azd-arch-plan.md exists +- Application components have been identified and classified +- Need to map components to Azure hosting services +- Ready to plan containerization and database strategies + +Input: "./azd-arch-plan.md"` } func (t *AzdArchitecturePlanningTool) Call(ctx context.Context, input string) (string, error) { diff --git a/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go b/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go index 9e5764563f3..5b1f5adb84e 100644 --- a/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go +++ b/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go @@ -17,13 +17,15 @@ func (t *AzdAzureYamlGenerationTool) Name() string { } func (t *AzdAzureYamlGenerationTool) Description() string { - return ` - Generates the azure.yaml configuration file for Azure Developer CLI (AZD) projects. - This specialized tool focuses on creating service definitions, hosting configurations, - and deployment instructions. Can be used independently for service configuration updates. + return `Returns instructions for generating the azure.yaml configuration file with proper service hosting, build, and deployment settings for AZD projects. The LLM agent should execute these instructions using available tools. 
- Input: "./azd-arch-plan.md" - ` +Use this tool when: +- Architecture planning has been completed and Azure services selected +- Need to create or update azure.yaml configuration file +- Services have been mapped to Azure hosting platforms +- Ready to define build and deployment configurations + +Input: "./azd-arch-plan.md"` } func (t *AzdAzureYamlGenerationTool) Call(ctx context.Context, input string) (string, error) { diff --git a/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go b/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go index 1d1ae810d23..5e24f5727fc 100644 --- a/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go +++ b/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go @@ -17,13 +17,15 @@ func (t *AzdDiscoveryAnalysisTool) Name() string { } func (t *AzdDiscoveryAnalysisTool) Description() string { - return ` - Performs comprehensive discovery and analysis of applications to prepare them for Azure Developer CLI (AZD) initialization. - This is Phase 1 of the AZD migration process that analyzes codebase, identifies components and dependencies, - and creates a foundation for architecture planning. + return `Returns instructions for performing comprehensive discovery and analysis of application components to prepare for Azure Developer CLI (AZD) initialization. The LLM agent should execute these instructions using available tools. - Input: "./azd-arch-plan.md" - ` +Use this tool when: +- Starting Phase 1 of AZD migration process +- Need to identify all application components and dependencies +- Codebase analysis required before architecture planning +- azd-arch-plan.md does not exist or needs updating + +Input: "./azd-arch-plan.md"` } func (t *AzdDiscoveryAnalysisTool) Call(ctx context.Context, input string) (string, error) { diff --git a/cli/azd/internal/agent/tools/azd/azd_docker_generation.go b/cli/azd/internal/agent/tools/azd/azd_docker_generation.go index 18effd6c9e7..c22b590c779 100644 --- a/cli/azd/internal/agent/tools/azd/azd_docker_generation.go +++ b/cli/azd/internal/agent/tools/azd/azd_docker_generation.go @@ -17,13 +17,15 @@ func (t *AzdDockerGenerationTool) Name() string { } func (t *AzdDockerGenerationTool) Description() string { - return ` - Generates Dockerfiles and container configurations for Azure Developer CLI (AZD) projects. - This specialized tool focuses on containerization requirements, creating optimized Dockerfiles - for different programming languages, and configuring container-specific settings for Azure hosting. + return `Returns instructions for generating optimized Dockerfiles and container configurations for containerizable services in AZD projects. The LLM agent should execute these instructions using available tools. 
- Input: "./azd-arch-plan.md" - ` +Use this tool when: +- Architecture planning identified services requiring containerization +- azd-arch-plan.md shows Container Apps or AKS as selected hosting platform +- Need Dockerfiles for microservices, APIs, or containerized web applications +- Ready to implement containerization strategy + +Input: "./azd-arch-plan.md"` } func (t *AzdDockerGenerationTool) Call(ctx context.Context, input string) (string, error) { diff --git a/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go b/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go index 38d797365c8..2fe68dbeaeb 100644 --- a/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go +++ b/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go @@ -17,11 +17,15 @@ func (t *AzdIacGenerationRulesTool) Name() string { } func (t *AzdIacGenerationRulesTool) Description() string { - return ` - Gets the infrastructure as code (IaC) rules and best practices and patterns to use when generating bicep files and modules for use within AZD. + return `Returns comprehensive rules and guidelines for generating Bicep Infrastructure as Code files and modules for AZD projects. The LLM agent should reference these rules when generating infrastructure code. - Input: "./azd-arch-plan.md" - ` +Use this tool when: +- Generating any Bicep infrastructure templates for AZD projects +- Need compliance rules and naming conventions for Azure resources +- Creating modular, reusable Bicep files +- Ensuring security and operational best practices + +Input: "./azd-arch-plan.md"` } func (t *AzdIacGenerationRulesTool) Call(ctx context.Context, input string) (string, error) { diff --git a/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go b/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go index 0cc87372e87..b147a99b3aa 100644 --- a/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go +++ b/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go @@ -17,13 +17,14 @@ func (t *AzdInfrastructureGenerationTool) Name() string { } func (t *AzdInfrastructureGenerationTool) Description() string { - return ` - Generates Bicep infrastructure templates for Azure Developer CLI (AZD) projects. - This specialized tool focuses on creating modular Bicep templates, parameter files, - and implementing Azure security and operational best practices for infrastructure as code. + return `Returns instructions for generating modular Bicep infrastructure templates following Azure security and operational best practices for AZD projects. The LLM agent should execute these instructions using available tools. - Input: "./azd-arch-plan.md" - ` +Use this tool when: +- Architecture planning completed with Azure services selected +- Need to create Bicep infrastructure templates +- Ready to implement infrastructure as code for deployment + +Input: "./azd-arch-plan.md"` } func (t *AzdInfrastructureGenerationTool) Call(ctx context.Context, input string) (string, error) { diff --git a/cli/azd/internal/agent/tools/azd/azd_plan_init.go b/cli/azd/internal/agent/tools/azd/azd_plan_init.go index 3bddc9dbb31..0c246d46b92 100644 --- a/cli/azd/internal/agent/tools/azd/azd_plan_init.go +++ b/cli/azd/internal/agent/tools/azd/azd_plan_init.go @@ -17,11 +17,15 @@ func (t *AzdPlanInitTool) Name() string { } func (t *AzdPlanInitTool) Description() string { - return ` - Gets the required workflow steps and best practices and patterns for initializing or migrating an application to use AZD. 
+ return `Returns instructions for orchestrating complete AZD application initialization using structured phases with specialized tools. The LLM agent should execute these instructions using available tools. - Input: "./azd-arch-plan.md" - ` +Use this tool when: +- Starting new AZD project initialization or migration +- Need structured approach to transform application into AZD-compatible project +- Want to ensure proper sequencing of discovery, planning, and file generation +- Require complete project orchestration guidance + +Input: "./azd-arch-plan.md"` } func (t *AzdPlanInitTool) Call(ctx context.Context, input string) (string, error) { diff --git a/cli/azd/internal/agent/tools/azd/azd_project_validation.go b/cli/azd/internal/agent/tools/azd/azd_project_validation.go index de7639839a5..2a856a5596a 100644 --- a/cli/azd/internal/agent/tools/azd/azd_project_validation.go +++ b/cli/azd/internal/agent/tools/azd/azd_project_validation.go @@ -4,12 +4,10 @@ import ( "context" _ "embed" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" "github.com/tmc/langchaingo/tools" ) -//go:embed prompts/azd_project_validation.md -var azdProjectValidationPrompt string - // AzdProjectValidationTool validates an AZD project by running comprehensive checks on all components // including azure.yaml schema validation, Bicep template validation, environment setup, packaging, // and deployment preview. @@ -22,15 +20,20 @@ func (t *AzdProjectValidationTool) Name() string { // Description returns the description of the tool. func (t *AzdProjectValidationTool) Description() string { - return ` - Validates an AZD project by running comprehensive checks on all components including azure.yaml schema validation, Bicep template validation, environment setup, packaging, and deployment preview. + return `Returns instructions for validating AZD project by running comprehensive checks on azure.yaml schema, Bicep templates, environment setup, packaging, and deployment preview. The LLM agent should execute these instructions using available tools. + +Use this tool when: +- All AZD configuration files have been generated +- Ready to validate complete project before deployment +- Need to ensure azure.yaml, Bicep templates, and environment are properly configured +- Final validation step before running azd up - Input: "./azd-arch-plan.md"` +Input: "./azd-arch-plan.md"` } // Call executes the tool with the given arguments. func (t *AzdProjectValidationTool) Call(ctx context.Context, args string) (string, error) { - return azdProjectValidationPrompt, nil + return prompts.AzdProjectValidationPrompt, nil } // Ensure AzdProjectValidationTool implements the Tool interface. diff --git a/cli/azd/internal/agent/tools/azd/prompts/README.md b/cli/azd/internal/agent/tools/azd/prompts/README.md deleted file mode 100644 index 01c5a3ab0dd..00000000000 --- a/cli/azd/internal/agent/tools/azd/prompts/README.md +++ /dev/null @@ -1,199 +0,0 @@ -# AZD Modular Tools Overview - -This document provides an overview of the modular AZD initialization tools that replace the monolithic `azd_plan_init` tool. Each tool is designed to be used independently or as part of a complete AZD migration workflow. - -## Tool Structure - -The AZD initialization process has been broken down into focused, modular tools: - -### 1. 
Discovery and Analysis Tool (`azd_discovery_analysis`) - -**Purpose:** Analyze applications and identify components and dependencies -**Use When:** Starting a new AZD migration or need to understand an existing codebase -**Output:** Component inventory and dependency mapping in `azd-arch-plan.md` - -### 2. Architecture Planning Tool (`azd_architecture_planning`) - -**Purpose:** Select Azure services and plan hosting strategies -**Use When:** You have discovered components and need to plan Azure service mapping -**Prerequisites:** Completed discovery and analysis -**Output:** Architecture decisions and service selections in `azd-arch-plan.md` - -### 3. Azure.yaml Generation Tool (`azd_azure_yaml_generation`) - -**Purpose:** Generate azure.yaml service configuration file -**Use When:** You need to create or update just the service definitions -**Prerequisites:** Understanding of application services and hosting requirements -**Output:** Valid `azure.yaml` file - -### 4. Infrastructure Generation Tool (`azd_infrastructure_generation`) - -**Purpose:** Generate Bicep infrastructure templates -**Use When:** You need to create or update just the infrastructure components -**Prerequisites:** Architecture decisions about Azure services -**Output:** Complete Bicep template structure - -### 5. Docker Generation Tool (`azd_docker_generation`) - -**Purpose:** Generate Dockerfiles and container configurations -**Use When:** You need containerization for your services -**Prerequisites:** Understanding of application services and containerization needs -**Output:** Optimized Dockerfiles and .dockerignore files - -### 6. Project Validation Tool (`azd_project_validation`) - -**Purpose:** Validate the complete AZD project setup and configuration -**Use When:** All files are generated and you need to validate the setup -**Prerequisites:** All configuration files generated -**Output:** Validation report and ready-to-deploy confirmation - -## Complete Workflow - -For a full AZD migration, use the tools in this sequence: - -``` -1. azd_discovery_analysis - ↓ -2. azd_architecture_planning - ↓ -3a. azd_azure_yaml_generation -3b. azd_infrastructure_generation -3c. azd_docker_generation (if containerization needed) - ↓ -4. 
azd_project_validation -``` - -## Selective Usage - -You can also use individual tools for specific tasks: - -**Generate only azure.yaml:** -``` -azd_discovery_analysis → azd_azure_yaml_generation -``` - -**Generate only infrastructure:** -``` -azd_architecture_planning → azd_infrastructure_generation -``` - -**Add containerization:** -``` -azd_docker_generation (based on existing analysis) -``` - -**Validate existing project:** -``` -azd_project_validation (for validation and testing) -``` - -## Central Planning Document - -All tools use `azd-arch-plan.md` as the central planning document: - -- **Created by:** Discovery and Analysis tool -- **Updated by:** All subsequent tools -- **Purpose:** Track progress, document decisions, and maintain project state -- **Location:** Current working directory - -## Key Features - -### Modular Design -- Each tool has a specific responsibility -- Tools can be used independently or together -- Clear prerequisites and outputs -- Consistent documentation patterns - -### Azure Best Practices -- All tools implement Azure best practices -- Security-first approach -- Cost optimization considerations -- Operational excellence patterns - -### LLM Optimized -- Clear, actionable instructions -- Structured output formats -- Comprehensive validation steps -- Troubleshooting guidance - -### Progress Tracking -- Checkboxes for completed actions -- Clear success criteria -- Validation requirements -- Next step guidance - -## Tool Selection Guide - -**Use the Discovery Tool when:** -- Starting a new AZD migration -- Don't understand the application structure -- Need to document existing architecture -- Want to identify all components and dependencies - -**Use the Architecture Planning Tool when:** -- Have component inventory -- Need to select Azure services -- Planning hosting strategies -- Designing infrastructure architecture - -**Use the File Generation Tool when:** -- Have architecture decisions -- Need to create all AZD files -- Want complete project setup -- Ready to implement infrastructure - -**Use the Environment Initialization Tool when:** -- All files are generated -- Ready to create AZD environment -- Need to validate complete setup -- Preparing for deployment - -**Use the Azure.yaml Generation Tool when:** -- Only need service configuration -- Updating existing azure.yaml -- Working with known service requirements -- Quick service definition setup - -**Use the Infrastructure Generation Tool when:** -- Only need Bicep templates -- Updating existing infrastructure -- Working with specific Azure service requirements -- Advanced infrastructure customization - -## Benefits of Modular Approach - -### For Users -- **Faster iterations:** Update only what you need -- **Better understanding:** Focus on one aspect at a time -- **Reduced complexity:** Smaller, focused tasks -- **Flexible workflow:** Use tools in different orders based on needs - -### For LLMs -- **Clearer context:** Each tool has specific scope -- **Better accuracy:** Focused instructions reduce errors -- **Improved validation:** Tool-specific validation steps -- **Enhanced troubleshooting:** Targeted problem resolution - -### For Maintenance -- **Easier updates:** Modify individual tools without affecting others -- **Better testing:** Test each tool independently -- **Clearer documentation:** Each tool is self-contained -- **Improved reusability:** Tools can be repurposed for different scenarios - -## Migration from Original Tool - -If you were using the original `azd_plan_init` tool, here's how to 
migrate: - -**Original Phase 1 (Discovery and Analysis):** -Use `azd_discovery_analysis` tool - -**Original Phase 2 (Architecture Planning):** -Use `azd_architecture_planning` tool - -**Original Phase 3 (File Generation):** -Use `azd_azure_yaml_generation` + `azd_infrastructure_generation` + `azd_docker_generation` for focused file generation - -**Original Phase 4 (Project Validation):** -Use `azd_project_validation` tool for final validation and setup verification - -The modular tools provide the same functionality with improved focus and flexibility. diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_architecture_planning.md b/cli/azd/internal/agent/tools/azd/prompts/azd_architecture_planning.md index b85778ecb24..904f75ffa95 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_architecture_planning.md +++ b/cli/azd/internal/agent/tools/azd/prompts/azd_architecture_planning.md @@ -1,165 +1,133 @@ -# AZD Architecture Planning Tool +# AZD Architecture Planning Instructions -This tool performs Azure service selection and architecture planning for Azure Developer CLI (AZD) initialization. This is Phase 2 of the AZD migration process. +✅ **Agent Task List** -## Overview +1. Read `azd-arch-plan.md` to understand discovered components +2. For each component, select optimal Azure service using selection criteria below +3. Plan containerization strategy for applicable services +4. Select appropriate database and messaging services +5. Design resource group organization and networking approach +6. Generate IaC file checklist based on selected Azure services +7. Generate Docker file checklist based on containerization strategy +8. Create `azd-arch-plan.md` if it doesn't exist, or update existing file with service mapping table, architecture decisions, IaC checklist, and Docker checklist while preserving existing content -Use discovery results to select appropriate Azure services, plan hosting strategies, and design infrastructure architecture. +📄 **Required Outputs** -**IMPORTANT:** Before starting, review the `azd-arch-plan.md` file in your current working directory to understand discovered components and dependencies from the discovery phase. 
+- Create `azd-arch-plan.md` if missing, or update existing file with Azure Service Mapping Table showing Component | Current Tech | Azure Service | Rationale +- Hosting strategy summary documenting decisions for each component (preserve existing content) +- Containerization plans for applicable services (preserve existing content) +- Infrastructure architecture design including resource organization and networking (preserve existing content) +- **IaC File Generation Checklist** listing all Bicep files that need to be created based on selected services (add to existing file) +- **Docker File Generation Checklist** listing all Docker files needed for containerized services (add to existing file) -## Success Criteria +🧠 **Execution Guidelines** -- [ ] Azure service selections made for all components -- [ ] Hosting strategies defined for each service -- [ ] Containerization plans documented -- [ ] Infrastructure architecture designed -- [ ] Ready to proceed to file generation phase +**Azure Service Selection Criteria:** -## Azure Service Selection +**Azure Container Apps (PREFERRED)** - Use for microservices, containerized applications, event-driven workloads with auto-scaling needs -**REQUIRED ANALYSIS:** +**Azure Kubernetes Service (AKS)** - Use for complex containerized applications requiring full Kubernetes control, advanced networking, custom operators -For each discovered application component, select the most appropriate Azure hosting platform: +**Azure App Service** - Use for web applications, REST APIs needing specific runtime versions or Windows-specific features -### Azure Container Apps (PREFERRED) +**Azure Functions** - Use for event processing, scheduled tasks, lightweight APIs with pay-per-execution model -**Use for:** Microservices, containerized applications, event-driven workloads -**Benefits:** Auto-scaling, managed Kubernetes, simplified deployment -**Consider when:** Component can be containerized, needs elastic scaling +**Azure Static Web Apps** - Use for frontend SPAs, static sites, JAMstack applications with minimal backend needs -### Azure App Service +**Database Service Selection:** -**Use for:** Web applications, REST APIs with specific runtime needs -**Benefits:** Managed platform, built-in CI/CD, easy SSL/custom domains -**Consider when:** Need specific runtime versions, Windows-specific features +- Azure SQL Database: SQL Server compatibility, complex queries, ACID compliance +- Azure Database for PostgreSQL/MySQL: Specific engine compatibility required +- Azure Cosmos DB: NoSQL requirements, global scale, flexible schemas +- Azure Cache for Redis: Application caching, session storage, real-time analytics -### Azure Functions +**Messaging Service Selection:** -**Use for:** Event processing, scheduled tasks, lightweight APIs -**Benefits:** Serverless, automatic scaling, pay-per-execution -**Consider when:** Event-driven processing, stateless operations +- Azure Service Bus: Enterprise messaging, guaranteed delivery, complex routing +- Azure Event Hubs: High-throughput event streaming, telemetry ingestion +- Azure Event Grid: Event-driven architectures, reactive programming -### Azure Static Web Apps +**IaC File Checklist Generation:** -**Use for:** Frontend SPAs, static sites, JAMstack applications -**Benefits:** Global CDN, built-in authentication, API integration -**Consider when:** Static content, minimal backend requirements +Based on selected Azure services, generate a checklist of required Bicep files to be created: -## Selection Criteria +**Always 
Required:** -**REQUIRED ANALYSIS:** +- [ ] `./infra/main.bicep` - Primary deployment template (subscription scope) +- [ ] `./infra/main.parameters.json` - Parameter defaults +- [ ] `./infra/modules/monitoring.bicep` - Log Analytics and Application Insights -For each discovered component, consider: +**Service-Specific Modules (include based on service selection):** -- Scalability requirements and traffic patterns -- Runtime and platform needs -- Operational complexity preferences -- Cost considerations -- Team expertise and preferences +- [ ] `./infra/modules/container-apps.bicep` - If Container Apps selected +- [ ] `./infra/modules/app-service.bicep` - If App Service selected +- [ ] `./infra/modules/functions.bicep` - If Azure Functions selected +- [ ] `./infra/modules/static-web-app.bicep` - If Static Web Apps selected +- [ ] `./infra/modules/aks.bicep` - If AKS selected +- [ ] `./infra/modules/database.bicep` - If SQL/PostgreSQL/MySQL selected +- [ ] `./infra/modules/cosmosdb.bicep` - If Cosmos DB selected +- [ ] `./infra/modules/storage.bicep` - If Storage Account needed +- [ ] `./infra/modules/keyvault.bicep` - If Key Vault needed (recommended) +- [ ] `./infra/modules/servicebus.bicep` - If Service Bus selected +- [ ] `./infra/modules/eventhub.bicep` - If Event Hubs selected +- [ ] `./infra/modules/redis.bicep` - If Redis Cache selected +- [ ] `./infra/modules/container-registry.bicep` - If container services selected -## Containerization Planning +**Example IaC Checklist Output:** -**REQUIRED ASSESSMENT:** - -For each component, determine: - -- **Containerization Feasibility:** Can it run in Docker? Windows-specific dependencies? -- **Docker Strategy:** Base image selection, port mappings, environment variables -- **Resource Requirements:** CPU, memory, storage needs -- **Health Check Strategy:** Endpoint patterns for monitoring - -## Data Storage Planning - -**REQUIRED ANALYSIS:** - -Select appropriate Azure database services: - -### Azure SQL Database - -**Use for:** SQL Server compatibility, complex queries, ACID compliance -**Consider when:** Relational data model, existing SQL Server applications - -### Azure Database for PostgreSQL/MySQL - -**Use for:** PostgreSQL/MySQL workloads, web applications -**Consider when:** Specific database engine compatibility required - -### Azure Cosmos DB - -**Use for:** NoSQL requirements, global scale, flexible schemas -**Consider when:** Multiple data models, global distribution needed - -### Azure Cache for Redis - -**Use for:** Application caching, session storage, real-time analytics -**Consider when:** Performance optimization, session management - -## Messaging and Integration Planning - -**REQUIRED ANALYSIS:** - -Select messaging services based on patterns: - -### Azure Service Bus - -**Use for:** Enterprise messaging, guaranteed delivery, complex routing -**Consider when:** Reliable messaging, enterprise scenarios - -### Azure Event Hubs - -**Use for:** High-throughput event streaming, telemetry ingestion -**Consider when:** Big data scenarios, real-time analytics - -### Azure Event Grid - -**Use for:** Event-driven architectures, reactive programming -**Consider when:** Decoupled systems, serverless architectures - -## Update Architecture Documentation +```markdown +## Infrastructure as Code File Checklist -**REQUIRED ACTIONS:** +Based on the selected Azure services, the following Bicep files need to be generated: -Update `azd-arch-plan.md` with: +### Core Files (Always Required) +- [ ] `./infra/main.bicep` - Primary deployment template 
+- [ ] `./infra/main.parameters.json` - Parameter defaults +- [ ] `./infra/modules/monitoring.bicep` - Observability stack -### Azure Service Mapping Table +### Service-Specific Modules +- [ ] `./infra/modules/container-apps.bicep` - For web API hosting +- [ ] `./infra/modules/database.bicep` - For PostgreSQL database +- [ ] `./infra/modules/keyvault.bicep` - For secrets management +- [ ] `./infra/modules/container-registry.bicep` - For container image storage -```markdown -| Component | Current Tech | Azure Service | Rationale | -|-----------|-------------|---------------|-----------| -| Web App | React | Static Web Apps | Frontend SPA | -| API Service | Node.js | Container Apps | Microservice architecture | -| Database | PostgreSQL | Azure Database for PostgreSQL | Existing dependency | +Total files to generate: 7 ``` -### Hosting Strategy Summary +**Docker File Checklist Generation:** -- Document hosting decisions for each component -- Include containerization plans where applicable -- Note resource requirements and scaling strategies +Based on selected Azure services and containerization strategy, generate a checklist of required Docker files: -### Infrastructure Architecture +**Container-Based Services (include based on service selection):** -- Resource group organization strategy -- Networking and security design approach -- Monitoring and logging strategy -- Integration patterns between services +- [ ] `{service-path}/Dockerfile` - If Container Apps, AKS, or containerized App Service selected +- [ ] `{service-path}/.dockerignore` - For each containerized service -### Next Steps Checklist +**Example Docker Checklist Output:** -- [ ] Azure service selected for each component with rationale -- [ ] Hosting strategies defined -- [ ] Containerization plans documented -- [ ] Data storage strategies planned -- [ ] Ready to proceed to file generation phase +```markdown +## Docker File Generation Checklist -## Next Phase +Based on the containerization strategy, the following Docker files need to be generated: -After completing architecture planning, proceed to the appropriate file generation tool: +### Service Dockerfiles +- [ ] `./api/Dockerfile` - For Node.js API service (Container Apps) +- [ ] `./api/.dockerignore` - Exclude unnecessary files from API container +- [ ] `./frontend/Dockerfile` - For React frontend (containerized App Service) +- [ ] `./frontend/.dockerignore` - Exclude unnecessary files from frontend container -- Use `azd_azure_yaml_generation` tool for azure.yaml configuration -- Use `azd_infrastructure_generation` tool for Bicep templates -- Use `azd_docker_generation` tool for container configurations -- Use `azd_project_validation` tool for final project validation +Total Docker files to generate: 4 +``` -**IMPORTANT:** Keep `azd-arch-plan.md` updated as the central reference for all architecture decisions. This document guides subsequent phases and serves as implementation documentation. 
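**Example Service Mapping Table Output:**

A minimal illustrative sketch of the Azure Service Mapping Table format described in the required outputs above; the components, technologies, and rationales shown here are hypothetical placeholders drawn from the selection criteria in these guidelines, not recommendations for any specific application.

```markdown
## Azure Service Mapping

| Component | Current Tech        | Azure Service                  | Rationale                                      |
|-----------|---------------------|--------------------------------|------------------------------------------------|
| frontend  | React SPA           | Azure Static Web Apps          | Static frontend with minimal backend needs     |
| api       | Node.js (Express)   | Azure Container Apps           | Containerized microservice with auto-scaling   |
| worker    | Python              | Azure Container Apps           | Background queue processor                     |
| database  | PostgreSQL          | Azure Database for PostgreSQL  | Existing engine compatibility required         |
| cache     | Redis               | Azure Cache for Redis          | Session storage and application caching        |
```

When recorded in `azd-arch-plan.md`, a table in this shape gives the later generation phases a single authoritative mapping from each discovered component to its hosting decision.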
+📌 **Completion Checklist** + +- [ ] Azure service selected for each discovered component with documented rationale +- [ ] Hosting strategies defined and documented in `azd-arch-plan.md` +- [ ] Containerization plans documented for applicable services +- [ ] Data storage strategies planned and documented +- [ ] Resource group organization strategy defined +- [ ] Integration patterns between services documented +- [ ] **IaC file checklist generated** and added to `azd-arch-plan.md` based on selected services +- [ ] **Docker file checklist generated** and added to `azd-arch-plan.md` based on containerization strategy +- [ ] `azd-arch-plan.md` created or updated while preserving existing content +- [ ] Ready to proceed to infrastructure generation phase diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md index 84a7618ea0a..7496351c81e 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md +++ b/cli/azd/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md @@ -1,67 +1,37 @@ -# AZD Azure.yaml Generation Tool +# AZD Azure.yaml Generation Instructions -This specialized tool generates the `azure.yaml` configuration file for Azure Developer CLI (AZD) projects. +✅ **Agent Task List** -## Overview +1. Check if `azd-arch-plan.md` exists and review architecture decisions +2. Identify all application services (frontend, backend, functions, etc.) +3. Determine hosting requirements for each service based on Azure service selections +4. Analyze build requirements (language, package manager, build commands) +5. Create complete `azure.yaml` file in root directory following required patterns +6. Validate file against AZD schema using available tools +7. Update existing `azd-arch-plan.md` with generated configuration details while preserving existing content -Generate a valid `azure.yaml` configuration file with proper service hosting, build, and deployment settings. +📄 **Required Outputs** -**IMPORTANT:** Before starting, check if `azd-arch-plan.md` exists in your current working directory. If it exists, review it to understand previous analysis and architecture decisions. Use the existing `azd_yaml_schema` tool for schema validation. +- Valid `azure.yaml` file created in root directory +- Service configurations matching Azure service selections from architecture planning +- Build and deployment instructions for all services +- Configuration validated against AZD schema +- Update existing `azd-arch-plan.md` with configuration details while preserving existing content -## Success Criteria +🧠 **Execution Guidelines** -- [ ] Valid `azure.yaml` file created in root directory -- [ ] All application services properly configured -- [ ] Service hosting configurations match Azure service selections -- [ ] Build and deployment instructions complete -- [ ] File validates against AZD schema (use `azd_yaml_schema` tool) - -## Service Analysis Requirements - -**REQUIRED ACTIONS:** - -1. **Identify Application Services:** - - Frontend applications (React, Angular, Vue.js, static sites) - - Backend services (REST APIs, microservices, GraphQL, gRPC) - - Function-based services (Azure Functions) - - Background services and workers - -2. 
**Determine Hosting Requirements:** - - **Container Apps:** Microservices, APIs, containerized web apps - - **App Service:** Traditional web applications, APIs - - **Static Web Apps:** Frontend SPAs, static sites - - **Functions:** Event-driven, serverless workloads - -3. **Analyze Build Requirements:** - - Programming language and framework - - Package manager (npm, pip, dotnet, maven) - - Build commands and output directories - - Dependency management needs +**Service Analysis Requirements:** -## Azure.yaml Configuration Requirements +Identify and configure these service types: -**REQUIRED ACTIONS:** +- **Frontend applications:** React, Angular, Vue.js, static sites +- **Backend services:** REST APIs, microservices, GraphQL, gRPC +- **Function-based services:** Azure Functions for event-driven workloads +- **Background services:** Workers and long-running processes -Create a complete `azure.yaml` file in the root directory following these patterns: +**Hosting Configuration Patterns:** -### Basic Structure Requirements - -**IMPORTANT:** Use the `azd_yaml_schema` tool for complete schema definition, structure requirements, and validation rules. - -Basic structure: - -```yaml -name: [project-name] -services: - # Service configurations -infra: - provider: bicep - path: infra -``` - -### Service Configuration Patterns - -**Azure Container Apps (for microservices, APIs, containerized apps):** +**Azure Container Apps** (for microservices, APIs, containerized apps): ```yaml services: @@ -70,10 +40,10 @@ services: language: js host: containerapp docker: - path: ./src/api/Dockerfile + path: ./Dockerfile ``` -**Azure App Service (for traditional web apps):** +**Azure App Service** (for traditional web apps): ```yaml services: @@ -83,7 +53,7 @@ services: host: appservice ``` -**Azure Functions (for serverless workloads):** +**Azure Functions** (for serverless workloads): ```yaml services: @@ -93,7 +63,7 @@ services: host: function ``` -**Azure Static Web Apps (for SPAs, static sites):** +**Azure Static Web Apps** (for SPAs, static sites): ```yaml services: @@ -104,97 +74,28 @@ services: dist: build ``` -### Advanced Configuration Options - -**Environment Variables:** - -```yaml -services: - api: - env: - - name: NODE_ENV - value: production - - name: DATABASE_URL - value: "{{ .Env.DATABASE_URL }}" -``` - -**Custom Build Commands:** - -```yaml -services: - frontend: - hooks: - prebuild: - posix: npm install - build: - posix: npm run build -``` - -## Configuration Requirements - -**CRITICAL REQUIREMENTS:** +**Critical Configuration Requirements:** -- Service names must be valid Azure resource names (alphanumeric, hyphens only) +- Service names must be alphanumeric with hyphens only - All `project` paths must point to existing directories -- All `docker.path` references must point to existing Dockerfiles +- All `docker.path` references must point to existing Dockerfiles **relative to the service project path** - Host types must be: `containerapp`, `appservice`, `function`, or `staticwebapp` - Language must match detected programming language - `dist` paths must match build output directories -## Validation Requirements - -**VALIDATION STEPS:** - -1. **Schema Validation:** Use `azd_yaml_schema` tool for authoritative schema validation -2. **Path Validation:** Ensure all referenced paths exist -3. 
**Configuration Testing:** Run `azd show` to test service discovery - -**Validation Commands:** - -```bash -# Validate configuration -azd config show - -# Test service discovery -azd show -``` - -## Common Patterns +**Important Note:** For Container Apps with Docker configurations, the `docker.path` is relative to the service's `project` directory, not the repository root. For example, if your service project is `./src/api` and the Dockerfile is located at `./src/api/Dockerfile`, the `docker.path` should be `./Dockerfile`. -**Multi-Service Microservices:** +**Advanced Configuration Options:** -- Frontend: Static Web App -- APIs: Container Apps with Dockerfiles -- Background Services: Container Apps or Functions +- Environment variables using `${VARIABLE_NAME}` syntax +- Custom commands using hooks (prebuild, postbuild, prepackage, postpackage, preprovision, postprovision) +- Service dependencies and startup order -**Full-Stack Application:** +📌 **Completion Checklist** -- Frontend: Static Web App -- Backend: Container App or App Service - -**Serverless Application:** - -- Frontend: Static Web App -- APIs: Azure Functions - -## Update Documentation - -**REQUIRED ACTIONS:** - -Update `azd-arch-plan.md` with: - -- Generated azure.yaml location and schema version -- Service configuration table (service, type, host, language, path) -- Hosting strategy summary by Azure service type -- Build and deployment configuration decisions -- Docker configuration details -- Validation results - -## Next Steps - -After azure.yaml generation is complete: - -1. Validate configuration using `azd_yaml_schema` tool -2. Test service discovery with `azd show` - -**IMPORTANT:** Reference existing tools for specific functionality. Use `azd_yaml_schema` for schema validation. +- [ ] Valid `azure.yaml` file created in root directory +- [ ] All discovered services properly configured with correct host types +- [ ] Service hosting configurations match Azure service selections from architecture planning +- [ ] Build and deployment instructions complete for all services +- [ ] File validates against any available AZD schema tools +- [ ] `azd-arch-plan.md` updated with configuration details while preserving existing content diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_discovery_analysis.md b/cli/azd/internal/agent/tools/azd/prompts/azd_discovery_analysis.md index 10ff9e4e49c..d14f99ca52e 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_discovery_analysis.md +++ b/cli/azd/internal/agent/tools/azd/prompts/azd_discovery_analysis.md @@ -1,200 +1,66 @@ -# AZD Application Discovery and Analysis Tool +# AZD Application Discovery and Analysis Instructions -This tool performs comprehensive discovery and analysis of applications to prepare them for Azure Developer CLI (AZD) initialization. This is Phase 1 of the AZD migration process. +✅ **Agent Task List** -Always use Azure best practices with intelligent defaults. +1. Check if `azd-arch-plan.md` exists and review previous analysis if present +2. Scan current directory recursively for all files and document structure +3. Identify programming languages, frameworks, and configuration files +4. Classify discovered components by type (web apps, APIs, databases, etc.) +5. Map dependencies and communication patterns between components +6. 
Create `azd-arch-plan.md` if it doesn't exist, or update existing file with complete discovery report while preserving existing content -## Overview +📄 **Required Outputs** -This tool analyzes your current codebase and architecture to: -1. Identify all application components and dependencies -2. Classify components by type and hosting requirements -3. Map dependencies and communication patterns -4. Provide foundation for architecture planning +- Complete file system inventory documented in `azd-arch-plan.md` (create file if missing, update existing while preserving content) +- Component classification table with Type | Technology | Location | Purpose (add to existing file) +- Dependency map showing inter-component communication (add to existing file) +- External dependencies list with required environment variables (add to existing file) +- Discovery report ready for architecture planning phase -**IMPORTANT:** Before starting, check if `azd-arch-plan.md` exists in your current working directory. If it exists, review it to understand what analysis has already been completed and build upon that work. +🧠 **Execution Guidelines** -## Success Criteria +**File System Analysis - Document:** -The discovery and analysis is successful when: +- Programming languages and frameworks detected +- Configuration files (package.json, requirements.txt, pom.xml, Dockerfile, docker-compose.yml) +- API endpoints, service definitions, application entry points +- Database configurations and connection strings +- CI/CD pipeline files (.github/workflows, azure-pipelines.yml) +- Documentation files and existing architecture docs -- [ ] Complete file system inventory is documented -- [ ] All application components are identified and classified -- [ ] Component dependencies are mapped -- [ ] Results are documented in `azd-arch-plan.md` -- [ ] Ready to proceed to architecture planning phase +**Component Classification Categories:** -## Step 1: Deep File System Analysis +- **Web Applications:** React/Angular/Vue.js apps, static sites, server-rendered apps +- **API Services:** REST APIs, GraphQL endpoints, gRPC services, microservices +- **Background Services:** Message queue processors, scheduled tasks, data pipelines +- **Databases:** SQL/NoSQL databases, caching layers, migration scripts +- **Messaging Systems:** Message queues, event streaming, pub/sub systems +- **AI/ML Components:** Models, inference endpoints, training pipelines +- **Supporting Services:** Authentication, logging, monitoring, configuration -**REQUIRED ACTIONS:** +**Dependency Analysis - Identify:** -- Scan all files in the current working directory recursively -- Document file structure, programming languages, and frameworks detected -- Identify configuration files (package.json, requirements.txt, pom.xml, etc.) -- Locate any existing Docker files, docker-compose files, or containerization configs -- Find database configuration files and connection strings -- Identify API endpoints, service definitions, and application entry points -- Look for existing CI/CD pipeline files (.github/workflows, azure-pipelines.yml, etc.) 
-- Identify documentation files (README.md, API docs, architecture docs) +- Internal dependencies (component-to-component communication) +- External dependencies (third-party APIs, SaaS services) +- Data dependencies (shared databases, file systems, caches) +- Configuration dependencies (shared settings, secrets, environment variables) +- Runtime dependencies (required services for startup) -**ANALYSIS QUESTIONS TO ANSWER:** - -- What programming languages and frameworks are used? -- What build systems and package managers are in use? -- Are there existing containerization configurations? -- What ports and endpoints are exposed? -- What external dependencies are required? -- Are there existing deployment or infrastructure configurations? - -**OUTPUT:** Complete inventory of all discoverable application artifacts - -## Step 2: Component Classification - -**REQUIRED ACTIONS:** - -Categorize each discovered component into one of these types: - -- **Web Applications** (frontend, SPA, static sites) - - React, Angular, Vue.js applications - - Static HTML/CSS/JavaScript sites - - Server-rendered web applications - -- **API Services** (REST APIs, GraphQL, gRPC services) - - RESTful web APIs - - GraphQL endpoints - - gRPC services - - Microservices - -- **Background Services** (workers, processors, scheduled jobs) - - Message queue processors - - Scheduled task runners - - Data processing pipelines - - Event handlers - -- **Databases** (relational, NoSQL, caching) - - SQL Server, PostgreSQL, MySQL databases - - NoSQL databases (MongoDB, CosmosDB) - - Caching layers (Redis, Memcached) - - Database migration scripts - -- **Messaging Systems** (queues, topics, event streams) - - Message queues - - Event streaming platforms - - Pub/sub systems - -- **AI/ML Components** (models, inference endpoints, training jobs) - - Machine learning models - - AI inference endpoints - - Training pipelines - - Data preprocessing services - -- **Supporting Services** (authentication, logging, monitoring) - - Authentication services - - Logging aggregators - - Monitoring and metrics - - Configuration services - -**CLASSIFICATION CRITERIA:** - -For each component, determine: -- Primary function and responsibility -- Runtime requirements -- Scalability needs -- Security considerations -- Integration points - -**OUTPUT:** Structured component inventory with classifications - -## Step 3: Dependency Mapping - -**REQUIRED ACTIONS:** - -- Map inter-component dependencies and communication patterns -- Identify external service dependencies (third-party APIs, SaaS services) -- Document data flow between components -- Identify shared resources and configuration -- Analyze network communication requirements -- Document authentication and authorization flows - -**DEPENDENCY ANALYSIS:** - -- **Internal Dependencies:** How components communicate with each other -- **External Dependencies:** Third-party services, APIs, databases -- **Data Dependencies:** Shared databases, file systems, caches -- **Configuration Dependencies:** Shared settings, secrets, environment variables -- **Runtime Dependencies:** Required services for startup and operation - -**COMMUNICATION PATTERNS TO IDENTIFY:** +**Communication Patterns to Document:** - Synchronous HTTP/HTTPS calls -- Asynchronous messaging -- Database connections -- File system access -- Caching patterns -- Authentication flows - -**OUTPUT:** Component dependency graph and communication matrix - -## Step 4: Generate Discovery Report - -**REQUIRED ACTIONS:** - -Create or update 
`azd-arch-plan.md` with the following sections: - -```markdown -# AZD Architecture Plan - -## Discovery and Analysis Results - -### Application Overview -- [Summary of application type and purpose] -- [Key technologies and frameworks identified] -- [Overall architecture pattern (monolith, microservices, etc.)] - -### Component Inventory -[For each component discovered:] -- **Component Name:** [name] -- **Type:** [classification] -- **Technology:** [language/framework] -- **Location:** [file path/directory] -- **Purpose:** [brief description] -- **Entry Points:** [how component is accessed] -- **Configuration:** [key config files] - -### Dependency Map -[Visual or text representation of dependencies] -- **Component A** → **Component B** (HTTP API) -- **Component B** → **Database** (SQL connection) -- **Component A** → **External API** (REST calls) - -### External Dependencies -- [List of third-party services] -- [Required environment variables] -- [External configuration requirements] - -### Next Steps -- [ ] Review discovery results -- [ ] Proceed to architecture planning phase -- [ ] Use `azd_architecture_planning` tool -``` - -## Validation and Next Steps - -**VALIDATION CHECKLIST:** - -- [ ] All major application components identified -- [ ] Component types and technologies documented -- [ ] Dependencies mapped and understood -- [ ] External services and APIs catalogued -- [ ] `azd-arch-plan.md` created or updated with findings - -**NEXT PHASE:** - -After completing this discovery phase, proceed to the **Architecture Planning** phase using the `azd_architecture_planning` tool. This next phase will use your discovery results to: - -- Select appropriate Azure services for each component -- Plan hosting strategies and containerization -- Design infrastructure architecture -- Prepare for configuration file generation - -**IMPORTANT:** Keep the `azd-arch-plan.md` file updated throughout the process as it serves as the central planning document for your AZD migration. +- Asynchronous messaging patterns +- Database connections and data access +- File system access patterns +- Caching patterns and session management +- Authentication and authorization flows + +📌 **Completion Checklist** + +- [ ] Complete inventory of all discoverable application artifacts documented +- [ ] All major application components identified and classified by type +- [ ] Component technologies and frameworks documented with file locations +- [ ] Dependencies mapped and communication patterns understood +- [ ] External services and APIs catalogued with requirements +- [ ] `azd-arch-plan.md` created or updated with comprehensive findings while preserving existing content +- [ ] Ready to proceed to architecture planning phase using `azd_architecture_planning` tool diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_docker_generation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_docker_generation.md index 38091d58d9a..b09aee91625 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_docker_generation.md +++ b/cli/azd/internal/agent/tools/azd/prompts/azd_docker_generation.md @@ -1,174 +1,115 @@ -# AZD Docker Generation Tool +# AZD Docker Generation Instructions -This specialized tool generates Dockerfiles and container configurations for Azure Developer CLI (AZD) projects. +✅ **Agent Task List** -## Overview +1. Read the **Docker File Generation Checklist** from `azd-arch-plan.md` +2. Identify containerizable services and required Docker files from the checklist +3. 
Detect programming language and framework for each containerizable service +4. Generate each Docker file specified in the checklist following language-specific best practices +5. Create .dockerignore files for build optimization +6. Implement health checks and security configurations +7. Update the Docker checklist section in existing `azd-arch-plan.md` by marking completed items as [x] while preserving existing content -Generate optimized Dockerfiles for different programming languages and frameworks with Azure Container Apps best practices. +📄 **Required Outputs** -**IMPORTANT:** Before starting, check if `azd-arch-plan.md` exists in your current working directory. If it exists, review it to understand discovered services and containerization requirements. +- All Docker files listed in the Docker File Generation Checklist from `azd-arch-plan.md` +- Dockerfiles created for all containerizable services +- .dockerignore files generated for each service +- Health check endpoints implemented +- Multi-stage builds with security best practices +- Update existing `azd-arch-plan.md` Docker checklist by marking completed items as [x] while preserving existing content -## Success Criteria +🧠 **Execution Guidelines** -- [ ] Dockerfiles created for all containerizable services -- [ ] .dockerignore files generated for build optimization -- [ ] Health checks and security configurations implemented -- [ ] Multi-stage builds used where appropriate -- [ ] Azure Container Apps best practices followed +**Read Docker Checklist:** -## Containerization Requirements Analysis +- Read the "Docker File Generation Checklist" section from `azd-arch-plan.md` +- This checklist specifies exactly which Docker files need to be generated +- Use this as the authoritative source for what to create +- Follow the exact file paths specified in the checklist -**REQUIRED ACTIONS:** +**Generate Files in Order:** -1. **Identify Containerization Candidates:** - - Microservices and APIs (REST, GraphQL, gRPC) - - Web applications needing runtime flexibility - - Background services and workers - - Custom applications with specific runtime requirements +- Create service Dockerfiles first (e.g., `{service-path}/Dockerfile`) +- Create corresponding .dockerignore files for each service (e.g., `{service-path}/.dockerignore`) +- Follow the exact file paths specified in the checklist from `azd-arch-plan.md` -2. **Services That Don't Need Containerization:** - - Static websites (use Azure Static Web Apps) - - Azure Functions (serverless, managed runtime) - - Database services (use managed Azure databases) +**Containerization Candidates:** -3. **Language and Framework Detection:** - - Programming language (Node.js, Python, .NET, Java, Go, etc.) - - Framework type (Express, FastAPI, ASP.NET Core, Spring Boot) - - Build requirements (npm, pip, dotnet, maven, gradle) - - Runtime dependencies and port configurations -- **Programming language** (Node.js, Python, .NET, Java, Go, etc.) 
+- **Include:** Microservices, REST APIs, GraphQL services, web applications, background workers +- **Exclude:** Static websites (use Static Web Apps), Azure Functions (serverless), databases (use managed services) -## Dockerfile Generation Requirements - -**REQUIRED ACTIONS:** - -For each containerizable service, generate optimized Dockerfiles following these patterns: - -### Language-Specific Requirements +**Language-Specific Dockerfile Patterns:** **Node.js Applications:** -- Use `node:18-alpine` base image -- Implement multi-stage build (build + runtime) + +- Base image: `node:18-alpine` +- Multi-stage build (build + runtime) - Copy package*.json first for layer caching - Use `npm ci --only=production` -- Create non-root user (`nodejs`) -- Expose appropriate port (typically 3000) -- Include health check endpoint -- Use `CMD ["npm", "start"]` +- Non-root user: `nodejs` +- Expose port 3000, health check `/health` **Python Applications:** -- Use `python:3.11-slim` base image -- Set environment variables: `PYTHONDONTWRITEBYTECODE=1`, `PYTHONUNBUFFERED=1` -- Copy requirements.txt first for caching + +- Base image: `python:3.11-slim` +- Environment: `PYTHONDONTWRITEBYTECODE=1`, `PYTHONUNBUFFERED=1` +- Copy requirements.txt first - Use `pip install --no-cache-dir` -- Create non-root user (`appuser`) -- Expose appropriate port (typically 8000) -- Include health check endpoint -- Use appropriate startup command (uvicorn, gunicorn, etc.) +- Non-root user: `appuser` +- Expose port 8000, health check `/health` **.NET Applications:** -- Use `mcr.microsoft.com/dotnet/sdk:8.0` for build stage -- Use `mcr.microsoft.com/dotnet/aspnet:8.0` for runtime -- Multi-stage build: restore → build → publish → runtime -- Copy .csproj first for layer caching -- Create non-root user (`appuser`) -- Expose port 8080 (standard for .NET in containers) -- Include health check endpoint -- Use `ENTRYPOINT ["dotnet", "AppName.dll"]` - -**Java/Spring Boot Applications:** -- Use `openjdk:17-jdk-slim` for build, `openjdk:17-jre-slim` for runtime -- Copy pom.xml/build.gradle first for dependency caching -- Multi-stage build pattern -- Create non-root user (`appuser`) -- Expose port 8080 -- Include actuator health check -- Use `CMD ["java", "-jar", "app.jar"]` - -## Security and Best Practices - -**CRITICAL REQUIREMENTS:** - -- **Always use non-root users** in production stage -- **Use minimal base images** (alpine, slim variants) -- **Implement multi-stage builds** to reduce image size -- **Include health check endpoints** for Container Apps -- **Set proper working directories** and file permissions -- **Use layer caching** by copying dependency files first -- **Never include secrets** in container images - -## .dockerignore Requirements - -**REQUIRED ACTIONS:** - -Create .dockerignore files with these patterns: - -**Universal Exclusions:** -- Version control: `.git`, `.gitignore` -- Documentation: `README.md`, `*.md` -- IDE files: `.vscode/`, `.idea/`, `*.swp` -- OS files: `.DS_Store`, `Thumbs.db` -- Docker files: `Dockerfile*`, `.dockerignore`, `docker-compose*.yml` -- Build artifacts and logs - -**Language-Specific Exclusions:** -- **Node.js:** `node_modules/`, `npm-debug.log*`, `coverage/`, `dist/` -- **Python:** `__pycache__/`, `*.pyc`, `venv/`, `.pytest_cache/`, `dist/` -- **.NET:** `bin/`, `obj/`, `*.user`, `packages/`, `.vs/` -- **Java:** `target/`, `*.class`, `.mvn/repository` - -## Health Check Implementation - -**REQUIRED ACTIONS:** - -Each containerized service must include a health check endpoint: - -- 
**Endpoint:** `/health` (standard convention) -- **Response:** JSON with status and timestamp -- **HTTP Status:** 200 for healthy, 503 for unhealthy -- **Timeout:** 3 seconds maximum response time -- **Content:** `{"status": "healthy", "timestamp": "ISO-8601"}` - -## Container Optimization - -**REQUIRED OPTIMIZATIONS:** - -- Use multi-stage builds to exclude build tools from production images -- Copy package/dependency files before source code for better caching -- Combine RUN commands to reduce layers -- Clean package manager caches in same RUN command -- Use specific versions for base images (avoid `latest`) -- Set resource limits appropriate for Azure Container Apps - -## Validation and Testing - -**VALIDATION REQUIREMENTS:** - -- All Dockerfiles must build successfully: `docker build -t test-image .` -- Containers must run with non-root users -- Health checks must respond correctly -- Images should be optimized for size (use `docker images` to verify) -- Container startup time should be reasonable (<30 seconds) - -## Update Documentation - -**REQUIRED ACTIONS:** - -Update `azd-arch-plan.md` with: - -- List of generated Dockerfiles and their languages -- Container configurations (ports, health checks, users) -- Security implementations (non-root users, minimal images) -- Build optimizations applied -- Local testing commands - -## Next Steps - -After Docker generation is complete: - -1. Test all containers build successfully locally -2. Integrate Dockerfile paths into `azure.yaml` service definitions -3. Configure Container Apps infrastructure to use these images -4. Set up Azure Container Registry for image storage -**IMPORTANT:** Reference existing tools for schema validation. For azure.yaml updates, use the `azd_azure_yaml_generation` tool. For infrastructure setup, use the `azd_infrastructure_generation` tool. 
+- Build: `mcr.microsoft.com/dotnet/sdk:8.0` +- Runtime: `mcr.microsoft.com/dotnet/aspnet:8.0` +- Multi-stage: restore → build → publish → runtime +- Non-root user: `appuser` +- Expose port 8080, health check `/health` + +**Java/Spring Boot:** + +- Build: `openjdk:17-jdk-slim`, Runtime: `openjdk:17-jre-slim` +- Copy dependency files first for caching +- Non-root user: `appuser` +- Expose port 8080, actuator health check + +**Security and Optimization Requirements:** + +- Always use non-root users in production stage +- Use minimal base images (alpine, slim variants) +- Implement multi-stage builds to reduce size +- Include health check endpoints for Container Apps +- Set proper working directories and file permissions +- Use layer caching by copying dependency files first +- Never include secrets in container images + +**.dockerignore Patterns:** + +- Universal: `.git`, `README.md`, `.vscode/`, `.DS_Store`, `Dockerfile*` +- Node.js: `node_modules/`, `npm-debug.log*`, `coverage/` +- Python: `__pycache__/`, `*.pyc`, `venv/`, `.pytest_cache/` +- .NET: `bin/`, `obj/`, `*.user`, `packages/` +- Java: `target/`, `*.class`, `.mvn/repository` + +**Health Check Implementation:** + +- Endpoint: `/health` (standard convention) +- Response: JSON with status and timestamp +- HTTP Status: 200 for healthy, 503 for unhealthy +- Timeout: 3 seconds maximum +- Content: `{"status": "healthy", "timestamp": "ISO-8601"}` + +📌 **Completion Checklist** + +- [ ] **Docker File Generation Checklist read** from `azd-arch-plan.md` +- [ ] **All files from Docker checklist generated** in the correct locations +- [ ] Dockerfiles created for all containerizable services identified in architecture planning +- [ ] .dockerignore files generated with appropriate exclusions for each language +- [ ] Multi-stage builds implemented to reduce image size +- [ ] Non-root users configured for security +- [ ] Health check endpoints implemented for all services +- [ ] Container startup optimization applied (dependency file caching) +- [ ] All Dockerfiles build successfully (`docker build` test) +- [ ] Security best practices followed (minimal images, no secrets) +- [ ] **Docker checklist in `azd-arch-plan.md` updated** by marking completed items as [x] while preserving existing content diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md b/cli/azd/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md index bff5ab8418d..49d7bb80107 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md +++ b/cli/azd/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md @@ -1,134 +1,109 @@ -# Infrastructure as Code (IaC) Generation Rules for Azure Developer CLI (AZD) +# Infrastructure as Code (IaC) Generation Rules -This document provides comprehensive rules and guidelines for generating Bicep Infrastructure as Code files and modules for AZD projects. Follow these rules strictly when generating Azure infrastructure code. +✅ **Agent Task List** -## Core Generation Rules +1. Reference these rules when generating any IaC files +2. Follow file structure and organization requirements +3. Implement naming conventions and tagging strategies +4. Apply security and compliance best practices +5. 
Validate all generated code against these requirements -### File Structure and Organization +📄 **Required Outputs** -- **REQUIRED**: Place all IaC files in the `./infra` folder within an AZD project -- **REQUIRED**: Name the main deployment file `main.bicep` - this is the primary deployment target -- **REQUIRED**: Create a `main.parameters.json` file alongside `main.bicep` containing all parameter defaults for the Bicep deployment -- **REQUIRED**: The root level `main.bicep` must be a subscription level deployment using `targetScope = 'subscription'` -- **REQUIRED**: The main.bicep file must create a resource group as the primary container for all resources -- **REQUIRED**: Pass the resource group scope to all child modules that deploy resources -- **REQUIRED**: Create modular, reusable Bicep files instead of monolithic templates -- **RECOMMENDED**: Organize modules by resource type or logical grouping +- IaC files following all specified rules and conventions +- Proper file structure in `./infra` directory +- Compliance with Azure Well-Architected Framework principles +- Security best practices implemented +- Validation passing without errors -### Azure Best Practices Compliance +🧠 **Execution Guidelines** -- **REQUIRED**: Follow Azure Well-Architected Framework principles -- **REQUIRED**: Use Bicep best practices including proper parameter validation and resource dependencies -- **REQUIRED**: Leverage Azure Verified Modules (AVM) when available - always check for existing AVM modules before creating custom ones -- **REQUIRED**: Implement least-privilege access principles +**File Structure and Organization:** -### Naming Conventions +- **REQUIRED:** Place all IaC files in `./infra` folder +- **REQUIRED:** Name main deployment file `main.bicep` +- **REQUIRED:** Create `main.parameters.json` with parameter defaults +- **REQUIRED:** Main.bicep must use `targetScope = 'subscription'` +- **REQUIRED:** Create resource group as primary container +- **REQUIRED:** Pass resource group scope to all child modules +- **REQUIRED:** Create modular, reusable Bicep files -- **REQUIRED**: Use consistent naming pattern: `{resourcePrefix}-{name}-{uniqueHash}` -- **REQUIRED**: Generate unique hash using combination of environment name, subscription ID, and resource group name -- **EXAMPLE**: `app-myservice-h3x9k2` where `h3x9k2` is generated from env/subscription/rg -- **FORBIDDEN**: Hard-code tenant IDs, subscription IDs, or resource group names +**Naming Conventions:** -### Module Parameters +- **REQUIRED:** Use pattern `{resourcePrefix}-{name}-{uniqueHash}` +- **REQUIRED:** Generate unique hash from environment name, subscription ID, and resource group name +- **EXAMPLE:** `app-myservice-h3x9k2` where `h3x9k2` is generated +- **FORBIDDEN:** Hard-code tenant IDs, subscription IDs, or resource group names -- **REQUIRED**: Every module must accept these standard parameters: - - `name` (string): Base name for the resource - - `location` (string): Azure region for deployment - - `tags` (object): Resource tags for governance -- **REQUIRED**: Modules that deploy Azure resources must use `targetScope = 'resourceGroup'` and be called with the resource group scope from main.bicep -- **REQUIRED**: Provide intelligent defaults for optional parameters -- **REQUIRED**: Use parameter decorators for validation (e.g., `@minLength`, `@allowed`) -- **RECOMMENDED**: Group related parameters using objects when appropriate +**Module Parameters (All modules must accept):** -### Tagging Strategy +- `name` (string): Base name for 
the resource +- `location` (string): Azure region for deployment +- `tags` (object): Resource tags for governance +- **REQUIRED:** Modules use `targetScope = 'resourceGroup'` +- **REQUIRED:** Provide intelligent defaults for optional parameters +- **REQUIRED:** Use parameter decorators for validation -- **REQUIRED**: Tag resource groups with `azd-env-name: {environment-name}` -- **REQUIRED**: Tag hosting resources with `azd-service-name: {service-name}` -- **RECOMMENDED**: Include additional governance tags (cost center, owner, etc.) +**Tagging Strategy:** -### Security and Compliance +- **REQUIRED:** Tag resource groups with `azd-env-name: {environment-name}` +- **REQUIRED:** Tag hosting resources with `azd-service-name: {service-name}` +- **RECOMMENDED:** Include governance tags (cost center, owner, etc.) -- **FORBIDDEN**: Hard-code secrets, connection strings, or sensitive values -- **REQUIRED**: Use Key Vault references for secrets -- **REQUIRED**: Enable diagnostic settings and logging where applicable -- **REQUIRED**: Follow principle of least privilege for managed identities +**Security and Compliance:** -### Quality Assurance +- **FORBIDDEN:** Hard-code secrets, connection strings, or sensitive values +- **REQUIRED:** Use Key Vault references for secrets +- **REQUIRED:** Enable diagnostic settings and logging where applicable +- **REQUIRED:** Follow principle of least privilege for managed identities +- **REQUIRED:** Follow Azure Well-Architected Framework principles -- **REQUIRED**: Validate all generated Bicep code using Bicep CLI -- **REQUIRED**: Address all warnings and errors before considering code complete -- **REQUIRED**: Test deployment in a sandbox environment when possible +**Container Resource Specifications:** -## Supported Azure Services +- **REQUIRED:** Wrap partial CPU values in `json()` function (e.g., `json('0.5')` for 0.5 CPU cores) +- **REQUIRED:** Memory values should be strings with units (e.g., `'0.5Gi'`, `'1Gi'`, `'2Gi'`) +- **EXAMPLE:** Container Apps resource specification: -### Primary Hosting Resources (Choose One) - -1. **Azure Container Apps** ⭐ **(PREFERRED)** - - Best for containerized applications - - Built-in scaling and networking - - Supports both HTTP and background services - -2. **Azure App Service** - - Best for web applications and APIs - - Supports multiple runtime stacks - - Built-in CI/CD integration - -3. **Azure Function Apps** - - Best for serverless and event-driven workloads - - Multiple hosting plans available - - Trigger-based execution model - -4. **Azure Static Web Apps** - - Best for frontend applications - - Built-in GitHub/Azure DevOps integration - - Free tier available - -5. 
**Azure Kubernetes Service (AKS)** - - Best for complex containerized workloads - - Full Kubernetes capabilities - - Requires advanced configuration - -### Essential Supporting Resources + ```bicep + resources: { + cpu: json('0.25') // Correct: wrapped in json() + memory: '0.5Gi' // Correct: string with units + } + ``` -**REQUIRED** - Include these resources in most AZD applications: +**Supported Azure Services:** -- **Log Analytics Workspace** - - Central logging and monitoring - - Required for Application Insights - - Enable diagnostic settings for all resources +**Primary Hosting Resources (Choose One):** -- **Application Insights** - - Application performance monitoring - - Dependency tracking and telemetry - - Link to Log Analytics workspace +- **Azure Container Apps** (PREFERRED): Containerized applications, built-in scaling +- **Azure App Service:** Web applications and APIs, multiple runtime stacks +- **Azure Function Apps:** Serverless and event-driven workloads +- **Azure Static Web Apps:** Frontend applications, built-in CI/CD +- **Azure Kubernetes Service (AKS):** Complex containerized workloads -- **Azure Key Vault** - - Secure storage for secrets, keys, and certificates - - Use managed identity for access - - Enable soft delete and purge protection +**Essential Supporting Resources (REQUIRED for most applications):** -**CONDITIONAL** - Include based on application requirements: +- **Log Analytics Workspace:** Central logging and monitoring +- **Application Insights:** Application performance monitoring +- **Azure Key Vault:** Secure storage for secrets and certificates -- **Azure Container Registry** (for container-based apps) -- **Azure Service Bus** (for messaging scenarios) -- **Azure Cosmos DB** (for NoSQL data storage) -- **Azure SQL Database** (for relational data storage) -- **Azure Storage Account** (for blob/file storage) -- **Azure Cache for Redis** (for caching scenarios) +**Conditional Resources (Include based on requirements):** -## Code Generation Examples +- Azure Container Registry (for container-based apps) +- Azure Service Bus (for messaging scenarios) +- Azure Cosmos DB (for NoSQL data storage) +- Azure SQL Database (for relational data storage) +- Azure Storage Account (for blob/file storage) +- Azure Cache for Redis (for caching scenarios) -### Main.bicep Structure Template +**Main.bicep Structure Template:** ```bicep targetScope = 'subscription' - @description('Name of the environment') param environmentName string - @description('Location for all resources') param location string - @description('Tags to apply to all resources') param tags object = {} @@ -157,37 +132,14 @@ module appService 'modules/app-service.bicep' = { } ``` -### Main.parameters.json Structure Template - -```json -{ - "$schema": "https://schema.management.azure.com/schemas/2018-05-01/subscriptionDeploymentParameters.json#", - "contentVersion": "1.0.0.0", - "parameters": { - "environmentName": { - "value": "${AZURE_ENV_NAME}" - }, - "location": { - "value": "${AZURE_LOCATION}" - }, - "tags": { - "value": {} - } - } -} -``` - -### Child Module Structure Template +**Child Module Structure Template:** ```bicep targetScope = 'resourceGroup' - @description('Base name for all resources') param name string - -@description('Location for all resources') +@description('Location for all resources') param location string = resourceGroup().location - @description('Tags to apply to all resources') param tags object = {} @@ -198,20 +150,16 @@ var resourceName = '${name}-${resourceSuffix}' // Resource 
definitions here... ``` -## Validation Checklist - -Before completing code generation, verify: +📌 **Completion Checklist** -- [ ] All files are in `./infra` folder -- [ ] `main.bicep` exists as primary deployment file with subscription scope -- [ ] `main.parameters.json` exists alongside `main.bicep` with parameter defaults -- [ ] Resource group is created in `main.bicep` and properly tagged +- [ ] All files placed in `./infra` folder with correct structure +- [ ] `main.bicep` exists with subscription scope and resource group creation +- [ ] `main.parameters.json` exists with parameter defaults - [ ] All child modules use `targetScope = 'resourceGroup'` and receive resource group scope -- [ ] All resources use consistent naming convention -- [ ] Required tags are applied correctly -- [ ] No hard-coded secrets or identifiers -- [ ] Parameters have appropriate validation -- [ ] Bicep CLI validation passes without errors -- [ ] AVM modules are used where available -- [ ] Supporting resources are included as needed -- [ ] Security best practices are followed +- [ ] Consistent naming convention applied: `{resourcePrefix}-{name}-{uniqueHash}` +- [ ] Required tags applied: `azd-env-name` and `azd-service-name` +- [ ] No hard-coded secrets, tenant IDs, or subscription IDs +- [ ] Parameters have appropriate validation decorators +- [ ] Security best practices followed (Key Vault, managed identities, diagnostics) +- [ ] Bicep CLI validation passes without errors (`az bicep build`) +- [ ] Deployment validation successful (`az deployment sub validate`) diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md index e7ff88ef55c..f7b902bb239 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md +++ b/cli/azd/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md @@ -1,75 +1,126 @@ -# AZD Infrastructure Generation Tool - -This specialized tool generates Bicep infrastructure templates for Azure Developer CLI (AZD) projects. - -## Overview - -Generate modular Bicep templates following Azure security and operational best practices. - -**IMPORTANT:** -- Before starting, check if `azd-arch-plan.md` exists to understand architecture decisions -- **Use the `azd_iac_generation_rules` tool for complete IaC rules, naming conventions, and best practices** - -## Success Criteria - -- [ ] Complete Bicep template structure created in `./infra` directory -- [ ] All templates compile without errors (`az bicep build --file infra/main.bicep`) -- [ ] Infrastructure supports all services defined in `azure.yaml` -- [ ] Follows all rules from `azd_iac_generation_rules` tool -- [ ] Parameter files configured appropriately - -## Requirements Analysis - -**REQUIRED ACTIONS:** - -1. **Review IaC Rules:** Use `azd_iac_generation_rules` tool to get complete file structure, naming conventions, and compliance requirements - -2. **Analyze Infrastructure Needs:** - - Map services from `azure.yaml` to required Azure resources - - Identify shared resources (Log Analytics, Container Registry, Key Vault) - - Determine connectivity and security requirements +# AZD Infrastructure Generation Instructions + +✅ **Agent Task List** + +1. Use `azd_iac_generation_rules` tool to get complete IaC rules and conventions +2. **Inventory existing IaC files** - scan current working directory for all `.bicep` files +3. Read `azd-arch-plan.md` to get the **IaC File Generation Checklist** +4. 
Create directory structure in `./infra` following IaC rules +5. For each file in the IaC checklist: + - **If file exists**: Intelligently update to match requirements, preserve user customizations where possible + - **If file missing**: Generate new file following templates and best practices + - **Flag conflicts**: Note any incompatible configurations but proceed with updates +6. Validate all generated bicep templates compile without errors or warnings +7. Update the IaC checklist section in existing `azd-arch-plan.md` by marking completed files as [x] while preserving existing content + +📄 **Required Outputs** + +- **Existing IaC inventory** documenting all current `.bicep` files found +- Complete Bicep template structure in `./infra` directory based on the IaC checklist +- All files listed in the IaC File Generation Checklist from `azd-arch-plan.md` (created or updated) +- Main.bicep file with subscription scope and modular deployment +- Service-specific modules for each Azure service from the checklist +- Parameter files with sensible defaults +- **Conflict report** highlighting any incompatible configurations that were updated +- All templates validated and error-free +- Update existing `azd-arch-plan.md` IaC checklist by marking completed files as [x] while preserving existing content + +🧠 **Execution Guidelines** + +**CRITICAL:** Use `azd_iac_generation_rules` tool first to get complete file structure, naming conventions, and compliance requirements. + +**Inventory Existing IaC Files:** + +- Scan current working directory recursively for all `.bicep` files +- Document existing files, their locations, and basic structure +- Note any existing modules, parameters, and resource definitions +- Identify which checklist files already exist vs. need to be created + +**Read IaC Checklist:** + +- Read the "Infrastructure as Code File Checklist" section from `azd-arch-plan.md` +- This checklist specifies exactly which Bicep files need to be generated +- Cross-reference with existing file inventory to determine update vs. 
create strategy + +**Smart File Generation Strategy:** + +**For Existing Files:** + +- **Preserve user customizations**: Keep existing resource configurations, naming, and parameters where compatible +- **Add missing components**: Inject required modules, resources, or configurations that are missing +- **Update outdated patterns**: Modernize to use current best practices +- **Maintain functionality**: Ensure existing deployments continue to work + +**For New Files:** + +- Create from templates following IaC generation rules +- Follow standard naming conventions and patterns + +**Conflict Resolution:** + +- **Document conflicts**: Log when existing configurations conflict with requirements +- **Prioritize functionality**: Make changes needed for AZD compatibility +- **Preserve intent**: Keep user's architectural decisions when possible +- **Flag major changes**: Clearly indicate significant modifications made + +**Generate Files in Order:** + +- Create `./infra/main.bicep` first (always required) +- Create `./infra/main.parameters.json` second (always required) +- Generate each module file listed in the checklist +- Follow the exact file paths specified in the checklist + +**Main Parameters File Requirements:** + +The `./infra/main.parameters.json` file is critical for AZD integration and must follow this exact structure: + +```json +{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "environmentName": { + "value": "${AZURE_ENV_NAME}" + }, + "location": { + "value": "${AZURE_LOCATION}" + }, + "principalId": { + "value": "${AZURE_PRINCIPAL_ID}" + } + } +} +``` -3. **Service Infrastructure Mapping:** - - **Container Apps:** Environment, Log Analytics, Container Registry, App Insights, Managed Identity - - **App Service:** Service Plan, App Service, App Insights - - **Functions:** Function App, Storage Account, App Insights - - **Static Web Apps:** Static Web App resource - - **Database:** SQL/CosmosDB/PostgreSQL with appropriate SKUs +**Key Features:** -## Generation Workflow +- **Environment Variable Substitution**: Uses `${VARIABLE_NAME}` syntax for dynamic values +- **Standard Parameters**: Always include `environmentName`, `location`, and `principalId` +- **AZD Integration**: These variables are automatically populated by AZD during deployment +- **Additional Parameters**: Add service-specific parameters as needed, using the same substitution pattern -**REQUIRED ACTIONS:** +**Service Infrastructure Mapping:** -1. **Create Directory Structure:** - Follow structure from `azd_iac_generation_rules` tool: - ``` - ./infra/ - ├── main.bicep - ├── main.parameters.json - ├── modules/ - └── [additional files per rules] - ``` +- **Container Apps:** Environment, Log Analytics, Container Registry, App Insights, Managed Identity +- **App Service:** Service Plan, App Service, App Insights, Managed Identity +- **Functions:** Function App, Storage Account, App Insights, Managed Identity +- **Static Web Apps:** Static Web App resource with configuration +- **Database:** SQL/CosmosDB/PostgreSQL with appropriate SKUs and security -2. **Generate Main Template:** - - Use subscription-level scope (`targetScope = 'subscription'`) - - Create resource group with proper tagging - - Deploy modules conditionally based on service requirements - - Follow naming conventions from IaC rules tool +**Module Template Requirements:** -3. 
**Generate Module Templates:** - - Create focused modules for each service type - - Use resource group scope for all modules - - Accept standardized parameters (environmentName, location, tags) - - Output connection information for applications +- Use `targetScope = 'resourceGroup'` for all modules +- Accept resource group scope from main template +- Use standardized parameters (name, location, tags) +- Follow naming convention: `{resourcePrefix}-{name}-{uniqueHash}` +- Output connection information for applications +- Include security best practices and monitoring -4. **Generate Parameter Files:** - - Provide sensible defaults for all parameters - - Use parameter references for environment-specific values - - Include all required parameters from IaC rules +**Required Directory Structure:** -``` +```text ./infra/ -├── main.bicep # Primary deployment template +├── main.bicep # Primary deployment template (subscription scope) ├── main.parameters.json # Default parameters ├── modules/ │ ├── container-apps.bicep @@ -82,78 +133,31 @@ Generate modular Bicep templates following Azure security and operational best p └── resources.bicep # Shared resources ``` -## Template Requirements - -### Main Template (main.bicep) - -**CRITICAL REQUIREMENTS:** +**Main Template Requirements:** - Use `targetScope = 'subscription'` - Accept standardized parameters: `environmentName`, `location`, `principalId` -- Include feature flags for conditional deployment (e.g., `deployDatabase`) -- Create resource group with proper tagging (`azd-env-name`, `azd-provisioned`) -- Call modules conditionally based on feature flags +- Include feature flags for conditional deployment +- Create resource group with proper tagging (`azd-env-name`) +- Call modules conditionally based on service requirements - Output connection strings and service endpoints -### Module Templates - -## Generate Infrastructure Files - -**WORKFLOW REQUIREMENTS:** - -1. **Create Directory Structure:** - - ```text - ./infra/ - ├── main.bicep - ├── main.parameters.json - ├── modules/ - └── [service-specific modules] - ``` - -2. **Generate Main Template (main.bicep):** - - Use `targetScope = 'subscription'` - - Create resource group with proper tagging - - Deploy modules conditionally based on service requirements - -3. **Generate Module Templates:** - - Create focused modules for each service type - - Use standardized parameters (`environmentName`, `location`, `tags`) - - Output connection information for applications - -4. **Generate Parameter Files:** - - Provide sensible defaults for all parameters - - Use parameter references for environment-specific values - -## Validation and Testing - -**VALIDATION REQUIREMENTS:** - -- All Bicep templates must compile without errors: `az bicep build --file infra/main.bicep` -- Validate deployment: `az deployment sub validate --template-file infra/main.bicep` -- Test with AZD: `azd provision --dry-run` -- Use existing tools for schema validation (reference `azd_yaml_schema` tool for azure.yaml validation) - -## Update Documentation - -**REQUIRED ACTIONS:** - -Update `azd-arch-plan.md` with: - -- List of generated infrastructure files -- Resource naming conventions used -- Security configurations implemented -- Parameter requirements -- Output variables available -- Validation results - -## Next Steps - -After infrastructure generation is complete: - -1. Validate all templates compile successfully -2. Test deployment with `azd provision --dry-run` -3. Deploy with `azd provision` (creates resources) -4. 
Proceed to application deployment with `azd deploy` +📌 **Completion Checklist** + +- [ ] `azd_iac_generation_rules` tool referenced for complete compliance requirements +- [ ] **Existing IaC inventory completed** - all `.bicep` files in current directory catalogued +- [ ] **IaC File Generation Checklist read** from `azd-arch-plan.md` +- [ ] **Update vs. create strategy determined** for each file in checklist +- [ ] **All files from checklist generated or updated** in the correct locations +- [ ] **User customizations preserved** where compatible with requirements +- [ ] **Conflicts documented** and resolved with functional priority +- [ ] Infrastructure directory structure created following IaC rules +- [ ] Main.bicep template created/updated with subscription scope and resource group +- [ ] Module templates generated/updated for all services listed in checklist +- [ ] Parameter files created/updated with appropriate defaults +- [ ] All Bicep templates compile without errors or warnings (`az bicep build`) +- [ ] Templates validate successfully (`az deployment sub validate`) +- [ ] Naming conventions and tagging implemented correctly +- [ ] Security best practices implemented (Key Vault, managed identities) +- [ ] **IaC checklist in `azd-arch-plan.md` updated** by marking completed files as [x] while preserving existing content -**IMPORTANT:** Reference existing tools instead of duplicating functionality. For azure.yaml validation, use the `azd_yaml_schema` tool. For Bicep best practices, follow the AZD IaC Generation Rules document. diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_plan_init.md b/cli/azd/internal/agent/tools/azd/prompts/azd_plan_init.md index 5859bf69840..fceea6c0f96 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_plan_init.md +++ b/cli/azd/internal/agent/tools/azd/prompts/azd_plan_init.md @@ -1,124 +1,93 @@ -# AZD Application Initialization and Migration Plan +# AZD Application Initialization and Migration Instructions -This document provides a comprehensive, step-by-step plan for initializing or migrating applications to use Azure Developer CLI (AZD). This is the orchestrating tool that guides you through using the specialized AZD tools. +✅ **Agent Task List** -**IMPORTANT: Before starting any workflow, ALWAYS check if `azd-arch-plan.md` exists in the current directory and review it to understand current progress, previous decisions, and what work has already been completed. This prevents duplicate work and ensures continuity.** +1. **Check Progress:** Review existing `azd-arch-plan.md` to understand completed work +2. **Phase 1:** Execute `azd_discovery_analysis` for component identification +3. **Phase 2:** Execute `azd_architecture_planning` for Azure service selection +4. **Phase 3:** Execute file generation tools (`azd_azure_yaml_generation`, `azd_infrastructure_generation`, `azd_docker_generation`) +5. **Phase 4:** Execute `azd_project_validation` for complete validation +6. **Final:** Confirm project readiness for deployment -Always use Azure best practices with intelligent defaults. 
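As a point of reference for the task list above, a minimal `azure.yaml` of the kind produced in Phase 3 might look like the sketch below; the project name, service names, paths, languages, and hosts are illustrative assumptions for this note rather than values taken from this change.

```yaml
# Minimal illustrative azure.yaml (names, paths, and hosts are hypothetical)
name: todo-app
services:
  web:
    project: ./src/web
    language: ts
    host: containerapp
  api:
    project: ./src/api
    language: python
    host: containerapp
```

Phase 5 then validates this file against the schema before any infrastructure is provisioned.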
+📄 **Required Outputs** -## Executive Summary +- Complete AZD-compatible project structure +- Valid `azure.yaml` configuration file +- Bicep infrastructure templates in `./infra` directory +- Dockerfiles for containerizable services +- Comprehensive `azd-arch-plan.md` documentation (created or updated while preserving existing content) +- Validated project ready for `azd up` deployment -Transform any application into an AZD-compatible project using a structured approach with specialized tools. Each tool has a focused responsibility and builds upon the previous phase to create a complete AZD deployment. +🧠 **Execution Guidelines** -## Success Criteria +**CRITICAL:** Always check if `azd-arch-plan.md` exists first to understand current progress and avoid duplicate work. If the file exists, preserve all existing content and user modifications while updating relevant sections. -The migration is successful when: +**Complete Workflow Phases:** -- [ ] All application components are identified and classified -- [ ] `azure.yaml` file is valid and complete -- [ ] All infrastructure files are generated and error-free -- [ ] Required Dockerfiles are created for containerizable components -- [ ] `azd-arch-plan.md` provides comprehensive documentation -- [ ] AZD environment is initialized and configured -- [ ] **All validation checks pass (use `azd_project_validation` tool)** +**Phase 1: Review Existing Progress** -## Complete Workflow Guide +- Check if `azd-arch-plan.md` exists in current directory +- If exists: Review thoroughly and skip completed phases +- If doesn't exist: Proceed to Phase 2 -### Phase 1: Review Existing Progress +**Phase 2: Discovery and Analysis** -Check if the file `azd-arch-plan.md` exists in the current directory and review it to understand current progress, previous decisions, and what work has already been completed. This prevents duplicate work and ensures continuity. +- Tool: `azd_discovery_analysis` +- Scans files recursively, documents structure/languages/frameworks +- Identifies entry points, maps dependencies, creates component inventory +- Updates `azd-arch-plan.md` with findings -- If file exists: Review thoroughly and skip completed phases -- If file doesn't exist: Proceed to Phase 2 +**Phase 3: Architecture Planning and Azure Service Selection** -### Phase 2: Discovery and Analysis +- Tool: `azd_architecture_planning` +- Maps components to Azure services, plans hosting strategies +- Designs database/messaging architecture, creates containerization strategies +- Updates `azd-arch-plan.md` with service selections -**Tool:** `azd_discovery_analysis` +**Phase 4: File Generation (Execute in Sequence)** -Scans files recursively, documents structure/languages/frameworks, identifies entry points, maps dependencies, and creates component inventory in `azd-arch-plan.md`. +1. **Azure.yaml Configuration:** `azd_azure_yaml_generation` (Required for all projects) +2. **Infrastructure Templates:** `azd_infrastructure_generation` (Required for all projects) +3. 
**Docker Configurations:** `azd_docker_generation` (Required for containerizable services) -### Phase 3: Architecture Planning and Azure Service Selection +**Phase 5: Project Validation and Environment Setup** -**Tool:** `azd_architecture_planning` +- Tool: `azd_project_validation` +- Validates azure.yaml against schema, compiles Bicep templates +- Ensures AZD environment exists, tests packaging, validates deployment preview +- Provides readiness confirmation -Maps components to Azure services, plans hosting strategies, designs database/messaging architecture, and creates containerization strategies. Updates `azd-arch-plan.md`. +**Usage Patterns:** -### Phase 4: File Generation - -Generate all necessary AZD files using these focused tools (most projects need all three): - -#### 1. Generate Azure.yaml Configuration - -**Tool:** `azd_azure_yaml_generation` (Required for all AZD projects) - -#### 2. Generate Infrastructure Templates - -**Tool:** `azd_infrastructure_generation` (Required for all AZD projects) - -#### 3. Generate Docker Configurations - -**Tool:** `azd_docker_generation` (Required for containerizable services) - -**Use in sequence:** azure.yaml → infrastructure → docker - -### Phase 5: Project Validation and Environment Setup - -**Tool:** `azd_project_validation` - -Validates azure.yaml against schema, compiles Bicep templates, ensures AZD environment exists, tests packaging, validates deployment with preview, and provides readiness confirmation. - -## Usage Patterns - -### Complete New Project Migration +**Complete New Project Migration:** ```text -1. Review existing azd-arch-plan.md (Phase 1) -2. azd_discovery_analysis -3. azd_architecture_planning -4. azd_azure_yaml_generation -5. azd_infrastructure_generation -6. azd_docker_generation (if containerization needed) +1. Review azd-arch-plan.md → 2. azd_discovery_analysis → 3. azd_architecture_planning → +4. azd_azure_yaml_generation → 5. azd_infrastructure_generation → 6. azd_docker_generation → 7. azd_project_validation ``` -### Update Existing AZD Project +**Update Existing AZD Project:** ```text -1. Review existing azd-arch-plan.md (Phase 1) -2. azd_azure_yaml_generation → azd_infrastructure_generation → azd_docker_generation → azd_project_validation +1. Review azd-arch-plan.md → 2. File generation tools → 3. azd_project_validation ``` -### Quick Service Addition +**Quick Service Addition:** ```text -1. Review existing azd-arch-plan.md (Phase 1) -2. azd_discovery_analysis → azd_azure_yaml_generation → azd_docker_generation → azd_project_validation +1. Review azd-arch-plan.md → 2. azd_discovery_analysis → 3. azd_azure_yaml_generation → +4. azd_docker_generation → 5. azd_project_validation ``` -## Central Planning Document - -**CRITICAL:** `azd-arch-plan.md` is the central coordination file that tracks progress, documents decisions, and maintains project state. Always review this file before starting any tool to understand current progress and avoid duplicate work. - -## Supporting Resources - -### Schema and Validation - -- Use `azd_yaml_schema` tool to get complete azure.yaml schema information -- Use `azd_iac_generation_rules` tool for Infrastructure as Code best practices - -### Troubleshooting - -Each tool includes: - -- Validation checklists -- Testing commands -- Common issues and solutions -- Next step guidance - -## Getting Started - -**Standard workflow:** -1. Review existing `azd-arch-plan.md` (Phase 1) -2. 
`azd_discovery_analysis` → `azd_architecture_planning` → File generation tools → `azd_project_validation` - -Keep `azd-arch-plan.md` updated throughout the process as the central coordination document. +📌 **Completion Checklist** + +- [ ] All application components identified and classified in discovery phase +- [ ] Azure service selections made for each component with rationale +- [ ] `azure.yaml` file generated and validates against schema +- [ ] Infrastructure files generated and compile without errors +- [ ] Dockerfiles created for containerizable components +- [ ] `azd-arch-plan.md` created or updated to provide comprehensive project documentation while preserving existing content +- [ ] AZD environment initialized and configured +- [ ] All validation checks pass using `azd_project_validation` tool +- [ ] Project confirmed ready for deployment with `azd up` diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md index 5197edcacbb..badbd8a58fb 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md +++ b/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md @@ -1,191 +1,100 @@ -# AZD Project Validation Tool +# AZD Project Validation Instructions -This tool validates an AZD project by programmatically running comprehensive checks on all components including azure.yaml schema validation, Bicep template validation, environment setup, packaging, and deployment preview. +✅ **Agent Task List** -## Purpose +1. Load existing `azd-arch-plan.md` to understand current project state and context +2. Execute azure.yaml against azd schema using available tool +3. Compile and validate all Bicep templates in ./infra directory +4. Verify AZD environment exists and is properly configured +5. Run `azd package` to validate service packaging +6. Execute `azd provision --preview` to test infrastructure deployment +7. Resolve ALL issues found in each validation step before proceeding +8. Update existing `azd-arch-plan.md` with validation results by adding/updating validation section while preserving existing content -This tool performs automated end-to-end validation of an AZD project to ensure all components are properly configured and the project is ready for deployment. The LLM should execute all validation steps directly using available tools and terminal commands, not just provide instructions to the user. +📄 **Required Outputs** -## Validation Workflow +- Complete validation report with all checks passed +- All identified issues resolved with zero remaining errors +- Confirmation that project is ready for deployment +- Update existing `azd-arch-plan.md` with validation results while preserving existing content +- Validation checklist added to or updated in architecture plan +- Clear next steps for deployment -The LLM must execute these validation steps programmatically using terminal commands and available tools: +🧠 **Execution Guidelines** -### 1. Azure.yaml Schema Validation +**CRITICAL REQUIREMENT:** Resolve ALL issues found during validation before proceeding to the next step. No validation step should be considered successful until all errors, warnings, and issues have been fully addressed. -**EXECUTE:** Use the `azd_yaml_schema` tool to validate the azure.yaml file against the official schema. +**Pre-Validation Setup:** -**Steps to Execute:** +**0. 
Load Architecture Plan:** -- Check if `azure.yaml` exists in current directory using file system tools -- Run `azd_yaml_schema` tool to validate schema compliance -- Parse and report any schema violations or missing required fields -- Verify service definitions and configurations are correct - -### 2. Bicep Template Validation - -**EXECUTE:** Run the following commands to validate Bicep templates: - -1. **Find Bicep Files:** Use file search to scan `./infra` directory for `.bicep` files -2. **Compile Templates:** Execute `az bicep build --file --stdout` for each template -3. **Validate Syntax:** Ensure all templates compile without errors -4. **Check Dependencies:** Verify module references and parameter passing - -**Commands to Execute:** - -```powershell -# Compile main template -az bicep build --file ./infra/main.bicep +- Read existing `azd-arch-plan.md` to understand current project architecture and context +- Review any previous validation results or known issues +- Understand the project structure and service configurations from the plan +- **MANDATORY:** Must load and review architecture plan before starting validation -# Validate deployment (requires Azure CLI login) -az deployment sub validate --template-file ./infra/main.bicep --parameters ./infra/main.parameters.json --location -``` +**Validation Execution Steps:** -### 3. AZD Environment Validation +**1. Azure.yaml Schema Validation:** -**EXECUTE:** Run these commands to validate AZD environment setup: - -1. **Check Environment Exists:** Execute `azd env list` to see available environments -2. **Create Environment if Missing:** - - If no environments exist, execute `azd env new ` - - Use current directory name as environment name (sanitized) -3. **Verify Environment Selection:** Ensure an environment is currently selected - -**Commands to Execute:** - -```powershell -# List existing environments -azd env list +- Check if `azure.yaml` exists in current directory +- Validate `azure.yaml` against AZD schema using available tools +- Parse and report any schema violations or missing fields +- Verify service definitions and configurations are correct +- **MANDATORY:** Fix ALL schema violations before proceeding -# Create new environment if none exist (replace with directory name) -azd env new +**2. Bicep Template Validation:** -# Select environment if not already selected -azd env select -``` +- Scan `./infra` directory for `.bicep` files using file search +- Execute `az bicep build --file ` for each template +- Run `az deployment sub validate --template-file ./infra/main.bicep --parameters ./infra/main.parameters.json --location ` +- Verify all templates compile without errors and dependencies are correct +- **MANDATORY:** Fix ALL compilation errors before proceeding +- Clean up any generated `` files generated during bicep validation -### 4. Package Validation +**3. AZD Environment Validation:** -**EXECUTE:** Run `azd package` to validate all services can be packaged successfully. +- Execute `azd env list` to check available environments +- If no environments exist, create one: `azd env new -dev` +- Ensure environment is selected and configured +- Ensure `AZURE_LOCATION` azd environment variable is set to a valid Azure location value +- Ensure `AZURE_SUBSCRIPTION_ID` azd environment variable is set to the users current Azure subscription +- **MANDATORY:** Fix environment issues before proceeding -**Steps to Execute:** +**4. 
Package Validation:** -- Execute `azd package` command -- Monitor output for errors or warnings +- Execute `azd package` command and monitor output - Verify all service source paths are valid -- Check Docker builds complete successfully (for containerized services) -- Ensure all build artifacts are created -- Validate package manifests - -**Command to Execute:** +- Check Docker builds complete successfully for containerized services +- Ensure all build artifacts are created correctly +- **MANDATORY:** Fix ALL packaging errors before proceeding -```powershell -azd package -``` - -### 5. Deployment Preview Validation - -**EXECUTE:** Run `azd provision --preview` to validate infrastructure deployment without actually creating resources. - -**Steps to Execute:** +**5. Deployment Preview Validation:** - Execute `azd provision --preview` command -- Monitor output for errors or warnings - Verify Azure authentication is working -- Check resource group creation plan -- Validate all Bicep modules deploy correctly +- Check resource group creation plan and Bicep module deployment - Ensure parameter values are properly resolved -- Confirm no deployment conflicts - -**Command to Execute:** - -```powershell -azd provision --preview -``` - -## Success Criteria - -The LLM must verify that project validation is successful when all of the following are true: - -- [ ] `azure.yaml` passes schema validation (executed via `azd_yaml_schema` tool) -- [ ] All Bicep templates compile without errors or warnings (verified via `az bicep build`) -- [ ] AZD environment exists and is properly configured (verified via `azd env list`) -- [ ] `azd package` completes without errors or warnings -- [ ] `azd provision --preview` completes without errors or warnings -- [ ] All service configurations are valid -- [ ] No missing dependencies or configuration issues - -The LLM should report the status of each validation step and provide a summary of the overall validation results. - -## Error Handling - -The LLM must handle common validation errors by executing appropriate remediation steps: - -### Common Issues and Automated Solutions - -**Azure.yaml Schema Errors:** - -- Execute `azd_yaml_schema` tool to get correct schema format -- Check service names match directory structure using file system tools -- Verify all required fields are present and report missing fields - -**Bicep Compilation Errors:** - -- Parse compilation error output and identify specific issues -- Check module paths and parameter names programmatically -- Verify resource naming conventions follow Azure requirements -- Ensure all required parameters have values - -**Environment Issues:** - -- Execute `azd auth login` if authentication fails -- Check Azure subscription access and permissions via Azure CLI -- Verify location parameter is valid Azure region - -**Package Errors:** - -- Check service source paths in azure.yaml programmatically -- Verify Docker builds work locally for containerized services by executing build commands -- Ensure all build dependencies are available - -**Provision Preview Errors:** - -- Verify Azure subscription has sufficient permissions via Azure CLI -- Check resource quotas and limits -- Ensure resource names are globally unique where required - -The LLM should attempt to resolve issues automatically where possible and provide clear error reporting for issues that require manual intervention. 
- -## Update Documentation - -**EXECUTE:** The LLM must update `azd-arch-plan.md` with validation results by: - -- Writing validation results for each component to the documentation -- Recording any issues found and resolutions applied -- Documenting environment configuration details -- Including deployment preview summary -- Updating project readiness status - -Use file editing tools to update the documentation with the validation results. - -## Next Steps - -The LLM should inform the user that after successful validation, they can proceed with: - -1. **Deploy Infrastructure:** Execute `azd provision` to create Azure resources -2. **Deploy Applications:** Execute `azd deploy` to deploy services -3. **Complete Deployment:** Execute `azd up` to provision and deploy in one step -4. **Monitor Deployment:** Use `azd monitor` to check application health -5. **View Logs:** Use `azd logs` to view deployment and runtime logs +- **MANDATORY:** Fix ALL preview errors before proceeding -### Production Preparation +**Error Resolution Requirements:** -For production deployment, the LLM should guide the user through: +- **Azure.yaml Schema Errors:** Validate azure.yaml using available tools +- **Bicep Compilation Errors:** Parse error output, check module paths and parameter names, verify resource naming +- **Environment Issues:** Run `azd auth login` if needed, check subscription access, verify location parameter +- **Package Errors:** Check service source paths, verify Docker builds work locally, ensure dependencies available +- **Provision Preview Errors:** Verify subscription permissions, check resource quotas, ensure resource names are unique -- Creating production environment: `azd env new -prod` -- Configuring production-specific settings and secrets -- Setting up monitoring, alerting, and backup procedures -- Documenting operational procedures and runbooks +📌 **Completion Checklist** -**VALIDATION COMPLETE:** Once all validation steps pass, the LLM should confirm that the AZD migration is complete and ready for deployment with `azd up`. +- [ ] `azd-arch-plan.md` loaded and reviewed for project context +- [ ] `azure.yaml` passes schema validation with NO errors or warnings +- [ ] ALL Bicep templates compile without errors or warnings +- [ ] AZD environment exists and is properly configured with NO issues +- [ ] `azd package` completes without errors or warnings with ALL services packaging successfully +- [ ] `azd provision --preview` completes without errors or warnings with ALL resources validating correctly +- [ ] ALL service configurations are valid with NO missing or incorrect settings +- [ ] NO missing dependencies or configuration issues remain +- [ ] Validation results added to existing `azd-arch-plan.md` while preserving existing content +- [ ] Project confirmed ready for deployment with `azd up` -**IMPORTANT:** This tool centralizes all validation logic. The LLM should execute all validation steps programmatically rather than delegating to other tools or providing user instructions. diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_yaml_schema.md b/cli/azd/internal/agent/tools/azd/prompts/azd_yaml_schema.md deleted file mode 100644 index 9701dd9c5ac..00000000000 --- a/cli/azd/internal/agent/tools/azd/prompts/azd_yaml_schema.md +++ /dev/null @@ -1,18 +0,0 @@ -# Azure YAML Schema - -This document contains the JSON schema specification for the azure.yaml configuration file used in Azure Developer CLI (AZD) projects. 
- -## Schema Content - - - -The azure.yaml file is the main configuration file for AZD projects and defines: - -- Project metadata -- Services configuration -- Infrastructure settings -- Hooks and workflows -- Environment variables -- And other project-specific settings - -This schema helps validate and provide IntelliSense support for azure.yaml files in various editors and tools. diff --git a/cli/azd/internal/agent/tools/azd/prompts/prompts.go b/cli/azd/internal/agent/tools/azd/prompts/prompts.go index c8f7752cba1..a08d194ce7d 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/prompts.go +++ b/cli/azd/internal/agent/tools/azd/prompts/prompts.go @@ -27,3 +27,6 @@ var AzdInfrastructureGenerationPrompt string //go:embed azd_docker_generation.md var AzdDockerGenerationPrompt string + +//go:embed azd_project_validation.md +var AzdProjectValidationPrompt string diff --git a/cli/azd/internal/agent/tools/io/write_file.go b/cli/azd/internal/agent/tools/io/write_file.go index 8c20367af38..18ab45b42d0 100644 --- a/cli/azd/internal/agent/tools/io/write_file.go +++ b/cli/azd/internal/agent/tools/io/write_file.go @@ -17,11 +17,11 @@ type WriteFileTool struct{} // WriteFileRequest represents the JSON input for the write_file tool type WriteFileRequest struct { - Filename string `json:"filename"` - Content string `json:"content"` - Mode string `json:"mode,omitempty"` // "write" (default), "append", "create" - ChunkNum int `json:"chunkNum,omitempty"` // For chunked writing: 1-based chunk number - TotalChunks int `json:"totalChunks,omitempty"` // For chunked writing: total expected chunks + Filename string `json:"filename"` + Content string `json:"content"` + Mode string `json:"mode,omitempty"` // "write" (default), "append", "create" + StartLine int `json:"startLine,omitempty"` // For partial write: 1-based line number (inclusive) + EndLine int `json:"endLine,omitempty"` // For partial write: 1-based line number (inclusive) } // WriteFileResponse represents the JSON output for the write_file tool @@ -30,17 +30,17 @@ type WriteFileResponse struct { Operation string `json:"operation"` FilePath string `json:"filePath"` BytesWritten int `json:"bytesWritten"` - IsChunked bool `json:"isChunked"` - ChunkInfo *ChunkInfo `json:"chunkInfo,omitempty"` + IsPartial bool `json:"isPartial"` // True for partial write + LineInfo *LineInfo `json:"lineInfo,omitempty"` // For partial write FileInfo FileInfoDetails `json:"fileInfo"` Message string `json:"message,omitempty"` } -// ChunkInfo represents chunked writing details -type ChunkInfo struct { - ChunkNumber int `json:"chunkNumber"` - TotalChunks int `json:"totalChunks"` - IsComplete bool `json:"isComplete"` +// LineInfo represents line-based partial write details +type LineInfo struct { + StartLine int `json:"startLine"` + EndLine int `json:"endLine"` + LinesChanged int `json:"linesChanged"` } // FileInfoDetails represents file metadata @@ -55,44 +55,48 @@ func (t WriteFileTool) Name() string { } func (t WriteFileTool) Description() string { - return `Comprehensive file writing tool that handles small and large files intelligently. Returns JSON response with operation details. + return `Comprehensive file writing tool that handles full file writes, appends, and line-based partial updates. Returns JSON response with operation details. 
Input: JSON payload with the following structure: { "filename": "path/to/file.txt", "content": "file content here", "mode": "write", - "chunkNum": 1, - "totalChunks": 3 + "startLine": 5, + "endLine": 8 } Field descriptions: -- mode: "write" (default), "append", or "create" -- chunkNum: for chunked writing (1-based) -- totalChunks: total number of chunks +- mode: "write" (default), "append", or "create" +- startLine: for partial write - 1-based line number (inclusive) - REQUIRES EXISTING FILE +- endLine: for partial write - 1-based line number (inclusive) - REQUIRES EXISTING FILE MODES: -- "write" (default): Overwrite/create file +- "write" (default): Full file overwrite/create, OR partial line replacement when startLine/endLine provided - "append": Add content to end of existing file - "create": Create file only if it doesn't exist -CHUNKED WRITING (for large files): -Use chunkNum and totalChunks for files that might be too large: -- chunkNum: 1-based chunk number (1, 2, 3...) -- totalChunks: Total number of chunks you'll send +PARTIAL WRITES (line-based editing): +⚠️ IMPORTANT: Partial writes REQUIRE an existing file. Cannot create new files with line positioning. +Add startLine and endLine to any "write" operation to replace specific lines in EXISTING files: +- Both are 1-based and inclusive +- startLine=5, endLine=8 replaces lines 5, 6, 7, and 8 +- If endLine > file length, content is appended +- File MUST exist for partial writes - use regular write mode for new files EXAMPLES: -Simple write: +Full file write (new or existing file): {"filename": "./main.bicep", "content": "param location string = 'eastus'"} Append to file: {"filename": "./log.txt", "content": "\nNew log entry", "mode": "append"} -Large file (chunked): -{"filename": "./large.bicep", "content": "first part...", "chunkNum": 1, "totalChunks": 3} -{"filename": "./large.bicep", "content": "middle part...", "chunkNum": 2, "totalChunks": 3} -{"filename": "./large.bicep", "content": "final part...", "chunkNum": 3, "totalChunks": 3} +Partial write (replace specific lines in EXISTING file): +{"filename": "./config.json", "content": " \"newSetting\": true,\n \"version\": \"2.0\"", "startLine": 3, "endLine": 4} + +Create only if doesn't exist: +{"filename": "./new-file.txt", "content": "Initial content", "mode": "create"} The input must be formatted as a single line valid JSON string.` } @@ -126,10 +130,21 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { return t.createErrorResponse(fmt.Errorf("empty input"), "No input provided.") } + // Debug: Check for common JSON issues + input = strings.TrimSpace(input) + if !strings.HasPrefix(input, "{") || !strings.HasSuffix(input, "}") { + return t.createErrorResponse(fmt.Errorf("malformed JSON structure"), fmt.Sprintf("Invalid JSON input: Input does not appear to be valid JSON object. Starts with: %q, Ends with: %q", input[:min(10, len(input))], input[max(0, len(input)-10):])) + } + // Parse JSON input var req WriteFileRequest if err := json.Unmarshal([]byte(input), &req); err != nil { - return t.createErrorResponse(err, "Invalid JSON input") + // Enhanced error reporting for debugging + truncatedInput := input + if len(input) > 200 { + truncatedInput = input[:200] + "...[truncated]" + } + return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input. Error: %s. 
Input (first 200 chars): %s", err.Error(), truncatedInput)) } // Validate required fields @@ -143,51 +158,112 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { mode = "write" } - // Handle chunked writing - isChunked := req.ChunkNum > 0 && req.TotalChunks > 0 - if isChunked { - return t.handleChunkedWrite(ctx, req) + // Check if line numbers are provided for partial write + hasStartLine := req.StartLine != 0 + hasEndLine := req.EndLine != 0 + + // If any line number is provided, both must be provided and valid + if hasStartLine || hasEndLine { + if !hasStartLine || !hasEndLine { + return t.createErrorResponse(fmt.Errorf("both startLine and endLine must be provided for partial write"), "Both startLine and endLine must be provided for partial write") + } + + // Validate that file exists for partial write BEFORE attempting + filePath := strings.TrimSpace(req.Filename) + if _, err := os.Stat(filePath); os.IsNotExist(err) { + return t.createErrorResponse(err, fmt.Sprintf("Cannot perform partial write on file '%s' because it does not exist. For new files, omit startLine and endLine parameters to create the entire file", filePath)) + } + + // Smart write mode: this should be a partial write + if mode == "write" { + return t.handlePartialWrite(ctx, req) + } else { + return t.createErrorResponse(fmt.Errorf("startLine and endLine can only be used with write mode"), "startLine and endLine can only be used with write mode") + } } // Handle regular writing return t.handleRegularWrite(ctx, req, mode) } -// handleChunkedWrite handles writing files in chunks -func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequest) (string, error) { - if req.ChunkNum < 1 || req.TotalChunks < 1 || req.ChunkNum > req.TotalChunks { - return t.createErrorResponse(fmt.Errorf("invalid chunk numbers: chunkNum=%d, totalChunks=%d", req.ChunkNum, req.TotalChunks), fmt.Sprintf("Invalid chunk numbers: chunkNum=%d, totalChunks=%d. 
ChunkNum must be between 1 and totalChunks", req.ChunkNum, req.TotalChunks)) +// handlePartialWrite handles line-based partial file editing +func (t WriteFileTool) handlePartialWrite(ctx context.Context, req WriteFileRequest) (string, error) { + // Validate line numbers + if req.StartLine < 1 { + return t.createErrorResponse(fmt.Errorf("invalid startLine: %d", req.StartLine), "startLine must be >= 1") + } + if req.EndLine < 1 { + return t.createErrorResponse(fmt.Errorf("invalid endLine: %d", req.EndLine), "endLine must be >= 1") + } + if req.StartLine > req.EndLine { + return t.createErrorResponse(fmt.Errorf("invalid line range: startLine=%d > endLine=%d", req.StartLine, req.EndLine), "startLine cannot be greater than endLine") } filePath := strings.TrimSpace(req.Filename) - content := t.processContent(req.Content) - // Ensure directory exists - if err := t.ensureDirectory(filePath); err != nil { - return t.createErrorResponse(err, fmt.Sprintf("Failed to create directory for file %s: %s", filePath, err.Error())) + // Read existing file + fileBytes, err := os.ReadFile(filePath) + if err != nil { + return t.createErrorResponse(err, fmt.Sprintf("Failed to read existing file %s: %s", filePath, err.Error())) } - var err error - var operation string + // Detect line ending style from existing content + content := string(fileBytes) + lineEnding := "\n" + if strings.Contains(content, "\r\n") { + lineEnding = "\r\n" + } else if strings.Contains(content, "\r") { + lineEnding = "\r" + } - if req.ChunkNum == 1 { - // First chunk - create/overwrite file - err = os.WriteFile(filePath, []byte(content), 0644) - operation = "write" - } else { - // Subsequent chunks - append - file, openErr := os.OpenFile(filePath, os.O_APPEND|os.O_WRONLY, 0644) - if openErr != nil { - return t.createErrorResponse(openErr, fmt.Sprintf("Failed to open file for append %s: %s", filePath, openErr.Error())) - } - defer file.Close() + // Split into lines (preserve line endings) + lines := strings.Split(content, lineEnding) + originalLineCount := len(lines) - _, err = file.WriteString(content) - operation = "append" + // Handle the case where file ends with line ending (empty last element) + if originalLineCount > 0 && lines[originalLineCount-1] == "" { + lines = lines[:originalLineCount-1] + originalLineCount-- } + // Process new content + newContent := t.processContent(req.Content) + newLines := strings.Split(newContent, "\n") + + // If endLine is beyond file length, we'll append + actualEndLine := req.EndLine + if req.EndLine > originalLineCount { + actualEndLine = originalLineCount + } + + // Build new file content + var result []string + + // Lines before the replacement + if req.StartLine > 1 { + result = append(result, lines[:req.StartLine-1]...) + } + + // New lines + result = append(result, newLines...) + + // Lines after the replacement (if any) + if actualEndLine < originalLineCount { + result = append(result, lines[actualEndLine:]...) 
+ } + + // Join with original line ending style + finalContent := strings.Join(result, lineEnding) + + // If original file had trailing newline, preserve it + if len(fileBytes) > 0 && (string(fileBytes[len(fileBytes)-1:]) == "\n" || strings.HasSuffix(string(fileBytes), lineEnding)) { + finalContent += lineEnding + } + + // Write the updated content + err = os.WriteFile(filePath, []byte(finalContent), 0644) if err != nil { - return t.createErrorResponse(err, fmt.Sprintf("Failed to write chunk to file %s: %s", filePath, err.Error())) + return t.createErrorResponse(err, fmt.Sprintf("Failed to write updated content to file %s: %s", filePath, err.Error())) } // Get file info @@ -196,29 +272,27 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ return t.createErrorResponse(err, fmt.Sprintf("Failed to verify file %s: %s", filePath, err.Error())) } + // Calculate lines changed + linesChanged := len(newLines) + // Create JSON response response := WriteFileResponse{ Success: true, - Operation: operation, + Operation: "Wrote (partial)", FilePath: filePath, - BytesWritten: len(content), - IsChunked: true, - ChunkInfo: &ChunkInfo{ - ChunkNumber: req.ChunkNum, - TotalChunks: req.TotalChunks, - IsComplete: req.ChunkNum == req.TotalChunks, + BytesWritten: len(newContent), + IsPartial: true, + LineInfo: &LineInfo{ + StartLine: req.StartLine, + EndLine: req.EndLine, + LinesChanged: linesChanged, }, FileInfo: FileInfoDetails{ Size: fileInfo.Size(), ModifiedTime: fileInfo.ModTime(), Permissions: fileInfo.Mode().String(), }, - } - - if req.ChunkNum == req.TotalChunks { - response.Message = "File writing completed successfully" - } else { - response.Message = fmt.Sprintf("Chunk %d/%d written successfully", req.ChunkNum, req.TotalChunks) + Message: fmt.Sprintf("Partial write completed: lines %d-%d replaced successfully", req.StartLine, req.EndLine), } // Convert to JSON @@ -227,9 +301,7 @@ func (t WriteFileTool) handleChunkedWrite(ctx context.Context, req WriteFileRequ return t.createErrorResponse(err, fmt.Sprintf("Failed to marshal JSON response: %s", err.Error())) } - output := string(jsonData) - - return output, nil + return string(jsonData), nil } // handleRegularWrite handles normal file writing @@ -239,7 +311,7 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ // Provide feedback for large content if len(content) > 10000 { - fmt.Printf("📝 Large content detected (%d chars). Consider using chunked writing for better reliability.\n", len(content)) + fmt.Printf("📝 Large content detected (%d chars). 
Consider breaking into smaller edits for better reliability.\n", len(content)) } // Ensure directory exists @@ -288,7 +360,7 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ Operation: operation, FilePath: filePath, BytesWritten: len(content), - IsChunked: false, + IsPartial: false, FileInfo: FileInfoDetails{ Size: fileInfo.Size(), ModifiedTime: fileInfo.ModTime(), @@ -325,3 +397,18 @@ func (t WriteFileTool) ensureDirectory(filePath string) error { } return nil } + +// Helper functions +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/cli/azd/internal/agent/tools/io/write_file_test.go b/cli/azd/internal/agent/tools/io/write_file_test.go new file mode 100644 index 00000000000..2aacfe62cdb --- /dev/null +++ b/cli/azd/internal/agent/tools/io/write_file_test.go @@ -0,0 +1,495 @@ +package io + +import ( + "context" + "encoding/json" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestWriteFileTool_Name(t *testing.T) { + tool := WriteFileTool{} + assert.Equal(t, "write_file", tool.Name()) +} + +func TestWriteFileTool_Description(t *testing.T) { + tool := WriteFileTool{} + desc := tool.Description() + assert.Contains(t, desc, "Comprehensive file writing tool") + assert.Contains(t, desc, "partial") + assert.Contains(t, desc, "startLine") + assert.Contains(t, desc, "endLine") +} + +func TestWriteFileTool_Call_EmptyInput(t *testing.T) { + tool := WriteFileTool{} + result, err := tool.Call(context.Background(), "") + + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "No input provided") +} + +func TestWriteFileTool_Call_InvalidJSON(t *testing.T) { + tool := WriteFileTool{} + result, err := tool.Call(context.Background(), "invalid json") + + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "Invalid JSON input: Input does not appear to be valid JSON object") +} + +func TestWriteFileTool_Call_MalformedJSON(t *testing.T) { + tool := WriteFileTool{} + // Test with JSON that has parse errors + result, err := tool.Call(context.Background(), `{"filename": "test.txt", "content": "unclosed string}`) + + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "Invalid JSON input. Error:") + assert.Contains(t, result, "Input (first 200 chars):") +} + +func TestWriteFileTool_Call_MissingFilename(t *testing.T) { + tool := WriteFileTool{} + input := `{"content": "test content"}` + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "filename cannot be empty") +} + +func TestWriteFileTool_FullFileWrite(t *testing.T) { + // Create temp directory + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + + tool := WriteFileTool{} + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "Hello, World!"}` + + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Verify response + var response WriteFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, "Wrote", response.Operation) + assert.Equal(t, testFile, response.FilePath) + assert.Equal(t, 13, response.BytesWritten) // "Hello, World!" 
length + assert.False(t, response.IsPartial) + assert.Nil(t, response.LineInfo) + assert.Greater(t, response.FileInfo.Size, int64(0)) + + // Verify file content + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + assert.Equal(t, "Hello, World!", string(content)) +} + +func TestWriteFileTool_AppendMode(t *testing.T) { + // Create temp directory and initial file + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + + // Create initial file + err := os.WriteFile(testFile, []byte("Initial content"), 0644) + require.NoError(t, err) + + tool := WriteFileTool{} + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "\nAppended content", "mode": "append"}` + + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Verify response + var response WriteFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, "Appended to", response.Operation) + assert.False(t, response.IsPartial) + + // Verify file content + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + assert.Equal(t, "Initial content\nAppended content", string(content)) +} + +func TestWriteFileTool_CreateMode_Success(t *testing.T) { + // Create temp directory + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "new-file.txt") + + tool := WriteFileTool{} + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New file content", "mode": "create"}` + + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Verify response + var response WriteFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, "Created", response.Operation) + + // Verify file content + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + assert.Equal(t, "New file content", string(content)) +} + +func TestWriteFileTool_CreateMode_FileExists(t *testing.T) { + // Create temp directory and existing file + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "existing.txt") + + err := os.WriteFile(testFile, []byte("Existing content"), 0644) + require.NoError(t, err) + + tool := WriteFileTool{} + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New content", "mode": "create"}` + + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Should return error + assert.Contains(t, result, "error") + assert.Contains(t, result, "already exists") + + // Verify original content unchanged + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + assert.Equal(t, "Existing content", string(content)) +} + +func TestWriteFileTool_PartialWrite_Basic(t *testing.T) { + // Create temp directory and initial file + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + + // Create initial file with multiple lines + initialContent := "Line 1\nLine 2\nLine 3\nLine 4\nLine 5" + err := os.WriteFile(testFile, []byte(initialContent), 0644) + require.NoError(t, err) + + tool := WriteFileTool{} + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "Modified Line 2\nModified Line 3", "startLine": 2, "endLine": 3}` + + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Verify response + var response WriteFileResponse + err = json.Unmarshal([]byte(result), &response) 
+ require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, "Wrote (partial)", response.Operation) + assert.True(t, response.IsPartial) + assert.NotNil(t, response.LineInfo) + assert.Equal(t, 2, response.LineInfo.StartLine) + assert.Equal(t, 3, response.LineInfo.EndLine) + assert.Equal(t, 2, response.LineInfo.LinesChanged) + + // Verify file content + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + expectedContent := "Line 1\nModified Line 2\nModified Line 3\nLine 4\nLine 5" + assert.Equal(t, expectedContent, string(content)) +} + +func TestWriteFileTool_PartialWrite_SingleLine(t *testing.T) { + // Create temp directory and initial file + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + + // Create initial file + initialContent := "Line 1\nLine 2\nLine 3" + err := os.WriteFile(testFile, []byte(initialContent), 0644) + require.NoError(t, err) + + tool := WriteFileTool{} + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "Replaced Line 2", "startLine": 2, "endLine": 2}` + + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Verify response + var response WriteFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.True(t, response.IsPartial) + assert.Equal(t, 2, response.LineInfo.StartLine) + assert.Equal(t, 2, response.LineInfo.EndLine) + assert.Equal(t, 1, response.LineInfo.LinesChanged) + + // Verify file content + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + expectedContent := "Line 1\nReplaced Line 2\nLine 3" + assert.Equal(t, expectedContent, string(content)) +} + +func TestWriteFileTool_PartialWrite_SingleLineToMultipleLines(t *testing.T) { + // Create temp directory and initial file + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + + // Create initial file + initialContent := "Line 1\nLine 2\nLine 3\nLine 4" + err := os.WriteFile(testFile, []byte(initialContent), 0644) + require.NoError(t, err) + + tool := WriteFileTool{} + // Replace single line 2 with multiple lines + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New Line 2a\nNew Line 2b\nNew Line 2c", "startLine": 2, "endLine": 2}` + + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Verify response + var response WriteFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, "Wrote (partial)", response.Operation) + assert.True(t, response.IsPartial) + assert.NotNil(t, response.LineInfo) + assert.Equal(t, 2, response.LineInfo.StartLine) + assert.Equal(t, 2, response.LineInfo.EndLine) + assert.Equal(t, 3, response.LineInfo.LinesChanged) // 3 new lines replaced 1 line + + // Verify file content - single line 2 should be replaced with 3 lines + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + expectedContent := "Line 1\nNew Line 2a\nNew Line 2b\nNew Line 2c\nLine 3\nLine 4" + assert.Equal(t, expectedContent, string(content)) +} + +func TestWriteFileTool_PartialWrite_FileNotExists(t *testing.T) { + // Create temp directory + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "nonexistent.txt") + + tool := WriteFileTool{} + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New content", "startLine": 1, "endLine": 1}` + + result, err := 
tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Should return error + assert.Contains(t, result, "error") + assert.Contains(t, result, "does not exist") + assert.Contains(t, result, "Cannot perform partial write on file") + assert.Contains(t, result, "For new files, omit startLine and endLine parameters") +} + +func TestWriteFileTool_PartialWrite_InvalidLineNumbers(t *testing.T) { + // Create temp directory and initial file + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + + err := os.WriteFile(testFile, []byte("Line 1\nLine 2"), 0644) + require.NoError(t, err) + + tool := WriteFileTool{} + + // Test startLine provided but not endLine + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "startLine": 1}` + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "Both startLine and endLine must be provided") + + // Test endLine provided but not startLine + input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "endLine": 1}` + result, err = tool.Call(context.Background(), input) + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "Both startLine and endLine must be provided") + + // Test startLine < 1 (this will trigger the partial write validation) + input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "startLine": 0, "endLine": 1}` + result, err = tool.Call(context.Background(), input) + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "Both startLine and endLine must be provided") // 0 is treated as "not provided" + + // Test valid line numbers but startLine > endLine + input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "startLine": 3, "endLine": 1}` + result, err = tool.Call(context.Background(), input) + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "startLine cannot be greater than endLine") +} + +func TestWriteFileTool_PartialWrite_BeyondFileLength(t *testing.T) { + // Create temp directory and initial file + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + + // Create initial file with 3 lines + initialContent := "Line 1\nLine 2\nLine 3" + err := os.WriteFile(testFile, []byte(initialContent), 0644) + require.NoError(t, err) + + tool := WriteFileTool{} + // Try to replace lines 2-5 (beyond file length) + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New content", "startLine": 2, "endLine": 5}` + + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Verify response + var response WriteFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.True(t, response.IsPartial) + + // Verify file content - should append since endLine > file length + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + expectedContent := "Line 1\nNew content" + assert.Equal(t, expectedContent, string(content)) +} + +func TestWriteFileTool_PartialWrite_PreserveLineEndings(t *testing.T) { + // Create temp directory and initial file with Windows line endings + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + + // Create initial file with CRLF line endings + 
initialContent := "Line 1\r\nLine 2\r\nLine 3\r\n" + err := os.WriteFile(testFile, []byte(initialContent), 0644) + require.NoError(t, err) + + tool := WriteFileTool{} + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "Modified Line 2", "startLine": 2, "endLine": 2}` + + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + + // Verify response + var response WriteFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + assert.True(t, response.Success) + + // Verify file content preserves CRLF + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + expectedContent := "Line 1\r\nModified Line 2\r\nLine 3\r\n" + assert.Equal(t, expectedContent, string(content)) + assert.Contains(t, string(content), "\r\n") // Verify CRLF preserved +} + +func TestWriteFileTool_ProcessContent_EscapeSequences(t *testing.T) { + tool := WriteFileTool{} + + // Test newline escape + result := tool.processContent("Line 1\\nLine 2") + assert.Equal(t, "Line 1\nLine 2", result) + + // Test tab escape + result = tool.processContent("Column1\\tColumn2") + assert.Equal(t, "Column1\tColumn2", result) + + // Test both + result = tool.processContent("Line 1\\nColumn1\\tColumn2") + assert.Equal(t, "Line 1\nColumn1\tColumn2", result) +} + +func TestWriteFileTool_EnsureDirectory(t *testing.T) { + tool := WriteFileTool{} + tempDir := t.TempDir() + + // Test creating nested directory + testFile := filepath.Join(tempDir, "subdir", "nested", "test.txt") + err := tool.ensureDirectory(testFile) + assert.NoError(t, err) + + // Verify directory exists + dirPath := filepath.Dir(testFile) + info, err := os.Stat(dirPath) + assert.NoError(t, err) + assert.True(t, info.IsDir()) +} + +func TestWriteFileTool_Integration_ComplexScenario(t *testing.T) { + // Create temp directory + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "complex.txt") + + tool := WriteFileTool{} + + // Step 1: Create initial file + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "# Configuration File\nversion: 1.0\nname: test\nport: 8080\ndebug: false", "mode": "create"}` + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + assert.Contains(t, result, `"success": true`) + + // Step 2: Append new section + input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "\n# Database Config\nhost: localhost\nport: 5432", "mode": "append"}` + result, err = tool.Call(context.Background(), input) + assert.NoError(t, err) + assert.Contains(t, result, `"success": true`) + + // Step 3: Update specific lines (change port and debug) + input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "port: 9090\ndebug: true", "startLine": 4, "endLine": 5}` + result, err = tool.Call(context.Background(), input) + assert.NoError(t, err) + + var response WriteFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + assert.True(t, response.Success) + assert.True(t, response.IsPartial) + + // Verify final content + content, err := os.ReadFile(testFile) + assert.NoError(t, err) + expectedContent := "# Configuration File\nversion: 1.0\nname: test\nport: 9090\ndebug: true\n# Database Config\nhost: localhost\nport: 5432" + assert.Equal(t, expectedContent, string(content)) +} + +func TestWriteFileTool_PartialWrite_InvalidLineRanges(t *testing.T) { + // Create temp directory and initial file + tempDir := t.TempDir() + 
testFile := filepath.Join(tempDir, "test.txt") + + err := os.WriteFile(testFile, []byte("Line 1\nLine 2"), 0644) + require.NoError(t, err) + + tool := WriteFileTool{} + + // Test negative startLine (will be handled by partial write validation) + input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "startLine": -1, "endLine": 1}` + result, err := tool.Call(context.Background(), input) + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "startLine must be") + + // Test negative endLine + input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "startLine": 1, "endLine": -1}` + result, err = tool.Call(context.Background(), input) + assert.NoError(t, err) + assert.Contains(t, result, "error") + assert.Contains(t, result, "endLine must be") +} diff --git a/cli/azd/pkg/llm/azure_openai.go b/cli/azd/pkg/llm/azure_openai.go index dc05b78ec86..8abe43f5f25 100644 --- a/cli/azd/pkg/llm/azure_openai.go +++ b/cli/azd/pkg/llm/azure_openai.go @@ -7,15 +7,18 @@ import ( "fmt" "github.com/azure/azure-dev/cli/azd/pkg/config" + "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/llms/openai" ) type AzureOpenAiModelConfig struct { - Model string `json:"model"` - Version string `json:"version"` - Endpoint string `json:"endpoint"` - Token string `json:"token"` - ApiVersion string `json:"apiVersion"` + Model string `json:"model"` + Version string `json:"version"` + Endpoint string `json:"endpoint"` + Token string `json:"token"` + ApiVersion string `json:"apiVersion"` + Temperature *float64 `json:"temperature"` + MaxTokens *int `json:"maxTokens"` } type AzureOpenAiModelProvider struct { @@ -53,7 +56,7 @@ func (p *AzureOpenAiModelProvider) CreateModelContainer(opts ...ModelOption) (*M opt(modelContainer) } - model, err := openai.New( + openAiModel, err := openai.New( openai.WithToken(modelConfig.Token), openai.WithBaseURL(modelConfig.Endpoint), openai.WithAPIType(openai.APITypeAzure), @@ -64,8 +67,17 @@ func (p *AzureOpenAiModelProvider) CreateModelContainer(opts ...ModelOption) (*M return nil, fmt.Errorf("failed to create LLM: %w", err) } - model.CallbacksHandler = modelContainer.logger - modelContainer.Model = model + callOptions := []llms.CallOption{} + if modelConfig.Temperature != nil { + callOptions = append(callOptions, llms.WithTemperature(*modelConfig.Temperature)) + } + + if modelConfig.MaxTokens != nil { + callOptions = append(callOptions, llms.WithMaxTokens(*modelConfig.MaxTokens)) + } + + openAiModel.CallbacksHandler = modelContainer.logger + modelContainer.Model = NewModel(openAiModel, callOptions...) return modelContainer, nil } diff --git a/cli/azd/pkg/llm/manager.go b/cli/azd/pkg/llm/manager.go index 8c22732c107..ede625a970e 100644 --- a/cli/azd/pkg/llm/manager.go +++ b/cli/azd/pkg/llm/manager.go @@ -12,7 +12,7 @@ import ( "github.com/tmc/langchaingo/llms" ) -var featureLlm = alpha.MustFeatureKey("llm") +var FeatureLlm = alpha.MustFeatureKey("llm") func IsLlmFeatureEnabled(alphaManager *alpha.FeatureManager) error { if alphaManager == nil { @@ -99,7 +99,7 @@ type NotEnabledError struct { func (e NotEnabledError) Error() string { return fmt.Sprintf("LLM feature is not enabled. 
Run '%s' to enable", - alpha.GetEnableCommand(featureLlm)) + alpha.GetEnableCommand(FeatureLlm)) } // InvalidLlmConfiguration represents an error that occurs when the LLM (Large Language Model) diff --git a/cli/azd/pkg/llm/model.go b/cli/azd/pkg/llm/model.go new file mode 100644 index 00000000000..b00e7730c49 --- /dev/null +++ b/cli/azd/pkg/llm/model.go @@ -0,0 +1,35 @@ +package llm + +import ( + "context" + "fmt" + + "github.com/tmc/langchaingo/llms" +) + +var _ llms.Model = (*Model)(nil) + +// / Wraps an langchaingo model to allow specifying specific call options at create time +type Model struct { + model llms.Model + options []llms.CallOption +} + +func NewModel(model llms.Model, options ...llms.CallOption) *Model { + return &Model{ + model: model, + options: options, + } +} + +func (m *Model) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error) { + allOptions := []llms.CallOption{} + allOptions = append(allOptions, m.options...) + allOptions = append(allOptions, options...) + + return m.model.GenerateContent(ctx, messages, allOptions...) +} + +func (m *Model) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) { + return "", fmt.Errorf("Deprecated, call GenerateContent") +} diff --git a/cli/azd/pkg/llm/model_factory.go b/cli/azd/pkg/llm/model_factory.go index d228465bd52..610c5118dba 100644 --- a/cli/azd/pkg/llm/model_factory.go +++ b/cli/azd/pkg/llm/model_factory.go @@ -1,6 +1,9 @@ package llm import ( + "fmt" + + "github.com/azure/azure-dev/cli/azd/internal" "github.com/azure/azure-dev/cli/azd/pkg/ioc" ) @@ -17,7 +20,10 @@ func NewModelFactory(serviceLocator ioc.ServiceLocator) *ModelFactory { func (f *ModelFactory) CreateModelContainer(modelType LlmType, opts ...ModelOption) (*ModelContainer, error) { var modelProvider ModelProvider if err := f.serviceLocator.ResolveNamed(string(modelType), &modelProvider); err != nil { - return nil, err + return nil, &internal.ErrorWithSuggestion{ + Err: fmt.Errorf("The model type '%s' is not supported. Support types include: azure, ollama", modelType), + Suggestion: "Use `azd config set` to set the model type and any model specific options, such as the model name or version.", + } } return modelProvider.CreateModelContainer(opts...) diff --git a/cli/azd/pkg/output/colors.go b/cli/azd/pkg/output/colors.go index 6daa42beccb..6d4834d023c 100644 --- a/cli/azd/pkg/output/colors.go +++ b/cli/azd/pkg/output/colors.go @@ -5,8 +5,13 @@ package output import ( "fmt" + "os" + "strconv" + "strings" + "github.com/charmbracelet/glamour" "github.com/fatih/color" + "github.com/nathan-fiscaletti/consolesize-go" ) // withLinkFormat creates string with hyperlink-looking color @@ -50,11 +55,58 @@ func WithUnderline(text string, a ...interface{}) string { } // WithBackticks wraps text with the backtick (`) character. -func WithBackticks(text string) string { - return "`" + text + "`" +func WithBackticks(s string) string { + return fmt.Sprintf("`%s`", s) +} + +// WithMarkdown converts markdown to terminal-friendly colorized output using glamour. +// This provides rich markdown rendering including bold, italic, code blocks, headers, etc. 
+func WithMarkdown(markdownText string) string { + // Get dynamic console width with fallback to 120 + consoleWidth := getConsoleWidth() + + // Create a custom glamour renderer with auto-style detection + r, err := glamour.NewTermRenderer( + glamour.WithAutoStyle(), + glamour.WithWordWrap(consoleWidth), // Use dynamic console width + ) + if err != nil { + // Fallback to returning original text if glamour fails + return markdownText + } + + // Render the markdown + rendered, err := r.Render(markdownText) + if err != nil { + // Fallback to returning original text if rendering fails + return markdownText + } + + // Trim trailing whitespace that glamour sometimes adds + return strings.TrimSpace(rendered) } // WithHyperlink wraps text with the colored hyperlink format escape sequence. func WithHyperlink(url string, text string) string { return WithLinkFormat(fmt.Sprintf("\033]8;;%s\007%s\033]8;;\007", url, text)) } + +// getConsoleWidth gets the console width with fallback logic. +// It uses the consolesize package to get the size and falls back to check the COLUMNS environment variable. +// Defaults to 120 if the console size cannot be determined. +func getConsoleWidth() int { + width, _ := consolesize.GetConsoleSize() + if width <= 0 { + // Default to 120 if console size cannot be determined + width = 120 + + consoleWidth := os.Getenv("COLUMNS") + if consoleWidth != "" { + if parsedWidth, err := strconv.Atoi(consoleWidth); err == nil { + width = parsedWidth + } + } + } + + return width +} diff --git a/go.mod b/go.mod index ba009a74964..4e4b44ec27b 100644 --- a/go.mod +++ b/go.mod @@ -92,16 +92,27 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/alecthomas/chroma/v2 v2.14.0 // indirect + github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect + github.com/aymerick/douceur v0.2.0 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/buger/jsonparser v1.1.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect + github.com/charmbracelet/glamour v0.10.0 // indirect + github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 // indirect + github.com/charmbracelet/x/ansi v0.8.0 // indirect + github.com/charmbracelet/x/cellbuf v0.0.13 // indirect + github.com/charmbracelet/x/exp/slice v0.0.0-20250327172914-2fdc97757edf // indirect + github.com/charmbracelet/x/term v0.2.1 // indirect github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/dlclark/regexp2 v1.10.0 // indirect + github.com/dlclark/regexp2 v1.11.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/goph/emperror v0.17.2 // indirect + github.com/gorilla/css v1.0.1 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect github.com/huandu/xstrings v1.3.3 // indirect github.com/imdario/mergo v0.3.13 // indirect @@ -110,14 +121,18 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kylelemons/godebug v1.1.0 // indirect + github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-ieproxy v0.0.12 
// indirect github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect + github.com/microcosm-cc/bluemonday v1.0.27 // indirect github.com/mitchellh/copystructure v1.0.0 // indirect github.com/mitchellh/reflectwalk v1.0.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/muesli/reflow v0.3.0 // indirect + github.com/muesli/termenv v0.16.0 // indirect github.com/nikolalohinski/gonja v1.5.3 // indirect github.com/otiai10/mint v1.6.3 // indirect github.com/pelletier/go-toml/v2 v2.0.9 // indirect @@ -135,8 +150,11 @@ require ( github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect + github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect github.com/yargevad/filepathx v1.0.0 // indirect github.com/yosida95/uritemplate/v3 v3.0.2 // indirect + github.com/yuin/goldmark v1.7.8 // indirect + github.com/yuin/goldmark-emoji v1.0.5 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect go.opentelemetry.io/otel/metric v1.35.0 // indirect diff --git a/go.sum b/go.sum index 81ab09de4a8..42bdd6a3c6f 100644 --- a/go.sum +++ b/go.sum @@ -109,8 +109,12 @@ github.com/PuerkitoBio/goquery v1.8.1/go.mod h1:Q8ICL1kNUJ2sXGoAhPGUdYDJvgQgHzJs github.com/adam-lavrik/go-imath v0.0.0-20210910152346-265a42a96f0b h1:g9SuFmxM/WucQFKTMSP+irxyf5m0RiUJreBDhGI6jSA= github.com/adam-lavrik/go-imath v0.0.0-20210910152346-265a42a96f0b/go.mod h1:XjvqMUpGd3Xn9Jtzk/4GEBCSoBX0eB2RyriXgne0IdM= github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o= +github.com/alecthomas/chroma/v2 v2.14.0 h1:R3+wzpnUArGcQz7fCETQBzO5n9IMNi13iIs46aU4V9E= +github.com/alecthomas/chroma/v2 v2.14.0/go.mod h1:QolEbTfmUHIMVpBqxeDnNBj2uoeI4EbYP4i6n68SG4I= github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss= github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= +github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= +github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= @@ -141,6 +145,20 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk= +github.com/charmbracelet/glamour v0.10.0 h1:MtZvfwsYCx8jEPFJm3rIBFIMZUfUJ765oX8V6kXldcY= +github.com/charmbracelet/glamour v0.10.0/go.mod h1:f+uf+I/ChNmqo087elLnVdCiVgjSKWuXa/l6NU2ndYk= +github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 
h1:ZR7e0ro+SZZiIZD7msJyA+NjkCNNavuiPBLgerbOziE= +github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834/go.mod h1:aKC/t2arECF6rNOnaKaVU6y4t4ZeHQzqfxedE/VkVhA= +github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE= +github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q= +github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k= +github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= +github.com/charmbracelet/x/exp/slice v0.0.0-20250327172914-2fdc97757edf h1:rLG0Yb6MQSDKdB52aGX55JT1oi0P0Kuaj7wi1bLUpnI= +github.com/charmbracelet/x/exp/slice v0.0.0-20250327172914-2fdc97757edf/go.mod h1:B3UgsnsBZS/eX42BlaNiJkD1pPOUa+oF1IYC6Yd2CEU= +github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= +github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -161,6 +179,8 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0= github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= +github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/drone/envsubst v1.0.3 h1:PCIBwNDYjs50AsLZPYdfhSATKaRg/FJmDc2D6+C2x8g= github.com/drone/envsubst v1.0.3/go.mod h1:N2jZmlMufstn1KEqvbHjw40h1KyTmnVzHcSc9bFiJ2g= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= @@ -239,6 +259,8 @@ github.com/goph/emperror v0.17.2 h1:yLapQcmEsO0ipe9p5TaN22djm3OFV/TfM/fcYP0/J18= github.com/goph/emperror v0.17.2/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic= github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= +github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8= +github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= @@ -282,6 +304,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80 h1:6Yzfa6GP0rIo/kULo2bwGEkFvCePZ3qHDDTC3/J9Swo= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/magefile/mage v1.15.0 
h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= @@ -297,6 +321,7 @@ github.com/mattn/go-ieproxy v0.0.12/go.mod h1:Vn+N61199DAnVeTgaF8eoB9PvLO8P3OBnG github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= @@ -304,6 +329,8 @@ github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQ github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58= github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs= +github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk= +github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA= github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY= github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= github.com/microsoft/azure-devops-go-api/azuredevops/v7 v7.1.0 h1:mmJCWLe63QvybxhW1iBmQWEaCKdc4SKgALfTNZ+OphU= @@ -321,6 +348,10 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= +github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= +github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= +github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= github.com/nathan-fiscaletti/consolesize-go v0.0.0-20220204101620-317176b6684d h1:NqRhLdNVlozULwM1B3VaHhcXYSgrOAv8V5BE65om+1Q= github.com/nathan-fiscaletti/consolesize-go v0.0.0-20220204101620-317176b6684d/go.mod h1:cxIIfNMTwff8f/ZvRouvWYF6wOoO7nj99neWSx2q/Es= github.com/nikolalohinski/gonja v1.5.3 h1:GsA+EEaZDZPGJ8JtpeGN78jidhOlxeJROpqMT9fTj9c= @@ -348,6 +379,7 @@ github.com/psanford/memfs v0.0.0-20241019191636-4ef911798f9b h1:xzjEJAHum+mV5Dd5 github.com/psanford/memfs v0.0.0-20241019191636-4ef911798f9b/go.mod h1:tcaRap0jS3eifrEEllL6ZMd9dg8IlDpi2S1oARrQ+NI= github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg 
v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -409,11 +441,18 @@ github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/ github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc= github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA= github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/goldmark v1.7.1/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E= +github.com/yuin/goldmark v1.7.8 h1:iERMLn0/QJeHFhxSt3p6PeN9mGnvIKSpG9YYorDMnic= +github.com/yuin/goldmark v1.7.8/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E= +github.com/yuin/goldmark-emoji v1.0.5 h1:EMVWyCGPlXJfUXBXpuMu+ii3TIaxbVBnEX9uaDC4cIk= +github.com/yuin/goldmark-emoji v1.0.5/go.mod h1:tTkZEbwu5wkPmgTcitqddVxY9osFZiavD+r4AzQrh1U= gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181 h1:K+bMSIx9A7mLES1rtG+qKduLIXq40DAzYHtb0XuCukA= gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181/go.mod h1:dzYhVIwWCtzPAa4QP98wfB9+mzt33MSmM8wsKiMi2ow= gitlab.com/golang-commonmark/linkify v0.0.0-20191026162114-a0c2df6c8f82 h1:oYrL81N608MLZhma3ruL8qTM4xcpYECGut8KSxRY59g= From 1c199501c94633bc6151395b3be2df0f8355ad78 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Thu, 7 Aug 2025 09:55:31 -0700 Subject: [PATCH 052/116] Updates final prompts and output --- cli/azd/cmd/init.go | 83 ++++++++++--------- .../azd/prompts/azd_project_validation.md | 8 +- cli/azd/pkg/output/colors.go | 4 + 3 files changed, 50 insertions(+), 45 deletions(-) diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go index 3c128cb5c53..0116b9901e7 100644 --- a/cli/azd/cmd/init.go +++ b/cli/azd/cmd/init.go @@ -9,6 +9,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "time" "github.com/MakeNowJust/heredoc/v2" @@ -33,6 +34,7 @@ import ( "github.com/azure/azure-dev/cli/azd/pkg/templates" "github.com/azure/azure-dev/cli/azd/pkg/tools" "github.com/azure/azure-dev/cli/azd/pkg/tools/git" + uxlib "github.com/azure/azure-dev/cli/azd/pkg/ux" "github.com/azure/azure-dev/cli/azd/pkg/workflow" "github.com/fatih/color" "github.com/joho/godotenv" @@ -412,27 +414,27 @@ Do not stop until all tasks are complete and fully resolved. initSteps := []initStep{ { Name: "Running Discovery & Analysis", - Description: "Run a deep discovery and analysis on the current working directory. Provide a detailed summary of work performed.", + Description: "Run a deep discovery and analysis on the current working directory.", }, { Name: "Generating Architecture Plan", - Description: "Create a high-level architecture plan for the application. 
Provide a detailed summary of work performed.", + Description: "Create a high-level architecture plan for the application.", }, { Name: "Generating Dockerfile(s)", - Description: "Generate a Dockerfile for the application components as needed. Provide a detailed summary of work performed.", + Description: "Generate a Dockerfile for the application components as needed.", }, { Name: "Generating infrastructure", - Description: "Generate infrastructure as code (IaC) for the application. Provide a detailed summary of work performed.", + Description: "Generate infrastructure as code (IaC) for the application.", }, { Name: "Generating azure.yaml file", - Description: "Generate an azure.yaml file for the application. Provide a detailed summary of work performed.", + Description: "Generate an azure.yaml file for the application.", }, { Name: "Validating project", - Description: "Validate the project structure and configuration. Provide a detailed summary of work performed.", + Description: "Validate the project structure and configuration.", }, } @@ -446,7 +448,11 @@ Do not stop until all tasks are complete and fully resolved. // Run Step i.console.ShowSpinner(ctx, step.Name, input.Step) - fullTaskInput := fmt.Sprintf(taskInput, step.Description) + fullTaskInput := fmt.Sprintf(taskInput, strings.Join([]string{ + step.Description, + "Provide a very brief summary in markdown format that includes any files generated during this step.", + }, "\n")) + agentOutput, err := azdAgent.SendMessage(ctx, fullTaskInput) if err != nil { i.console.StopSpinner(ctx, fmt.Sprintf("%s (With errors)", step.Name), input.StepWarning) @@ -459,8 +465,8 @@ Do not stop until all tasks are complete and fully resolved. i.console.StopSpinner(ctx, step.Name, input.StepDone) i.console.Message(ctx, "") - finalOutput := fmt.Sprintf("%s %s", color.MagentaString("🤖 AZD Copilot:"), output.WithMarkdown(agentOutput)) - i.console.Message(ctx, finalOutput) + i.console.Message(ctx, color.MagentaString("🤖 AZD Copilot:")) + i.console.Message(ctx, output.WithMarkdown(agentOutput)) i.console.Message(ctx, "") } @@ -474,30 +480,38 @@ Do not stop until all tasks are complete and fully resolved. 
 // collectAndApplyFeedback prompts for user feedback and applies it using the agent in a loop
 func (i *initAction) collectAndApplyFeedback(ctx context.Context, azdAgent *agent.ConversationalAzdAiAgent, promptMessage string) error {
-	hasFeedback, err := i.console.Confirm(ctx, input.ConsoleOptions{
-		Message:      promptMessage,
-		DefaultValue: false,
-	})
-	if err != nil {
-		return err
-	}
-
-	if !hasFeedback {
-		i.console.Message(ctx, "")
-		return nil
-	}
-
+	// Loop to allow multiple rounds of feedback
 	for {
-		userInput, err := i.console.Prompt(ctx, input.ConsoleOptions{
-			Message:      "💭 You:",
-			DefaultValue: "",
-			Help:         "Additional context will be provided to AZD Copilot",
+		confirmFeedback := uxlib.NewConfirm(&uxlib.ConfirmOptions{
+			Message:      promptMessage,
+			DefaultValue: uxlib.Ptr(false),
+			HelpMessage:  "You will be able to provide any feedback or changes after each step.",
 		})
+
+		hasFeedback, err := confirmFeedback.Ask(ctx)
+		if err != nil {
+			return err
+		}
+
+		if !*hasFeedback {
+			i.console.Message(ctx, "")
+			break
+		}
+
+		userInputPrompt := uxlib.NewPrompt(&uxlib.PromptOptions{
+			Message:        "💭 You",
+			PlaceHolder:    "Provide feedback or changes to the project",
+			Required:       true,
+			IgnoreHintKeys: true,
+		})
+
+		userInput, err := userInputPrompt.Ask(ctx)
 		if err != nil {
 			return fmt.Errorf("error collecting feedback during azd init, %w", err)
 		}
 
+		i.console.Message(ctx, "")
+
 		if userInput != "" {
 			i.console.ShowSpinner(ctx, "Submitting feedback", input.Step)
 			feedbackOutput, err := azdAgent.SendMessage(ctx, userInput)
@@ -511,23 +525,10 @@ func (i *initAction) collectAndApplyFeedback(ctx context.Context, azdAgent *agen
 			i.console.StopSpinner(ctx, "Submitting feedback", input.StepDone)
 			i.console.Message(ctx, "")
 
-			agentOutput := fmt.Sprintf("%s %s", color.MagentaString("🤖 AZD Copilot:"), output.WithMarkdown(feedbackOutput))
-			i.console.Message(ctx, agentOutput)
+			i.console.Message(ctx, color.MagentaString("🤖 AZD Copilot:"))
+			i.console.Message(ctx, output.WithMarkdown(feedbackOutput))
 			i.console.Message(ctx, "")
 		}
-
-		// Check if user wants to provide more feedback
-		moreFeedback, err := i.console.Confirm(ctx, input.ConsoleOptions{
-			Message:      "Do you have any more feedback or changes?",
-			DefaultValue: false,
-		})
-		if err != nil {
-			return err
-		}
-
-		if !moreFeedback {
-			break
-		}
 	}
 
 	return nil
diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md b/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md
index badbd8a58fb..0a28e9a7bc3 100644
--- a/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md
+++ b/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md
@@ -6,8 +6,8 @@
 2. Execute azure.yaml against azd schema using available tool
 3. Compile and validate all Bicep templates in ./infra directory
 4. Verify AZD environment exists and is properly configured
-5. Run `azd package` to validate service packaging
-6. Execute `azd provision --preview` to test infrastructure deployment
+5. Run `azd package --no-prompt` to validate service packaging
+6. Execute `azd provision --preview --no-prompt` to test infrastructure deployment
 7. Resolve ALL issues found in each validation step before proceeding
 8. Update existing `azd-arch-plan.md` with validation results by adding/updating validation section while preserving existing content
 
@@ -63,7 +63,7 @@
 **4. 
Package Validation:** -- Execute `azd package` command and monitor output +- Execute `azd package --no-prompt` command and monitor output - Verify all service source paths are valid - Check Docker builds complete successfully for containerized services - Ensure all build artifacts are created correctly @@ -71,7 +71,7 @@ **5. Deployment Preview Validation:** -- Execute `azd provision --preview` command +- Execute `azd provision --preview --no-prompt` command - Verify Azure authentication is working - Check resource group creation plan and Bicep module deployment - Ensure parameter values are properly resolved diff --git a/cli/azd/pkg/output/colors.go b/cli/azd/pkg/output/colors.go index 6d4834d023c..c828a122bc9 100644 --- a/cli/azd/pkg/output/colors.go +++ b/cli/azd/pkg/output/colors.go @@ -62,6 +62,10 @@ func WithBackticks(s string) string { // WithMarkdown converts markdown to terminal-friendly colorized output using glamour. // This provides rich markdown rendering including bold, italic, code blocks, headers, etc. func WithMarkdown(markdownText string) string { + markdownText = strings.Trim(markdownText, "\n") + markdownText = strings.TrimPrefix(markdownText, "```markdown") + markdownText = strings.TrimSuffix(markdownText, "```") + // Get dynamic console width with fallback to 120 consoleWidth := getConsoleWidth() From d45f608503097932adc7868c0e58ad1c9d84e261 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Thu, 7 Aug 2025 12:21:36 -0700 Subject: [PATCH 053/116] Fixes all linter & spelling issues --- cli/azd/.vscode/cspell-azd-dictionary.txt | 5 + cli/azd/cmd/init.go | 15 ++- cli/azd/internal/agent/agent.go | 3 + .../internal/agent/conversational_agent.go | 6 +- cli/azd/internal/agent/one_shot_agent.go | 6 +- .../tools/azd/azd_architecture_planning.go | 8 +- .../tools/azd/azd_azure_yaml_generation.go | 8 +- .../agent/tools/azd/azd_discovery_analysis.go | 8 +- .../agent/tools/azd/azd_docker_generation.go | 8 +- .../tools/azd/azd_iac_generation_rules.go | 8 +- .../azd/azd_infrastructure_generation.go | 8 +- .../internal/agent/tools/azd/azd_plan_init.go | 8 +- .../agent/tools/azd/azd_project_validation.go | 8 +- .../agent/tools/azd/azd_yaml_schema.go | 3 + cli/azd/internal/agent/tools/azd/loader.go | 3 + .../agent/tools/azd/prompts/prompts.go | 3 + cli/azd/internal/agent/tools/common/types.go | 3 + .../agent/tools/dev/command_executor.go | 9 +- cli/azd/internal/agent/tools/dev/loader.go | 3 + .../internal/agent/tools/http/http_fetcher.go | 4 + cli/azd/internal/agent/tools/http/loader.go | 3 + .../agent/tools/io/change_directory.go | 11 +- cli/azd/internal/agent/tools/io/copy_file.go | 21 ++- .../agent/tools/io/create_directory.go | 11 +- .../agent/tools/io/current_directory.go | 6 +- .../agent/tools/io/delete_directory.go | 8 +- .../internal/agent/tools/io/delete_file.go | 8 +- .../internal/agent/tools/io/directory_list.go | 13 +- cli/azd/internal/agent/tools/io/file_info.go | 6 +- .../internal/agent/tools/io/file_search.go | 16 ++- cli/azd/internal/agent/tools/io/loader.go | 3 + cli/azd/internal/agent/tools/io/move_file.go | 29 ++++- cli/azd/internal/agent/tools/io/read_file.go | 61 +++++++-- cli/azd/internal/agent/tools/io/write_file.go | 83 +++++++++--- .../agent/tools/io/write_file_test.go | 112 ++++++++++++---- cli/azd/internal/agent/tools/loader.go | 3 + cli/azd/internal/agent/tools/mcp/loader.go | 3 + .../agent/tools/mcp/sampling_handler.go | 8 +- .../internal/agent/tools/weather/loader.go | 18 --- .../internal/agent/tools/weather/weather.go | 121 ------------------ 
cli/azd/pkg/llm/azure_openai.go | 16 ++- cli/azd/pkg/llm/manager.go | 5 - cli/azd/pkg/llm/model.go | 19 ++- cli/azd/pkg/llm/model_factory.go | 6 +- cli/azd/pkg/llm/ollama.go | 36 ++++-- 45 files changed, 501 insertions(+), 252 deletions(-) delete mode 100644 cli/azd/internal/agent/tools/weather/loader.go delete mode 100644 cli/azd/internal/agent/tools/weather/weather.go diff --git a/cli/azd/.vscode/cspell-azd-dictionary.txt b/cli/azd/.vscode/cspell-azd-dictionary.txt index f56a5e00147..f9777d9041c 100644 --- a/cli/azd/.vscode/cspell-azd-dictionary.txt +++ b/cli/azd/.vscode/cspell-azd-dictionary.txt @@ -69,6 +69,7 @@ BUILDNUMBER buildpacks byoi cflags +charmbracelet circleci cmdrecord cmdsubst @@ -76,6 +77,7 @@ cognitiveservices conditionalize consolesize containerapp +containerizable containerapps containerd contoso @@ -144,6 +146,8 @@ ldflags lechnerc77 libc llms +localtools +mcptools memfs mergo mgmt @@ -247,6 +251,7 @@ unsetenvs unsets upgrader utsname +uxlib vite vsrpc vuejs diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go index 0116b9901e7..0b35a210631 100644 --- a/cli/azd/cmd/init.go +++ b/cli/azd/cmd/init.go @@ -389,6 +389,9 @@ func (i *initAction) initAppWithCopilot(ctx context.Context) error { } samplingModelContainer, err := i.llmManager.GetDefaultModel() + if err != nil { + return err + } azdAgent, err := agent.NewConversationalAzdAiAgent( defaultModelContainer.Model, @@ -441,7 +444,11 @@ Do not stop until all tasks are complete and fully resolved. for idx, step := range initSteps { // Collect and apply feedback for next steps if idx > 0 { - if err := i.collectAndApplyFeedback(ctx, azdAgent, "Any feedback before continuing to the next step?"); err != nil { + if err := i.collectAndApplyFeedback( + ctx, + azdAgent, + "Any feedback before continuing to the next step?", + ); err != nil { return err } } @@ -479,7 +486,11 @@ Do not stop until all tasks are complete and fully resolved. } // collectAndApplyFeedback prompts for user feedback and applies it using the agent in a loop -func (i *initAction) collectAndApplyFeedback(ctx context.Context, azdAgent *agent.ConversationalAzdAiAgent, promptMessage string) error { +func (i *initAction) collectAndApplyFeedback( + ctx context.Context, + azdAgent *agent.ConversationalAzdAiAgent, + promptMessage string, +) error { // Loop to allow multiple rounds of feedback for { confirmFeedback := uxlib.NewConfirm(&uxlib.ConfirmOptions{ diff --git a/cli/azd/internal/agent/agent.go b/cli/azd/internal/agent/agent.go index dc3f434e3d3..8dc07ae2668 100644 --- a/cli/azd/internal/agent/agent.go +++ b/cli/azd/internal/agent/agent.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ package agent import ( diff --git a/cli/azd/internal/agent/conversational_agent.go b/cli/azd/internal/agent/conversational_agent.go index 49b6c20e850..1c6622cac7d 100644 --- a/cli/azd/internal/agent/conversational_agent.go +++ b/cli/azd/internal/agent/conversational_agent.go @@ -20,14 +20,14 @@ import ( "github.com/tmc/langchaingo/tools" localtools "github.com/azure/azure-dev/cli/azd/internal/agent/tools" - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" mcptools "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" ) //go:embed prompts/conversational.txt var conversational_prompt_template string -// ConversationalAzdAiAgent represents an enhanced AZD Copilot agent with action tracking, intent validation, and conversation memory +// ConversationalAzdAiAgent represents an enhanced AZD Copilot agent with action tracking, +// intent validation, and conversation memory type ConversationalAzdAiAgent struct { *Agent } @@ -55,7 +55,7 @@ func NewConversationalAzdAiAgent(llm llms.Model, opts ...AgentOption) (*Conversa // Create sampling handler for MCP samplingHandler := mcptools.NewMcpSamplingHandler( azdAgent.samplingModel, - mcp.WithDebug(azdAgent.debug), + mcptools.WithDebug(azdAgent.debug), ) toolLoaders := []localtools.ToolLoader{ diff --git a/cli/azd/internal/agent/one_shot_agent.go b/cli/azd/internal/agent/one_shot_agent.go index e6b5adf427f..e2d8c9adcd2 100644 --- a/cli/azd/internal/agent/one_shot_agent.go +++ b/cli/azd/internal/agent/one_shot_agent.go @@ -15,11 +15,11 @@ import ( "github.com/tmc/langchaingo/tools" localtools "github.com/azure/azure-dev/cli/azd/internal/agent/tools" - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" mcptools "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" ) -// OneShotAzdAiAgent represents an enhanced AZD Copilot agent with action tracking, intent validation, and conversation memory +// OneShotAzdAiAgent represents an enhanced AZD Copilot agent with action tracking, +// intent validation, and conversation memory type OneShotAzdAiAgent struct { *Agent } @@ -43,7 +43,7 @@ func NewOneShotAzdAiAgent(llm llms.Model, opts ...AgentOption) (*OneShotAzdAiAge // Create sampling handler for MCP samplingHandler := mcptools.NewMcpSamplingHandler( azdAgent.samplingModel, - mcp.WithDebug(azdAgent.debug), + mcptools.WithDebug(azdAgent.debug), ) toolLoaders := []localtools.ToolLoader{ diff --git a/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go b/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go index 0368400fabd..aa6ea409a26 100644 --- a/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go +++ b/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( @@ -17,7 +20,10 @@ func (t *AzdArchitecturePlanningTool) Name() string { } func (t *AzdArchitecturePlanningTool) Description() string { - return `Returns instructions for selecting appropriate Azure services for discovered application components and designing infrastructure architecture. The LLM agent should execute these instructions using available tools. + return `Returns instructions for selecting appropriate Azure services for discovered application components and +designing infrastructure architecture. + +The LLM agent should execute these instructions using available tools. 
Use this tool when: - Discovery analysis has been completed and azd-arch-plan.md exists diff --git a/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go b/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go index 5b1f5adb84e..2590eb2c0ba 100644 --- a/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go +++ b/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( @@ -17,7 +20,10 @@ func (t *AzdAzureYamlGenerationTool) Name() string { } func (t *AzdAzureYamlGenerationTool) Description() string { - return `Returns instructions for generating the azure.yaml configuration file with proper service hosting, build, and deployment settings for AZD projects. The LLM agent should execute these instructions using available tools. + return `Returns instructions for generating the azure.yaml configuration file with proper service hosting, +build, and deployment settings for AZD projects. + +The LLM agent should execute these instructions using available tools. Use this tool when: - Architecture planning has been completed and Azure services selected diff --git a/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go b/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go index 5e24f5727fc..f8b13425eea 100644 --- a/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go +++ b/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( @@ -17,7 +20,10 @@ func (t *AzdDiscoveryAnalysisTool) Name() string { } func (t *AzdDiscoveryAnalysisTool) Description() string { - return `Returns instructions for performing comprehensive discovery and analysis of application components to prepare for Azure Developer CLI (AZD) initialization. The LLM agent should execute these instructions using available tools. + return `Returns instructions for performing comprehensive discovery and analysis of application components +to prepare for Azure Developer CLI (AZD) initialization. + +The LLM agent should execute these instructions using available tools. Use this tool when: - Starting Phase 1 of AZD migration process diff --git a/cli/azd/internal/agent/tools/azd/azd_docker_generation.go b/cli/azd/internal/agent/tools/azd/azd_docker_generation.go index c22b590c779..57c03e2d807 100644 --- a/cli/azd/internal/agent/tools/azd/azd_docker_generation.go +++ b/cli/azd/internal/agent/tools/azd/azd_docker_generation.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( @@ -17,7 +20,10 @@ func (t *AzdDockerGenerationTool) Name() string { } func (t *AzdDockerGenerationTool) Description() string { - return `Returns instructions for generating optimized Dockerfiles and container configurations for containerizable services in AZD projects. The LLM agent should execute these instructions using available tools. + return `Returns instructions for generating optimized Dockerfiles and container configurations for containerizable +services in AZD projects. + +The LLM agent should execute these instructions using available tools. 
Use this tool when: - Architecture planning identified services requiring containerization diff --git a/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go b/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go index 2fe68dbeaeb..d55f903e4d2 100644 --- a/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go +++ b/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( @@ -17,7 +20,10 @@ func (t *AzdIacGenerationRulesTool) Name() string { } func (t *AzdIacGenerationRulesTool) Description() string { - return `Returns comprehensive rules and guidelines for generating Bicep Infrastructure as Code files and modules for AZD projects. The LLM agent should reference these rules when generating infrastructure code. + return `Returns comprehensive rules and guidelines for generating Bicep Infrastructure as Code files and modules +for AZD projects. + +The LLM agent should reference these rules when generating infrastructure code. Use this tool when: - Generating any Bicep infrastructure templates for AZD projects diff --git a/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go b/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go index b147a99b3aa..3c3e7cf52b9 100644 --- a/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go +++ b/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( @@ -17,7 +20,10 @@ func (t *AzdInfrastructureGenerationTool) Name() string { } func (t *AzdInfrastructureGenerationTool) Description() string { - return `Returns instructions for generating modular Bicep infrastructure templates following Azure security and operational best practices for AZD projects. The LLM agent should execute these instructions using available tools. + return `Returns instructions for generating modular Bicep infrastructure templates following Azure security and +operational best practices for AZD projects. + +The LLM agent should execute these instructions using available tools. Use this tool when: - Architecture planning completed with Azure services selected diff --git a/cli/azd/internal/agent/tools/azd/azd_plan_init.go b/cli/azd/internal/agent/tools/azd/azd_plan_init.go index 0c246d46b92..a6eb422ab78 100644 --- a/cli/azd/internal/agent/tools/azd/azd_plan_init.go +++ b/cli/azd/internal/agent/tools/azd/azd_plan_init.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( @@ -17,7 +20,10 @@ func (t *AzdPlanInitTool) Name() string { } func (t *AzdPlanInitTool) Description() string { - return `Returns instructions for orchestrating complete AZD application initialization using structured phases with specialized tools. The LLM agent should execute these instructions using available tools. + return `Returns instructions for orchestrating complete AZD application initialization using structured phases +with specialized tools. + +The LLM agent should execute these instructions using available tools. 
Use this tool when: - Starting new AZD project initialization or migration diff --git a/cli/azd/internal/agent/tools/azd/azd_project_validation.go b/cli/azd/internal/agent/tools/azd/azd_project_validation.go index 2a856a5596a..7645fac9ca0 100644 --- a/cli/azd/internal/agent/tools/azd/azd_project_validation.go +++ b/cli/azd/internal/agent/tools/azd/azd_project_validation.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( @@ -20,7 +23,10 @@ func (t *AzdProjectValidationTool) Name() string { // Description returns the description of the tool. func (t *AzdProjectValidationTool) Description() string { - return `Returns instructions for validating AZD project by running comprehensive checks on azure.yaml schema, Bicep templates, environment setup, packaging, and deployment preview. The LLM agent should execute these instructions using available tools. + return `Returns instructions for validating AZD project by running comprehensive checks on azure.yaml schema, +Bicep templates, environment setup, packaging, and deployment preview. + +The LLM agent should execute these instructions using available tools. Use this tool when: - All AZD configuration files have been generated diff --git a/cli/azd/internal/agent/tools/azd/azd_yaml_schema.go b/cli/azd/internal/agent/tools/azd/azd_yaml_schema.go index d9577f92af8..678d268d7ea 100644 --- a/cli/azd/internal/agent/tools/azd/azd_yaml_schema.go +++ b/cli/azd/internal/agent/tools/azd/azd_yaml_schema.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( diff --git a/cli/azd/internal/agent/tools/azd/loader.go b/cli/azd/internal/agent/tools/azd/loader.go index 6d81740a6ac..f55b9c93e77 100644 --- a/cli/azd/internal/agent/tools/azd/loader.go +++ b/cli/azd/internal/agent/tools/azd/loader.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package azd import ( diff --git a/cli/azd/internal/agent/tools/azd/prompts/prompts.go b/cli/azd/internal/agent/tools/azd/prompts/prompts.go index a08d194ce7d..7273140321a 100644 --- a/cli/azd/internal/agent/tools/azd/prompts/prompts.go +++ b/cli/azd/internal/agent/tools/azd/prompts/prompts.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package prompts import ( diff --git a/cli/azd/internal/agent/tools/common/types.go b/cli/azd/internal/agent/tools/common/types.go index 47f14eea64e..b8740f01b06 100644 --- a/cli/azd/internal/agent/tools/common/types.go +++ b/cli/azd/internal/agent/tools/common/types.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package common // ErrorResponse represents a JSON error response structure that can be reused across all tools diff --git a/cli/azd/internal/agent/tools/dev/command_executor.go b/cli/azd/internal/agent/tools/dev/command_executor.go index 6f0fc33bdaa..1ecc9041318 100644 --- a/cli/azd/internal/agent/tools/dev/command_executor.go +++ b/cli/azd/internal/agent/tools/dev/command_executor.go @@ -1,8 +1,12 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ package dev import ( "context" "encoding/json" + "errors" "fmt" "os" "os/exec" @@ -168,6 +172,7 @@ func (t CommandExecutorTool) executeCommand(ctx context.Context, command string, if runtime.GOOS == "windows" { // On Windows, use cmd.exe to handle built-in commands and path resolution allArgs := append([]string{"/C", command}, args...) + // #nosec G204 - Command execution is the intended functionality of this tool cmd = exec.CommandContext(ctx, "cmd", allArgs...) } else { // On Unix-like systems, use sh for better command resolution @@ -175,6 +180,7 @@ func (t CommandExecutorTool) executeCommand(ctx context.Context, command string, if len(args) > 0 { fullCommand += " " + strings.Join(args, " ") } + // #nosec G204 - Command execution is the intended functionality of this tool cmd = exec.CommandContext(ctx, "sh", "-c", fullCommand) } @@ -198,7 +204,8 @@ func (t CommandExecutorTool) executeCommand(ctx context.Context, command string, var cmdError error if err != nil { - if exitError, ok := err.(*exec.ExitError); ok { + var exitError *exec.ExitError + if errors.As(err, &exitError) { // Command ran but exited with non-zero code - this is normal exitCode = exitError.ExitCode() cmdError = nil // Don't treat non-zero exit as a system error diff --git a/cli/azd/internal/agent/tools/dev/loader.go b/cli/azd/internal/agent/tools/dev/loader.go index 3b938213ed0..1028825fb22 100644 --- a/cli/azd/internal/agent/tools/dev/loader.go +++ b/cli/azd/internal/agent/tools/dev/loader.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package dev import ( diff --git a/cli/azd/internal/agent/tools/http/http_fetcher.go b/cli/azd/internal/agent/tools/http/http_fetcher.go index cbd3628506b..7a4cb1c9c24 100644 --- a/cli/azd/internal/agent/tools/http/http_fetcher.go +++ b/cli/azd/internal/agent/tools/http/http_fetcher.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package http import ( @@ -27,6 +30,7 @@ func (t HTTPFetcherTool) Call(ctx context.Context, input string) (string, error) t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("http_fetcher: %s", input)) } + // #nosec G107 - HTTP requests with variable URLs are the intended functionality of this tool resp, err := http.Get(input) if err != nil { toolErr := fmt.Errorf("failed to fetch URL %s: %w", input, err) diff --git a/cli/azd/internal/agent/tools/http/loader.go b/cli/azd/internal/agent/tools/http/loader.go index 2233455e3e8..32a1ce4dbc8 100644 --- a/cli/azd/internal/agent/tools/http/loader.go +++ b/cli/azd/internal/agent/tools/http/loader.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package http import ( diff --git a/cli/azd/internal/agent/tools/io/change_directory.go b/cli/azd/internal/agent/tools/io/change_directory.go index 889d07d8041..b942e09b458 100644 --- a/cli/azd/internal/agent/tools/io/change_directory.go +++ b/cli/azd/internal/agent/tools/io/change_directory.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -19,7 +22,8 @@ func (t ChangeDirectoryTool) Name() string { } func (t ChangeDirectoryTool) Description() string { - return "Change the current working directory. Input: directory path (e.g., '../parent' or './subfolder' or absolute path)" + return "Change the current working directory. 
" + + "Input: directory path (e.g., '../parent' or './subfolder' or absolute path)" } // createErrorResponse creates a JSON error response @@ -63,7 +67,10 @@ func (t ChangeDirectoryTool) Call(ctx context.Context, input string) (string, er return t.createErrorResponse(err, fmt.Sprintf("Directory %s does not exist: %s", absPath, err.Error())) } if !info.IsDir() { - return t.createErrorResponse(fmt.Errorf("%s is not a directory", absPath), fmt.Sprintf("%s is not a directory", absPath)) + return t.createErrorResponse( + fmt.Errorf("%s is not a directory", absPath), + fmt.Sprintf("%s is not a directory", absPath), + ) } // Change directory diff --git a/cli/azd/internal/agent/tools/io/copy_file.go b/cli/azd/internal/agent/tools/io/copy_file.go index 2d0d573295f..0272421454a 100644 --- a/cli/azd/internal/agent/tools/io/copy_file.go +++ b/cli/azd/internal/agent/tools/io/copy_file.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -60,14 +63,23 @@ func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { // Parse as JSON - this is now required if err := json.Unmarshal([]byte(cleanInput), ¶ms); err != nil { - return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"source\": \"file.txt\", \"destination\": \"backup.txt\"}", err.Error())) + return t.createErrorResponse( + err, + fmt.Sprintf( + "Invalid JSON input: %s. Expected format: {\"source\": \"file.txt\", \"destination\": \"backup.txt\"}", + err.Error(), + ), + ) } source := strings.TrimSpace(params.Source) destination := strings.TrimSpace(params.Destination) if source == "" || destination == "" { - return t.createErrorResponse(fmt.Errorf("both source and destination paths are required"), "Both source and destination paths are required") + return t.createErrorResponse( + fmt.Errorf("both source and destination paths are required"), + "Both source and destination paths are required", + ) } // Check if source file exists @@ -77,7 +89,10 @@ func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { } if sourceInfo.IsDir() { - return t.createErrorResponse(fmt.Errorf("source %s is a directory", source), fmt.Sprintf("Source %s is a directory. Use copy_directory for directories", source)) + return t.createErrorResponse( + fmt.Errorf("source %s is a directory", source), + fmt.Sprintf("Source %s is a directory. Use copy_directory for directories", source), + ) } // Open source file diff --git a/cli/azd/internal/agent/tools/io/create_directory.go b/cli/azd/internal/agent/tools/io/create_directory.go index 79db58865cb..57f2e83710e 100644 --- a/cli/azd/internal/agent/tools/io/create_directory.go +++ b/cli/azd/internal/agent/tools/io/create_directory.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -18,7 +21,8 @@ func (t CreateDirectoryTool) Name() string { } func (t CreateDirectoryTool) Description() string { - return "Create a directory (and any necessary parent directories). Input: directory path (e.g., 'docs' or './src/components')" + return "Create a directory (and any necessary parent directories). 
" + + "Input: directory path (e.g., 'docs' or './src/components')" } // createErrorResponse creates a JSON error response @@ -63,7 +67,10 @@ func (t CreateDirectoryTool) Call(ctx context.Context, input string) (string, er } if !info.IsDir() { - return t.createErrorResponse(fmt.Errorf("%s exists but is not a directory", input), fmt.Sprintf("%s exists but is not a directory", input)) + return t.createErrorResponse( + fmt.Errorf("%s exists but is not a directory", input), + fmt.Sprintf("%s exists but is not a directory", input), + ) } // Create success response diff --git a/cli/azd/internal/agent/tools/io/current_directory.go b/cli/azd/internal/agent/tools/io/current_directory.go index 56256b3ea56..0ba2d925c3e 100644 --- a/cli/azd/internal/agent/tools/io/current_directory.go +++ b/cli/azd/internal/agent/tools/io/current_directory.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -17,7 +20,8 @@ func (t CurrentDirectoryTool) Name() string { } func (t CurrentDirectoryTool) Description() string { - return "Get the current working directory to understand the project context. Input: use 'current' or '.' (any input works)" + return "Get the current working directory to understand the project context. " + + "Input: use 'current' or '.' (any input works)" } // createErrorResponse creates a JSON error response diff --git a/cli/azd/internal/agent/tools/io/delete_directory.go b/cli/azd/internal/agent/tools/io/delete_directory.go index 3066cd2d403..27ae1413ce5 100644 --- a/cli/azd/internal/agent/tools/io/delete_directory.go +++ b/cli/azd/internal/agent/tools/io/delete_directory.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -62,7 +65,10 @@ func (t DeleteDirectoryTool) Call(ctx context.Context, input string) (string, er // Make sure it's a directory, not a file if !info.IsDir() { - return t.createErrorResponse(fmt.Errorf("%s is a file, not a directory", input), fmt.Sprintf("%s is a file, not a directory. Use delete_file to remove files", input)) + return t.createErrorResponse( + fmt.Errorf("%s is a file, not a directory", input), + fmt.Sprintf("%s is a file, not a directory. Use delete_file to remove files", input), + ) } // Count contents before deletion for reporting diff --git a/cli/azd/internal/agent/tools/io/delete_file.go b/cli/azd/internal/agent/tools/io/delete_file.go index e5333526286..828c0180e4a 100644 --- a/cli/azd/internal/agent/tools/io/delete_file.go +++ b/cli/azd/internal/agent/tools/io/delete_file.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -62,7 +65,10 @@ func (t DeleteFileTool) Call(ctx context.Context, input string) (string, error) // Make sure it's a file, not a directory if info.IsDir() { - return t.createErrorResponse(fmt.Errorf("%s is a directory, not a file", input), fmt.Sprintf("%s is a directory, not a file. Use delete_directory to remove directories", input)) + return t.createErrorResponse( + fmt.Errorf("%s is a directory, not a file", input), + fmt.Sprintf("%s is a directory, not a file. 
Use delete_directory to remove directories", input), + ) } fileSize := info.Size() diff --git a/cli/azd/internal/agent/tools/io/directory_list.go b/cli/azd/internal/agent/tools/io/directory_list.go index a5f6b92d089..7833026b3ad 100644 --- a/cli/azd/internal/agent/tools/io/directory_list.go +++ b/cli/azd/internal/agent/tools/io/directory_list.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -60,7 +63,10 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro // Parse as JSON - this is now required if err := json.Unmarshal([]byte(cleanInput), ¶ms); err != nil { - return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"path\": \".\", \"includeHidden\": false}", err.Error())) + return t.createErrorResponse( + err, + fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"path\": \".\", \"includeHidden\": false}", err.Error()), + ) } // Validate required path field @@ -97,7 +103,10 @@ func (t DirectoryListTool) Call(ctx context.Context, input string) (string, erro } if !info.IsDir() { - return t.createErrorResponse(fmt.Errorf("%s is not a directory", absPath), fmt.Sprintf("%s is not a directory", absPath)) + return t.createErrorResponse( + fmt.Errorf("%s is not a directory", absPath), + fmt.Sprintf("%s is not a directory", absPath), + ) } // Read directory contents diff --git a/cli/azd/internal/agent/tools/io/file_info.go b/cli/azd/internal/agent/tools/io/file_info.go index 57d53ddb906..f05763acf8f 100644 --- a/cli/azd/internal/agent/tools/io/file_info.go +++ b/cli/azd/internal/agent/tools/io/file_info.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -19,7 +22,8 @@ func (t FileInfoTool) Name() string { } func (t FileInfoTool) Description() string { - return "Get information about a file (size, modification time, permissions). Input: file path (e.g., 'data.txt' or './docs/readme.md'). Returns JSON with file information." + return "Get information about a file (size, modification time, permissions). " + + "Input: file path (e.g., 'data.txt' or './docs/readme.md'). Returns JSON with file information." } // createErrorResponse creates a JSON error response diff --git a/cli/azd/internal/agent/tools/io/file_search.go b/cli/azd/internal/agent/tools/io/file_search.go index 84ea580c93e..700274d1479 100644 --- a/cli/azd/internal/agent/tools/io/file_search.go +++ b/cli/azd/internal/agent/tools/io/file_search.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -24,7 +27,8 @@ func (t FileSearchTool) Name() string { } func (t FileSearchTool) Description() string { - return `Search for files matching a glob pattern in the current working directory using the doublestar library for full glob support. + return `Searches for files matching a glob pattern in the current working directory +using the doublestar library for full glob support. Input: JSON payload with the following structure: { @@ -96,13 +100,19 @@ func (t FileSearchTool) createErrorResponse(err error, message string) (string, func (t FileSearchTool) Call(ctx context.Context, input string) (string, error) { if input == "" { - return t.createErrorResponse(fmt.Errorf("input is required"), "Input is required. 
Expected JSON format: {\"pattern\": \"*.go\"}") + return t.createErrorResponse( + fmt.Errorf("input is required"), + "Input is required. Expected JSON format: {\"pattern\": \"*.go\"}", + ) } // Parse JSON input var req FileSearchRequest if err := json.Unmarshal([]byte(input), &req); err != nil { - return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"pattern\": \"*.go\", \"maxResults\": 50}", err.Error())) + return t.createErrorResponse( + err, + fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"pattern\": \"*.go\", \"maxResults\": 50}", err.Error()), + ) } // Validate required fields diff --git a/cli/azd/internal/agent/tools/io/loader.go b/cli/azd/internal/agent/tools/io/loader.go index bf5e95a9f3f..1880f0e15d5 100644 --- a/cli/azd/internal/agent/tools/io/loader.go +++ b/cli/azd/internal/agent/tools/io/loader.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( diff --git a/cli/azd/internal/agent/tools/io/move_file.go b/cli/azd/internal/agent/tools/io/move_file.go index 98d77ac6cc3..9956580c381 100644 --- a/cli/azd/internal/agent/tools/io/move_file.go +++ b/cli/azd/internal/agent/tools/io/move_file.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -18,7 +21,8 @@ func (t MoveFileTool) Name() string { } func (t MoveFileTool) Description() string { - return "Move or rename a file. Input format: 'source|destination' (e.g., 'old.txt|new.txt' or './file.txt|./folder/file.txt')" + return "Move or rename a file.\n" + + "Input format: 'source|destination' (e.g., 'old.txt|new.txt' or './file.txt|./folder/file.txt')" } // createErrorResponse creates a JSON error response @@ -48,7 +52,10 @@ func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { input = strings.TrimSpace(input) if input == "" { - return t.createErrorResponse(fmt.Errorf("input is required in format 'source|destination'"), "Input is required in format 'source|destination'") + return t.createErrorResponse( + fmt.Errorf("input is required in format 'source|destination'"), + "Input is required in format 'source|destination'", + ) } // Split on first occurrence of '|' to separate source from destination @@ -61,7 +68,10 @@ func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { destination := strings.TrimSpace(parts[1]) if source == "" || destination == "" { - return t.createErrorResponse(fmt.Errorf("both source and destination paths are required"), "Both source and destination paths are required") + return t.createErrorResponse( + fmt.Errorf("both source and destination paths are required"), + "Both source and destination paths are required", + ) } // Check if source exists @@ -75,7 +85,10 @@ func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { // Check if destination already exists if _, err := os.Stat(destination); err == nil { - return t.createErrorResponse(fmt.Errorf("destination %s already exists", destination), fmt.Sprintf("Destination %s already exists", destination)) + return t.createErrorResponse( + fmt.Errorf("destination %s already exists", destination), + fmt.Sprintf("Destination %s already exists", destination), + ) } // Move/rename the file @@ -105,7 +118,13 @@ func (t MoveFileTool) Call(ctx context.Context, input string) (string, error) { Destination: destination, Type: fileType, Size: sourceInfo.Size(), - Message: 
fmt.Sprintf("Successfully moved %s from %s to %s (%d bytes)", fileType, source, destination, sourceInfo.Size()), + Message: fmt.Sprintf( + "Successfully moved %s from %s to %s (%d bytes)", + fileType, + source, + destination, + sourceInfo.Size(), + ), } // Convert to JSON diff --git a/cli/azd/internal/agent/tools/io/read_file.go b/cli/azd/internal/agent/tools/io/read_file.go index 9547d62054f..9a60c65c912 100644 --- a/cli/azd/internal/agent/tools/io/read_file.go +++ b/cli/azd/internal/agent/tools/io/read_file.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -54,7 +57,8 @@ func (t ReadFileTool) Name() string { } func (t ReadFileTool) Description() string { - return `Read file contents with intelligent handling for different file sizes and partial reads. Returns JSON response with file content and metadata. + return `Read file contents with intelligent handling for different file sizes and partial reads. +Returns JSON response with file content and metadata. Input: JSON payload with the following structure: { @@ -79,7 +83,8 @@ Examples: 5. Read single line: {"filePath": "package.json", "startLine": 42, "endLine": 42} -Files larger than 100KB are automatically truncated. Files over 1MB show size info only unless specific line range is requested. +Files larger than 100KB are automatically truncated. +Files over 1MB show size info only unless specific line range is requested. The input must be formatted as a single line valid JSON string.` } @@ -106,13 +111,23 @@ func (t ReadFileTool) createErrorResponse(err error, message string) (string, er func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { if input == "" { - return t.createErrorResponse(fmt.Errorf("empty input"), "No input provided. Expected JSON format: {\"filePath\": \"path/to/file.txt\"}") + return t.createErrorResponse( + fmt.Errorf("empty input"), + "No input provided. Expected JSON format: {\"filePath\": \"path/to/file.txt\"}", + ) } // Parse JSON input var req ReadFileRequest if err := json.Unmarshal([]byte(input), &req); err != nil { - return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input: %s. Expected format: {\"filePath\": \"path/to/file.txt\", \"startLine\": 1, \"endLine\": 50}", err.Error())) + return t.createErrorResponse( + err, + fmt.Sprintf( + "Invalid JSON input: %s. "+ + "Expected format: {\"filePath\": \"path/to/file.txt\", \"startLine\": 1, \"endLine\": 50}", + err.Error(), + ), + ) } // Validate required fields @@ -124,19 +139,32 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { fileInfo, err := os.Stat(req.FilePath) if err != nil { if os.IsNotExist(err) { - return t.createErrorResponse(err, fmt.Sprintf("File does not exist: %s. Please check file path spelling and location", req.FilePath)) + return t.createErrorResponse( + err, + fmt.Sprintf("File does not exist: %s. Please check file path spelling and location", req.FilePath), + ) } return t.createErrorResponse(err, fmt.Sprintf("Cannot access file %s: %s", req.FilePath, err.Error())) } if fileInfo.IsDir() { - return t.createErrorResponse(fmt.Errorf("path is a directory"), fmt.Sprintf("%s is a directory, not a file. Use directory_list tool for directories", req.FilePath)) + return t.createErrorResponse( + fmt.Errorf("path is a directory"), + fmt.Sprintf("%s is a directory, not a file. 
Use directory_list tool for directories", req.FilePath), + ) } // Handle very large files (>1MB) - require line range const maxFileSize = 1024 * 1024 // 1MB if fileInfo.Size() > maxFileSize && req.StartLine == 0 && req.EndLine == 0 { - return t.createErrorResponse(fmt.Errorf("file too large"), fmt.Sprintf("File %s is too large (%d bytes). Please specify startLine and endLine to read specific sections", req.FilePath, fileInfo.Size())) + return t.createErrorResponse( + fmt.Errorf("file too large"), + fmt.Sprintf( + "File %s is too large (%d bytes). Please specify startLine and endLine to read specific sections", + req.FilePath, + fileInfo.Size(), + ), + ) } // Read file content @@ -178,10 +206,16 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { // Validate line range if startLine > totalLines { - return t.createErrorResponse(fmt.Errorf("start line out of range"), fmt.Sprintf("Start line %d is greater than total lines %d in file", startLine, totalLines)) + return t.createErrorResponse( + fmt.Errorf("start line out of range"), + fmt.Sprintf("Start line %d is greater than total lines %d in file", startLine, totalLines), + ) } if startLine > endLine { - return t.createErrorResponse(fmt.Errorf("invalid line range"), fmt.Sprintf("Start line %d is greater than end line %d", startLine, endLine)) + return t.createErrorResponse( + fmt.Errorf("invalid line range"), + fmt.Sprintf("Start line %d is greater than end line %d", startLine, endLine), + ) } // Adjust endLine if it exceeds total lines @@ -231,9 +265,14 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { // Set appropriate message if isPartial && lineRange != nil { - response.Message = fmt.Sprintf("Successfully read %d lines (%d-%d) from file", lineRange.LinesRead, lineRange.StartLine, lineRange.EndLine) + response.Message = fmt.Sprintf( + "Successfully read %d lines (%d-%d) from file", + lineRange.LinesRead, + lineRange.StartLine, + lineRange.EndLine, + ) } else if isTruncated { - response.Message = fmt.Sprintf("Successfully read file (content truncated due to size)") + response.Message = "Successfully read file (content truncated due to size)" } else { response.Message = fmt.Sprintf("Successfully read entire file (%d lines)", totalLines) } diff --git a/cli/azd/internal/agent/tools/io/write_file.go b/cli/azd/internal/agent/tools/io/write_file.go index 18ab45b42d0..e6f4d3f5790 100644 --- a/cli/azd/internal/agent/tools/io/write_file.go +++ b/cli/azd/internal/agent/tools/io/write_file.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -55,7 +58,8 @@ func (t WriteFileTool) Name() string { } func (t WriteFileTool) Description() string { - return `Comprehensive file writing tool that handles full file writes, appends, and line-based partial updates. Returns JSON response with operation details. + return `Comprehensive file writing tool that handles full file writes, appends, and line-based partial updates. +Returns JSON response with operation details. Input: JSON payload with the following structure: { @@ -133,7 +137,14 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { // Debug: Check for common JSON issues input = strings.TrimSpace(input) if !strings.HasPrefix(input, "{") || !strings.HasSuffix(input, "}") { - return t.createErrorResponse(fmt.Errorf("malformed JSON structure"), fmt.Sprintf("Invalid JSON input: Input does not appear to be valid JSON object. 
Starts with: %q, Ends with: %q", input[:min(10, len(input))], input[max(0, len(input)-10):])) + return t.createErrorResponse( + fmt.Errorf("malformed JSON structure"), + fmt.Sprintf( + "Invalid JSON input: Input does not appear to be valid JSON object. Starts with: %q, Ends with: %q", + input[:min(10, len(input))], + input[max(0, len(input)-10):], + ), + ) } // Parse JSON input @@ -144,7 +155,10 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { if len(input) > 200 { truncatedInput = input[:200] + "...[truncated]" } - return t.createErrorResponse(err, fmt.Sprintf("Invalid JSON input. Error: %s. Input (first 200 chars): %s", err.Error(), truncatedInput)) + return t.createErrorResponse( + err, + fmt.Sprintf("Invalid JSON input. Error: %s. Input (first 200 chars): %s", err.Error(), truncatedInput), + ) } // Validate required fields @@ -165,20 +179,33 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { // If any line number is provided, both must be provided and valid if hasStartLine || hasEndLine { if !hasStartLine || !hasEndLine { - return t.createErrorResponse(fmt.Errorf("both startLine and endLine must be provided for partial write"), "Both startLine and endLine must be provided for partial write") + return t.createErrorResponse( + fmt.Errorf("both startLine and endLine must be provided for partial write"), + "Both startLine and endLine must be provided for partial write", + ) } // Validate that file exists for partial write BEFORE attempting filePath := strings.TrimSpace(req.Filename) if _, err := os.Stat(filePath); os.IsNotExist(err) { - return t.createErrorResponse(err, fmt.Sprintf("Cannot perform partial write on file '%s' because it does not exist. For new files, omit startLine and endLine parameters to create the entire file", filePath)) + return t.createErrorResponse( + err, + fmt.Sprintf( + "Cannot perform partial write on file '%s' because it does not exist. 
"+ + "For new files, omit startLine and endLine parameters to create the entire file", + filePath, + ), + ) } // Smart write mode: this should be a partial write if mode == "write" { return t.handlePartialWrite(ctx, req) } else { - return t.createErrorResponse(fmt.Errorf("startLine and endLine can only be used with write mode"), "startLine and endLine can only be used with write mode") + return t.createErrorResponse( + fmt.Errorf("startLine and endLine can only be used with write mode"), + "startLine and endLine can only be used with write mode", + ) } } @@ -196,7 +223,10 @@ func (t WriteFileTool) handlePartialWrite(ctx context.Context, req WriteFileRequ return t.createErrorResponse(fmt.Errorf("invalid endLine: %d", req.EndLine), "endLine must be >= 1") } if req.StartLine > req.EndLine { - return t.createErrorResponse(fmt.Errorf("invalid line range: startLine=%d > endLine=%d", req.StartLine, req.EndLine), "startLine cannot be greater than endLine") + return t.createErrorResponse( + fmt.Errorf("invalid line range: startLine=%d > endLine=%d", req.StartLine, req.EndLine), + "startLine cannot be greater than endLine", + ) } filePath := strings.TrimSpace(req.Filename) @@ -256,14 +286,18 @@ func (t WriteFileTool) handlePartialWrite(ctx context.Context, req WriteFileRequ finalContent := strings.Join(result, lineEnding) // If original file had trailing newline, preserve it - if len(fileBytes) > 0 && (string(fileBytes[len(fileBytes)-1:]) == "\n" || strings.HasSuffix(string(fileBytes), lineEnding)) { + if len(fileBytes) > 0 && + (string(fileBytes[len(fileBytes)-1:]) == "\n" || strings.HasSuffix(string(fileBytes), lineEnding)) { finalContent += lineEnding } // Write the updated content - err = os.WriteFile(filePath, []byte(finalContent), 0644) + err = os.WriteFile(filePath, []byte(finalContent), 0600) if err != nil { - return t.createErrorResponse(err, fmt.Sprintf("Failed to write updated content to file %s: %s", filePath, err.Error())) + return t.createErrorResponse( + err, + fmt.Sprintf("Failed to write updated content to file %s: %s", filePath, err.Error()), + ) } // Get file info @@ -311,7 +345,10 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ // Provide feedback for large content if len(content) > 10000 { - fmt.Printf("📝 Large content detected (%d chars). Consider breaking into smaller edits for better reliability.\n", len(content)) + fmt.Printf( + "📝 Large content detected (%d chars). Consider breaking into smaller edits for better reliability.\n", + len(content), + ) } // Ensure directory exists @@ -325,27 +362,39 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ switch mode { case "create": if _, err := os.Stat(filePath); err == nil { - return t.createErrorResponse(fmt.Errorf("file %s already exists (create mode)", filePath), fmt.Sprintf("File %s already exists. Cannot create file in 'create' mode when file already exists", filePath)) + return t.createErrorResponse( + fmt.Errorf("file %s already exists (create mode)", filePath), + fmt.Sprintf( + "File %s already exists. 
Cannot create file in 'create' mode when file already exists", + filePath, + ), + ) } - err = os.WriteFile(filePath, []byte(content), 0644) + err = os.WriteFile(filePath, []byte(content), 0600) operation = "Created" case "append": - file, openErr := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + file, openErr := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600) if openErr != nil { - return t.createErrorResponse(openErr, fmt.Sprintf("Failed to open file for append %s: %s", filePath, openErr.Error())) + return t.createErrorResponse( + openErr, + fmt.Sprintf("Failed to open file for append %s: %s", filePath, openErr.Error()), + ) } defer file.Close() _, err = file.WriteString(content) operation = "Appended to" default: // "write" - err = os.WriteFile(filePath, []byte(content), 0644) + err = os.WriteFile(filePath, []byte(content), 0600) operation = "Wrote" } if err != nil { - return t.createErrorResponse(err, fmt.Sprintf("Failed to %s file %s: %s", strings.ToLower(operation), filePath, err.Error())) + return t.createErrorResponse( + err, + fmt.Sprintf("Failed to %s file %s: %s", strings.ToLower(operation), filePath, err.Error()), + ) } // Get file size for verification diff --git a/cli/azd/internal/agent/tools/io/write_file_test.go b/cli/azd/internal/agent/tools/io/write_file_test.go index 2aacfe62cdb..05fb0cb4937 100644 --- a/cli/azd/internal/agent/tools/io/write_file_test.go +++ b/cli/azd/internal/agent/tools/io/write_file_test.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package io import ( @@ -101,11 +104,15 @@ func TestWriteFileTool_AppendMode(t *testing.T) { testFile := filepath.Join(tempDir, "test.txt") // Create initial file - err := os.WriteFile(testFile, []byte("Initial content"), 0644) + err := os.WriteFile(testFile, []byte("Initial content"), 0600) require.NoError(t, err) tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "\nAppended content", "mode": "append"}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "\nAppended content", "mode": "append"}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -131,7 +138,11 @@ func TestWriteFileTool_CreateMode_Success(t *testing.T) { testFile := filepath.Join(tempDir, "new-file.txt") tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New file content", "mode": "create"}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "New file content", "mode": "create"}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -155,7 +166,7 @@ func TestWriteFileTool_CreateMode_FileExists(t *testing.T) { tempDir := t.TempDir() testFile := filepath.Join(tempDir, "existing.txt") - err := os.WriteFile(testFile, []byte("Existing content"), 0644) + err := os.WriteFile(testFile, []byte("Existing content"), 0600) require.NoError(t, err) tool := WriteFileTool{} @@ -181,11 +192,15 @@ func TestWriteFileTool_PartialWrite_Basic(t *testing.T) { // Create initial file with multiple lines initialContent := "Line 1\nLine 2\nLine 3\nLine 4\nLine 5" - err := os.WriteFile(testFile, []byte(initialContent), 0644) + err := os.WriteFile(testFile, []byte(initialContent), 0600) require.NoError(t, err) tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", 
"\\\\") + `", "content": "Modified Line 2\nModified Line 3", "startLine": 2, "endLine": 3}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "Modified Line 2\nModified Line 3", "startLine": 2, "endLine": 3}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -217,11 +232,15 @@ func TestWriteFileTool_PartialWrite_SingleLine(t *testing.T) { // Create initial file initialContent := "Line 1\nLine 2\nLine 3" - err := os.WriteFile(testFile, []byte(initialContent), 0644) + err := os.WriteFile(testFile, []byte(initialContent), 0600) require.NoError(t, err) tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "Replaced Line 2", "startLine": 2, "endLine": 2}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "Replaced Line 2", "startLine": 2, "endLine": 2}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -251,12 +270,16 @@ func TestWriteFileTool_PartialWrite_SingleLineToMultipleLines(t *testing.T) { // Create initial file initialContent := "Line 1\nLine 2\nLine 3\nLine 4" - err := os.WriteFile(testFile, []byte(initialContent), 0644) + err := os.WriteFile(testFile, []byte(initialContent), 0600) require.NoError(t, err) tool := WriteFileTool{} // Replace single line 2 with multiple lines - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New Line 2a\nNew Line 2b\nNew Line 2c", "startLine": 2, "endLine": 2}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "New Line 2a\nNew Line 2b\nNew Line 2c", "startLine": 2, "endLine": 2}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -287,7 +310,11 @@ func TestWriteFileTool_PartialWrite_FileNotExists(t *testing.T) { testFile := filepath.Join(tempDir, "nonexistent.txt") tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New content", "startLine": 1, "endLine": 1}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "New content", "startLine": 1, "endLine": 1}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -304,7 +331,7 @@ func TestWriteFileTool_PartialWrite_InvalidLineNumbers(t *testing.T) { tempDir := t.TempDir() testFile := filepath.Join(tempDir, "test.txt") - err := os.WriteFile(testFile, []byte("Line 1\nLine 2"), 0644) + err := os.WriteFile(testFile, []byte("Line 1\nLine 2"), 0600) require.NoError(t, err) tool := WriteFileTool{} @@ -324,14 +351,22 @@ func TestWriteFileTool_PartialWrite_InvalidLineNumbers(t *testing.T) { assert.Contains(t, result, "Both startLine and endLine must be provided") // Test startLine < 1 (this will trigger the partial write validation) - input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "startLine": 0, "endLine": 1}` + input = `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "content", "startLine": 0, "endLine": 1}` result, err = tool.Call(context.Background(), input) assert.NoError(t, err) assert.Contains(t, result, "error") assert.Contains(t, result, "Both startLine and endLine must be provided") // 0 is treated as "not provided" // Test valid line numbers but startLine > endLine - input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", 
"content": "content", "startLine": 3, "endLine": 1}` + input = `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "content", "startLine": 3, "endLine": 1}` result, err = tool.Call(context.Background(), input) assert.NoError(t, err) assert.Contains(t, result, "error") @@ -345,12 +380,16 @@ func TestWriteFileTool_PartialWrite_BeyondFileLength(t *testing.T) { // Create initial file with 3 lines initialContent := "Line 1\nLine 2\nLine 3" - err := os.WriteFile(testFile, []byte(initialContent), 0644) + err := os.WriteFile(testFile, []byte(initialContent), 0600) require.NoError(t, err) tool := WriteFileTool{} // Try to replace lines 2-5 (beyond file length) - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New content", "startLine": 2, "endLine": 5}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "New content", "startLine": 2, "endLine": 5}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -377,11 +416,15 @@ func TestWriteFileTool_PartialWrite_PreserveLineEndings(t *testing.T) { // Create initial file with CRLF line endings initialContent := "Line 1\r\nLine 2\r\nLine 3\r\n" - err := os.WriteFile(testFile, []byte(initialContent), 0644) + err := os.WriteFile(testFile, []byte(initialContent), 0600) require.NoError(t, err) tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "Modified Line 2", "startLine": 2, "endLine": 2}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "Modified Line 2", "startLine": 2, "endLine": 2}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -440,19 +483,31 @@ func TestWriteFileTool_Integration_ComplexScenario(t *testing.T) { tool := WriteFileTool{} // Step 1: Create initial file - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "# Configuration File\nversion: 1.0\nname: test\nport: 8080\ndebug: false", "mode": "create"}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "# Configuration File\nversion: 1.0\nname: test\nport: 8080\ndebug: false", "mode": "create"}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) assert.Contains(t, result, `"success": true`) // Step 2: Append new section - input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "\n# Database Config\nhost: localhost\nport: 5432", "mode": "append"}` + input = `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "\n# Database Config\nhost: localhost\nport: 5432", "mode": "append"}` result, err = tool.Call(context.Background(), input) assert.NoError(t, err) assert.Contains(t, result, `"success": true`) // Step 3: Update specific lines (change port and debug) - input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "port: 9090\ndebug: true", "startLine": 4, "endLine": 5}` + input = `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "port: 9090\ndebug: true", "startLine": 4, "endLine": 5}` result, err = tool.Call(context.Background(), input) assert.NoError(t, err) @@ -465,6 +520,7 @@ func TestWriteFileTool_Integration_ComplexScenario(t *testing.T) { // Verify final content content, err := os.ReadFile(testFile) assert.NoError(t, err) + //nolint:lll 
expectedContent := "# Configuration File\nversion: 1.0\nname: test\nport: 9090\ndebug: true\n# Database Config\nhost: localhost\nport: 5432" assert.Equal(t, expectedContent, string(content)) } @@ -474,20 +530,28 @@ func TestWriteFileTool_PartialWrite_InvalidLineRanges(t *testing.T) { tempDir := t.TempDir() testFile := filepath.Join(tempDir, "test.txt") - err := os.WriteFile(testFile, []byte("Line 1\nLine 2"), 0644) + err := os.WriteFile(testFile, []byte("Line 1\nLine 2"), 0600) require.NoError(t, err) tool := WriteFileTool{} // Test negative startLine (will be handled by partial write validation) - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "startLine": -1, "endLine": 1}` + input := `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "content", "startLine": -1, "endLine": 1}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) assert.Contains(t, result, "error") assert.Contains(t, result, "startLine must be") // Test negative endLine - input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "startLine": 1, "endLine": -1}` + input = `{"filename": "` + strings.ReplaceAll( + testFile, + "\\", + "\\\\", + ) + `", "content": "content", "startLine": 1, "endLine": -1}` result, err = tool.Call(context.Background(), input) assert.NoError(t, err) assert.Contains(t, result, "error") diff --git a/cli/azd/internal/agent/tools/loader.go b/cli/azd/internal/agent/tools/loader.go index 75be6baefe6..ec573ceac60 100644 --- a/cli/azd/internal/agent/tools/loader.go +++ b/cli/azd/internal/agent/tools/loader.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package tools import ( diff --git a/cli/azd/internal/agent/tools/mcp/loader.go b/cli/azd/internal/agent/tools/mcp/loader.go index 7ad5fdc1bd1..76346c11113 100644 --- a/cli/azd/internal/agent/tools/mcp/loader.go +++ b/cli/azd/internal/agent/tools/mcp/loader.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package mcp import ( diff --git a/cli/azd/internal/agent/tools/mcp/sampling_handler.go b/cli/azd/internal/agent/tools/mcp/sampling_handler.go index ffd948429c6..0af9f051f4a 100644 --- a/cli/azd/internal/agent/tools/mcp/sampling_handler.go +++ b/cli/azd/internal/agent/tools/mcp/sampling_handler.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ package mcp import ( @@ -46,7 +49,10 @@ func (h *McpSamplingHandler) cleanContent(content string) string { return content } -func (h *McpSamplingHandler) CreateMessage(ctx context.Context, request mcp.CreateMessageRequest) (*mcp.CreateMessageResult, error) { +func (h *McpSamplingHandler) CreateMessage( + ctx context.Context, + request mcp.CreateMessageRequest, +) (*mcp.CreateMessageResult, error) { if h.debug { requestJson, err := json.MarshalIndent(request, "", " ") if err != nil { diff --git a/cli/azd/internal/agent/tools/weather/loader.go b/cli/azd/internal/agent/tools/weather/loader.go deleted file mode 100644 index afdf7894f68..00000000000 --- a/cli/azd/internal/agent/tools/weather/loader.go +++ /dev/null @@ -1,18 +0,0 @@ -package weather - -import ( - "github.com/tmc/langchaingo/tools" -) - -// WeatherToolsLoader loads weather-related tools -type WeatherToolsLoader struct{} - -func NewWeatherToolsLoader() *WeatherToolsLoader { - return &WeatherToolsLoader{} -} - -func (l *WeatherToolsLoader) LoadTools() ([]tools.Tool, error) { - return []tools.Tool{ - &WeatherTool{}, - }, nil -} diff --git a/cli/azd/internal/agent/tools/weather/weather.go b/cli/azd/internal/agent/tools/weather/weather.go deleted file mode 100644 index 0f8837c5124..00000000000 --- a/cli/azd/internal/agent/tools/weather/weather.go +++ /dev/null @@ -1,121 +0,0 @@ -package weather - -import ( - "context" - "fmt" - "math/rand" - "strings" - "time" - - "github.com/tmc/langchaingo/callbacks" -) - -// WeatherTool implements the Tool interface for getting weather information -type WeatherTool struct { - CallbacksHandler callbacks.Handler -} - -func (t WeatherTool) Name() string { - return "weather" -} - -func (t WeatherTool) Description() string { - return "Get current weather conditions for a city. 
Input: city name (e.g., 'San Diego' or 'New York')" -} - -func (t WeatherTool) Call(ctx context.Context, input string) (string, error) { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("weather: %s", input)) - } - - city := strings.TrimSpace(input) - if city == "" { - err := fmt.Errorf("city name is required") - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err - } - - // Initialize random seed based on current time - rand.Seed(time.Now().UnixNano()) - - // Generate more realistic temperature based on city - var temperature int - cityLower := strings.ToLower(city) - - // Assign temperature ranges based on typical climate - if strings.Contains(cityLower, "san diego") || strings.Contains(cityLower, "los angeles") || - strings.Contains(cityLower, "miami") || strings.Contains(cityLower, "phoenix") { - // Warm climate cities: 65-85°F - temperature = rand.Intn(21) + 65 - } else if strings.Contains(cityLower, "seattle") || strings.Contains(cityLower, "portland") || - strings.Contains(cityLower, "chicago") || strings.Contains(cityLower, "new york") { - // Moderate climate cities: 45-75°F - temperature = rand.Intn(31) + 45 - } else if strings.Contains(cityLower, "alaska") || strings.Contains(cityLower, "minneapolis") || - strings.Contains(cityLower, "denver") { - // Cold climate cities: 25-55°F - temperature = rand.Intn(31) + 25 - } else { - // Default range for unknown cities: 50-80°F - temperature = rand.Intn(31) + 50 - } - - // Weather conditions with probabilities - conditions := []string{ - "sunny", "sunny", "sunny", "sunny", // 40% chance - "partly cloudy", "partly cloudy", "partly cloudy", // 30% chance - "cloudy", "cloudy", // 20% chance - "rainy", // 10% chance - } - condition := conditions[rand.Intn(len(conditions))] - - // Add some variety to the response format - responseTemplates := []string{ - "It's %d°F and %s in %s", - "Current weather in %s: %d°F and %s", - "The weather in %s is %d°F with %s skies", - "%s is experiencing %s weather at %d°F", - } - - template := responseTemplates[rand.Intn(len(responseTemplates))] - - var response string - if strings.Contains(template, "It's %d°F and %s in %s") { - response = fmt.Sprintf(template, temperature, condition, city) - } else if strings.Contains(template, "Current weather in %s: %d°F and %s") { - response = fmt.Sprintf(template, city, temperature, condition) - } else if strings.Contains(template, "The weather in %s is %d°F with %s skies") { - response = fmt.Sprintf(template, city, temperature, condition) - } else { - // "%s is experiencing %s weather at %d°F" - response = fmt.Sprintf(template, city, condition, temperature) - } - - // Add some additional details occasionally - if rand.Intn(3) == 0 { - extras := []string{ - "Light breeze from the west.", - "Humidity is comfortable.", - "Perfect day to be outside!", - "Visibility is excellent.", - "No precipitation expected.", - } - if condition == "rainy" { - extras = []string{ - "Light rain expected to continue.", - "Bring an umbrella!", - "Rain should clear up by evening.", - } - } - extra := extras[rand.Intn(len(extras))] - response += ". 
" + extra - } - - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, response) - } - - return response, nil -} diff --git a/cli/azd/pkg/llm/azure_openai.go b/cli/azd/pkg/llm/azure_openai.go index 8abe43f5f25..e1108982377 100644 --- a/cli/azd/pkg/llm/azure_openai.go +++ b/cli/azd/pkg/llm/azure_openai.go @@ -42,6 +42,20 @@ func (p *AzureOpenAiModelProvider) CreateModelContainer(opts ...ModelOption) (*M return nil, err } + // Validate required attributes + requiredFields := map[string]string{ + "token": modelConfig.Token, + "endpoint": modelConfig.Endpoint, + "apiVersion": modelConfig.ApiVersion, + "model": modelConfig.Model, + } + + for fieldName, fieldValue := range requiredFields { + if fieldValue == "" { + return nil, fmt.Errorf("azure openai model configuration is missing required '%s' field", fieldName) + } + } + modelContainer := &ModelContainer{ Type: LlmTypeOpenAIAzure, IsLocal: false, @@ -77,7 +91,7 @@ func (p *AzureOpenAiModelProvider) CreateModelContainer(opts ...ModelOption) (*M } openAiModel.CallbacksHandler = modelContainer.logger - modelContainer.Model = NewModel(openAiModel, callOptions...) + modelContainer.Model = newModelWithCallOptions(openAiModel, callOptions...) return modelContainer, nil } diff --git a/cli/azd/pkg/llm/manager.go b/cli/azd/pkg/llm/manager.go index ede625a970e..2c8b2ec3853 100644 --- a/cli/azd/pkg/llm/manager.go +++ b/cli/azd/pkg/llm/manager.go @@ -131,8 +131,3 @@ func (m Manager) GetDefaultModel(opts ...ModelOption) (*ModelContainer, error) { func (m Manager) GetModel(modelType LlmType, opts ...ModelOption) (*ModelContainer, error) { return m.ModelFactory.CreateModelContainer(modelType, opts...) } - -var availableLlmTypes = []LlmType{ - LlmTypeOpenAIAzure, - LlmTypeOllama, -} diff --git a/cli/azd/pkg/llm/model.go b/cli/azd/pkg/llm/model.go index b00e7730c49..5bfba259675 100644 --- a/cli/azd/pkg/llm/model.go +++ b/cli/azd/pkg/llm/model.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package llm import ( @@ -7,22 +10,26 @@ import ( "github.com/tmc/langchaingo/llms" ) -var _ llms.Model = (*Model)(nil) +var _ llms.Model = (*modelWithCallOptions)(nil) // / Wraps an langchaingo model to allow specifying specific call options at create time -type Model struct { +type modelWithCallOptions struct { model llms.Model options []llms.CallOption } -func NewModel(model llms.Model, options ...llms.CallOption) *Model { - return &Model{ +func newModelWithCallOptions(model llms.Model, options ...llms.CallOption) *modelWithCallOptions { + return &modelWithCallOptions{ model: model, options: options, } } -func (m *Model) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error) { +func (m *modelWithCallOptions) GenerateContent( + ctx context.Context, + messages []llms.MessageContent, + options ...llms.CallOption, +) (*llms.ContentResponse, error) { allOptions := []llms.CallOption{} allOptions = append(allOptions, m.options...) allOptions = append(allOptions, options...) @@ -30,6 +37,6 @@ func (m *Model) GenerateContent(ctx context.Context, messages []llms.MessageCont return m.model.GenerateContent(ctx, messages, allOptions...) 
} -func (m *Model) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) { +func (m *modelWithCallOptions) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) { return "", fmt.Errorf("Deprecated, call GenerateContent") } diff --git a/cli/azd/pkg/llm/model_factory.go b/cli/azd/pkg/llm/model_factory.go index 610c5118dba..3994b1d3e08 100644 --- a/cli/azd/pkg/llm/model_factory.go +++ b/cli/azd/pkg/llm/model_factory.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package llm import ( @@ -21,7 +24,8 @@ func (f *ModelFactory) CreateModelContainer(modelType LlmType, opts ...ModelOpti var modelProvider ModelProvider if err := f.serviceLocator.ResolveNamed(string(modelType), &modelProvider); err != nil { return nil, &internal.ErrorWithSuggestion{ - Err: fmt.Errorf("The model type '%s' is not supported. Support types include: azure, ollama", modelType), + Err: fmt.Errorf("The model type '%s' is not supported. Support types include: azure, ollama", modelType), + //nolint:lll Suggestion: "Use `azd config set` to set the model type and any model specific options, such as the model name or version.", } } diff --git a/cli/azd/pkg/llm/ollama.go b/cli/azd/pkg/llm/ollama.go index 3c58cd970b8..27d6e1e83af 100644 --- a/cli/azd/pkg/llm/ollama.go +++ b/cli/azd/pkg/llm/ollama.go @@ -5,11 +5,15 @@ package llm import ( "github.com/azure/azure-dev/cli/azd/pkg/config" + "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/llms/ollama" ) type OllamaModelConfig struct { - Model string `json:"model"` + Model string `json:"model"` + Version string `json:"version"` + Temperature *float64 `json:"temperature"` + MaxTokens *int `json:"maxTokens"` } type OllamaModelProvider struct { @@ -28,7 +32,7 @@ func (p *OllamaModelProvider) CreateModelContainer(opts ...ModelOption) (*ModelC return nil, err } - defaultLlamaVersion := "llama3" + defaultModel := "llama3" var modelConfig OllamaModelConfig ok, err := userConfig.GetSection("ai.agent.model.ollama", &modelConfig) @@ -37,15 +41,20 @@ func (p *OllamaModelProvider) CreateModelContainer(opts ...ModelOption) (*ModelC } if ok { - defaultLlamaVersion = modelConfig.Model + defaultModel = modelConfig.Model + } + + // Set defaults if not defined + if modelConfig.Version == "" { + modelConfig.Version = "latest" } modelContainer := &ModelContainer{ Type: LlmTypeOllama, IsLocal: true, Metadata: ModelMetadata{ - Name: defaultLlamaVersion, - Version: "latest", + Name: defaultModel, + Version: modelConfig.Version, }, } @@ -53,15 +62,24 @@ func (p *OllamaModelProvider) CreateModelContainer(opts ...ModelOption) (*ModelC opt(modelContainer) } - model, err := ollama.New( - ollama.WithModel(defaultLlamaVersion), + ollamaModel, err := ollama.New( + ollama.WithModel(defaultModel), ) if err != nil { return nil, err } - model.CallbacksHandler = modelContainer.logger - modelContainer.Model = model + callOptions := []llms.CallOption{} + if modelConfig.Temperature != nil { + callOptions = append(callOptions, llms.WithTemperature(*modelConfig.Temperature)) + } + + if modelConfig.MaxTokens != nil { + callOptions = append(callOptions, llms.WithMaxTokens(*modelConfig.MaxTokens)) + } + + ollamaModel.CallbacksHandler = modelContainer.logger + modelContainer.Model = newModelWithCallOptions(ollamaModel, callOptions...) 
return modelContainer, nil } From 21a655969421da8ea11a33236575ba9a19976c42 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Thu, 7 Aug 2025 12:38:36 -0700 Subject: [PATCH 054/116] Fixes more spell linter issues --- cli/azd/.vscode/cspell-azd-dictionary.txt | 8 ++++++++ cli/azd/.vscode/cspell.yaml | 1 + 2 files changed, 9 insertions(+) diff --git a/cli/azd/.vscode/cspell-azd-dictionary.txt b/cli/azd/.vscode/cspell-azd-dictionary.txt index f9777d9041c..e8e5dd9b4bd 100644 --- a/cli/azd/.vscode/cspell-azd-dictionary.txt +++ b/cli/azd/.vscode/cspell-azd-dictionary.txt @@ -17,6 +17,7 @@ appinsightsexporter appinsightsstorage appplatform appservice +appuser arget armapimanagement armappconfiguration @@ -110,6 +111,7 @@ envsubst errcheck errorinfo errorlint +eventhub eventhubs executil flexconsumption @@ -159,6 +161,8 @@ mockarmresources mockazcli mongojs mvnw +myapp +myservice mysqladmin mysqlclient mysqldb @@ -194,8 +198,12 @@ psanford psycopg psycopgbinary pulumi +pycache pyapp pyproject +pytest +PYTHONDONTWRITEBYTECODE +PYTHONUNBUFFERED pyvenv rabbitmq reauthentication diff --git a/cli/azd/.vscode/cspell.yaml b/cli/azd/.vscode/cspell.yaml index da90d181a54..628897e2098 100644 --- a/cli/azd/.vscode/cspell.yaml +++ b/cli/azd/.vscode/cspell.yaml @@ -19,6 +19,7 @@ words: - idxs # Looks like the protogen has a spelling error for panics - pancis + - proto - protobuf - protoc - protoreflect From 8bc28bc0d05d5c38530040212939fd7a16ca78ef Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Thu, 7 Aug 2025 17:42:08 -0700 Subject: [PATCH 055/116] Moves azd commands to MCP server tools --- cli/azd/cmd/mcp.go | 116 +- cli/azd/cmd/root.go | 1 + cli/azd/docs/new-azd-command.md | 1325 +++++++++++++++++ .../tools/azd/azd_architecture_planning.go | 39 - .../tools/azd/azd_azure_yaml_generation.go | 39 - .../agent/tools/azd/azd_discovery_analysis.go | 39 - .../agent/tools/azd/azd_docker_generation.go | 39 - .../tools/azd/azd_iac_generation_rules.go | 39 - .../azd/azd_infrastructure_generation.go | 38 - .../internal/agent/tools/azd/azd_plan_init.go | 39 - .../agent/tools/azd/azd_project_validation.go | 46 - .../agent/tools/azd/azd_yaml_schema.go | 32 - cli/azd/internal/agent/tools/azd/loader.go | 38 - cli/azd/internal/agent/tools/loader.go | 2 - cli/azd/internal/agent/tools/mcp/mcp.json | 9 +- .../mcp/tools/azd_architecture_planning.go | 36 + .../mcp/tools/azd_azure_yaml_generation.go | 36 + .../mcp/tools/azd_discovery_analysis.go | 36 + .../mcp/tools/azd_docker_generation.go | 36 + .../mcp/tools/azd_iac_generation_rules.go | 36 + .../tools/azd_infrastructure_generation.go | 35 + cli/azd/internal/mcp/tools/azd_plan_init.go | 36 + .../mcp/tools/azd_project_validation.go | 36 + cli/azd/internal/mcp/tools/azd_yaml_schema.go | 27 + .../prompts/azd_architecture_planning.md | 0 .../prompts/azd_azure_yaml_generation.md | 0 .../tools}/prompts/azd_discovery_analysis.md | 0 .../tools}/prompts/azd_docker_generation.md | 0 .../prompts/azd_iac_generation_rules.md | 0 .../prompts/azd_infrastructure_generation.md | 0 .../tools}/prompts/azd_plan_init.md | 0 .../tools}/prompts/azd_project_validation.md | 0 .../azd => mcp/tools}/prompts/azure.yaml.json | 0 .../azd => mcp/tools}/prompts/prompts.go | 0 34 files changed, 1713 insertions(+), 442 deletions(-) create mode 100644 cli/azd/docs/new-azd-command.md delete mode 100644 cli/azd/internal/agent/tools/azd/azd_architecture_planning.go delete mode 100644 cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go delete mode 100644 
cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go delete mode 100644 cli/azd/internal/agent/tools/azd/azd_docker_generation.go delete mode 100644 cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go delete mode 100644 cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go delete mode 100644 cli/azd/internal/agent/tools/azd/azd_plan_init.go delete mode 100644 cli/azd/internal/agent/tools/azd/azd_project_validation.go delete mode 100644 cli/azd/internal/agent/tools/azd/azd_yaml_schema.go delete mode 100644 cli/azd/internal/agent/tools/azd/loader.go create mode 100644 cli/azd/internal/mcp/tools/azd_architecture_planning.go create mode 100644 cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go create mode 100644 cli/azd/internal/mcp/tools/azd_discovery_analysis.go create mode 100644 cli/azd/internal/mcp/tools/azd_docker_generation.go create mode 100644 cli/azd/internal/mcp/tools/azd_iac_generation_rules.go create mode 100644 cli/azd/internal/mcp/tools/azd_infrastructure_generation.go create mode 100644 cli/azd/internal/mcp/tools/azd_plan_init.go create mode 100644 cli/azd/internal/mcp/tools/azd_project_validation.go create mode 100644 cli/azd/internal/mcp/tools/azd_yaml_schema.go rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/azd_architecture_planning.md (100%) rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/azd_azure_yaml_generation.md (100%) rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/azd_discovery_analysis.md (100%) rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/azd_docker_generation.md (100%) rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/azd_iac_generation_rules.md (100%) rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/azd_infrastructure_generation.md (100%) rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/azd_plan_init.md (100%) rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/azd_project_validation.md (100%) rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/azure.yaml.json (100%) rename cli/azd/internal/{agent/tools/azd => mcp/tools}/prompts/prompts.go (100%) diff --git a/cli/azd/cmd/mcp.go b/cli/azd/cmd/mcp.go index ca25f413714..b9c7652f3ec 100644 --- a/cli/azd/cmd/mcp.go +++ b/cli/azd/cmd/mcp.go @@ -6,74 +6,101 @@ package cmd import ( "context" "fmt" + "io" "github.com/azure/azure-dev/cli/azd/cmd/actions" "github.com/azure/azure-dev/cli/azd/internal" - "github.com/azure/azure-dev/cli/azd/pkg/alpha" - "github.com/azure/azure-dev/cli/azd/pkg/exec" + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools" "github.com/azure/azure-dev/cli/azd/pkg/input" - "github.com/azure/azure-dev/cli/azd/pkg/llm/tools" + "github.com/azure/azure-dev/cli/azd/pkg/output" "github.com/mark3labs/mcp-go/server" "github.com/spf13/cobra" "github.com/spf13/pflag" ) -func newMcpFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpFlags { - flags := &mcpFlags{} - flags.Bind(cmd.Flags(), global) - - return flags +// Register MCP commands +func mcpActions(root *actions.ActionDescriptor) *actions.ActionDescriptor { + group := root.Add("mcp", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "mcp", + Short: "Manage Model Context Protocol (MCP) server.", + }, + GroupingOptions: actions.CommandGroupOptions{ + RootLevelHelp: actions.CmdGroupAlpha, + }, + }) + + // azd mcp start + group.Add("start", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "start", + Short: "Starts the MCP server.", + Long: 
`Starts the Model Context Protocol (MCP) server. + +This command starts an MCP server that can be used by MCP clients to access +azd functionality through the Model Context Protocol interface.`, + Args: cobra.NoArgs, + }, + OutputFormats: []output.Format{output.NoneFormat}, + DefaultFormat: output.NoneFormat, + ActionResolver: newMcpStartAction, + FlagsResolver: newMcpStartFlags, + }) + + return group } -func newMcpCmd() *cobra.Command { - return &cobra.Command{ - Use: "mcp", - Short: "Run MCP server.", - } +// Flags for MCP start command +type mcpStartFlags struct { + global *internal.GlobalCommandOptions } -type mcpFlags struct { - global *internal.GlobalCommandOptions - internal.EnvFlag +func newMcpStartFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpStartFlags { + flags := &mcpStartFlags{} + flags.Bind(cmd.Flags(), global) + return flags } -func (i *mcpFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { - i.EnvFlag.Bind(local, global) - i.global = global +func (f *mcpStartFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + f.global = global } -type mcpAction struct { - console input.Console - cmdRun exec.CommandRunner - flags *mcpFlags - alphaFeatureManager *alpha.FeatureManager +// Action for MCP start command +type mcpStartAction struct { + flags *mcpStartFlags + console input.Console + writer io.Writer } -func newMcpAction( - cmdRun exec.CommandRunner, +func newMcpStartAction( + flags *mcpStartFlags, console input.Console, - flags *mcpFlags, - alphaFeatureManager *alpha.FeatureManager, + writer io.Writer, ) actions.Action { - return &mcpAction{ - console: console, - cmdRun: cmdRun, - flags: flags, - alphaFeatureManager: alphaFeatureManager, + return &mcpStartAction{ + flags: flags, + console: console, + writer: writer, } } -func (i *mcpAction) Run(ctx context.Context) (*actions.ActionResult, error) { - - // Create a new MCP server +func (a *mcpStartAction) Run(ctx context.Context) (*actions.ActionResult, error) { s := server.NewMCPServer( "AZD MCP Server 🚀", "1.0.0", - server.WithToolCapabilities(false), + server.WithToolCapabilities(true), ) s.EnableSampling() s.AddTools( - tools.NewHello(), + tools.NewAzdPlanInitTool(), + tools.NewAzdDiscoveryAnalysisTool(), + tools.NewAzdArchitecturePlanningTool(), + tools.NewAzdAzureYamlGenerationTool(), + tools.NewAzdDockerGenerationTool(), + tools.NewAzdInfrastructureGenerationTool(), + tools.NewAzdIacGenerationRulesTool(), + tools.NewAzdProjectValidationTool(), + tools.NewAzdYamlSchemaTool(), ) // Start the server using stdio transport @@ -81,16 +108,5 @@ func (i *mcpAction) Run(ctx context.Context) (*actions.ActionResult, error) { fmt.Printf("Server error: %v\n", err) } - return &actions.ActionResult{ - Message: &actions.ResultMessage{}, - }, nil -} - -func getCmdMcpHelpDescription(*cobra.Command) string { - return generateCmdHelpDescription("Starts the azd MCP server.", - []string{}) -} - -func getCmdMcpHelpFooter(*cobra.Command) string { - return generateCmdHelpSamplesBlock(map[string]string{}) + return nil, nil } diff --git a/cli/azd/cmd/root.go b/cli/azd/cmd/root.go index 539a16fe4d8..3ebddebb7af 100644 --- a/cli/azd/cmd/root.go +++ b/cli/azd/cmd/root.go @@ -130,6 +130,7 @@ func NewRootCmd( templatesActions(root) authActions(root) hooksActions(root) + mcpActions(root) root.Add("version", &actions.ActionDescriptorOptions{ Command: &cobra.Command{ diff --git a/cli/azd/docs/new-azd-command.md b/cli/azd/docs/new-azd-command.md new file mode 100644 index 
00000000000..26ecc5d5534 --- /dev/null +++ b/cli/azd/docs/new-azd-command.md @@ -0,0 +1,1325 @@ +# Adding New azd Commands - Comprehensive Guide + +This document provides detailed instructions for adding new commands or command groups to the Azure Developer CLI (azd). It's designed to enable both human developers and LLMs to systematically create new commands that integrate seamlessly with the existing azd architecture. + +## Table of Contents + +1. [Architecture Overview](#architecture-overview) +2. [File Structure and Naming Conventions](#file-structure-and-naming-conventions) +3. [Adding a New Top-Level Command Group](#adding-a-new-top-level-command-group) +4. [Adding Commands to Existing Groups](#adding-commands-to-existing-groups) +5. [Action Implementation Patterns](#action-implementation-patterns) +6. [Flags and Input Handling](#flags-and-input-handling) +7. [Output Formatting](#output-formatting) +8. [Error Handling](#error-handling) +9. [Integration with IoC Container](#integration-with-ioc-container) +10. [Complete Examples](#complete-examples) + +## Architecture Overview + +azd uses a layered architecture built on top of the [Cobra CLI library](https://github.com/spf13/cobra): + +``` +ActionDescriptor Tree → CobraBuilder → Cobra Commands → CLI +``` + +**Key Components:** +- **ActionDescriptor**: Higher-order component that describes commands, flags, middleware, and relationships +- **Action Interface**: Contains the actual command logic (`Run(ctx context.Context) (*ActionResult, error)`) +- **Flags**: Input parameters and options for commands +- **IoC Container**: Dependency injection system for resolving services +- **Output Formatters**: Handle JSON, Table, and None output formats + +## File Structure and Naming Conventions + +### File Organization + +Commands should be organized following these patterns: + +``` +cmd/ +├── root.go # Root command registration +├── <groupname>.go # Top-level command groups (e.g., env.go, extension.go) +├── <command>.go # Single commands (e.g., version.go, monitor.go) +└── actions/ + ├── action.go # Action interface definitions + └── action_descriptor.go # ActionDescriptor framework +``` + +### Naming Conventions + +| Component | Pattern | Example | +|-----------|---------|---------| +| **File Names** | `<command>.go` | `extension.go`, `monitor.go` | +| **Command Groups** | `<groupname>Actions(root *ActionDescriptor)` | `extensionActions()`, `envActions()` | +| **Action Types** | `<command>Action` | `extensionListAction`, `envNewAction` | +| **Flag Types** | `<command>Flags` | `extensionListFlags`, `envNewFlags` | +| **Constructors** | `new<TypeName>` | `newExtensionListAction`, `newExtensionListFlags` | +| **Cobra Commands** | `new<Command>Cmd()` (when needed) | `newMonitorCmd()`, `newEnvListCmd()` | +
+## Adding a New Top-Level Command Group + +### Step 1: Create the Command File + +Create a new file: `cmd/<groupname>.go` + +```go +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License.
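+// +// NOTE: The file below is a minimal sketch of a complete command group with `list` and `create` subcommands. +// Names such as <groupname>, ListFlags/ListAction and the commented-out services.ExampleService dependency are +// illustrative placeholders rather than real azd APIs — substitute your own group name and services resolved +// from the IoC container.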
+ +package cmd + +import ( + "context" + "fmt" + "io" + + "github.com/azure/azure-dev/cli/azd/cmd/actions" + "github.com/azure/azure-dev/cli/azd/internal" + "github.com/azure/azure-dev/cli/azd/pkg/input" + "github.com/azure/azure-dev/cli/azd/pkg/output" + "github.com/azure/azure-dev/cli/azd/pkg/output/ux" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +// Register commands +func Actions(root *actions.ActionDescriptor) *actions.ActionDescriptor { + group := root.Add("", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "", + Aliases: []string{""}, // Optional + Short: "Manage resources.", + }, + GroupingOptions: actions.CommandGroupOptions{ + RootLevelHelp: actions.CmdGroupAzure, // Or appropriate group + }, + }) + + // Add subcommands here + // Example: azd list + group.Add("list", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "list", + Short: "List items.", + }, + OutputFormats: []output.Format{output.JsonFormat, output.TableFormat}, + DefaultFormat: output.TableFormat, + ActionResolver: newListAction, + FlagsResolver: newListFlags, + }) + + // Example: azd create + group.Add("create", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "create ", + Short: "Create a new item.", + Args: cobra.ExactArgs(1), + }, + OutputFormats: []output.Format{output.JsonFormat, output.NoneFormat}, + DefaultFormat: output.NoneFormat, + ActionResolver: newCreateAction, + FlagsResolver: newCreateFlags, + }) + + return group +} + +// Flags for list command +type ListFlags struct { + global *internal.GlobalCommandOptions + filter string + all bool + internal.EnvFlag +} + +func newListFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *ListFlags { + flags := &ListFlags{} + flags.Bind(cmd.Flags(), global) + return flags +} + +func (f *ListFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + local.StringVar(&f.filter, "filter", "", "Filter results by name pattern") + local.BoolVar(&f.all, "all", false, "Show all items including hidden ones") + f.EnvFlag.Bind(local, global) + f.global = global +} + +// Action for list command +type ListAction struct { + flags *ListFlags + formatter output.Formatter + console input.Console + writer io.Writer + // Add your service dependencies here + // exampleService *services.ExampleService +} + +func newListAction( + flags *ListFlags, + formatter output.Formatter, + console input.Console, + writer io.Writer, + // Add your service dependencies here + // exampleService *services.ExampleService, +) actions.Action { + return &ListAction{ + flags: flags, + formatter: formatter, + console: console, + writer: writer, + // exampleService: exampleService, + } +} + +type ListItem struct { + Name string `json:"name"` + Description string `json:"description"` + Status string `json:"status"` + Created string `json:"created"` +} + +func (a *ListAction) Run(ctx context.Context) (*actions.ActionResult, error) { + a.console.MessageUxItem(ctx, &ux.MessageTitle{ + Title: "List items (azd list)", + TitleNote: "Retrieving available items", + }) + + // TODO: Implement actual list logic + // items, err := a.exampleService.List(ctx, a.flags.filter) + // if err != nil { + // return nil, fmt.Errorf("failed to list items: %w", err) + // } + + // Example placeholder data + items := []ListItem{ + { + Name: "example-item", + Description: "An example item", + Status: "active", + Created: "2024-01-01", + }, + } + + if len(items) == 0 { + a.console.Message(ctx, output.WithWarningFormat("No items found.")) + 
return nil, nil + } + + if a.formatter.Kind() == output.TableFormat { + columns := []output.Column{ + { + Heading: "Name", + ValueTemplate: "{{.Name}}", + }, + { + Heading: "Description", + ValueTemplate: "{{.Description}}", + }, + { + Heading: "Status", + ValueTemplate: "{{.Status}}", + }, + { + Heading: "Created", + ValueTemplate: "{{.Created}}", + }, + } + + return nil, a.formatter.Format(items, a.writer, output.TableFormatterOptions{ + Columns: columns, + }) + } + + return nil, a.formatter.Format(items, a.writer, nil) +} + +// Flags for create command +type CreateFlags struct { + global *internal.GlobalCommandOptions + description string + force bool + internal.EnvFlag +} + +func newCreateFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *CreateFlags { + flags := &CreateFlags{} + flags.Bind(cmd.Flags(), global) + return flags +} + +func (f *CreateFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + local.StringVarP(&f.description, "description", "d", "", "Description for the new item") + local.BoolVarP(&f.force, "force", "f", false, "Force creation even if item exists") + f.EnvFlag.Bind(local, global) + f.global = global +} + +// Action for create command +type CreateAction struct { + args []string + flags *CreateFlags + console input.Console + // Add your service dependencies here + // exampleService *services.ExampleService +} + +func newCreateAction( + args []string, + flags *CreateFlags, + console input.Console, + // Add your service dependencies here + // exampleService *services.ExampleService, +) actions.Action { + return &CreateAction{ + args: args, + flags: flags, + console: console, + // exampleService: exampleService, + } +} + +func (a *CreateAction) Run(ctx context.Context) (*actions.ActionResult, error) { + itemName := a.args[0] + + a.console.MessageUxItem(ctx, &ux.MessageTitle{ + Title: "Create item (azd create)", + TitleNote: fmt.Sprintf("Creating new item '%s'", itemName), + }) + + stepMessage := fmt.Sprintf("Creating %s", output.WithHighLightFormat(itemName)) + a.console.ShowSpinner(ctx, stepMessage, input.Step) + + // TODO: Implement actual creation logic + // err := a.exampleService.Create(ctx, itemName, a.flags.description, a.flags.force) + // if err != nil { + // a.console.StopSpinner(ctx, stepMessage, input.StepFailed) + // return nil, fmt.Errorf("failed to create item: %w", err) + // } + + a.console.StopSpinner(ctx, stepMessage, input.StepDone) + + return &actions.ActionResult{ + Message: &actions.ResultMessage{ + Header: fmt.Sprintf("Successfully created item '%s'", itemName), + FollowUp: "Use 'azd list' to see all items.", + }, + }, nil +} +``` + +### Step 2: Register the Command Group + +Add the command group registration to `cmd/root.go`: + +```go +// In the NewRootCmd function, add your command group registration +func NewRootCmd(...) *cobra.Command { + // ... existing code ... + + configActions(root, opts) + envActions(root) + infraActions(root) + pipelineActions(root) + telemetryActions(root) + templatesActions(root) + authActions(root) + hooksActions(root) + Actions(root) // Add this line + + // ... rest of function ... 
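+ +	// NOTE (sketch): the registration call above must match the <groupname>Actions function +	// defined in cmd/<groupname>.go from Step 1. The FlagsResolver and ActionResolver constructors +	// declared on each descriptor are invoked through azd's IoC container at execution time, so no +	// additional wiring is required in this function.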
+} +``` + +## Adding Commands to Existing Groups + +To add a new command to an existing command group (e.g., adding to `azd extension`): + +### Step 1: Add the Command to the Group + +In the existing command file (e.g., `cmd/extension.go`), add to the group registration function: + +```go +func extensionActions(root *actions.ActionDescriptor) *actions.ActionDescriptor { + group := root.Add("extension", &actions.ActionDescriptorOptions{ + // ... existing options ... + }) + + // ... existing commands ... + + // Add your new command + group.Add("validate", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "validate ", + Short: "Validate an extension configuration.", + Args: cobra.ExactArgs(1), + }, + OutputFormats: []output.Format{output.JsonFormat, output.NoneFormat}, + DefaultFormat: output.NoneFormat, + ActionResolver: newExtensionValidateAction, + FlagsResolver: newExtensionValidateFlags, + }) + + return group +} +``` + +### Step 2: Implement Flags and Action + +Add the flags and action implementation to the same file: + +```go +// Flags for the new command +type extensionValidateFlags struct { + strict bool + output string + global *internal.GlobalCommandOptions +} + +func newExtensionValidateFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *extensionValidateFlags { + flags := &extensionValidateFlags{} + flags.Bind(cmd.Flags(), global) + return flags +} + +func (f *extensionValidateFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + local.BoolVar(&f.strict, "strict", false, "Enable strict validation mode") + local.StringVar(&f.output, "output-file", "", "Write validation results to file") + f.global = global +} + +// Action implementation +type extensionValidateAction struct { + args []string + flags *extensionValidateFlags + console input.Console + extensionManager *extensions.Manager // Use existing service dependencies +} + +func newExtensionValidateAction( + args []string, + flags *extensionValidateFlags, + console input.Console, + extensionManager *extensions.Manager, +) actions.Action { + return &extensionValidateAction{ + args: args, + flags: flags, + console: console, + extensionManager: extensionManager, + } +} + +func (a *extensionValidateAction) Run(ctx context.Context) (*actions.ActionResult, error) { + extensionName := a.args[0] + + a.console.MessageUxItem(ctx, &ux.MessageTitle{ + Title: "Validate extension (azd extension validate)", + TitleNote: fmt.Sprintf("Validating extension '%s'", extensionName), + }) + + stepMessage := fmt.Sprintf("Validating %s", output.WithHighLightFormat(extensionName)) + a.console.ShowSpinner(ctx, stepMessage, input.Step) + + // TODO: Implement validation logic + // validationResult, err := a.extensionManager.Validate(ctx, extensionName, a.flags.strict) + // if err != nil { + // a.console.StopSpinner(ctx, stepMessage, input.StepFailed) + // return nil, fmt.Errorf("validation failed: %w", err) + // } + + a.console.StopSpinner(ctx, stepMessage, input.StepDone) + + return &actions.ActionResult{ + Message: &actions.ResultMessage{ + Header: fmt.Sprintf("Extension '%s' validation completed successfully", extensionName), + FollowUp: "Extension is ready for use.", + }, + }, nil +} +``` + +## Action Implementation Patterns + +### Basic Action Structure + +```go +type myCommandAction struct { + // Dependencies + console input.Console + flags *myCommandFlags + + // Services (injected via IoC) + someService *services.SomeService + formatter output.Formatter + writer io.Writer +} + +func 
newMyCommandAction( + console input.Console, + flags *myCommandFlags, + someService *services.SomeService, + formatter output.Formatter, + writer io.Writer, +) actions.Action { + return &myCommandAction{ + console: console, + flags: flags, + someService: someService, + formatter: formatter, + writer: writer, + } +} + +func (a *myCommandAction) Run(ctx context.Context) (*actions.ActionResult, error) { + // 1. Display command start message + a.console.MessageUxItem(ctx, &ux.MessageTitle{ + Title: "My Command (azd mycommand)", + TitleNote: "Performing operation", + }) + + // 2. Show progress for long operations + stepMessage := "Processing request" + a.console.ShowSpinner(ctx, stepMessage, input.Step) + + // 3. Perform the actual work + result, err := a.someService.DoWork(ctx, a.flags.someFlag) + if err != nil { + a.console.StopSpinner(ctx, stepMessage, input.StepFailed) + return nil, fmt.Errorf("operation failed: %w", err) + } + + a.console.StopSpinner(ctx, stepMessage, input.StepDone) + + // 4. Format and display results + if a.formatter.Kind() != output.NoneFormat { + if err := a.formatter.Format(result, a.writer, nil); err != nil { + return nil, fmt.Errorf("failed to format output: %w", err) + } + } + + // 5. Return success result + return &actions.ActionResult{ + Message: &actions.ResultMessage{ + Header: "Operation completed successfully", + FollowUp: "Next steps: run 'azd mycommand list' to see results", + }, + }, nil +} +``` + +### Action with Complex Output Formatting + +```go +func (a *myListAction) Run(ctx context.Context) (*actions.ActionResult, error) { + items, err := a.service.List(ctx) + if err != nil { + return nil, fmt.Errorf("failed to retrieve items: %w", err) + } + + // Handle empty results + if len(items) == 0 { + a.console.Message(ctx, output.WithWarningFormat("No items found.")) + a.console.Message(ctx, fmt.Sprintf( + "Create one with %s", + output.WithHighLightFormat("azd mycommand create "), + )) + return nil, nil + } + + // Format output based on format type + switch a.formatter.Kind() { + case output.TableFormat: + columns := []output.Column{ + { + Heading: "Name", + ValueTemplate: "{{.Name}}", + }, + { + Heading: "Status", + ValueTemplate: "{{.Status}}", + }, + { + Heading: "Created", + ValueTemplate: "{{.CreatedAt | date}}", + }, + } + + return nil, a.formatter.Format(items, a.writer, output.TableFormatterOptions{ + Columns: columns, + }) + default: + return nil, a.formatter.Format(items, a.writer, nil) + } +} +``` + +## Flags and Input Handling + +### Standard Flag Patterns + +```go +type myCommandFlags struct { + // Basic types + stringFlag string + intFlag int + boolFlag bool + sliceFlag []string + + // Common azd patterns + subscription string + location string + environment string + + // Always include global options + global *internal.GlobalCommandOptions + + // Include environment flag for env-aware commands + internal.EnvFlag +} + +func newMyCommandFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *myCommandFlags { + flags := &myCommandFlags{} + flags.Bind(cmd.Flags(), global) + return flags +} + +func (f *myCommandFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + // String flags + local.StringVarP(&f.stringFlag, "name", "n", "", "Name of the resource") + local.StringVar(&f.stringFlag, "long-flag", "default", "Description of flag") + + // Boolean flags + local.BoolVar(&f.boolFlag, "force", false, "Force the operation") + local.BoolVarP(&f.boolFlag, "verbose", "v", false, "Enable verbose output") + + // Integer 
flags + local.IntVar(&f.intFlag, "timeout", 300, "Timeout in seconds") + + // String slice flags + local.StringSliceVar(&f.sliceFlag, "tags", nil, "Tags to apply (can specify multiple)") + + // Common Azure flags + local.StringVarP(&f.subscription, "subscription", "s", "", "Azure subscription ID") + local.StringVarP(&f.location, "location", "l", "", "Azure location") + + // Bind environment flag for env-aware commands + f.EnvFlag.Bind(local, global) + + // Always set global + f.global = global +} +``` + +### Flag Validation + +```go +func (a *myCommandAction) Run(ctx context.Context) (*actions.ActionResult, error) { + // Validate required flags + if a.flags.stringFlag == "" { + return nil, fmt.Errorf("--name flag is required") + } + + // Validate flag combinations + if a.flags.force && a.flags.interactive { + return nil, fmt.Errorf("cannot use --force and --interactive together") + } + + // Validate enum values + validValues := []string{"dev", "test", "prod"} + if !slices.Contains(validValues, a.flags.environment) { + return nil, fmt.Errorf("invalid environment '%s', must be one of: %s", + a.flags.environment, strings.Join(validValues, ", ")) + } + + // Continue with command logic... +} +``` + +## Output Formatting + +### Standard Output Formats + +```go +// Define your output model +type MyItemOutput struct { + Name string `json:"name"` + Status string `json:"status"` + CreatedAt time.Time `json:"createdAt"` + Description string `json:"description,omitempty"` +} + +// Configure output formats in ActionDescriptorOptions +&actions.ActionDescriptorOptions{ + OutputFormats: []output.Format{ + output.JsonFormat, // --output json + output.TableFormat, // --output table (default) + output.NoneFormat, // --output none + }, + DefaultFormat: output.TableFormat, + // ... 
other options +} + +// Handle formatting in your action +func (a *myAction) Run(ctx context.Context) (*actions.ActionResult, error) { + data := getMyData() // Your data retrieval logic + + switch a.formatter.Kind() { + case output.TableFormat: + columns := []output.Column{ + { + Heading: "Name", + ValueTemplate: "{{.Name}}", + }, + { + Heading: "Status", + ValueTemplate: "{{.Status}}", + Width: 10, + }, + { + Heading: "Created", + ValueTemplate: "{{.CreatedAt | date}}", + }, + } + + return nil, a.formatter.Format(data, a.writer, output.TableFormatterOptions{ + Columns: columns, + }) + + case output.NoneFormat: + // Custom formatting for none output + for _, item := range data { + fmt.Fprintf(a.writer, "%s (%s)\n", item.Name, item.Status) + } + return nil, nil + + default: // JsonFormat and others + return nil, a.formatter.Format(data, a.writer, nil) + } +} +``` + +### Custom Display Methods + +```go +type MyDetailedOutput struct { + Name string + Description string + Properties map[string]string +} + +// Implement custom display for complex output +func (o *MyDetailedOutput) Display(writer io.Writer) error { + tabs := tabwriter.NewWriter( + writer, + 0, + output.TableTabSize, + 1, + output.TablePadCharacter, + output.TableFlags) + + text := [][]string{ + {"Name", ":", o.Name}, + {"Description", ":", o.Description}, + {"", "", ""}, + {"Properties", ":", ""}, + } + + for key, value := range o.Properties { + text = append(text, []string{" " + key, ":", value}) + } + + for _, line := range text { + _, err := tabs.Write([]byte(strings.Join(line, "\t") + "\n")) + if err != nil { + return err + } + } + + return tabs.Flush() +} + +// Use in action +func (a *myShowAction) Run(ctx context.Context) (*actions.ActionResult, error) { + data := getDetailedData() + + if a.formatter.Kind() == output.NoneFormat { + return nil, data.Display(a.writer) + } + + return nil, a.formatter.Format(data, a.writer, nil) +} +``` + +## Error Handling + +### Standard Error Patterns + +```go +func (a *myAction) Run(ctx context.Context) (*actions.ActionResult, error) { + // Service/API errors + result, err := a.service.DoSomething(ctx) + if err != nil { + // Wrap with context + return nil, fmt.Errorf("failed to perform operation: %w", err) + } + + // Validation errors + if result == nil { + return nil, fmt.Errorf("operation returned no results") + } + + // Business logic errors + if !result.IsValid { + return nil, fmt.Errorf("operation completed but result is invalid: %s", result.ValidationMessage) + } + + // Stop spinner on errors + stepMessage := "Processing" + a.console.ShowSpinner(ctx, stepMessage, input.Step) + + _, err = a.service.Process(ctx) + if err != nil { + a.console.StopSpinner(ctx, stepMessage, input.StepFailed) + return nil, fmt.Errorf("processing failed: %w", err) + } + + a.console.StopSpinner(ctx, stepMessage, input.StepDone) + + return &actions.ActionResult{ + Message: &actions.ResultMessage{ + Header: "Operation completed successfully", + }, + }, nil +} +``` + +### Error Handling with User Guidance + +```go +func (a *myAction) Run(ctx context.Context) (*actions.ActionResult, error) { + // Check prerequisites + if !a.checkPrerequisites(ctx) { + return nil, fmt.Errorf("prerequisites not met. Run 'azd auth login' first") + } + + // Handle specific error types + err := a.service.Operate(ctx) + if err != nil { + var notFoundErr *services.NotFoundError + var authErr *services.AuthenticationError + + switch { + case errors.As(err, &notFoundErr): + return nil, fmt.Errorf("resource not found: %s. 
Use 'azd mycommand list' to see available resources", notFoundErr.ResourceName) + + case errors.As(err, &authErr): + return nil, fmt.Errorf("authentication failed: %w. Run 'azd auth login' to re-authenticate", err) + + default: + return nil, fmt.Errorf("operation failed: %w", err) + } + } + + return &actions.ActionResult{ + Message: &actions.ResultMessage{ + Header: "Operation completed", + }, + }, nil +} +``` + +## Integration with IoC Container + +### Service Registration + +When your command requires new services, register them in the appropriate place: + +```go +// In pkg/ioc/container.go or appropriate service registration location +func RegisterMyServices(container *ioc.Container) { + // Register your service + ioc.RegisterSingleton(container, func() *services.MyService { + return services.NewMyService() + }) + + // Register service with dependencies + ioc.RegisterSingleton(container, func( + httpClient *http.Client, + config *config.Config, + ) *services.MyComplexService { + return services.NewMyComplexService(httpClient, config) + }) +} +``` + +### Using Services in Actions + +```go +// Your action constructor automatically receives services via DI +func newMyCommandAction( + flags *myCommandFlags, + console input.Console, + formatter output.Formatter, + writer io.Writer, + // Your custom services + myService *services.MyService, + azureService *azure.AzureService, + // Standard azd services + azdContext *azdcontext.AzdContext, + env *environment.Environment, +) actions.Action { + return &myCommandAction{ + flags: flags, + console: console, + formatter: formatter, + writer: writer, + myService: myService, + azureService: azureService, + azdContext: azdContext, + env: env, + } +} +``` + +### Common Service Dependencies + +```go +// Commonly used services in azd commands: + +// Environment and context +azdContext *azdcontext.AzdContext +env *environment.Environment + +// Azure services +accountManager account.Manager +subscriptionResolver account.SubscriptionTenantResolver +resourceManager infra.ResourceManager +resourceService *azapi.ResourceService + +// User interaction +console input.Console +formatter output.Formatter +writer io.Writer + +// Configuration +config *config.Config +alphaFeatureManager *alpha.FeatureManager + +// Project and templates +projectManager *project.ProjectManager +templateManager *templates.TemplateManager +``` + +## Complete Examples + +### Example 1: Simple Single Command + +File: `cmd/validate.go` + +```go +package cmd + +import ( + "context" + "fmt" + + "github.com/azure/azure-dev/cli/azd/cmd/actions" + "github.com/azure/azure-dev/cli/azd/internal" + "github.com/azure/azure-dev/cli/azd/pkg/input" + "github.com/azure/azure-dev/cli/azd/pkg/output" + "github.com/azure/azure-dev/cli/azd/pkg/project" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +// Add to root.go registration +// root.Add("validate", &actions.ActionDescriptorOptions{ +// Command: newValidateCmd(), +// ActionResolver: newValidateAction, +// FlagsResolver: newValidateFlags, +// OutputFormats: []output.Format{output.JsonFormat, output.NoneFormat}, +// DefaultFormat: output.NoneFormat, +// GroupingOptions: actions.CommandGroupOptions{ +// RootLevelHelp: actions.CmdGroupManage, +// }, +// }) + +func newValidateCmd() *cobra.Command { + return &cobra.Command{ + Use: "validate", + Short: "Validate the current project configuration.", + } +} + +type validateFlags struct { + strict bool + global *internal.GlobalCommandOptions + internal.EnvFlag +} + +func newValidateFlags(cmd 
*cobra.Command, global *internal.GlobalCommandOptions) *validateFlags { + flags := &validateFlags{} + flags.Bind(cmd.Flags(), global) + return flags +} + +func (f *validateFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + local.BoolVar(&f.strict, "strict", false, "Enable strict validation mode") + f.EnvFlag.Bind(local, global) + f.global = global +} + +type validateAction struct { + flags *validateFlags + console input.Console + projectManager *project.ProjectManager +} + +func newValidateAction( + flags *validateFlags, + console input.Console, + projectManager *project.ProjectManager, +) actions.Action { + return &validateAction{ + flags: flags, + console: console, + projectManager: projectManager, + } +} + +func (a *validateAction) Run(ctx context.Context) (*actions.ActionResult, error) { + a.console.Message(ctx, "Validating project configuration...") + + // TODO: Implement validation logic + // isValid, errors := a.projectManager.Validate(ctx, a.flags.strict) + // if !isValid { + // return nil, fmt.Errorf("validation failed: %v", errors) + // } + + return &actions.ActionResult{ + Message: &actions.ResultMessage{ + Header: "Project validation completed successfully", + FollowUp: "Your project is ready for deployment.", + }, + }, nil +} +``` + +### Example 2: Command Group with Multiple Subcommands + +File: `cmd/resource.go` + +```go +package cmd + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/azure/azure-dev/cli/azd/cmd/actions" + "github.com/azure/azure-dev/cli/azd/internal" + "github.com/azure/azure-dev/cli/azd/pkg/input" + "github.com/azure/azure-dev/cli/azd/pkg/output" + "github.com/azure/azure-dev/cli/azd/pkg/output/ux" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +// Add to root.go: resourceActions(root) +func resourceActions(root *actions.ActionDescriptor) *actions.ActionDescriptor { + group := root.Add("resource", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "resource", + Short: "Manage Azure resources for the current project.", + }, + GroupingOptions: actions.CommandGroupOptions{ + RootLevelHelp: actions.CmdGroupAzure, + }, + }) + + group.Add("list", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "list", + Short: "List Azure resources for the current project.", + }, + OutputFormats: []output.Format{output.JsonFormat, output.TableFormat}, + DefaultFormat: output.TableFormat, + ActionResolver: newResourceListAction, + FlagsResolver: newResourceListFlags, + }) + + group.Add("show", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "show ", + Short: "Show details for a specific Azure resource.", + Args: cobra.ExactArgs(1), + }, + OutputFormats: []output.Format{output.JsonFormat, output.NoneFormat}, + DefaultFormat: output.NoneFormat, + ActionResolver: newResourceShowAction, + }) + + group.Add("delete", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "delete ", + Short: "Delete a specific Azure resource.", + Args: cobra.ExactArgs(1), + }, + OutputFormats: []output.Format{output.JsonFormat, output.NoneFormat}, + DefaultFormat: output.NoneFormat, + ActionResolver: newResourceDeleteAction, + FlagsResolver: newResourceDeleteFlags, + }) + + return group +} + +// List command implementation +type resourceListFlags struct { + resourceType string + location string + global *internal.GlobalCommandOptions + internal.EnvFlag +} + +func newResourceListFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *resourceListFlags { + flags := 
&resourceListFlags{} + flags.Bind(cmd.Flags(), global) + return flags +} + +func (f *resourceListFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + local.StringVar(&f.resourceType, "type", "", "Filter by resource type") + local.StringVar(&f.location, "location", "", "Filter by location") + f.EnvFlag.Bind(local, global) + f.global = global +} + +type resourceListAction struct { + flags *resourceListFlags + formatter output.Formatter + console input.Console + writer io.Writer + // TODO: Add actual Azure resource service + // resourceService *azure.ResourceService +} + +func newResourceListAction( + flags *resourceListFlags, + formatter output.Formatter, + console input.Console, + writer io.Writer, +) actions.Action { + return &resourceListAction{ + flags: flags, + formatter: formatter, + console: console, + writer: writer, + } +} + +type resourceInfo struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Location string `json:"location"` + Status string `json:"status"` +} + +func (a *resourceListAction) Run(ctx context.Context) (*actions.ActionResult, error) { + a.console.MessageUxItem(ctx, &ux.MessageTitle{ + Title: "List Azure resources (azd resource list)", + TitleNote: "Retrieving resources for current project", + }) + + // TODO: Implement actual resource listing + // resources, err := a.resourceService.ListForProject(ctx, a.flags.resourceType, a.flags.location) + // if err != nil { + // return nil, fmt.Errorf("failed to list resources: %w", err) + // } + + // Placeholder data + resources := []resourceInfo{ + { + ID: "/subscriptions/xxx/resourceGroups/rg-example/providers/Microsoft.Web/sites/example-app", + Name: "example-app", + Type: "Microsoft.Web/sites", + Location: "eastus", + Status: "Running", + }, + } + + if len(resources) == 0 { + a.console.Message(ctx, output.WithWarningFormat("No resources found.")) + return nil, nil + } + + if a.formatter.Kind() == output.TableFormat { + columns := []output.Column{ + { + Heading: "Name", + ValueTemplate: "{{.Name}}", + }, + { + Heading: "Type", + ValueTemplate: "{{.Type}}", + }, + { + Heading: "Location", + ValueTemplate: "{{.Location}}", + }, + { + Heading: "Status", + ValueTemplate: "{{.Status}}", + }, + } + + return nil, a.formatter.Format(resources, a.writer, output.TableFormatterOptions{ + Columns: columns, + }) + } + + return nil, a.formatter.Format(resources, a.writer, nil) +} + +// Show command implementation +type resourceShowAction struct { + args []string + formatter output.Formatter + console input.Console + writer io.Writer +} + +func newResourceShowAction( + args []string, + formatter output.Formatter, + console input.Console, + writer io.Writer, +) actions.Action { + return &resourceShowAction{ + args: args, + formatter: formatter, + console: console, + writer: writer, + } +} + +func (a *resourceShowAction) Run(ctx context.Context) (*actions.ActionResult, error) { + resourceID := a.args[0] + + a.console.MessageUxItem(ctx, &ux.MessageTitle{ + Title: "Show Azure resource (azd resource show)", + TitleNote: fmt.Sprintf("Retrieving details for '%s'", resourceID), + }) + + // TODO: Implement actual resource details retrieval + // resource, err := a.resourceService.Get(ctx, resourceID) + // if err != nil { + // return nil, fmt.Errorf("failed to get resource details: %w", err) + // } + + // For now, just show that the command structure works + a.console.Message(ctx, fmt.Sprintf("Resource ID: %s", resourceID)) + a.console.Message(ctx, "TODO: Implement resource details 
display") + + return nil, nil +} + +// Delete command implementation +type resourceDeleteFlags struct { + force bool + global *internal.GlobalCommandOptions +} + +func newResourceDeleteFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *resourceDeleteFlags { + flags := &resourceDeleteFlags{} + flags.Bind(cmd.Flags(), global) + return flags +} + +func (f *resourceDeleteFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + local.BoolVarP(&f.force, "force", "f", false, "Force deletion without confirmation") + f.global = global +} + +type resourceDeleteAction struct { + args []string + flags *resourceDeleteFlags + console input.Console +} + +func newResourceDeleteAction( + args []string, + flags *resourceDeleteFlags, + console input.Console, +) actions.Action { + return &resourceDeleteAction{ + args: args, + flags: flags, + console: console, + } +} + +func (a *resourceDeleteAction) Run(ctx context.Context) (*actions.ActionResult, error) { + resourceID := a.args[0] + + a.console.MessageUxItem(ctx, &ux.MessageTitle{ + Title: "Delete Azure resource (azd resource delete)", + TitleNote: fmt.Sprintf("Deleting resource '%s'", resourceID), + }) + + if !a.flags.force { + confirmed, err := a.console.Confirm(ctx, input.ConsoleOptions{ + Message: fmt.Sprintf("Are you sure you want to delete '%s'?", resourceID), + }) + if err != nil { + return nil, fmt.Errorf("failed to get confirmation: %w", err) + } + if !confirmed { + a.console.Message(ctx, "Deletion cancelled.") + return nil, nil + } + } + + stepMessage := fmt.Sprintf("Deleting %s", output.WithHighLightFormat(resourceID)) + a.console.ShowSpinner(ctx, stepMessage, input.Step) + + // TODO: Implement actual resource deletion + // err := a.resourceService.Delete(ctx, resourceID) + // if err != nil { + // a.console.StopSpinner(ctx, stepMessage, input.StepFailed) + // return nil, fmt.Errorf("failed to delete resource: %w", err) + // } + + // Simulate work + time.Sleep(1 * time.Second) + + a.console.StopSpinner(ctx, stepMessage, input.StepDone) + + return &actions.ActionResult{ + Message: &actions.ResultMessage{ + Header: fmt.Sprintf("Successfully deleted resource '%s'", resourceID), + FollowUp: "Use 'azd resource list' to see remaining resources.", + }, + }, nil +} +``` + +## Summary + +This guide provides a complete framework for adding new commands to azd. The key steps are: + +1. **Choose the pattern**: Single command or command group +2. **Create the file**: Follow naming conventions in `cmd/` directory +3. **Define the structure**: ActionDescriptor → Flags → Action +4. **Implement the logic**: Start with TODO comments for actual functionality +5. **Register the command**: Add to `root.go` or parent command group +6. **Handle dependencies**: Use IoC container for service injection +7. **Format output**: Support JSON, Table, and None formats appropriately +8. **Handle errors**: Provide clear error messages with guidance + +The generated command shells will compile and provide the basic CLI structure, allowing developers to focus on implementing the actual business logic within the marked TODO sections. diff --git a/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go b/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go deleted file mode 100644 index aa6ea409a26..00000000000 --- a/cli/azd/internal/agent/tools/azd/azd_architecture_planning.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package azd - -import ( - "context" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" - "github.com/tmc/langchaingo/tools" -) - -var _ tools.Tool = &AzdArchitecturePlanningTool{} - -type AzdArchitecturePlanningTool struct { -} - -func (t *AzdArchitecturePlanningTool) Name() string { - return "azd_architecture_planning" -} - -func (t *AzdArchitecturePlanningTool) Description() string { - return `Returns instructions for selecting appropriate Azure services for discovered application components and -designing infrastructure architecture. - -The LLM agent should execute these instructions using available tools. - -Use this tool when: -- Discovery analysis has been completed and azd-arch-plan.md exists -- Application components have been identified and classified -- Need to map components to Azure hosting services -- Ready to plan containerization and database strategies - -Input: "./azd-arch-plan.md"` -} - -func (t *AzdArchitecturePlanningTool) Call(ctx context.Context, input string) (string, error) { - return prompts.AzdArchitecturePlanningPrompt, nil -} diff --git a/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go b/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go deleted file mode 100644 index 2590eb2c0ba..00000000000 --- a/cli/azd/internal/agent/tools/azd/azd_azure_yaml_generation.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azd - -import ( - "context" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" - "github.com/tmc/langchaingo/tools" -) - -var _ tools.Tool = &AzdAzureYamlGenerationTool{} - -type AzdAzureYamlGenerationTool struct { -} - -func (t *AzdAzureYamlGenerationTool) Name() string { - return "azd_azure_yaml_generation" -} - -func (t *AzdAzureYamlGenerationTool) Description() string { - return `Returns instructions for generating the azure.yaml configuration file with proper service hosting, -build, and deployment settings for AZD projects. - -The LLM agent should execute these instructions using available tools. - -Use this tool when: -- Architecture planning has been completed and Azure services selected -- Need to create or update azure.yaml configuration file -- Services have been mapped to Azure hosting platforms -- Ready to define build and deployment configurations - -Input: "./azd-arch-plan.md"` -} - -func (t *AzdAzureYamlGenerationTool) Call(ctx context.Context, input string) (string, error) { - return prompts.AzdAzureYamlGenerationPrompt, nil -} diff --git a/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go b/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go deleted file mode 100644 index f8b13425eea..00000000000 --- a/cli/azd/internal/agent/tools/azd/azd_discovery_analysis.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azd - -import ( - "context" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" - "github.com/tmc/langchaingo/tools" -) - -var _ tools.Tool = &AzdDiscoveryAnalysisTool{} - -type AzdDiscoveryAnalysisTool struct { -} - -func (t *AzdDiscoveryAnalysisTool) Name() string { - return "azd_discovery_analysis" -} - -func (t *AzdDiscoveryAnalysisTool) Description() string { - return `Returns instructions for performing comprehensive discovery and analysis of application components -to prepare for Azure Developer CLI (AZD) initialization. 
- -The LLM agent should execute these instructions using available tools. - -Use this tool when: -- Starting Phase 1 of AZD migration process -- Need to identify all application components and dependencies -- Codebase analysis required before architecture planning -- azd-arch-plan.md does not exist or needs updating - -Input: "./azd-arch-plan.md"` -} - -func (t *AzdDiscoveryAnalysisTool) Call(ctx context.Context, input string) (string, error) { - return prompts.AzdDiscoveryAnalysisPrompt, nil -} diff --git a/cli/azd/internal/agent/tools/azd/azd_docker_generation.go b/cli/azd/internal/agent/tools/azd/azd_docker_generation.go deleted file mode 100644 index 57c03e2d807..00000000000 --- a/cli/azd/internal/agent/tools/azd/azd_docker_generation.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azd - -import ( - "context" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" - "github.com/tmc/langchaingo/tools" -) - -var _ tools.Tool = &AzdDockerGenerationTool{} - -type AzdDockerGenerationTool struct { -} - -func (t *AzdDockerGenerationTool) Name() string { - return "azd_docker_generation" -} - -func (t *AzdDockerGenerationTool) Description() string { - return `Returns instructions for generating optimized Dockerfiles and container configurations for containerizable -services in AZD projects. - -The LLM agent should execute these instructions using available tools. - -Use this tool when: -- Architecture planning identified services requiring containerization -- azd-arch-plan.md shows Container Apps or AKS as selected hosting platform -- Need Dockerfiles for microservices, APIs, or containerized web applications -- Ready to implement containerization strategy - -Input: "./azd-arch-plan.md"` -} - -func (t *AzdDockerGenerationTool) Call(ctx context.Context, input string) (string, error) { - return prompts.AzdDockerGenerationPrompt, nil -} diff --git a/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go b/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go deleted file mode 100644 index d55f903e4d2..00000000000 --- a/cli/azd/internal/agent/tools/azd/azd_iac_generation_rules.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azd - -import ( - "context" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" - "github.com/tmc/langchaingo/tools" -) - -var _ tools.Tool = &AzdIacGenerationRulesTool{} - -type AzdIacGenerationRulesTool struct { -} - -func (t *AzdIacGenerationRulesTool) Name() string { - return "azd_iac_generation_rules" -} - -func (t *AzdIacGenerationRulesTool) Description() string { - return `Returns comprehensive rules and guidelines for generating Bicep Infrastructure as Code files and modules -for AZD projects. - -The LLM agent should reference these rules when generating infrastructure code. 
- -Use this tool when: -- Generating any Bicep infrastructure templates for AZD projects -- Need compliance rules and naming conventions for Azure resources -- Creating modular, reusable Bicep files -- Ensuring security and operational best practices - -Input: "./azd-arch-plan.md"` -} - -func (t *AzdIacGenerationRulesTool) Call(ctx context.Context, input string) (string, error) { - return prompts.AzdIacRulesPrompt, nil -} diff --git a/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go b/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go deleted file mode 100644 index 3c3e7cf52b9..00000000000 --- a/cli/azd/internal/agent/tools/azd/azd_infrastructure_generation.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azd - -import ( - "context" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" - "github.com/tmc/langchaingo/tools" -) - -var _ tools.Tool = &AzdInfrastructureGenerationTool{} - -type AzdInfrastructureGenerationTool struct { -} - -func (t *AzdInfrastructureGenerationTool) Name() string { - return "azd_infrastructure_generation" -} - -func (t *AzdInfrastructureGenerationTool) Description() string { - return `Returns instructions for generating modular Bicep infrastructure templates following Azure security and -operational best practices for AZD projects. - -The LLM agent should execute these instructions using available tools. - -Use this tool when: -- Architecture planning completed with Azure services selected -- Need to create Bicep infrastructure templates -- Ready to implement infrastructure as code for deployment - -Input: "./azd-arch-plan.md"` -} - -func (t *AzdInfrastructureGenerationTool) Call(ctx context.Context, input string) (string, error) { - return prompts.AzdInfrastructureGenerationPrompt, nil -} diff --git a/cli/azd/internal/agent/tools/azd/azd_plan_init.go b/cli/azd/internal/agent/tools/azd/azd_plan_init.go deleted file mode 100644 index a6eb422ab78..00000000000 --- a/cli/azd/internal/agent/tools/azd/azd_plan_init.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azd - -import ( - "context" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" - "github.com/tmc/langchaingo/tools" -) - -var _ tools.Tool = &AzdPlanInitTool{} - -type AzdPlanInitTool struct { -} - -func (t *AzdPlanInitTool) Name() string { - return "azd_plan_init" -} - -func (t *AzdPlanInitTool) Description() string { - return `Returns instructions for orchestrating complete AZD application initialization using structured phases -with specialized tools. - -The LLM agent should execute these instructions using available tools. 
- -Use this tool when: -- Starting new AZD project initialization or migration -- Need structured approach to transform application into AZD-compatible project -- Want to ensure proper sequencing of discovery, planning, and file generation -- Require complete project orchestration guidance - -Input: "./azd-arch-plan.md"` -} - -func (t *AzdPlanInitTool) Call(ctx context.Context, input string) (string, error) { - return prompts.AzdPlanInitPrompt, nil -} diff --git a/cli/azd/internal/agent/tools/azd/azd_project_validation.go b/cli/azd/internal/agent/tools/azd/azd_project_validation.go deleted file mode 100644 index 7645fac9ca0..00000000000 --- a/cli/azd/internal/agent/tools/azd/azd_project_validation.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azd - -import ( - "context" - _ "embed" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" - "github.com/tmc/langchaingo/tools" -) - -// AzdProjectValidationTool validates an AZD project by running comprehensive checks on all components -// including azure.yaml schema validation, Bicep template validation, environment setup, packaging, -// and deployment preview. -type AzdProjectValidationTool struct{} - -// Name returns the name of the tool. -func (t *AzdProjectValidationTool) Name() string { - return "azd_project_validation" -} - -// Description returns the description of the tool. -func (t *AzdProjectValidationTool) Description() string { - return `Returns instructions for validating AZD project by running comprehensive checks on azure.yaml schema, -Bicep templates, environment setup, packaging, and deployment preview. - -The LLM agent should execute these instructions using available tools. - -Use this tool when: -- All AZD configuration files have been generated -- Ready to validate complete project before deployment -- Need to ensure azure.yaml, Bicep templates, and environment are properly configured -- Final validation step before running azd up - -Input: "./azd-arch-plan.md"` -} - -// Call executes the tool with the given arguments. -func (t *AzdProjectValidationTool) Call(ctx context.Context, args string) (string, error) { - return prompts.AzdProjectValidationPrompt, nil -} - -// Ensure AzdProjectValidationTool implements the Tool interface. -var _ tools.Tool = (*AzdProjectValidationTool)(nil) diff --git a/cli/azd/internal/agent/tools/azd/azd_yaml_schema.go b/cli/azd/internal/agent/tools/azd/azd_yaml_schema.go deleted file mode 100644 index 678d268d7ea..00000000000 --- a/cli/azd/internal/agent/tools/azd/azd_yaml_schema.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azd - -import ( - "context" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd/prompts" - "github.com/tmc/langchaingo/tools" -) - -var _ tools.Tool = &AzdYamlSchemaTool{} - -type AzdYamlSchemaTool struct { -} - -func (t *AzdYamlSchemaTool) Name() string { - return "azd_yaml_schema" -} - -func (t *AzdYamlSchemaTool) Description() string { - return ` - Gets the Azure YAML JSON schema file specification and structure for azure.yaml configuration files used in AZD. 
- - Input: - ` -} - -func (t *AzdYamlSchemaTool) Call(ctx context.Context, input string) (string, error) { - return prompts.AzdYamlSchemaPrompt, nil -} diff --git a/cli/azd/internal/agent/tools/azd/loader.go b/cli/azd/internal/agent/tools/azd/loader.go deleted file mode 100644 index f55b9c93e77..00000000000 --- a/cli/azd/internal/agent/tools/azd/loader.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azd - -import ( - "github.com/tmc/langchaingo/tools" -) - -// AzdToolsLoader loads AZD-related tools -type AzdToolsLoader struct{} - -func NewAzdToolsLoader() *AzdToolsLoader { - return &AzdToolsLoader{} -} - -func (l *AzdToolsLoader) LoadTools() ([]tools.Tool, error) { - return []tools.Tool{ - // Original orchestrating tool - &AzdPlanInitTool{}, - - // Core workflow tools (use in sequence) - &AzdDiscoveryAnalysisTool{}, - &AzdArchitecturePlanningTool{}, - - // Focused file generation tools (use as needed) - &AzdAzureYamlGenerationTool{}, - &AzdInfrastructureGenerationTool{}, - &AzdDockerGenerationTool{}, - - // Validation tool (final step) - &AzdProjectValidationTool{}, - - // Supporting tools - &AzdIacGenerationRulesTool{}, - &AzdYamlSchemaTool{}, - }, nil -} diff --git a/cli/azd/internal/agent/tools/loader.go b/cli/azd/internal/agent/tools/loader.go index ec573ceac60..a0662c65c5c 100644 --- a/cli/azd/internal/agent/tools/loader.go +++ b/cli/azd/internal/agent/tools/loader.go @@ -6,7 +6,6 @@ package tools import ( "github.com/tmc/langchaingo/tools" - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/azd" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/dev" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/io" ) @@ -23,7 +22,6 @@ type LocalToolsLoader struct { func NewLocalToolsLoader() *LocalToolsLoader { return &LocalToolsLoader{ loaders: []ToolLoader{ - azd.NewAzdToolsLoader(), dev.NewDevToolsLoader(), io.NewIoToolsLoader(), }, diff --git a/cli/azd/internal/agent/tools/mcp/mcp.json b/cli/azd/internal/agent/tools/mcp/mcp.json index efca4416be8..67aa3a5dd2d 100644 --- a/cli/azd/internal/agent/tools/mcp/mcp.json +++ b/cli/azd/internal/agent/tools/mcp/mcp.json @@ -2,8 +2,13 @@ "servers": { "Azure": { "type": "stdio", - "command": "azmcp", - "args": ["server", "start"] + "command": "npx", + "args": ["-y", "@azure/mcp@latest", "server", "start"] + }, + "azd": { + "type": "stdio", + "command": "azd", + "args": ["mcp", "start"] } } } diff --git a/cli/azd/internal/mcp/tools/azd_architecture_planning.go b/cli/azd/internal/mcp/tools/azd_architecture_planning.go new file mode 100644 index 00000000000..b22ae1dde7e --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_architecture_planning.go @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdArchitecturePlanningTool creates a new azd architecture planning tool +func NewAzdArchitecturePlanningTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_architecture_planning", + mcp.WithDescription(`Returns instructions for selecting appropriate Azure services for discovered application components and +designing infrastructure architecture. + +The LLM agent should execute these instructions using available tools. 
+ +Use this tool when: +- Discovery analysis has been completed and azd-arch-plan.md exists +- Application components have been identified and classified +- Need to map components to Azure hosting services +- Ready to plan containerization and database strategies`), + ), + Handler: handleAzdArchitecturePlanning, + } +} + +func handleAzdArchitecturePlanning(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdArchitecturePlanningPrompt), nil +} diff --git a/cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go b/cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go new file mode 100644 index 00000000000..50751d9246d --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdAzureYamlGenerationTool creates a new azd azure yaml generation tool +func NewAzdAzureYamlGenerationTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_azure_yaml_generation", + mcp.WithDescription(`Returns instructions for generating the azure.yaml configuration file with proper service hosting, +build, and deployment settings for AZD projects. + +The LLM agent should execute these instructions using available tools. + +Use this tool when: +- Architecture planning has been completed and Azure services selected +- Need to create or update azure.yaml configuration file +- Services have been mapped to Azure hosting platforms +- Ready to define build and deployment configurations`), + ), + Handler: handleAzdAzureYamlGeneration, + } +} + +func handleAzdAzureYamlGeneration(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdAzureYamlGenerationPrompt), nil +} diff --git a/cli/azd/internal/mcp/tools/azd_discovery_analysis.go b/cli/azd/internal/mcp/tools/azd_discovery_analysis.go new file mode 100644 index 00000000000..2581b21b152 --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_discovery_analysis.go @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdDiscoveryAnalysisTool creates a new azd discovery analysis tool +func NewAzdDiscoveryAnalysisTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_discovery_analysis", + mcp.WithDescription(`Returns instructions for performing comprehensive discovery and analysis of application components +to prepare for Azure Developer CLI (AZD) initialization. + +The LLM agent should execute these instructions using available tools. 
+ +Use this tool when: +- Starting Phase 1 of AZD migration process +- Need to identify all application components and dependencies +- Codebase analysis required before architecture planning +- azd-arch-plan.md does not exist or needs updating`), + ), + Handler: handleAzdDiscoveryAnalysis, + } +} + +func handleAzdDiscoveryAnalysis(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdDiscoveryAnalysisPrompt), nil +} diff --git a/cli/azd/internal/mcp/tools/azd_docker_generation.go b/cli/azd/internal/mcp/tools/azd_docker_generation.go new file mode 100644 index 00000000000..bca51b5b24b --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_docker_generation.go @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdDockerGenerationTool creates a new azd docker generation tool +func NewAzdDockerGenerationTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_docker_generation", + mcp.WithDescription(`Returns instructions for generating optimized Dockerfiles and container configurations for containerizable +services in AZD projects. + +The LLM agent should execute these instructions using available tools. + +Use this tool when: +- Architecture planning identified services requiring containerization +- azd-arch-plan.md shows Container Apps or AKS as selected hosting platform +- Need Dockerfiles for microservices, APIs, or containerized web applications +- Ready to implement containerization strategy`), + ), + Handler: handleAzdDockerGeneration, + } +} + +func handleAzdDockerGeneration(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdDockerGenerationPrompt), nil +} diff --git a/cli/azd/internal/mcp/tools/azd_iac_generation_rules.go b/cli/azd/internal/mcp/tools/azd_iac_generation_rules.go new file mode 100644 index 00000000000..3b9710088ff --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_iac_generation_rules.go @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdIacGenerationRulesTool creates a new azd iac generation rules tool +func NewAzdIacGenerationRulesTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_iac_generation_rules", + mcp.WithDescription(`Returns comprehensive rules and guidelines for generating Bicep Infrastructure as Code files and modules +for AZD projects. + +The LLM agent should reference these rules when generating infrastructure code. 
+ +Use this tool when: +- Generating any Bicep infrastructure templates for AZD projects +- Need compliance rules and naming conventions for Azure resources +- Creating modular, reusable Bicep files +- Ensuring security and operational best practices"`), + ), + Handler: handleAzdIacGenerationRules, + } +} + +func handleAzdIacGenerationRules(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdIacRulesPrompt), nil +} diff --git a/cli/azd/internal/mcp/tools/azd_infrastructure_generation.go b/cli/azd/internal/mcp/tools/azd_infrastructure_generation.go new file mode 100644 index 00000000000..ea04eba4701 --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_infrastructure_generation.go @@ -0,0 +1,35 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdInfrastructureGenerationTool creates a new azd infrastructure generation tool +func NewAzdInfrastructureGenerationTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_infrastructure_generation", + mcp.WithDescription(`Returns instructions for generating modular Bicep infrastructure templates following Azure security and +operational best practices for AZD projects. + +The LLM agent should execute these instructions using available tools. + +Use this tool when: +- Architecture planning completed with Azure services selected +- Need to create Bicep infrastructure templates +- Ready to implement infrastructure as code for deployment`), + ), + Handler: handleAzdInfrastructureGeneration, + } +} + +func handleAzdInfrastructureGeneration(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdInfrastructureGenerationPrompt), nil +} diff --git a/cli/azd/internal/mcp/tools/azd_plan_init.go b/cli/azd/internal/mcp/tools/azd_plan_init.go new file mode 100644 index 00000000000..b2b2cb8a143 --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_plan_init.go @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdPlanInitTool creates a new azd plan init tool +func NewAzdPlanInitTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_plan_init", + mcp.WithDescription(`Returns instructions for orchestrating complete AZD application initialization using structured phases +with specialized tools. + +The LLM agent should execute these instructions using available tools. 
+ +Use this tool when: +- Starting new AZD project initialization or migration +- Need structured approach to transform application into AZD-compatible project +- Want to ensure proper sequencing of discovery, planning, and file generation +- Require complete project orchestration guidance`), + ), + Handler: handleAzdPlanInit, + } +} + +func handleAzdPlanInit(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdPlanInitPrompt), nil +} diff --git a/cli/azd/internal/mcp/tools/azd_project_validation.go b/cli/azd/internal/mcp/tools/azd_project_validation.go new file mode 100644 index 00000000000..9620074e15a --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_project_validation.go @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdProjectValidationTool creates a new azd project validation tool +func NewAzdProjectValidationTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_project_validation", + mcp.WithDescription(`Returns instructions for validating AZD project by running comprehensive checks on azure.yaml schema, +Bicep templates, environment setup, packaging, and deployment preview. + +The LLM agent should execute these instructions using available tools. + +Use this tool when: +- All AZD configuration files have been generated +- Ready to validate complete project before deployment +- Need to ensure azure.yaml, Bicep templates, and environment are properly configured +- Final validation step before running azd up`), + ), + Handler: handleAzdProjectValidation, + } +} + +func handleAzdProjectValidation(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdProjectValidationPrompt), nil +} diff --git a/cli/azd/internal/mcp/tools/azd_yaml_schema.go b/cli/azd/internal/mcp/tools/azd_yaml_schema.go new file mode 100644 index 00000000000..6afaf884813 --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_yaml_schema.go @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdYamlSchemaTool creates a new azd yaml schema tool +func NewAzdYamlSchemaTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_yaml_schema", + mcp.WithDescription(`Gets the Azure YAML JSON schema file specification and structure for azure.yaml configuration files used in AZD.`), + ), + Handler: handleAzdYamlSchema, + } +} + +func handleAzdYamlSchema(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdYamlSchemaPrompt), nil +} diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_architecture_planning.md b/cli/azd/internal/mcp/tools/prompts/azd_architecture_planning.md similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/azd_architecture_planning.md rename to cli/azd/internal/mcp/tools/prompts/azd_architecture_planning.md diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md b/cli/azd/internal/mcp/tools/prompts/azd_azure_yaml_generation.md similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/azd_azure_yaml_generation.md rename to cli/azd/internal/mcp/tools/prompts/azd_azure_yaml_generation.md diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_discovery_analysis.md b/cli/azd/internal/mcp/tools/prompts/azd_discovery_analysis.md similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/azd_discovery_analysis.md rename to cli/azd/internal/mcp/tools/prompts/azd_discovery_analysis.md diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_docker_generation.md b/cli/azd/internal/mcp/tools/prompts/azd_docker_generation.md similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/azd_docker_generation.md rename to cli/azd/internal/mcp/tools/prompts/azd_docker_generation.md diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md b/cli/azd/internal/mcp/tools/prompts/azd_iac_generation_rules.md similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/azd_iac_generation_rules.md rename to cli/azd/internal/mcp/tools/prompts/azd_iac_generation_rules.md diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md b/cli/azd/internal/mcp/tools/prompts/azd_infrastructure_generation.md similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/azd_infrastructure_generation.md rename to cli/azd/internal/mcp/tools/prompts/azd_infrastructure_generation.md diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_plan_init.md b/cli/azd/internal/mcp/tools/prompts/azd_plan_init.md similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/azd_plan_init.md rename to cli/azd/internal/mcp/tools/prompts/azd_plan_init.md diff --git a/cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md b/cli/azd/internal/mcp/tools/prompts/azd_project_validation.md similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/azd_project_validation.md rename to cli/azd/internal/mcp/tools/prompts/azd_project_validation.md diff --git a/cli/azd/internal/agent/tools/azd/prompts/azure.yaml.json b/cli/azd/internal/mcp/tools/prompts/azure.yaml.json similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/azure.yaml.json rename to cli/azd/internal/mcp/tools/prompts/azure.yaml.json diff --git 
a/cli/azd/internal/agent/tools/azd/prompts/prompts.go b/cli/azd/internal/mcp/tools/prompts/prompts.go similarity index 100% rename from cli/azd/internal/agent/tools/azd/prompts/prompts.go rename to cli/azd/internal/mcp/tools/prompts/prompts.go From 889cd2b0c8c4902fab42441b06cb59c7eb19d8cd Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Fri, 8 Aug 2025 10:19:11 -0700 Subject: [PATCH 056/116] Fixes lint issues --- cli/azd/.vscode/cspell.yaml | 13 +++++++++++++ .../internal/mcp/tools/azd_architecture_planning.go | 6 ++++-- .../internal/mcp/tools/azd_azure_yaml_generation.go | 6 ++++-- .../internal/mcp/tools/azd_discovery_analysis.go | 6 ++++-- cli/azd/internal/mcp/tools/azd_docker_generation.go | 6 ++++-- .../internal/mcp/tools/azd_iac_generation_rules.go | 6 ++++-- .../mcp/tools/azd_infrastructure_generation.go | 6 ++++-- cli/azd/internal/mcp/tools/azd_plan_init.go | 6 ++++-- .../internal/mcp/tools/azd_project_validation.go | 6 ++++-- cli/azd/internal/mcp/tools/azd_yaml_schema.go | 5 ++++- 10 files changed, 49 insertions(+), 17 deletions(-) diff --git a/cli/azd/.vscode/cspell.yaml b/cli/azd/.vscode/cspell.yaml index 628897e2098..10092e76ae4 100644 --- a/cli/azd/.vscode/cspell.yaml +++ b/cli/azd/.vscode/cspell.yaml @@ -164,6 +164,19 @@ overrides: - dall - datasource - vectorizing + - filename: docs/new-azd-command.md + words: + - pflag + - struct + - Errorf + - Sprintf + - mycommand + - omitempty + - Fprintf + - tabwriter + - azdcontext + - azapi + - eastus ignorePaths: - "**/*_test.go" - "**/mock*.go" diff --git a/cli/azd/internal/mcp/tools/azd_architecture_planning.go b/cli/azd/internal/mcp/tools/azd_architecture_planning.go index b22ae1dde7e..960c74d0cce 100644 --- a/cli/azd/internal/mcp/tools/azd_architecture_planning.go +++ b/cli/azd/internal/mcp/tools/azd_architecture_planning.go @@ -16,7 +16,8 @@ func NewAzdArchitecturePlanningTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_architecture_planning", - mcp.WithDescription(`Returns instructions for selecting appropriate Azure services for discovered application components and + mcp.WithDescription( + `Returns instructions for selecting appropriate Azure services for discovered application components and designing infrastructure architecture. The LLM agent should execute these instructions using available tools. @@ -25,7 +26,8 @@ Use this tool when: - Discovery analysis has been completed and azd-arch-plan.md exists - Application components have been identified and classified - Need to map components to Azure hosting services -- Ready to plan containerization and database strategies`), +- Ready to plan containerization and database strategies`, + ), ), Handler: handleAzdArchitecturePlanning, } diff --git a/cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go b/cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go index 50751d9246d..eaa0d51d705 100644 --- a/cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go +++ b/cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go @@ -16,7 +16,8 @@ func NewAzdAzureYamlGenerationTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_azure_yaml_generation", - mcp.WithDescription(`Returns instructions for generating the azure.yaml configuration file with proper service hosting, + mcp.WithDescription( + `Returns instructions for generating the azure.yaml configuration file with proper service hosting, build, and deployment settings for AZD projects. The LLM agent should execute these instructions using available tools. 
@@ -25,7 +26,8 @@ Use this tool when: - Architecture planning has been completed and Azure services selected - Need to create or update azure.yaml configuration file - Services have been mapped to Azure hosting platforms -- Ready to define build and deployment configurations`), +- Ready to define build and deployment configurations`, + ), ), Handler: handleAzdAzureYamlGeneration, } diff --git a/cli/azd/internal/mcp/tools/azd_discovery_analysis.go b/cli/azd/internal/mcp/tools/azd_discovery_analysis.go index 2581b21b152..1f4f6bc5087 100644 --- a/cli/azd/internal/mcp/tools/azd_discovery_analysis.go +++ b/cli/azd/internal/mcp/tools/azd_discovery_analysis.go @@ -16,7 +16,8 @@ func NewAzdDiscoveryAnalysisTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_discovery_analysis", - mcp.WithDescription(`Returns instructions for performing comprehensive discovery and analysis of application components + mcp.WithDescription( + `Returns instructions for performing comprehensive discovery and analysis of application components to prepare for Azure Developer CLI (AZD) initialization. The LLM agent should execute these instructions using available tools. @@ -25,7 +26,8 @@ Use this tool when: - Starting Phase 1 of AZD migration process - Need to identify all application components and dependencies - Codebase analysis required before architecture planning -- azd-arch-plan.md does not exist or needs updating`), +- azd-arch-plan.md does not exist or needs updating`, + ), ), Handler: handleAzdDiscoveryAnalysis, } diff --git a/cli/azd/internal/mcp/tools/azd_docker_generation.go b/cli/azd/internal/mcp/tools/azd_docker_generation.go index bca51b5b24b..c784cfe61cb 100644 --- a/cli/azd/internal/mcp/tools/azd_docker_generation.go +++ b/cli/azd/internal/mcp/tools/azd_docker_generation.go @@ -16,7 +16,8 @@ func NewAzdDockerGenerationTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_docker_generation", - mcp.WithDescription(`Returns instructions for generating optimized Dockerfiles and container configurations for containerizable + mcp.WithDescription( + `Returns instructions for generating optimized Dockerfiles and container configurations for containerizable services in AZD projects. The LLM agent should execute these instructions using available tools. @@ -25,7 +26,8 @@ Use this tool when: - Architecture planning identified services requiring containerization - azd-arch-plan.md shows Container Apps or AKS as selected hosting platform - Need Dockerfiles for microservices, APIs, or containerized web applications -- Ready to implement containerization strategy`), +- Ready to implement containerization strategy`, + ), ), Handler: handleAzdDockerGeneration, } diff --git a/cli/azd/internal/mcp/tools/azd_iac_generation_rules.go b/cli/azd/internal/mcp/tools/azd_iac_generation_rules.go index 3b9710088ff..8e2c3f1199b 100644 --- a/cli/azd/internal/mcp/tools/azd_iac_generation_rules.go +++ b/cli/azd/internal/mcp/tools/azd_iac_generation_rules.go @@ -16,7 +16,8 @@ func NewAzdIacGenerationRulesTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_iac_generation_rules", - mcp.WithDescription(`Returns comprehensive rules and guidelines for generating Bicep Infrastructure as Code files and modules + mcp.WithDescription( + `Returns comprehensive rules and guidelines for generating Bicep Infrastructure as Code files and modules for AZD projects. The LLM agent should reference these rules when generating infrastructure code. 
@@ -25,7 +26,8 @@ Use this tool when: - Generating any Bicep infrastructure templates for AZD projects - Need compliance rules and naming conventions for Azure resources - Creating modular, reusable Bicep files -- Ensuring security and operational best practices"`), +- Ensuring security and operational best practices"`, + ), ), Handler: handleAzdIacGenerationRules, } diff --git a/cli/azd/internal/mcp/tools/azd_infrastructure_generation.go b/cli/azd/internal/mcp/tools/azd_infrastructure_generation.go index ea04eba4701..bab4cfd9cf6 100644 --- a/cli/azd/internal/mcp/tools/azd_infrastructure_generation.go +++ b/cli/azd/internal/mcp/tools/azd_infrastructure_generation.go @@ -16,7 +16,8 @@ func NewAzdInfrastructureGenerationTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_infrastructure_generation", - mcp.WithDescription(`Returns instructions for generating modular Bicep infrastructure templates following Azure security and + mcp.WithDescription( + `Returns instructions for generating modular Bicep infrastructure templates following Azure security and operational best practices for AZD projects. The LLM agent should execute these instructions using available tools. @@ -24,7 +25,8 @@ The LLM agent should execute these instructions using available tools. Use this tool when: - Architecture planning completed with Azure services selected - Need to create Bicep infrastructure templates -- Ready to implement infrastructure as code for deployment`), +- Ready to implement infrastructure as code for deployment`, + ), ), Handler: handleAzdInfrastructureGeneration, } diff --git a/cli/azd/internal/mcp/tools/azd_plan_init.go b/cli/azd/internal/mcp/tools/azd_plan_init.go index b2b2cb8a143..051d233295f 100644 --- a/cli/azd/internal/mcp/tools/azd_plan_init.go +++ b/cli/azd/internal/mcp/tools/azd_plan_init.go @@ -16,7 +16,8 @@ func NewAzdPlanInitTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_plan_init", - mcp.WithDescription(`Returns instructions for orchestrating complete AZD application initialization using structured phases + mcp.WithDescription( + `Returns instructions for orchestrating complete AZD application initialization using structured phases with specialized tools. The LLM agent should execute these instructions using available tools. @@ -25,7 +26,8 @@ Use this tool when: - Starting new AZD project initialization or migration - Need structured approach to transform application into AZD-compatible project - Want to ensure proper sequencing of discovery, planning, and file generation -- Require complete project orchestration guidance`), +- Require complete project orchestration guidance`, + ), ), Handler: handleAzdPlanInit, } diff --git a/cli/azd/internal/mcp/tools/azd_project_validation.go b/cli/azd/internal/mcp/tools/azd_project_validation.go index 9620074e15a..af913410b72 100644 --- a/cli/azd/internal/mcp/tools/azd_project_validation.go +++ b/cli/azd/internal/mcp/tools/azd_project_validation.go @@ -16,7 +16,8 @@ func NewAzdProjectValidationTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_project_validation", - mcp.WithDescription(`Returns instructions for validating AZD project by running comprehensive checks on azure.yaml schema, + mcp.WithDescription( + `Returns instructions for validating AZD project by running comprehensive checks on azure.yaml schema, Bicep templates, environment setup, packaging, and deployment preview. The LLM agent should execute these instructions using available tools. 
@@ -25,7 +26,8 @@ Use this tool when: - All AZD configuration files have been generated - Ready to validate complete project before deployment - Need to ensure azure.yaml, Bicep templates, and environment are properly configured -- Final validation step before running azd up`), +- Final validation step before running azd up`, + ), ), Handler: handleAzdProjectValidation, } diff --git a/cli/azd/internal/mcp/tools/azd_yaml_schema.go b/cli/azd/internal/mcp/tools/azd_yaml_schema.go index 6afaf884813..8b65faaddbe 100644 --- a/cli/azd/internal/mcp/tools/azd_yaml_schema.go +++ b/cli/azd/internal/mcp/tools/azd_yaml_schema.go @@ -16,7 +16,10 @@ func NewAzdYamlSchemaTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_yaml_schema", - mcp.WithDescription(`Gets the Azure YAML JSON schema file specification and structure for azure.yaml configuration files used in AZD.`), + mcp.WithDescription( + `Gets the Azure YAML JSON schema file specification and structure for azure.yaml `+ + `configuration files used in AZD.`, + ), ), Handler: handleAzdYamlSchema, } From af2cf702a61fa74e651f1f068c607c5c151c0b32 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Fri, 8 Aug 2025 10:44:16 -0700 Subject: [PATCH 057/116] Adds github copilot instructions --- cli/azd/.github/copilot-instructions.md | 200 ++++++++++++++++++++++++ cli/azd/.vscode/cspell.yaml | 5 + cli/azd/cmd/mcp.go | 8 +- 3 files changed, 208 insertions(+), 5 deletions(-) create mode 100644 cli/azd/.github/copilot-instructions.md diff --git a/cli/azd/.github/copilot-instructions.md b/cli/azd/.github/copilot-instructions.md new file mode 100644 index 00000000000..36484b26f95 --- /dev/null +++ b/cli/azd/.github/copilot-instructions.md @@ -0,0 +1,200 @@ +# GitHub Copilot Instructions for Azure Developer CLI (azd) + +## Project Overview + +The Azure Developer CLI (azd) is a comprehensive command-line tool built in Go that streamlines Azure application development and deployment. The project follows Microsoft coding standards and uses a layered architecture with dependency injection, structured command patterns, and comprehensive testing. + +## Getting Started + +### Prerequisites +- [Go](https://go.dev/dl/) 1.24 +- [VS Code](https://code.visualstudio.com/) with [Go extension](https://marketplace.visualstudio.com/items?itemName=golang.Go) + +### Building & Testing +```bash +# Build +cd cli/azd +go build + +# Run tests (unit only) +go test ./... -short + +# Run all tests (including end-to-end) +go test ./... 
+``` + +### Development Guidelines +- Check existing [bug issues](https://github.com/Azure/azure-dev/issues?q=is%3Aopen+is%3Aissue+label%3Abug) or [enhancement issues](https://github.com/Azure/azure-dev/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement) +- Open an issue before starting work on significant changes +- Submit pull requests following the established patterns + +## Architecture & Design Patterns + +### Core Architecture +- **Layered Architecture**: `ActionDescriptor Tree → CobraBuilder → Cobra Commands → CLI` +- **Dependency Injection**: IoC container pattern for service resolution +- **Command Pattern**: Actions implement the `Action` interface with `Run(ctx context.Context) (*ActionResult, error)` +- **Model Context Protocol (MCP)**: Server implementation for AI agent interactions + +### Key Components +- **ActionDescriptor**: Higher-order component defining commands, flags, middleware, and relationships +- **Actions**: Application logic handling CLI commands (`cmd/actions/`) +- **Tools**: External tool integrations and MCP server tools +- **Packages**: Reusable business logic (`pkg/`) +- **Internal**: Internal implementation details (`internal/`) + +## Command Development + +For detailed guidance on adding new commands, see: +- **[docs/new-azd-command.md](./docs/new-azd-command.md)** - Comprehensive guide for adding new commands + +### Quick Reference +- Follow the ActionDescriptor pattern for new commands +- Use dependency injection for service resolution +- Implement proper error handling and output formatting +- Support multiple output formats (JSON, Table, None) + +## Code Quality Standards + +### Required Linting Pipeline +Always run this complete pipeline before submitting changes: +```bash +cspell lint '**/*.go' --config ./.vscode/cspell.yaml --root . --no-progress && \ +golines . -w -m 125 && \ +golangci-lint run --timeout 5m && \ +../../eng/scripts/copyright-check.sh . --fix +``` + +**Pipeline Components:** +- `cspell`: Spell checking for Go files +- `golines`: Line length formatting for Go files (125 char limit) +- `golangci-lint`: Go code quality and style checking +- `copyright-check.sh`: Ensures proper Microsoft copyright headers + +### Line Length & Formatting +- **Maximum line length for Go files**: 125 characters (enforced by `lll` linter) +- Use `golines` with `-m 125` flag for automatic formatting of Go code +- Break long strings in Go code using string concatenation with `+` +- **Documentation files (Markdown)**: No strict line length limit, prioritize readability + +### Spelling & Documentation +- Use cspell with project config: `--config ./.vscode/cspell.yaml` +- Add technical terms to document-specific overrides in `.vscode/cspell.yaml` +- Pattern for document-specific words: +```yaml +overrides: + - filename: path/to/file.ext + words: + - technicalterm1 + - technicalterm2 +``` + +### Copyright Headers +All Go files must include Microsoft copyright header: +```go +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +``` + +## MCP Tools Development + +### Tool Pattern +MCP tools follow the ServerTool interface pattern from `github.com/mark3labs/mcp-go/server`. 
Each tool should have: +- Constructor function: `NewXXXTool() server.ServerTool` +- Handler function: `handleXXX(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error)` +- Proper tool descriptions and parameter definitions +- Snake_case tool names (e.g., `azd_plan_init`) + +## Package Structure Guidelines + +### Import Organization +1. Standard library imports +2. External dependencies +3. Azure/azd internal packages +4. Local package imports + +### Internal vs Package Separation +- `internal/`: Implementation details, not meant for external use +- `pkg/`: Reusable business logic that could be imported by other projects +- Clear interface boundaries between packages + +## Testing Requirements + +### Test Commands +```bash +# Unit tests only +go test ./... -short + +# All tests including end-to-end +go test ./... +``` + +### Test File Patterns +- Unit tests: `*_test.go` alongside source files +- Functional tests: `test/functional/` directory +- Mock exclusions configured in `.golangci.yaml` + +## Error Handling & Logging + +### Error Patterns +- Use `fmt.Errorf` for error wrapping with context +- Return meaningful error messages for CLI users +- Handle context cancellation appropriately + +### Output Formatting +- Support multiple output formats: JSON, Table, None +- Use structured output for machine consumption +- Provide user-friendly messages for human consumption + +## Documentation Standards + +### Code Documentation +- Public functions and types must have Go doc comments +- Comments should start with the function/type name +- Provide context and usage examples where helpful + +### Inline Documentation +- Use clear variable and function names +- Add comments for complex business logic +- Document non-obvious dependencies or assumptions + +## Security & Best Practices + +### Enabled Linters +- `errorlint`: Error handling best practices +- `gosec`: Security vulnerability detection +- `lll`: Line length enforcement (125 chars) +- `staticcheck`: Advanced static analysis + +### Security Considerations +- Handle sensitive data appropriately (credentials, tokens) +- Validate all user inputs +- Use secure defaults for configuration +- Follow Azure security best practices + +## Validation Checklist + +Before submitting any changes, ensure: + +- [ ] All linting pipeline steps pass without errors +- [ ] Copyright headers are present on all new files +- [ ] Spelling check passes with appropriate dictionary entries +- [ ] Line length under 125 characters for Go files (Markdown files have no strict limit) +- [ ] Tests pass (unit and integration where applicable) +- [ ] Error handling is comprehensive and user-friendly +- [ ] Documentation is updated for new features +- [ ] Command patterns follow established conventions +- [ ] MCP tools follow ServerTool interface pattern +- [ ] Package organization follows internal/pkg separation +- [ ] Import statements are properly organized + +## Common Patterns to Follow + +### Key Principles +- Use ActionDescriptor pattern for command registration +- Leverage dependency injection through IoC container +- Follow established naming conventions (see docs/new-azd-command.md) +- Implement proper error handling and output formatting +- Use structured configuration with sensible defaults + +This instruction set ensures consistency with the established codebase patterns and helps maintain the high-quality standards expected in the Azure Developer CLI project. 
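As a concrete illustration of the ServerTool constructor/handler pattern described above, a minimal tool might look like the sketch below. The `azd_example` tool name, its description text, and the returned string are placeholders invented for illustration; they are not part of this change, but the shape mirrors the tools this patch adds (such as `azd_plan_init` and `azd_project_validation`).

```go
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

package tools

import (
	"context"

	"github.com/mark3labs/mcp-go/mcp"
	"github.com/mark3labs/mcp-go/server"
)

// NewAzdExampleTool creates a hypothetical example tool following the ServerTool pattern.
func NewAzdExampleTool() server.ServerTool {
	return server.ServerTool{
		Tool: mcp.NewTool(
			"azd_example",
			mcp.WithDescription(
				`Returns instructions for an example workflow step.

Use this tool when:
- Demonstrating the constructor/handler pattern for MCP server tools`,
			),
		),
		Handler: handleAzdExample,
	}
}

// handleAzdExample returns the tool's instructions as plain text.
func handleAzdExample(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
	return mcp.NewToolResultText("example instructions"), nil
}
```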
diff --git a/cli/azd/.vscode/cspell.yaml b/cli/azd/.vscode/cspell.yaml index 10092e76ae4..16fd4325393 100644 --- a/cli/azd/.vscode/cspell.yaml +++ b/cli/azd/.vscode/cspell.yaml @@ -177,6 +177,11 @@ overrides: - azdcontext - azapi - eastus + - filename: .github/copilot-instructions.md + words: + - golines + - technicalterm + - Errorf ignorePaths: - "**/*_test.go" - "**/mock*.go" diff --git a/cli/azd/cmd/mcp.go b/cli/azd/cmd/mcp.go index b9c7652f3ec..7e78c9e8a80 100644 --- a/cli/azd/cmd/mcp.go +++ b/cli/azd/cmd/mcp.go @@ -22,11 +22,9 @@ import ( func mcpActions(root *actions.ActionDescriptor) *actions.ActionDescriptor { group := root.Add("mcp", &actions.ActionDescriptorOptions{ Command: &cobra.Command{ - Use: "mcp", - Short: "Manage Model Context Protocol (MCP) server.", - }, - GroupingOptions: actions.CommandGroupOptions{ - RootLevelHelp: actions.CmdGroupAlpha, + Use: "mcp", + Short: "Manage Model Context Protocol (MCP) server.", + Hidden: true, }, }) From 459fc3c633d5def1300a353c873cc615dacce2db Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Fri, 8 Aug 2025 11:00:52 -0700 Subject: [PATCH 058/116] Updates inline go docs --- cli/azd/internal/agent/agent.go | 11 +++++++ .../internal/agent/conversational_agent.go | 14 ++++++--- .../logging/{logger.go => action_logger.go} | 0 cli/azd/internal/agent/one_shot_agent.go | 11 ++++--- cli/azd/internal/agent/tools/loader.go | 5 ++- cli/azd/internal/agent/tools/mcp/loader.go | 31 ++++++++++++++----- .../agent/tools/mcp/sampling_handler.go | 31 +++++++++++++------ cli/azd/pkg/llm/azure_openai.go | 6 ++++ cli/azd/pkg/llm/manager.go | 2 +- cli/azd/pkg/llm/model.go | 6 +++- cli/azd/pkg/llm/model_factory.go | 6 ++++ cli/azd/pkg/llm/ollama.go | 6 ++++ 12 files changed, 101 insertions(+), 28 deletions(-) rename cli/azd/internal/agent/logging/{logger.go => action_logger.go} (100%) diff --git a/cli/azd/internal/agent/agent.go b/cli/azd/internal/agent/agent.go index 8dc07ae2668..e3ead707b6d 100644 --- a/cli/azd/internal/agent/agent.go +++ b/cli/azd/internal/agent/agent.go @@ -13,6 +13,8 @@ import ( "github.com/tmc/langchaingo/tools" ) +// Agent represents an AI agent that can execute tools and interact with language models. +// It manages multiple models for different purposes and maintains an executor for tool execution. 
type Agent struct { debug bool defaultModel llms.Model @@ -22,38 +24,45 @@ type Agent struct { callbacksHandler callbacks.Handler } +// AgentOption is a functional option for configuring an Agent type AgentOption func(*Agent) +// WithDebug returns an option that enables or disables debug logging for the agent func WithDebug(debug bool) AgentOption { return func(agent *Agent) { agent.debug = debug } } +// WithDefaultModel returns an option that sets the default language model for the agent func WithDefaultModel(model llms.Model) AgentOption { return func(agent *Agent) { agent.defaultModel = model } } +// WithSamplingModel returns an option that sets the sampling model for the agent func WithSamplingModel(model llms.Model) AgentOption { return func(agent *Agent) { agent.samplingModel = model } } +// WithTools returns an option that adds the specified tools to the agent's toolkit func WithTools(tools ...tools.Tool) AgentOption { return func(agent *Agent) { agent.tools = tools } } +// WithCallbacksHandler returns an option that sets the callbacks handler for the agent func WithCallbacksHandler(handler callbacks.Handler) AgentOption { return func(agent *Agent) { agent.callbacksHandler = handler } } +// toolNames returns a comma-separated string of all tool names in the provided slice func toolNames(tools []tools.Tool) string { var tn strings.Builder for i, tool := range tools { @@ -66,6 +75,8 @@ func toolNames(tools []tools.Tool) string { return tn.String() } +// toolDescriptions returns a formatted string containing the name and description +// of each tool in the provided slice, with each tool on a separate line func toolDescriptions(tools []tools.Tool) string { var ts strings.Builder for _, tool := range tools { diff --git a/cli/azd/internal/agent/conversational_agent.go b/cli/azd/internal/agent/conversational_agent.go index 1c6622cac7d..5fe070b2b02 100644 --- a/cli/azd/internal/agent/conversational_agent.go +++ b/cli/azd/internal/agent/conversational_agent.go @@ -26,12 +26,15 @@ import ( //go:embed prompts/conversational.txt var conversational_prompt_template string -// ConversationalAzdAiAgent represents an enhanced AZD Copilot agent with action tracking, -// intent validation, and conversation memory +// ConversationalAzdAiAgent represents an enhanced AZD Copilot agent with conversation memory, +// tool filtering, and interactive capabilities type ConversationalAzdAiAgent struct { *Agent } +// NewConversationalAzdAiAgent creates a new conversational agent with memory, tool loading, +// and MCP sampling capabilities. It filters out excluded tools and configures the agent +// for interactive conversations with a high iteration limit for complex tasks. func NewConversationalAzdAiAgent(llm llms.Model, opts ...AgentOption) (*ConversationalAzdAiAgent, error) { azdAgent := &ConversationalAzdAiAgent{ Agent: &Agent{ @@ -115,11 +118,14 @@ func NewConversationalAzdAiAgent(llm llms.Model, opts ...AgentOption) (*Conversa return azdAgent, nil } +// SendMessage processes a single message through the agent and returns the response func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, args ...string) (string, error) { return aai.runChain(ctx, strings.Join(args, "\n")) } -// RunConversationLoop runs the enhanced AZD Copilot agent with full capabilities +// StartConversation runs an interactive conversation loop with the agent. +// It accepts an optional initial query and handles user input/output with proper formatting. +// The conversation continues until the user types "exit" or "quit". 
func (aai *ConversationalAzdAiAgent) StartConversation(ctx context.Context, args ...string) (string, error) { fmt.Println("🤖 AZD Copilot - Interactive Mode") fmt.Println("═══════════════════════════════════════════════════════════") @@ -171,7 +177,7 @@ func (aai *ConversationalAzdAiAgent) StartConversation(ctx context.Context, args return "", nil } -// ProcessQuery processes a user query with full action tracking and validation +// runChain executes a user query through the agent's chain with memory and returns the response func (aai *ConversationalAzdAiAgent) runChain(ctx context.Context, userInput string) (string, error) { // Execute with enhanced input - agent should automatically handle memory output, err := chains.Run(ctx, aai.executor, userInput) diff --git a/cli/azd/internal/agent/logging/logger.go b/cli/azd/internal/agent/logging/action_logger.go similarity index 100% rename from cli/azd/internal/agent/logging/logger.go rename to cli/azd/internal/agent/logging/action_logger.go diff --git a/cli/azd/internal/agent/one_shot_agent.go b/cli/azd/internal/agent/one_shot_agent.go index e2d8c9adcd2..7137e6efccd 100644 --- a/cli/azd/internal/agent/one_shot_agent.go +++ b/cli/azd/internal/agent/one_shot_agent.go @@ -18,8 +18,8 @@ import ( mcptools "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" ) -// OneShotAzdAiAgent represents an enhanced AZD Copilot agent with action tracking, -// intent validation, and conversation memory +// OneShotAzdAiAgent represents an AZD Copilot agent designed for single-request processing +// without conversation memory, optimized for one-time queries and responses type OneShotAzdAiAgent struct { *Agent } @@ -27,6 +27,9 @@ type OneShotAzdAiAgent struct { //go:embed prompts/one_shot.txt var one_shot_prompt_template string +// NewOneShotAzdAiAgent creates a new one-shot agent optimized for single queries. +// It loads tools from multiple sources, filters excluded tools, and configures +// the agent for stateless operation without conversation memory. 
func NewOneShotAzdAiAgent(llm llms.Model, opts ...AgentOption) (*OneShotAzdAiAgent, error) { azdAgent := &OneShotAzdAiAgent{ Agent: &Agent{ @@ -100,12 +103,12 @@ func NewOneShotAzdAiAgent(llm llms.Model, opts ...AgentOption) (*OneShotAzdAiAge return azdAgent, nil } -// RunConversationLoop runs the enhanced AZD Copilot agent with full capabilities +// SendMessage processes a single message through the one-shot agent and returns the response func (aai *OneShotAzdAiAgent) SendMessage(ctx context.Context, args ...string) (string, error) { return aai.runChain(ctx, strings.Join(args, "\n")) } -// ProcessQuery processes a user query with full action tracking and validation +// runChain executes a user query through the one-shot agent without memory persistence func (aai *OneShotAzdAiAgent) runChain(ctx context.Context, userInput string) (string, error) { // Execute with enhanced input - agent should automatically handle memory output, err := chains.Run(ctx, aai.executor, userInput) diff --git a/cli/azd/internal/agent/tools/loader.go b/cli/azd/internal/agent/tools/loader.go index a0662c65c5c..d45098543a2 100644 --- a/cli/azd/internal/agent/tools/loader.go +++ b/cli/azd/internal/agent/tools/loader.go @@ -15,10 +15,12 @@ type ToolLoader interface { LoadTools() ([]tools.Tool, error) } +// LocalToolsLoader manages loading tools from multiple local tool categories type LocalToolsLoader struct { loaders []ToolLoader } +// NewLocalToolsLoader creates a new instance with default tool loaders for dev and io categories func NewLocalToolsLoader() *LocalToolsLoader { return &LocalToolsLoader{ loaders: []ToolLoader{ @@ -28,7 +30,8 @@ func NewLocalToolsLoader() *LocalToolsLoader { } } -// LoadLocalTools loads all tools from all categories with the provided callback handler +// LoadTools loads and returns all tools from all registered tool loaders. +// Returns an error if any individual loader fails to load its tools. 
func (l *LocalToolsLoader) LoadTools() ([]tools.Tool, error) { var allTools []tools.Tool diff --git a/cli/azd/internal/agent/tools/mcp/loader.go b/cli/azd/internal/agent/tools/mcp/loader.go index 76346c11113..af9e8321470 100644 --- a/cli/azd/internal/agent/tools/mcp/loader.go +++ b/cli/azd/internal/agent/tools/mcp/loader.go @@ -7,6 +7,7 @@ import ( "context" "encoding/json" "fmt" + "log" _ "embed" @@ -21,27 +22,40 @@ var _mcpJson string // McpConfig represents the overall MCP configuration structure type McpConfig struct { + // Servers maps server names to their configurations Servers map[string]ServerConfig `json:"servers"` } // ServerConfig represents an individual server configuration type ServerConfig struct { - Type string `json:"type"` - Command string `json:"command"` - Args []string `json:"args,omitempty"` - Env []string `json:"env,omitempty"` + // Type specifies the type of MCP server (e.g., "stdio") + Type string `json:"type"` + // Command is the executable path or command to run the MCP server + Command string `json:"command"` + // Args are optional command-line arguments for the server command + Args []string `json:"args,omitempty"` + // Env are optional environment variables for the server process + Env []string `json:"env,omitempty"` } +// McpToolsLoader manages the loading of tools from MCP (Model Context Protocol) servers type McpToolsLoader struct { + // samplingHandler handles sampling requests from MCP clients samplingHandler client.SamplingHandler } +// NewMcpToolsLoader creates a new instance of McpToolsLoader with the provided sampling handler func NewMcpToolsLoader(samplingHandler client.SamplingHandler) *McpToolsLoader { return &McpToolsLoader{ samplingHandler: samplingHandler, } } +// LoadTools loads and returns all available tools from configured MCP servers. +// It parses the embedded mcp.json configuration, connects to each server, +// and collects all tools from each successfully connected server. +// Returns an error if the configuration cannot be parsed, but continues +// processing other servers if individual server connections fail. 
func (l *McpToolsLoader) LoadTools() ([]tools.Tool, error) { // Deserialize the embedded mcp.json configuration var config McpConfig @@ -60,19 +74,22 @@ func (l *McpToolsLoader) LoadTools() ([]tools.Tool, error) { ctx := context.Background() if err := mcpClient.Start(ctx); err != nil { - return nil, err + log.Printf("Failed to start MCP client for server %s: %v", serverName, err) + continue } // Create the adapter adapter, err := langchaingo_mcp_adapter.New(mcpClient) if err != nil { - return nil, fmt.Errorf("failed to create adapter for server %s: %w", serverName, err) + log.Printf("Failed to create adapter for server %s: %v", serverName, err) + continue } // Get all tools from MCP server mcpTools, err := adapter.Tools() if err != nil { - return nil, fmt.Errorf("failed to get tools from server %s: %w", serverName, err) + log.Printf("Failed to get tools from server %s: %v", serverName, err) + continue } // Add the tools to our collection diff --git a/cli/azd/internal/agent/tools/mcp/sampling_handler.go b/cli/azd/internal/agent/tools/mcp/sampling_handler.go index 0af9f051f4a..0c0b3f7df0d 100644 --- a/cli/azd/internal/agent/tools/mcp/sampling_handler.go +++ b/cli/azd/internal/agent/tools/mcp/sampling_handler.go @@ -14,19 +14,25 @@ import ( "github.com/tmc/langchaingo/llms" ) +// McpSamplingHandler handles sampling requests from MCP clients by delegating +// to an underlying language model and converting responses to MCP format type McpSamplingHandler struct { llm llms.Model debug bool } +// SamplingHandlerOption is a functional option for configuring McpSamplingHandler type SamplingHandlerOption func(*McpSamplingHandler) +// WithDebug returns an option that enables or disables debug logging func WithDebug(debug bool) SamplingHandlerOption { return func(h *McpSamplingHandler) { h.debug = debug } } +// NewMcpSamplingHandler creates a new MCP sampling handler with the specified +// language model and applies any provided options func NewMcpSamplingHandler(llm llms.Model, opts ...SamplingHandlerOption) *McpSamplingHandler { handler := &McpSamplingHandler{ llm: llm, @@ -39,16 +45,10 @@ func NewMcpSamplingHandler(llm llms.Model, opts ...SamplingHandlerOption) *McpSa return handler } -// cleanContent converts literal line break escape sequences to actual line break characters -func (h *McpSamplingHandler) cleanContent(content string) string { - // Replace literal escape sequences with actual control characters - // Handle Windows-style \r\n first (most common), then individual ones - content = strings.ReplaceAll(content, "\\r\\n", "\r\n") - content = strings.ReplaceAll(content, "\\n", "\n") - content = strings.ReplaceAll(content, "\\r", "\r") - return content -} - +// CreateMessage handles MCP sampling requests by converting MCP messages to the +// language model format, generating a response, and converting back to MCP format. +// It supports various content types including text, maps, and arrays, and provides +// debug logging when enabled. Returns an error-wrapped response if LLM generation fails. func (h *McpSamplingHandler) CreateMessage( ctx context.Context, request mcp.CreateMessageRequest, @@ -156,3 +156,14 @@ func (h *McpSamplingHandler) CreateMessage( return samplingResponse, nil } + +// cleanContent converts literal line break escape sequences to actual line break characters. +// It handles Windows-style \r\n sequences first, then individual \n and \r sequences. 
+func (h *McpSamplingHandler) cleanContent(content string) string { + // Replace literal escape sequences with actual control characters + // Handle Windows-style \r\n first (most common), then individual ones + content = strings.ReplaceAll(content, "\\r\\n", "\r\n") + content = strings.ReplaceAll(content, "\\n", "\n") + content = strings.ReplaceAll(content, "\\r", "\r") + return content +} diff --git a/cli/azd/pkg/llm/azure_openai.go b/cli/azd/pkg/llm/azure_openai.go index e1108982377..7319e7c6087 100644 --- a/cli/azd/pkg/llm/azure_openai.go +++ b/cli/azd/pkg/llm/azure_openai.go @@ -11,6 +11,7 @@ import ( "github.com/tmc/langchaingo/llms/openai" ) +// AzureOpenAiModelConfig holds configuration settings for Azure OpenAI models type AzureOpenAiModelConfig struct { Model string `json:"model"` Version string `json:"version"` @@ -21,16 +22,21 @@ type AzureOpenAiModelConfig struct { MaxTokens *int `json:"maxTokens"` } +// AzureOpenAiModelProvider creates Azure OpenAI models from user configuration type AzureOpenAiModelProvider struct { userConfigManager config.UserConfigManager } +// NewAzureOpenAiModelProvider creates a new Azure OpenAI model provider func NewAzureOpenAiModelProvider(userConfigManager config.UserConfigManager) ModelProvider { return &AzureOpenAiModelProvider{ userConfigManager: userConfigManager, } } +// CreateModelContainer creates a model container for Azure OpenAI with configuration +// loaded from user settings. It validates required fields and applies optional parameters +// like temperature and max tokens before creating the OpenAI client. func (p *AzureOpenAiModelProvider) CreateModelContainer(opts ...ModelOption) (*ModelContainer, error) { userConfig, err := p.userConfigManager.Load() if err != nil { diff --git a/cli/azd/pkg/llm/manager.go b/cli/azd/pkg/llm/manager.go index 2c8b2ec3853..8ba4f88bcff 100644 --- a/cli/azd/pkg/llm/manager.go +++ b/cli/azd/pkg/llm/manager.go @@ -127,7 +127,7 @@ func (m Manager) GetDefaultModel(opts ...ModelOption) (*ModelContainer, error) { return m.ModelFactory.CreateModelContainer(LlmType(defaultModelType), opts...) } -// GetModel returns the configured model from the global azd user configuration +// GetModel returns the specified model type from the global azd user configuration func (m Manager) GetModel(modelType LlmType, opts ...ModelOption) (*ModelContainer, error) { return m.ModelFactory.CreateModelContainer(modelType, opts...) 
} diff --git a/cli/azd/pkg/llm/model.go b/cli/azd/pkg/llm/model.go index 5bfba259675..aa55b628a6b 100644 --- a/cli/azd/pkg/llm/model.go +++ b/cli/azd/pkg/llm/model.go @@ -12,12 +12,13 @@ import ( var _ llms.Model = (*modelWithCallOptions)(nil) -// / Wraps an langchaingo model to allow specifying specific call options at create time +// modelWithCallOptions wraps a langchaingo model to allow specifying default call options at creation time type modelWithCallOptions struct { model llms.Model options []llms.CallOption } +// newModelWithCallOptions creates a new model wrapper with default call options func newModelWithCallOptions(model llms.Model, options ...llms.CallOption) *modelWithCallOptions { return &modelWithCallOptions{ model: model, @@ -25,6 +26,8 @@ func newModelWithCallOptions(model llms.Model, options ...llms.CallOption) *mode } } +// GenerateContent generates content using the wrapped model, combining default options +// with any additional options provided at call time func (m *modelWithCallOptions) GenerateContent( ctx context.Context, messages []llms.MessageContent, @@ -37,6 +40,7 @@ func (m *modelWithCallOptions) GenerateContent( return m.model.GenerateContent(ctx, messages, allOptions...) } +// Call is deprecated and returns an error directing users to use GenerateContent instead func (m *modelWithCallOptions) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) { return "", fmt.Errorf("Deprecated, call GenerateContent") } diff --git a/cli/azd/pkg/llm/model_factory.go b/cli/azd/pkg/llm/model_factory.go index 3994b1d3e08..7aa79c462cc 100644 --- a/cli/azd/pkg/llm/model_factory.go +++ b/cli/azd/pkg/llm/model_factory.go @@ -10,16 +10,21 @@ import ( "github.com/azure/azure-dev/cli/azd/pkg/ioc" ) +// ModelFactory creates model containers using registered model providers type ModelFactory struct { serviceLocator ioc.ServiceLocator } +// NewModelFactory creates a new model factory with the given service locator func NewModelFactory(serviceLocator ioc.ServiceLocator) *ModelFactory { return &ModelFactory{ serviceLocator: serviceLocator, } } +// CreateModelContainer creates a model container for the specified model type. +// It resolves the appropriate model provider and delegates container creation to it. +// Returns an error with suggestions if the model type is not supported. func (f *ModelFactory) CreateModelContainer(modelType LlmType, opts ...ModelOption) (*ModelContainer, error) { var modelProvider ModelProvider if err := f.serviceLocator.ResolveNamed(string(modelType), &modelProvider); err != nil { @@ -33,6 +38,7 @@ func (f *ModelFactory) CreateModelContainer(modelType LlmType, opts ...ModelOpti return modelProvider.CreateModelContainer(opts...) 
} +// ModelProvider defines the interface for creating model containers type ModelProvider interface { CreateModelContainer(opts ...ModelOption) (*ModelContainer, error) } diff --git a/cli/azd/pkg/llm/ollama.go b/cli/azd/pkg/llm/ollama.go index 27d6e1e83af..51015163e09 100644 --- a/cli/azd/pkg/llm/ollama.go +++ b/cli/azd/pkg/llm/ollama.go @@ -9,6 +9,7 @@ import ( "github.com/tmc/langchaingo/llms/ollama" ) +// OllamaModelConfig holds configuration settings for Ollama models type OllamaModelConfig struct { Model string `json:"model"` Version string `json:"version"` @@ -16,16 +17,21 @@ type OllamaModelConfig struct { MaxTokens *int `json:"maxTokens"` } +// OllamaModelProvider creates Ollama models from user configuration with sensible defaults type OllamaModelProvider struct { userConfigManager config.UserConfigManager } +// NewOllamaModelProvider creates a new Ollama model provider func NewOllamaModelProvider(userConfigManager config.UserConfigManager) ModelProvider { return &OllamaModelProvider{ userConfigManager: userConfigManager, } } +// CreateModelContainer creates a model container for Ollama with configuration from user settings. +// It defaults to "llama3" model if none specified and "latest" version if not configured. +// Applies optional parameters like temperature and max tokens to the Ollama client. func (p *OllamaModelProvider) CreateModelContainer(opts ...ModelOption) (*ModelContainer, error) { userConfig, err := p.userConfigManager.Load() if err != nil { From 88e2cedf9add200ade1db916c55a96f73c90ab5f Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Fri, 8 Aug 2025 17:09:07 -0700 Subject: [PATCH 059/116] WIP: user consent --- cli/azd/.github/copilot-instructions.md | 27 ++ cli/azd/cmd/container.go | 4 + cli/azd/cmd/init.go | 31 +- cli/azd/cmd/mcp.go | 213 ++++++++- cli/azd/internal/agent/agent.go | 27 +- cli/azd/internal/agent/agent_factory.go | 89 ++++ cli/azd/internal/agent/consent/checker.go | 187 ++++++++ .../agent/consent/consent_wrapper_tool.go | 94 ++++ cli/azd/internal/agent/consent/manager.go | 437 ++++++++++++++++++ cli/azd/internal/agent/consent/types.go | 78 ++++ .../internal/agent/conversational_agent.go | 46 +- cli/azd/internal/agent/one_shot_agent.go | 46 +- cli/azd/internal/agent/tools/common/types.go | 27 ++ .../agent/tools/dev/command_executor.go | 33 +- cli/azd/internal/agent/tools/dev/loader.go | 8 +- .../agent/tools/io/change_directory.go | 4 +- cli/azd/internal/agent/tools/io/copy_file.go | 4 +- .../agent/tools/io/create_directory.go | 4 +- .../agent/tools/io/current_directory.go | 4 +- .../agent/tools/io/delete_directory.go | 4 +- .../internal/agent/tools/io/delete_file.go | 4 +- .../internal/agent/tools/io/directory_list.go | 4 +- cli/azd/internal/agent/tools/io/file_info.go | 4 +- .../internal/agent/tools/io/file_search.go | 4 +- cli/azd/internal/agent/tools/io/loader.go | 6 +- cli/azd/internal/agent/tools/io/move_file.go | 4 +- cli/azd/internal/agent/tools/io/read_file.go | 4 +- cli/azd/internal/agent/tools/io/write_file.go | 4 +- cli/azd/internal/agent/tools/loader.go | 9 +- cli/azd/internal/agent/tools/mcp/loader.go | 27 +- .../internal/agent/tools/mcp/tool_adapter.go | 104 +++++ cli/azd/pkg/input/console.go | 24 + 32 files changed, 1366 insertions(+), 199 deletions(-) create mode 100644 cli/azd/internal/agent/agent_factory.go create mode 100644 cli/azd/internal/agent/consent/checker.go create mode 100644 cli/azd/internal/agent/consent/consent_wrapper_tool.go create mode 100644 cli/azd/internal/agent/consent/manager.go create mode 100644 
cli/azd/internal/agent/consent/types.go create mode 100644 cli/azd/internal/agent/tools/mcp/tool_adapter.go diff --git a/cli/azd/.github/copilot-instructions.md b/cli/azd/.github/copilot-instructions.md index 36484b26f95..6d204c38632 100644 --- a/cli/azd/.github/copilot-instructions.md +++ b/cli/azd/.github/copilot-instructions.md @@ -197,4 +197,31 @@ Before submitting any changes, ensure: - Implement proper error handling and output formatting - Use structured configuration with sensible defaults +### Go Code Structure Standards + +When creating or modifying Go struct files, follow this organization order: + +1. **Package Documentation**: At the top of main file or `doc.go` + ```go + // Package packagename provides... + package packagename + ``` + +2. **Constants**: Package-level constants +3. **Package-level Variables**: Global variables and error definitions +4. **Type Declarations**: Structs, interfaces, and custom types + - For 3+ types, consider using `types.go` file + - Custom error types with struct declarations (or in `types.go` if many) +5. **Primary Struct Declaration(s)**: Main structs for the package +6. **Constructor Functions**: `NewXXX()` functions +7. **Public Struct Methods**: `func (s *Struct) PublicMethod()` + - Group interface implementations with comment: `// For XXX interface support` +8. **Private Struct Methods**: `func (s *struct) privateMethod()` +9. **Private Package Functions**: `func privateFunction()` + +**File Organization Guidelines:** +- **Complex packages**: Use `types.go` for type definitions, `init.go` for initialization +- **Package init**: Place `init()` functions at top of file or in dedicated `init.go` +- **Struct embedding**: No special placement rules - treat embedded and embedding structs equally + This instruction set ensures consistency with the established codebase patterns and helps maintain the high-quality standards expected in the Azure Developer CLI project. 
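To make the struct-file layout order above concrete, here is a minimal, self-contained sketch. The package and identifier names are invented purely for illustration and do not correspond to any file in this change.

```go
// Package example demonstrates the file layout order described above:
// constants, package-level variables, primary struct, constructor,
// public methods, then private methods.
package example

import (
	"errors"
	"fmt"
)

// defaultPrefix is a package-level constant.
const defaultPrefix = "Hello"

// ErrEmptyName is a package-level error variable.
var ErrEmptyName = errors.New("name must not be empty")

// Greeter is the primary struct for the package.
type Greeter struct {
	name string
}

// NewGreeter is the constructor function.
func NewGreeter(name string) (*Greeter, error) {
	if name == "" {
		return nil, ErrEmptyName
	}
	return &Greeter{name: name}, nil
}

// Greet is a public struct method.
func (g *Greeter) Greet() string {
	return g.format(defaultPrefix)
}

// format is a private struct method.
func (g *Greeter) format(prefix string) string {
	return fmt.Sprintf("%s, %s", prefix, g.name)
}
```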
diff --git a/cli/azd/cmd/container.go b/cli/azd/cmd/container.go index 34b1f7df3b8..c1725a7dc52 100644 --- a/cli/azd/cmd/container.go +++ b/cli/azd/cmd/container.go @@ -21,6 +21,8 @@ import ( "github.com/azure/azure-dev/cli/azd/cmd/actions" "github.com/azure/azure-dev/cli/azd/cmd/middleware" "github.com/azure/azure-dev/cli/azd/internal" + "github.com/azure/azure-dev/cli/azd/internal/agent" + "github.com/azure/azure-dev/cli/azd/internal/agent/consent" "github.com/azure/azure-dev/cli/azd/internal/cmd" "github.com/azure/azure-dev/cli/azd/internal/grpcserver" "github.com/azure/azure-dev/cli/azd/internal/repository" @@ -550,6 +552,8 @@ func registerCommonDependencies(container *ioc.NestedContainer) { // AI & LLM components container.MustRegisterSingleton(llm.NewManager) container.MustRegisterSingleton(llm.NewModelFactory) + container.MustRegisterScoped(agent.NewAgentFactory) + container.MustRegisterScoped(consent.NewConsentManager) container.MustRegisterNamedSingleton("ollama", llm.NewOllamaModelProvider) container.MustRegisterNamedSingleton("azure", llm.NewAzureOpenAiModelProvider) diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go index 0b35a210631..705506d21b9 100644 --- a/cli/azd/cmd/init.go +++ b/cli/azd/cmd/init.go @@ -16,7 +16,6 @@ import ( "github.com/azure/azure-dev/cli/azd/cmd/actions" "github.com/azure/azure-dev/cli/azd/internal" "github.com/azure/azure-dev/cli/azd/internal/agent" - "github.com/azure/azure-dev/cli/azd/internal/agent/logging" "github.com/azure/azure-dev/cli/azd/internal/repository" "github.com/azure/azure-dev/cli/azd/internal/tracing" "github.com/azure/azure-dev/cli/azd/internal/tracing/fields" @@ -137,7 +136,7 @@ type initAction struct { featuresManager *alpha.FeatureManager extensionsManager *extensions.Manager azd workflow.AzdCommandRunner - llmManager *llm.Manager + agentFactory *agent.AgentFactory } func newInitAction( @@ -152,7 +151,7 @@ func newInitAction( featuresManager *alpha.FeatureManager, extensionsManager *extensions.Manager, azd workflow.AzdCommandRunner, - llmManager *llm.Manager, + agentFactory *agent.AgentFactory, ) actions.Action { return &initAction{ lazyAzdCtx: lazyAzdCtx, @@ -166,7 +165,7 @@ func newInitAction( featuresManager: featuresManager, extensionsManager: extensionsManager, azd: azd, - llmManager: llmManager, + agentFactory: agentFactory, } } @@ -377,25 +376,7 @@ func (i *initAction) initAppWithCopilot(ctx context.Context) error { // Warn user that this is an alpha feature i.console.WarnForFeature(ctx, llm.FeatureLlm) - fileLogger, cleanup, err := logging.NewFileLoggerDefault() - if err != nil { - return err - } - defer cleanup() - - defaultModelContainer, err := i.llmManager.GetDefaultModel(llm.WithLogger(fileLogger)) - if err != nil { - return err - } - - samplingModelContainer, err := i.llmManager.GetDefaultModel() - if err != nil { - return err - } - - azdAgent, err := agent.NewConversationalAzdAiAgent( - defaultModelContainer.Model, - agent.WithSamplingModel(samplingModelContainer.Model), + azdAgent, err := i.agentFactory.Create( agent.WithDebug(i.flags.global.EnableDebugLogging), ) if err != nil { @@ -488,7 +469,7 @@ Do not stop until all tasks are complete and fully resolved. 
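For context on how the new `AgentFactory` registration is meant to be consumed, the sketch below outlines the general shape of an action that receives `*agent.AgentFactory` through constructor injection (as `initAction` does in this patch) and creates an agent on demand. The action name, prompt text, and debug flag value are illustrative assumptions, not code from this change.

```go
package cmd

import (
	"context"

	"github.com/azure/azure-dev/cli/azd/cmd/actions"
	"github.com/azure/azure-dev/cli/azd/internal/agent"
	"github.com/azure/azure-dev/cli/azd/pkg/input"
)

// exampleAction is a hypothetical action showing AgentFactory consumption.
type exampleAction struct {
	console      input.Console
	agentFactory *agent.AgentFactory
}

func newExampleAction(console input.Console, agentFactory *agent.AgentFactory) actions.Action {
	return &exampleAction{console: console, agentFactory: agentFactory}
}

func (a *exampleAction) Run(ctx context.Context) (*actions.ActionResult, error) {
	// The factory wires up the default model, tool loaders, and consent
	// wrapping internally, so callers only deal with the Agent interface.
	azdAgent, err := a.agentFactory.Create(agent.WithDebug(false))
	if err != nil {
		return nil, err
	}

	response, err := azdAgent.SendMessage(ctx, "Summarize the current project layout.")
	if err != nil {
		return nil, err
	}

	a.console.Message(ctx, response)
	return nil, nil
}
```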
// collectAndApplyFeedback prompts for user feedback and applies it using the agent in a loop func (i *initAction) collectAndApplyFeedback( ctx context.Context, - azdAgent *agent.ConversationalAzdAiAgent, + azdAgent agent.Agent, promptMessage string, ) error { // Loop to allow multiple rounds of feedback @@ -546,7 +527,7 @@ func (i *initAction) collectAndApplyFeedback( } // postCompletionFeedbackLoop provides a final opportunity for feedback after all steps complete -func (i *initAction) postCompletionFeedbackLoop(ctx context.Context, azdAgent *agent.ConversationalAzdAiAgent) error { +func (i *initAction) postCompletionFeedbackLoop(ctx context.Context, azdAgent agent.Agent) error { i.console.Message(ctx, "") i.console.Message(ctx, "🎉 All initialization steps completed!") i.console.Message(ctx, "") diff --git a/cli/azd/cmd/mcp.go b/cli/azd/cmd/mcp.go index 7e78c9e8a80..9c59a8a5244 100644 --- a/cli/azd/cmd/mcp.go +++ b/cli/azd/cmd/mcp.go @@ -7,10 +7,13 @@ import ( "context" "fmt" "io" + "strings" "github.com/azure/azure-dev/cli/azd/cmd/actions" "github.com/azure/azure-dev/cli/azd/internal" + "github.com/azure/azure-dev/cli/azd/internal/agent/consent" "github.com/azure/azure-dev/cli/azd/internal/mcp/tools" + "github.com/azure/azure-dev/cli/azd/pkg/config" "github.com/azure/azure-dev/cli/azd/pkg/input" "github.com/azure/azure-dev/cli/azd/pkg/output" "github.com/mark3labs/mcp-go/server" @@ -45,6 +48,43 @@ azd functionality through the Model Context Protocol interface.`, FlagsResolver: newMcpStartFlags, }) + // azd mcp consent subcommands + consentGroup := group.Add("consent", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "consent", + Short: "Manage MCP tool consent.", + Long: "Manage consent rules for MCP tool execution.", + }, + }) + + // azd mcp consent list + consentGroup.Add("list", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "list", + Short: "List consent rules.", + Long: "List all consent rules for MCP tools.", + Args: cobra.NoArgs, + }, + OutputFormats: []output.Format{output.JsonFormat, output.TableFormat}, + DefaultFormat: output.TableFormat, + ActionResolver: newMcpConsentListAction, + FlagsResolver: newMcpConsentFlags, + }) + + // azd mcp consent clear + consentGroup.Add("clear", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "clear", + Short: "Clear consent rules.", + Long: "Clear consent rules for MCP tools.", + Args: cobra.NoArgs, + }, + OutputFormats: []output.Format{output.NoneFormat}, + DefaultFormat: output.NoneFormat, + ActionResolver: newMcpConsentClearAction, + FlagsResolver: newMcpConsentFlags, + }) + return group } @@ -65,20 +105,15 @@ func (f *mcpStartFlags) Bind(local *pflag.FlagSet, global *internal.GlobalComman // Action for MCP start command type mcpStartAction struct { - flags *mcpStartFlags - console input.Console - writer io.Writer + flags *mcpStartFlags } func newMcpStartAction( flags *mcpStartFlags, - console input.Console, - writer io.Writer, + userConfigManager config.UserConfigManager, ) actions.Action { return &mcpStartAction{ - flags: flags, - console: console, - writer: writer, + flags: flags, } } @@ -89,7 +124,7 @@ func (a *mcpStartAction) Run(ctx context.Context) (*actions.ActionResult, error) ) s.EnableSampling() - s.AddTools( + allTools := []server.ServerTool{ tools.NewAzdPlanInitTool(), tools.NewAzdDiscoveryAnalysisTool(), tools.NewAzdArchitecturePlanningTool(), @@ -99,7 +134,9 @@ func (a *mcpStartAction) Run(ctx context.Context) (*actions.ActionResult, error) 
tools.NewAzdIacGenerationRulesTool(), tools.NewAzdProjectValidationTool(), tools.NewAzdYamlSchemaTool(), - ) + } + + s.AddTools(allTools...) // Start the server using stdio transport if err := server.ServeStdio(s); err != nil { @@ -108,3 +145,159 @@ func (a *mcpStartAction) Run(ctx context.Context) (*actions.ActionResult, error) return nil, nil } + +// Flags for MCP consent commands +type mcpConsentFlags struct { + global *internal.GlobalCommandOptions + scope string + toolID string +} + +func newMcpConsentFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpConsentFlags { + flags := &mcpConsentFlags{} + flags.Bind(cmd.Flags(), global) + return flags +} + +func (f *mcpConsentFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + f.global = global + local.StringVar(&f.scope, "scope", "global", "Consent scope (global, project, session)") + local.StringVar(&f.toolID, "tool-id", "", "Specific tool ID to operate on") +} + +// Action for MCP consent list command +type mcpConsentListAction struct { + flags *mcpConsentFlags + formatter output.Formatter + writer io.Writer + userConfigManager config.UserConfigManager + consentManager consent.ConsentManager +} + +func newMcpConsentListAction( + flags *mcpConsentFlags, + formatter output.Formatter, + writer io.Writer, + userConfigManager config.UserConfigManager, + consentManager consent.ConsentManager, +) actions.Action { + return &mcpConsentListAction{ + flags: flags, + formatter: formatter, + writer: writer, + userConfigManager: userConfigManager, + consentManager: consentManager, + } +} + +func (a *mcpConsentListAction) Run(ctx context.Context) (*actions.ActionResult, error) { + var scope consent.ConsentScope + switch a.flags.scope { + case "global": + scope = consent.ScopeGlobal + case "project": + scope = consent.ScopeProject + case "session": + scope = consent.ScopeSession + default: + return nil, fmt.Errorf("invalid scope: %s", a.flags.scope) + } + + rules, err := a.consentManager.ListConsents(ctx, scope) + if err != nil { + return nil, fmt.Errorf("failed to list consent rules: %w", err) + } + + if len(rules) == 0 { + fmt.Fprintf(a.writer, "No consent rules found for scope: %s\n", a.flags.scope) + return nil, nil + } + + // Format output + if a.formatter.Kind() == output.JsonFormat { + return nil, a.formatter.Format(rules, a.writer, nil) + } + + // Table format + fmt.Fprintf(a.writer, "Consent Rules (%s scope):\n", a.flags.scope) + fmt.Fprintf(a.writer, "%-40s %-15s %-20s\n", "Tool ID", "Permission", "Granted At") + fmt.Fprintf(a.writer, "%s\n", strings.Repeat("-", 75)) + + for _, rule := range rules { + fmt.Fprintf(a.writer, "%-40s %-15s %-20s\n", + rule.ToolID, + rule.Permission, + rule.GrantedAt.Format("2006-01-02 15:04:05")) + } + + return nil, nil +} + +// Action for MCP consent clear command +type mcpConsentClearAction struct { + flags *mcpConsentFlags + console input.Console + userConfigManager config.UserConfigManager + consentManager consent.ConsentManager +} + +func newMcpConsentClearAction( + flags *mcpConsentFlags, + console input.Console, + userConfigManager config.UserConfigManager, + consentManager consent.ConsentManager, +) actions.Action { + return &mcpConsentClearAction{ + flags: flags, + console: console, + userConfigManager: userConfigManager, + consentManager: consentManager, + } +} + +func (a *mcpConsentClearAction) Run(ctx context.Context) (*actions.ActionResult, error) { + var scope consent.ConsentScope + switch a.flags.scope { + case "global": + scope = consent.ScopeGlobal + case 
"project": + scope = consent.ScopeProject + case "session": + scope = consent.ScopeSession + default: + return nil, fmt.Errorf("invalid scope: %s", a.flags.scope) + } + + var err error + if a.flags.toolID != "" { + // Clear specific tool + err = a.consentManager.ClearConsentByToolID(ctx, a.flags.toolID, scope) + if err == nil { + fmt.Fprintf(a.console.Handles().Stdout, "Cleared consent for tool: %s\n", a.flags.toolID) + } + } else { + // Clear all rules for scope + confirmed, confirmErr := a.console.Confirm(ctx, input.ConsoleOptions{ + Message: fmt.Sprintf("Are you sure you want to clear all consent rules for scope '%s'?", a.flags.scope), + }) + if confirmErr != nil { + return nil, confirmErr + } + + if !confirmed { + fmt.Fprintf(a.console.Handles().Stdout, "Operation cancelled.\n") + return nil, nil + } + + err = a.consentManager.ClearConsents(ctx, scope) + if err == nil { + fmt.Fprintf(a.console.Handles().Stdout, "Cleared all consent rules for scope: %s\n", a.flags.scope) + } + } + + if err != nil { + return nil, fmt.Errorf("failed to clear consent rules: %w", err) + } + + return nil, nil +} diff --git a/cli/azd/internal/agent/agent.go b/cli/azd/internal/agent/agent.go index e3ead707b6d..b4a653c92b9 100644 --- a/cli/azd/internal/agent/agent.go +++ b/cli/azd/internal/agent/agent.go @@ -4,6 +4,7 @@ package agent import ( + "context" "fmt" "strings" @@ -13,51 +14,47 @@ import ( "github.com/tmc/langchaingo/tools" ) -// Agent represents an AI agent that can execute tools and interact with language models. +// agentBase represents an AI agent that can execute tools and interact with language models. // It manages multiple models for different purposes and maintains an executor for tool execution. -type Agent struct { +type agentBase struct { debug bool defaultModel llms.Model - samplingModel llms.Model executor *agents.Executor tools []tools.Tool callbacksHandler callbacks.Handler } +type Agent interface { + SendMessage(ctx context.Context, args ...string) (string, error) +} + // AgentOption is a functional option for configuring an Agent -type AgentOption func(*Agent) +type AgentOption func(*agentBase) // WithDebug returns an option that enables or disables debug logging for the agent func WithDebug(debug bool) AgentOption { - return func(agent *Agent) { + return func(agent *agentBase) { agent.debug = debug } } // WithDefaultModel returns an option that sets the default language model for the agent func WithDefaultModel(model llms.Model) AgentOption { - return func(agent *Agent) { + return func(agent *agentBase) { agent.defaultModel = model } } -// WithSamplingModel returns an option that sets the sampling model for the agent -func WithSamplingModel(model llms.Model) AgentOption { - return func(agent *Agent) { - agent.samplingModel = model - } -} - // WithTools returns an option that adds the specified tools to the agent's toolkit func WithTools(tools ...tools.Tool) AgentOption { - return func(agent *Agent) { + return func(agent *agentBase) { agent.tools = tools } } // WithCallbacksHandler returns an option that sets the callbacks handler for the agent func WithCallbacksHandler(handler callbacks.Handler) AgentOption { - return func(agent *Agent) { + return func(agent *agentBase) { agent.callbacksHandler = handler } } diff --git a/cli/azd/internal/agent/agent_factory.go b/cli/azd/internal/agent/agent_factory.go new file mode 100644 index 00000000000..6cb27cc605a --- /dev/null +++ b/cli/azd/internal/agent/agent_factory.go @@ -0,0 +1,89 @@ +package agent + +import ( + 
"github.com/azure/azure-dev/cli/azd/internal/agent/consent" + "github.com/azure/azure-dev/cli/azd/internal/agent/logging" + localtools "github.com/azure/azure-dev/cli/azd/internal/agent/tools" + mcptools "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" + "github.com/azure/azure-dev/cli/azd/pkg/llm" + "github.com/tmc/langchaingo/tools" +) + +type AgentFactory struct { + consentManager consent.ConsentManager + llmManager *llm.Manager +} + +func NewAgentFactory( + consentManager consent.ConsentManager, + llmManager *llm.Manager, +) *AgentFactory { + return &AgentFactory{ + consentManager: consentManager, + llmManager: llmManager, + } +} + +func (f *AgentFactory) Create(opts ...AgentOption) (Agent, error) { + fileLogger, cleanup, err := logging.NewFileLoggerDefault() + if err != nil { + return nil, err + } + defer cleanup() + + defaultModelContainer, err := f.llmManager.GetDefaultModel(llm.WithLogger(fileLogger)) + if err != nil { + return nil, err + } + + samplingModelContainer, err := f.llmManager.GetDefaultModel() + if err != nil { + return nil, err + } + + // Create sampling handler for MCP + samplingHandler := mcptools.NewMcpSamplingHandler( + samplingModelContainer.Model, + ) + + toolLoaders := []localtools.ToolLoader{ + localtools.NewLocalToolsLoader(), + mcptools.NewMcpToolsLoader(samplingHandler), + } + + // Define block list of excluded tools + excludedTools := map[string]bool{ + "extension_az": true, + "extension_azd": true, + // Add more excluded tools here as needed + } + + allTools := []tools.Tool{} + + for _, toolLoader := range toolLoaders { + categoryTools, err := toolLoader.LoadTools() + if err != nil { + return nil, err + } + + // Filter out excluded tools + for _, tool := range categoryTools { + if !excludedTools[tool.Name()] { + allTools = append(allTools, tool) + } + } + } + + protectedTools := f.consentManager.WrapTools(allTools) + + allOptions := []AgentOption{} + allOptions = append(allOptions, opts...) + allOptions = append(allOptions, WithTools(protectedTools...)) + + azdAgent, err := NewConversationalAzdAiAgent(defaultModelContainer.Model, allOptions...) + if err != nil { + return nil, err + } + + return azdAgent, nil +} diff --git a/cli/azd/internal/agent/consent/checker.go b/cli/azd/internal/agent/consent/checker.go new file mode 100644 index 00000000000..4dfccf5f3af --- /dev/null +++ b/cli/azd/internal/agent/consent/checker.go @@ -0,0 +1,187 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package consent + +import ( + "context" + "fmt" + + "github.com/azure/azure-dev/cli/azd/pkg/output" + "github.com/azure/azure-dev/cli/azd/pkg/ux" + "github.com/mark3labs/mcp-go/mcp" +) + +// ConsentChecker provides shared consent checking logic for different tool types +type ConsentChecker struct { + consentMgr ConsentManager + serverName string +} + +// NewConsentChecker creates a new shared consent checker +func NewConsentChecker( + consentMgr ConsentManager, + serverName string, +) *ConsentChecker { + return &ConsentChecker{ + consentMgr: consentMgr, + serverName: serverName, + } +} + +// CheckToolConsent checks if a tool execution should be allowed +func (cc *ConsentChecker) CheckToolConsent(ctx context.Context, toolName, toolDesc string) (*ConsentDecision, error) { + return cc.CheckToolConsentWithAnnotations(ctx, toolName, toolDesc, nil) +} + +// CheckToolConsentWithAnnotations checks tool consent with optional MCP annotations +func (cc *ConsentChecker) CheckToolConsentWithAnnotations(ctx context.Context, toolName, toolDesc string, annotations *mcp.ToolAnnotation) (*ConsentDecision, error) { + toolID := fmt.Sprintf("%s/%s", cc.serverName, toolName) + + // Create consent request + consentRequest := ConsentRequest{ + ToolID: toolID, + ServerName: cc.serverName, + SessionID: "", // Not needed since each manager represents one session + Annotations: annotations, + } + + return cc.consentMgr.CheckConsent(ctx, consentRequest) +} + +// PromptAndGrantConsent shows consent prompt and grants permission based on user choice +func (cc *ConsentChecker) PromptAndGrantConsent(ctx context.Context, toolName, toolDesc string) error { + toolID := fmt.Sprintf("%s/%s", cc.serverName, toolName) + + choice, err := cc.promptForConsent(ctx, toolName, toolDesc) + if err != nil { + return fmt.Errorf("consent prompt failed: %w", err) + } + + if choice == "deny" { + return fmt.Errorf("tool execution denied by user") + } + + // Grant consent based on user choice + return cc.grantConsentFromChoice(ctx, toolID, choice) +} + +// promptForConsent shows an interactive consent prompt and returns the user's choice +func (cc *ConsentChecker) promptForConsent(ctx context.Context, toolName, toolDesc string) (string, error) { + message := fmt.Sprintf( + "Tool %s from server %s requires consent.\n\nHow would you like to proceed?", + output.WithHighLightFormat(toolName), + output.WithHighLightFormat(cc.serverName), + ) + + helpMessage := toolDesc + + choices := []*ux.SelectChoice{ + { + Value: "deny", + Label: "Deny - Block this tool execution", + }, + { + Value: "once", + Label: "Allow once - Execute this time only", + }, + { + Value: "session", + Label: "Allow for session - Allow until restart", + }, + { + Value: "project", + Label: "Allow for project - Remember for this project", + }, + { + Value: "always", + Label: "Allow always - Remember globally", + }, + } + + // Add server trust option if not already trusted + if !cc.isServerAlreadyTrusted(ctx) { + choices = append(choices, &ux.SelectChoice{ + Value: "server", + Label: fmt.Sprintf("Trust server '%s' - Allow all tools from this server", cc.serverName), + }) + } + + selector := ux.NewSelect(&ux.SelectOptions{ + Message: message, + HelpMessage: helpMessage, + Choices: choices, + EnableFiltering: ux.Ptr(false), + }) + + choiceIndex, err := selector.Ask(ctx) + if err != nil { + return "", err + } + + if choiceIndex == nil || *choiceIndex < 0 || *choiceIndex >= len(choices) { + return "", fmt.Errorf("invalid choice selected") + } + + return choices[*choiceIndex].Value, 
nil +} + +// isServerAlreadyTrusted checks if the server is already trusted +func (cc *ConsentChecker) isServerAlreadyTrusted(ctx context.Context) bool { + request := ConsentRequest{ + ServerName: cc.serverName, + SessionID: "", // Not needed since each manager represents one session + } + + // Create a mock consent request to check if server is trusted + decision, err := cc.consentMgr.CheckConsent(ctx, request) + if err != nil { + return false + } + + return decision.Allowed && decision.Reason == "trusted server" +} + +// grantConsentFromChoice processes the user's consent choice and saves the appropriate rule +func (cc *ConsentChecker) grantConsentFromChoice(ctx context.Context, toolID string, choice string) error { + var rule ConsentRule + var scope ConsentScope + + switch choice { + case "once": + rule = ConsentRule{ + ToolID: toolID, + Permission: ConsentOnce, + } + scope = ScopeSession + case "session": + rule = ConsentRule{ + ToolID: toolID, + Permission: ConsentSession, + } + scope = ScopeSession + case "project": + rule = ConsentRule{ + ToolID: toolID, + Permission: ConsentProject, + } + scope = ScopeProject + case "always": + rule = ConsentRule{ + ToolID: toolID, + Permission: ConsentAlways, + } + scope = ScopeGlobal + case "server": + // Grant trust to entire server + rule = ConsentRule{ + ToolID: fmt.Sprintf("%s/*", cc.serverName), + Permission: ConsentServerAlways, + } + scope = ScopeGlobal + default: + return fmt.Errorf("unknown consent choice: %s", choice) + } + + return cc.consentMgr.GrantConsent(ctx, rule, scope) +} diff --git a/cli/azd/internal/agent/consent/consent_wrapper_tool.go b/cli/azd/internal/agent/consent/consent_wrapper_tool.go new file mode 100644 index 00000000000..de0ab9ba5ee --- /dev/null +++ b/cli/azd/internal/agent/consent/consent_wrapper_tool.go @@ -0,0 +1,94 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package consent + +import ( + "context" + "fmt" + + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + "github.com/azure/azure-dev/cli/azd/pkg/input" + "github.com/mark3labs/mcp-go/mcp" + "github.com/tmc/langchaingo/tools" +) + +// Ensure ConsentWrapperTool implements common.Tool +var _ tools.Tool = (*ConsentWrapperTool)(nil) + +// ConsentWrapperTool wraps a langchaingo tool with consent protection +type ConsentWrapperTool struct { + console input.Console + tool tools.Tool + consentChecker *ConsentChecker + annotations *mcp.ToolAnnotation +} + +// Name returns the name of the tool +func (c *ConsentWrapperTool) Name() string { + return c.tool.Name() +} + +// Description returns the description of the tool +func (c *ConsentWrapperTool) Description() string { + return c.tool.Description() +} + +// Call executes the tool with consent protection +func (c *ConsentWrapperTool) Call(ctx context.Context, input string) (string, error) { + // Check consent using enhanced checker with annotations + decision, err := c.consentChecker.CheckToolConsentWithAnnotations(ctx, c.Name(), c.Description(), c.annotations) + if err != nil { + return "", fmt.Errorf("consent check failed: %w", err) + } + + if !decision.Allowed { + if decision.RequiresPrompt { + if err := c.console.DoInteraction(func() error { + // Show interactive consent prompt using shared checker + promptErr := c.consentChecker.PromptAndGrantConsent(ctx, c.Name(), c.Description()) + c.console.Message(ctx, "") + + return promptErr + }); err != nil { + return "", err + } + } else { + return "", fmt.Errorf("tool execution denied: %s", decision.Reason) + } + } + + // Consent granted, execute the original tool + return c.tool.Call(ctx, input) +} + +// newConsentWrapperTool wraps a langchaingo tool with consent protection +func newConsentWrapperTool( + tool tools.Tool, + console input.Console, + consentManager ConsentManager, +) tools.Tool { + var server string + var annotations *mcp.ToolAnnotation + + if annotatedTool, ok := tool.(common.AnnotatedTool); ok { + toolAnnotations := annotatedTool.Annotations() + annotations = &toolAnnotations + server = annotatedTool.Server() + } + + if commonTool, ok := tool.(common.Tool); ok { + server = commonTool.Server() + } + + if server == "" { + server = "unknown" + } + + return &ConsentWrapperTool{ + tool: tool, + console: console, + consentChecker: NewConsentChecker(consentManager, server), + annotations: annotations, + } +} diff --git a/cli/azd/internal/agent/consent/manager.go b/cli/azd/internal/agent/consent/manager.go new file mode 100644 index 00000000000..28a0cd1d19d --- /dev/null +++ b/cli/azd/internal/agent/consent/manager.go @@ -0,0 +1,437 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package consent + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/azure/azure-dev/cli/azd/pkg/config" + "github.com/azure/azure-dev/cli/azd/pkg/input" + "github.com/tmc/langchaingo/tools" +) + +const ( + ConfigKeyMCPConsent = "mcp.consent" +) + +// consentManager implements the ConsentManager interface +type consentManager struct { + console input.Console + userConfigManager config.UserConfigManager + sessionRules []ConsentRule // Rules for this session + sessionMutex sync.RWMutex +} + +// NewConsentManager creates a new consent manager +func NewConsentManager( + console input.Console, + userConfigManager config.UserConfigManager, +) ConsentManager { + return &consentManager{ + console: console, + userConfigManager: userConfigManager, + sessionRules: make([]ConsentRule, 0), + } +} + +// CheckConsent checks if a tool execution should be allowed +func (cm *consentManager) CheckConsent(ctx context.Context, request ConsentRequest) (*ConsentDecision, error) { + // Check for explicit deny rules first + if decision := cm.checkExplicitRules(ctx, request); decision != nil && !decision.Allowed { + return decision, nil + } + + // Check if server is trusted + if cm.isServerTrusted(ctx, request.ServerName) { + return &ConsentDecision{Allowed: true, Reason: "trusted server"}, nil + } + + // Check if read-only tools are globally allowed + if request.Annotations != nil && request.Annotations.ReadOnlyHint != nil && *request.Annotations.ReadOnlyHint { + if cm.isReadOnlyToolsAllowed(ctx) { + return &ConsentDecision{Allowed: true, Reason: "read-only tool allowed"}, nil + } + } + + // Check existing consent rules + if decision := cm.checkExplicitRules(ctx, request); decision != nil && decision.Allowed { + return decision, nil + } + + // No consent found - require prompt + return &ConsentDecision{ + Allowed: false, + RequiresPrompt: true, + Reason: "no consent granted", + }, nil +} + +// GrantConsent grants consent for a tool +func (cm *consentManager) GrantConsent(ctx context.Context, rule ConsentRule, scope ConsentScope) error { + rule.GrantedAt = time.Now() + + switch scope { + case ScopeSession: + return cm.addSessionRule(rule) + case ScopeProject: + return cm.addProjectRule(ctx, rule) + case ScopeGlobal: + return cm.addGlobalRule(ctx, rule) + default: + return fmt.Errorf("unknown consent scope: %s", scope) + } +} + +// ListConsents lists consent rules for a given scope +func (cm *consentManager) ListConsents(ctx context.Context, scope ConsentScope) ([]ConsentRule, error) { + switch scope { + case ScopeSession: + return cm.getSessionRules(), nil + case ScopeProject: + return cm.getProjectRules(ctx, "") + case ScopeGlobal: + return cm.getGlobalRules(ctx) + default: + return nil, fmt.Errorf("unknown consent scope: %s", scope) + } +} + +// ClearConsents clears all consent rules for a given scope +func (cm *consentManager) ClearConsents(ctx context.Context, scope ConsentScope) error { + switch scope { + case ScopeSession: + return cm.clearSessionRules() + case ScopeProject: + return fmt.Errorf("project-level consent clearing not yet implemented") + case ScopeGlobal: + return cm.clearGlobalRules(ctx) + default: + return fmt.Errorf("unknown consent scope: %s", scope) + } +} + +// ClearConsentByToolID clears consent for a specific tool +func (cm *consentManager) ClearConsentByToolID(ctx context.Context, toolID string, scope ConsentScope) error { + switch scope { + case ScopeSession: + return cm.removeSessionRule(toolID) + case ScopeProject: + return fmt.Errorf("project-level consent removal not 
yet implemented") + case ScopeGlobal: + return cm.removeGlobalRule(ctx, toolID) + default: + return fmt.Errorf("unknown consent scope: %s", scope) + } +} + +// WrapTool wraps a single langchaingo tool with consent protection +func (cm *consentManager) WrapTool(tool tools.Tool) tools.Tool { + return newConsentWrapperTool(tool, cm.console, cm) +} + +// WrapTools wraps multiple langchaingo tools with consent protection +func (cm *consentManager) WrapTools(langchainTools []tools.Tool) []tools.Tool { + wrappedTools := make([]tools.Tool, len(langchainTools)) + + for i, tool := range langchainTools { + wrappedTools[i] = cm.WrapTool(tool) + } + + return wrappedTools +} + +// checkExplicitRules checks for explicit consent rules across all scopes +func (cm *consentManager) checkExplicitRules(ctx context.Context, request ConsentRequest) *ConsentDecision { + // Check session rules first + cm.sessionMutex.RLock() + sessionRules := cm.sessionRules + cm.sessionMutex.RUnlock() + + if len(sessionRules) > 0 { + if decision := cm.findMatchingRule(sessionRules, request); decision != nil { + return decision + } + } + + // Check project rules + if request.ProjectPath != "" { + if projectRules, err := cm.getProjectRules(ctx, request.ProjectPath); err == nil { + if decision := cm.findMatchingRule(projectRules, request); decision != nil { + return decision + } + } + } + + // Check global rules + if globalRules, err := cm.getGlobalRules(ctx); err == nil { + if decision := cm.findMatchingRule(globalRules, request); decision != nil { + return decision + } + } + + return nil +} + +// findMatchingRule finds a matching rule for the request +func (cm *consentManager) findMatchingRule(rules []ConsentRule, request ConsentRequest) *ConsentDecision { + serverName := request.ServerName + + for i, rule := range rules { + // Check for exact tool match + if rule.ToolID == request.ToolID { + decision := cm.evaluateRule(rule) + + // If this is a one-time consent rule, remove it after evaluation + if decision.Allowed && rule.Permission == ConsentOnce { + // Clean up the one-time rule from session rules + go func(ruleIndex int) { + cm.removeSessionRuleByIndex(ruleIndex) + }(i) + } + + return decision + } + + // Check for server-wide consent + if rule.Permission == ConsentServerAlways && rule.ToolID == fmt.Sprintf("%s/*", serverName) { + return &ConsentDecision{Allowed: true, Reason: "server trusted"} + } + } + + return nil +} + +// evaluateRule evaluates a consent rule and returns a decision +func (cm *consentManager) evaluateRule(rule ConsentRule) *ConsentDecision { + switch rule.Permission { + case ConsentDeny: + return &ConsentDecision{Allowed: false, Reason: "explicitly denied"} + case ConsentOnce: + // For one-time consent, we allow it but mark it for removal + // The caller should handle removing this rule after use + return &ConsentDecision{Allowed: true, Reason: "one-time consent"} + case ConsentSession, ConsentProject, ConsentAlways, ConsentServerAlways: + return &ConsentDecision{Allowed: true, Reason: string(rule.Permission)} + default: + return &ConsentDecision{Allowed: false, RequiresPrompt: true, Reason: "unknown permission level"} + } +} + +// isServerTrusted checks if a server is in the trusted servers list +func (cm *consentManager) isServerTrusted(ctx context.Context, serverName string) bool { + config, err := cm.getGlobalConsentConfig(ctx) + if err != nil { + return false + } + + for _, trustedServer := range config.TrustedServers { + if trustedServer == serverName { + return true + } + } + + return false +} + +// 
isReadOnlyToolsAllowed checks if read-only tools are globally allowed +func (cm *consentManager) isReadOnlyToolsAllowed(ctx context.Context) bool { + config, err := cm.getGlobalConsentConfig(ctx) + if err != nil { + return false + } + + return config.AllowReadOnlyTools +} + +// addSessionRule adds a rule to the session rules +func (cm *consentManager) addSessionRule(rule ConsentRule) error { + cm.sessionMutex.Lock() + defer cm.sessionMutex.Unlock() + + cm.sessionRules = append(cm.sessionRules, rule) + return nil +} + +// addProjectRule adds a rule to the project configuration +func (cm *consentManager) addProjectRule(ctx context.Context, rule ConsentRule) error { + // This would need to be implemented with the environment manager + // For now, return an error to indicate it's not implemented + return fmt.Errorf("project-level consent not yet implemented") +} + +// addGlobalRule adds a rule to the global configuration +func (cm *consentManager) addGlobalRule(ctx context.Context, rule ConsentRule) error { + userConfig, err := cm.userConfigManager.Load() + if err != nil { + return fmt.Errorf("failed to load user config: %w", err) + } + + var consentConfig ConsentConfig + if exists, err := userConfig.GetSection(ConfigKeyMCPConsent, &consentConfig); err != nil { + return fmt.Errorf("failed to get consent config: %w", err) + } else if !exists { + consentConfig = ConsentConfig{} + } + + // Add or update the rule + consentConfig.Rules = cm.addOrUpdateRule(consentConfig.Rules, rule) + + if err := userConfig.Set(ConfigKeyMCPConsent, consentConfig); err != nil { + return fmt.Errorf("failed to set consent config: %w", err) + } + + return cm.userConfigManager.Save(userConfig) +} + +// addOrUpdateRule adds a new rule or updates an existing one +func (cm *consentManager) addOrUpdateRule(rules []ConsentRule, newRule ConsentRule) []ConsentRule { + // Check if rule already exists and update it + for i, rule := range rules { + if rule.ToolID == newRule.ToolID { + rules[i] = newRule + return rules + } + } + + // Rule doesn't exist, add it + return append(rules, newRule) +} + +// getSessionRules returns session rules for this session +func (cm *consentManager) getSessionRules() []ConsentRule { + cm.sessionMutex.RLock() + defer cm.sessionMutex.RUnlock() + + // Return a copy to avoid race conditions + result := make([]ConsentRule, len(cm.sessionRules)) + copy(result, cm.sessionRules) + return result +} + +// getProjectRules returns project-level consent rules +func (cm *consentManager) getProjectRules(ctx context.Context, projectPath string) ([]ConsentRule, error) { + // TODO: Implement project-level consent rules + return []ConsentRule{}, nil +} + +// getGlobalRules returns global consent rules +func (cm *consentManager) getGlobalRules(ctx context.Context) ([]ConsentRule, error) { + config, err := cm.getGlobalConsentConfig(ctx) + if err != nil { + return nil, err + } + + return config.Rules, nil +} + +// getGlobalConsentConfig loads the global consent configuration +func (cm *consentManager) getGlobalConsentConfig(ctx context.Context) (*ConsentConfig, error) { + userConfig, err := cm.userConfigManager.Load() + if err != nil { + return nil, fmt.Errorf("failed to load user config: %w", err) + } + + var consentConfig ConsentConfig + if exists, err := userConfig.GetSection(ConfigKeyMCPConsent, &consentConfig); err != nil { + return nil, fmt.Errorf("failed to get consent config: %w", err) + } else if !exists { + consentConfig = ConsentConfig{} + } + + return &consentConfig, nil +} + +// clearSessionRules clears all 
rules for this session +func (cm *consentManager) clearSessionRules() error { + cm.sessionMutex.Lock() + defer cm.sessionMutex.Unlock() + + cm.sessionRules = make([]ConsentRule, 0) + return nil +} + +// clearGlobalRules clears all global consent rules +func (cm *consentManager) clearGlobalRules(ctx context.Context) error { + userConfig, err := cm.userConfigManager.Load() + if err != nil { + return fmt.Errorf("failed to load user config: %w", err) + } + + consentConfig := ConsentConfig{ + Rules: []ConsentRule{}, + AllowReadOnlyTools: false, + TrustedServers: []string{}, + } + + if err := userConfig.Set(ConfigKeyMCPConsent, consentConfig); err != nil { + return fmt.Errorf("failed to clear consent config: %w", err) + } + + return cm.userConfigManager.Save(userConfig) +} + +// removeSessionRule removes a specific rule from session rules +func (cm *consentManager) removeSessionRule(toolID string) error { + cm.sessionMutex.Lock() + defer cm.sessionMutex.Unlock() + + // Filter out the rule to remove + filtered := make([]ConsentRule, 0, len(cm.sessionRules)) + for _, rule := range cm.sessionRules { + if rule.ToolID != toolID { + filtered = append(filtered, rule) + } + } + + cm.sessionRules = filtered + return nil +} + +// removeSessionRuleByIndex removes a rule by its index (for cleanup after one-time use) +func (cm *consentManager) removeSessionRuleByIndex(index int) error { + cm.sessionMutex.Lock() + defer cm.sessionMutex.Unlock() + + if index < 0 || index >= len(cm.sessionRules) { + return nil // Index out of bounds, nothing to remove + } + + // Remove the rule at the specified index + cm.sessionRules = append(cm.sessionRules[:index], cm.sessionRules[index+1:]...) + return nil +} + +// removeGlobalRule removes a specific rule from global configuration +func (cm *consentManager) removeGlobalRule(ctx context.Context, toolID string) error { + userConfig, err := cm.userConfigManager.Load() + if err != nil { + return fmt.Errorf("failed to load user config: %w", err) + } + + var consentConfig ConsentConfig + if exists, err := userConfig.GetSection(ConfigKeyMCPConsent, &consentConfig); err != nil { + return fmt.Errorf("failed to get consent config: %w", err) + } else if !exists { + return nil // Nothing to remove + } + + // Filter out the rule to remove + filtered := make([]ConsentRule, 0, len(consentConfig.Rules)) + for _, rule := range consentConfig.Rules { + if rule.ToolID != toolID { + filtered = append(filtered, rule) + } + } + + consentConfig.Rules = filtered + + if err := userConfig.Set(ConfigKeyMCPConsent, consentConfig); err != nil { + return fmt.Errorf("failed to update consent config: %w", err) + } + + return cm.userConfigManager.Save(userConfig) +} diff --git a/cli/azd/internal/agent/consent/types.go b/cli/azd/internal/agent/consent/types.go new file mode 100644 index 00000000000..b745398d61e --- /dev/null +++ b/cli/azd/internal/agent/consent/types.go @@ -0,0 +1,78 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package consent + +import ( + "context" + "time" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/tmc/langchaingo/tools" +) + +// ConsentLevel represents the level of consent granted for a tool +type ConsentLevel string + +// ConsentScope represents where consent rules are stored +type ConsentScope string + +const ( + ConsentDeny ConsentLevel = "deny" + ConsentPrompt ConsentLevel = "prompt" + ConsentOnce ConsentLevel = "once" + ConsentSession ConsentLevel = "session" + ConsentProject ConsentLevel = "project" + ConsentAlways ConsentLevel = "always" + ConsentServerAlways ConsentLevel = "server_always" // All tools from server +) + +const ( + ScopeGlobal ConsentScope = "global" + ScopeProject ConsentScope = "project" + ScopeSession ConsentScope = "session" +) + +// ConsentRule represents a single consent rule for a tool +type ConsentRule struct { + ToolID string `json:"tool_id"` + Permission ConsentLevel `json:"permission"` + GrantedAt time.Time `json:"granted_at"` +} + +// ConsentConfig represents the MCP consent configuration +type ConsentConfig struct { + Rules []ConsentRule `json:"rules,omitempty"` + AllowReadOnlyTools bool `json:"allow_readonly_tools,omitempty"` + TrustedServers []string `json:"trusted_servers,omitempty"` +} + +// ConsentRequest represents a request to check consent for a tool +type ConsentRequest struct { + ToolID string + ServerName string + Parameters map[string]interface{} + SessionID string + ProjectPath string + Annotations *mcp.ToolAnnotation +} + +// ConsentDecision represents the result of a consent check +type ConsentDecision struct { + Allowed bool + Reason string + RequiresPrompt bool +} + +// ConsentManager manages consent rules and decisions +type ConsentManager interface { + CheckConsent(ctx context.Context, request ConsentRequest) (*ConsentDecision, error) + GrantConsent(ctx context.Context, rule ConsentRule, scope ConsentScope) error + ListConsents(ctx context.Context, scope ConsentScope) ([]ConsentRule, error) + ClearConsents(ctx context.Context, scope ConsentScope) error + ClearConsentByToolID(ctx context.Context, toolID string, scope ConsentScope) error + + // Tool wrapping methods + WrapTool(tool tools.Tool) tools.Tool + WrapTools(tools []tools.Tool) []tools.Tool +} diff --git a/cli/azd/internal/agent/conversational_agent.go b/cli/azd/internal/agent/conversational_agent.go index 5fe070b2b02..103b3663308 100644 --- a/cli/azd/internal/agent/conversational_agent.go +++ b/cli/azd/internal/agent/conversational_agent.go @@ -18,9 +18,6 @@ import ( "github.com/tmc/langchaingo/memory" "github.com/tmc/langchaingo/prompts" "github.com/tmc/langchaingo/tools" - - localtools "github.com/azure/azure-dev/cli/azd/internal/agent/tools" - mcptools "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" ) //go:embed prompts/conversational.txt @@ -29,7 +26,7 @@ var conversational_prompt_template string // ConversationalAzdAiAgent represents an enhanced AZD Copilot agent with conversation memory, // tool filtering, and interactive capabilities type ConversationalAzdAiAgent struct { - *Agent + *agentBase } // NewConversationalAzdAiAgent creates a new conversational agent with memory, tool loading, @@ -37,15 +34,14 @@ type ConversationalAzdAiAgent struct { // for interactive conversations with a high iteration limit for complex tasks. 
func NewConversationalAzdAiAgent(llm llms.Model, opts ...AgentOption) (*ConversationalAzdAiAgent, error) { azdAgent := &ConversationalAzdAiAgent{ - Agent: &Agent{ - defaultModel: llm, - samplingModel: llm, - tools: []tools.Tool{}, + agentBase: &agentBase{ + defaultModel: llm, + tools: []tools.Tool{}, }, } for _, opt := range opts { - opt(azdAgent.Agent) + opt(azdAgent.agentBase) } smartMemory := memory.NewConversationBuffer( @@ -55,38 +51,6 @@ func NewConversationalAzdAiAgent(llm llms.Model, opts ...AgentOption) (*Conversa memory.WithAIPrefix("AI"), ) - // Create sampling handler for MCP - samplingHandler := mcptools.NewMcpSamplingHandler( - azdAgent.samplingModel, - mcptools.WithDebug(azdAgent.debug), - ) - - toolLoaders := []localtools.ToolLoader{ - localtools.NewLocalToolsLoader(), - mcptools.NewMcpToolsLoader(samplingHandler), - } - - // Define block list of excluded tools - excludedTools := map[string]bool{ - "extension_az": true, - "extension_azd": true, - // Add more excluded tools here as needed - } - - for _, toolLoader := range toolLoaders { - categoryTools, err := toolLoader.LoadTools() - if err != nil { - return nil, err - } - - // Filter out excluded tools - for _, tool := range categoryTools { - if !excludedTools[tool.Name()] { - azdAgent.tools = append(azdAgent.tools, tool) - } - } - } - promptTemplate := prompts.PromptTemplate{ Template: conversational_prompt_template, TemplateFormat: prompts.TemplateFormatGoTemplate, diff --git a/cli/azd/internal/agent/one_shot_agent.go b/cli/azd/internal/agent/one_shot_agent.go index 7137e6efccd..42587b4f440 100644 --- a/cli/azd/internal/agent/one_shot_agent.go +++ b/cli/azd/internal/agent/one_shot_agent.go @@ -13,15 +13,12 @@ import ( "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/prompts" "github.com/tmc/langchaingo/tools" - - localtools "github.com/azure/azure-dev/cli/azd/internal/agent/tools" - mcptools "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" ) // OneShotAzdAiAgent represents an AZD Copilot agent designed for single-request processing // without conversation memory, optimized for one-time queries and responses type OneShotAzdAiAgent struct { - *Agent + *agentBase } //go:embed prompts/one_shot.txt @@ -32,47 +29,14 @@ var one_shot_prompt_template string // the agent for stateless operation without conversation memory. 
func NewOneShotAzdAiAgent(llm llms.Model, opts ...AgentOption) (*OneShotAzdAiAgent, error) { azdAgent := &OneShotAzdAiAgent{ - Agent: &Agent{ - defaultModel: llm, - samplingModel: llm, - tools: []tools.Tool{}, + agentBase: &agentBase{ + defaultModel: llm, + tools: []tools.Tool{}, }, } for _, opt := range opts { - opt(azdAgent.Agent) - } - - // Create sampling handler for MCP - samplingHandler := mcptools.NewMcpSamplingHandler( - azdAgent.samplingModel, - mcptools.WithDebug(azdAgent.debug), - ) - - toolLoaders := []localtools.ToolLoader{ - localtools.NewLocalToolsLoader(), - mcptools.NewMcpToolsLoader(samplingHandler), - } - - // Define block list of excluded tools - excludedTools := map[string]bool{ - "extension_az": true, - "extension_azd": true, - // Add more excluded tools here as needed - } - - for _, toolLoader := range toolLoaders { - categoryTools, err := toolLoader.LoadTools() - if err != nil { - return nil, err - } - - // Filter out excluded tools - for _, tool := range categoryTools { - if !excludedTools[tool.Name()] { - azdAgent.tools = append(azdAgent.tools, tool) - } - } + opt(azdAgent.agentBase) } promptTemplate := prompts.PromptTemplate{ diff --git a/cli/azd/internal/agent/tools/common/types.go b/cli/azd/internal/agent/tools/common/types.go index b8740f01b06..a5d1b565826 100644 --- a/cli/azd/internal/agent/tools/common/types.go +++ b/cli/azd/internal/agent/tools/common/types.go @@ -3,8 +3,35 @@ package common +import ( + "context" + + "github.com/mark3labs/mcp-go/mcp" +) + // ErrorResponse represents a JSON error response structure that can be reused across all tools type ErrorResponse struct { Error bool `json:"error"` Message string `json:"message"` } + +type Tool interface { + Name() string + Server() string + Description() string + Call(ctx context.Context, input string) (string, error) +} + +// AnnotatedTool extends the Tool interface with MCP annotations +type AnnotatedTool interface { + Tool + // Annotations returns MCP tool behavior annotations + Annotations() mcp.ToolAnnotation +} + +type LocalTool struct { +} + +func (t *LocalTool) Server() string { + return "local" +} diff --git a/cli/azd/internal/agent/tools/dev/command_executor.go b/cli/azd/internal/agent/tools/dev/command_executor.go index 1ecc9041318..fb73f63b246 100644 --- a/cli/azd/internal/agent/tools/dev/command_executor.go +++ b/cli/azd/internal/agent/tools/dev/command_executor.go @@ -14,12 +14,11 @@ import ( "strings" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" - "github.com/tmc/langchaingo/callbacks" ) // CommandExecutorTool implements the Tool interface for executing commands and scripts type CommandExecutorTool struct { - CallbacksHandler callbacks.Handler + common.LocalTool } func (t CommandExecutorTool) Name() string { @@ -77,19 +76,12 @@ type CommandResponse struct { } func (t CommandExecutorTool) Call(ctx context.Context, input string) (string, error) { - // Invoke callback for tool start - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("execute_command: %s", input)) - } - if input == "" { errorResponse := common.ErrorResponse{ Error: true, Message: "command execution request is required", } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("command execution request is required")) - } + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") return string(jsonData), nil } @@ -101,9 +93,7 @@ func (t CommandExecutorTool) Call(ctx context.Context, input string) (string, er Error: true, Message: 
fmt.Sprintf("failed to parse command request: %s", err.Error()), } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to parse command request: %w", err)) - } + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") return string(jsonData), nil } @@ -114,9 +104,7 @@ func (t CommandExecutorTool) Call(ctx context.Context, input string) (string, er Error: true, Message: "command is required", } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("command is required")) - } + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") return string(jsonData), nil } @@ -133,9 +121,7 @@ func (t CommandExecutorTool) Call(ctx context.Context, input string) (string, er Error: true, Message: fmt.Sprintf("execution failed: %s", err.Error()), } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("execution failed: %w", err)) - } + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") return string(jsonData), nil } @@ -150,18 +136,11 @@ func (t CommandExecutorTool) Call(ctx context.Context, input string) (string, er Error: true, Message: fmt.Sprintf("failed to marshal JSON response: %s", err.Error()), } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, fmt.Errorf("failed to marshal JSON response: %w", err)) - } + errorJsonData, _ := json.MarshalIndent(errorResponse, "", " ") return string(errorJsonData), nil } - // Invoke callback for tool end - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, string(jsonData)) - } - return string(jsonData), nil } diff --git a/cli/azd/internal/agent/tools/dev/loader.go b/cli/azd/internal/agent/tools/dev/loader.go index 1028825fb22..bbeeaa41ab3 100644 --- a/cli/azd/internal/agent/tools/dev/loader.go +++ b/cli/azd/internal/agent/tools/dev/loader.go @@ -3,9 +3,7 @@ package dev -import ( - "github.com/tmc/langchaingo/tools" -) +import "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" // DevToolLoader loads development-related tools type DevToolsLoader struct{} @@ -14,8 +12,8 @@ func NewDevToolsLoader() *DevToolsLoader { return &DevToolsLoader{} } -func (l *DevToolsLoader) LoadTools() ([]tools.Tool, error) { - return []tools.Tool{ +func (l *DevToolsLoader) LoadTools() ([]common.Tool, error) { + return []common.Tool{ &CommandExecutorTool{}, }, nil } diff --git a/cli/azd/internal/agent/tools/io/change_directory.go b/cli/azd/internal/agent/tools/io/change_directory.go index b942e09b458..7ee8d358adf 100644 --- a/cli/azd/internal/agent/tools/io/change_directory.go +++ b/cli/azd/internal/agent/tools/io/change_directory.go @@ -15,7 +15,9 @@ import ( ) // ChangeDirectoryTool implements the Tool interface for changing the current working directory -type ChangeDirectoryTool struct{} +type ChangeDirectoryTool struct { + common.LocalTool +} func (t ChangeDirectoryTool) Name() string { return "change_directory" diff --git a/cli/azd/internal/agent/tools/io/copy_file.go b/cli/azd/internal/agent/tools/io/copy_file.go index 0272421454a..47478bea7f2 100644 --- a/cli/azd/internal/agent/tools/io/copy_file.go +++ b/cli/azd/internal/agent/tools/io/copy_file.go @@ -15,7 +15,9 @@ import ( ) // CopyFileTool implements the Tool interface for copying files -type CopyFileTool struct{} +type CopyFileTool struct { + common.LocalTool +} func (t CopyFileTool) Name() string { return "copy_file" diff --git a/cli/azd/internal/agent/tools/io/create_directory.go b/cli/azd/internal/agent/tools/io/create_directory.go index 
57f2e83710e..81df0811bb8 100644 --- a/cli/azd/internal/agent/tools/io/create_directory.go +++ b/cli/azd/internal/agent/tools/io/create_directory.go @@ -14,7 +14,9 @@ import ( ) // CreateDirectoryTool implements the Tool interface for creating directories -type CreateDirectoryTool struct{} +type CreateDirectoryTool struct { + common.LocalTool +} func (t CreateDirectoryTool) Name() string { return "create_directory" diff --git a/cli/azd/internal/agent/tools/io/current_directory.go b/cli/azd/internal/agent/tools/io/current_directory.go index 0ba2d925c3e..34a9bdf31f0 100644 --- a/cli/azd/internal/agent/tools/io/current_directory.go +++ b/cli/azd/internal/agent/tools/io/current_directory.go @@ -13,7 +13,9 @@ import ( ) // CurrentDirectoryTool implements the Tool interface for getting current directory -type CurrentDirectoryTool struct{} +type CurrentDirectoryTool struct { + common.LocalTool +} func (t CurrentDirectoryTool) Name() string { return "cwd" diff --git a/cli/azd/internal/agent/tools/io/delete_directory.go b/cli/azd/internal/agent/tools/io/delete_directory.go index 27ae1413ce5..0ee3bb9be80 100644 --- a/cli/azd/internal/agent/tools/io/delete_directory.go +++ b/cli/azd/internal/agent/tools/io/delete_directory.go @@ -14,7 +14,9 @@ import ( ) // DeleteDirectoryTool implements the Tool interface for deleting directories -type DeleteDirectoryTool struct{} +type DeleteDirectoryTool struct { + common.LocalTool +} func (t DeleteDirectoryTool) Name() string { return "delete_directory" diff --git a/cli/azd/internal/agent/tools/io/delete_file.go b/cli/azd/internal/agent/tools/io/delete_file.go index 828c0180e4a..86251bd9bce 100644 --- a/cli/azd/internal/agent/tools/io/delete_file.go +++ b/cli/azd/internal/agent/tools/io/delete_file.go @@ -14,7 +14,9 @@ import ( ) // DeleteFileTool implements the Tool interface for deleting files -type DeleteFileTool struct{} +type DeleteFileTool struct { + common.LocalTool +} func (t DeleteFileTool) Name() string { return "delete_file" diff --git a/cli/azd/internal/agent/tools/io/directory_list.go b/cli/azd/internal/agent/tools/io/directory_list.go index 7833026b3ad..0d5fc60c538 100644 --- a/cli/azd/internal/agent/tools/io/directory_list.go +++ b/cli/azd/internal/agent/tools/io/directory_list.go @@ -15,7 +15,9 @@ import ( ) // DirectoryListTool implements the Tool interface for listing directory contents -type DirectoryListTool struct{} +type DirectoryListTool struct { + common.LocalTool +} func (t DirectoryListTool) Name() string { return "list_directory" diff --git a/cli/azd/internal/agent/tools/io/file_info.go b/cli/azd/internal/agent/tools/io/file_info.go index f05763acf8f..8c4ebb77952 100644 --- a/cli/azd/internal/agent/tools/io/file_info.go +++ b/cli/azd/internal/agent/tools/io/file_info.go @@ -15,7 +15,9 @@ import ( ) // FileInfoTool implements the Tool interface for getting file information -type FileInfoTool struct{} +type FileInfoTool struct { + common.LocalTool +} func (t FileInfoTool) Name() string { return "file_info" diff --git a/cli/azd/internal/agent/tools/io/file_search.go b/cli/azd/internal/agent/tools/io/file_search.go index 700274d1479..6fa5b6e1d0b 100644 --- a/cli/azd/internal/agent/tools/io/file_search.go +++ b/cli/azd/internal/agent/tools/io/file_search.go @@ -14,7 +14,9 @@ import ( ) // FileSearchTool implements a tool for searching files using glob patterns -type FileSearchTool struct{} +type FileSearchTool struct { + common.LocalTool +} // FileSearchRequest represents the JSON payload for file search requests type FileSearchRequest struct 
{ diff --git a/cli/azd/internal/agent/tools/io/loader.go b/cli/azd/internal/agent/tools/io/loader.go index 1880f0e15d5..d5ed2967d1d 100644 --- a/cli/azd/internal/agent/tools/io/loader.go +++ b/cli/azd/internal/agent/tools/io/loader.go @@ -4,7 +4,7 @@ package io import ( - "github.com/tmc/langchaingo/tools" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" ) // IoToolsLoader loads IO-related tools @@ -14,8 +14,8 @@ func NewIoToolsLoader() *IoToolsLoader { return &IoToolsLoader{} } -func (l *IoToolsLoader) LoadTools() ([]tools.Tool, error) { - return []tools.Tool{ +func (l *IoToolsLoader) LoadTools() ([]common.Tool, error) { + return []common.Tool{ &CurrentDirectoryTool{}, &ChangeDirectoryTool{}, &DirectoryListTool{}, diff --git a/cli/azd/internal/agent/tools/io/move_file.go b/cli/azd/internal/agent/tools/io/move_file.go index 9956580c381..7ae9cbf364f 100644 --- a/cli/azd/internal/agent/tools/io/move_file.go +++ b/cli/azd/internal/agent/tools/io/move_file.go @@ -14,7 +14,9 @@ import ( ) // MoveFileTool implements the Tool interface for moving/renaming files -type MoveFileTool struct{} +type MoveFileTool struct { + common.LocalTool +} func (t MoveFileTool) Name() string { return "move_file" diff --git a/cli/azd/internal/agent/tools/io/read_file.go b/cli/azd/internal/agent/tools/io/read_file.go index 9a60c65c912..822a3d3761f 100644 --- a/cli/azd/internal/agent/tools/io/read_file.go +++ b/cli/azd/internal/agent/tools/io/read_file.go @@ -16,7 +16,9 @@ import ( ) // ReadFileTool implements the Tool interface for reading file contents -type ReadFileTool struct{} +type ReadFileTool struct { + common.LocalTool +} // ReadFileRequest represents the JSON payload for file read requests type ReadFileRequest struct { diff --git a/cli/azd/internal/agent/tools/io/write_file.go b/cli/azd/internal/agent/tools/io/write_file.go index e6f4d3f5790..7c7f98460c7 100644 --- a/cli/azd/internal/agent/tools/io/write_file.go +++ b/cli/azd/internal/agent/tools/io/write_file.go @@ -16,7 +16,9 @@ import ( ) // WriteFileTool implements a comprehensive file writing tool that handles all scenarios -type WriteFileTool struct{} +type WriteFileTool struct { + common.LocalTool +} // WriteFileRequest represents the JSON input for the write_file tool type WriteFileRequest struct { diff --git a/cli/azd/internal/agent/tools/loader.go b/cli/azd/internal/agent/tools/loader.go index d45098543a2..5b8f1461f1b 100644 --- a/cli/azd/internal/agent/tools/loader.go +++ b/cli/azd/internal/agent/tools/loader.go @@ -4,15 +4,14 @@ package tools import ( - "github.com/tmc/langchaingo/tools" - + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/dev" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/io" ) // ToolLoader provides an interface for loading tools from different categories type ToolLoader interface { - LoadTools() ([]tools.Tool, error) + LoadTools() ([]common.Tool, error) } // LocalToolsLoader manages loading tools from multiple local tool categories @@ -32,8 +31,8 @@ func NewLocalToolsLoader() *LocalToolsLoader { // LoadTools loads and returns all tools from all registered tool loaders. // Returns an error if any individual loader fails to load its tools. 
-func (l *LocalToolsLoader) LoadTools() ([]tools.Tool, error) { - var allTools []tools.Tool +func (l *LocalToolsLoader) LoadTools() ([]common.Tool, error) { + var allTools []common.Tool for _, loader := range l.loaders { categoryTools, err := loader.LoadTools() diff --git a/cli/azd/internal/agent/tools/mcp/loader.go b/cli/azd/internal/agent/tools/mcp/loader.go index af9e8321470..36428bb7ef9 100644 --- a/cli/azd/internal/agent/tools/mcp/loader.go +++ b/cli/azd/internal/agent/tools/mcp/loader.go @@ -11,10 +11,10 @@ import ( _ "embed" - langchaingo_mcp_adapter "github.com/i2y/langchaingo-mcp-adapter" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" "github.com/mark3labs/mcp-go/client" "github.com/mark3labs/mcp-go/client/transport" - "github.com/tmc/langchaingo/tools" + "github.com/mark3labs/mcp-go/mcp" ) //go:embed mcp.json @@ -56,14 +56,14 @@ func NewMcpToolsLoader(samplingHandler client.SamplingHandler) *McpToolsLoader { // and collects all tools from each successfully connected server. // Returns an error if the configuration cannot be parsed, but continues // processing other servers if individual server connections fail. -func (l *McpToolsLoader) LoadTools() ([]tools.Tool, error) { +func (l *McpToolsLoader) LoadTools() ([]common.Tool, error) { // Deserialize the embedded mcp.json configuration var config McpConfig if err := json.Unmarshal([]byte(_mcpJson), &config); err != nil { return nil, fmt.Errorf("failed to parse mcp.json: %w", err) } - var allTools []tools.Tool + var allTools []common.Tool // Iterate through each server configuration for serverName, serverConfig := range config.Servers { @@ -78,22 +78,19 @@ func (l *McpToolsLoader) LoadTools() ([]tools.Tool, error) { continue } - // Create the adapter - adapter, err := langchaingo_mcp_adapter.New(mcpClient) + // Get tools directly from MCP client + toolsRequest := mcp.ListToolsRequest{} + toolsResult, err := mcpClient.ListTools(ctx, toolsRequest) if err != nil { - log.Printf("Failed to create adapter for server %s: %v", serverName, err) + log.Printf("Failed to list tools from server %s: %v", serverName, err) continue } - // Get all tools from MCP server - mcpTools, err := adapter.Tools() - if err != nil { - log.Printf("Failed to get tools from server %s: %v", serverName, err) - continue + // Convert MCP tools to langchaingo tools using our adapter + for _, mcpTool := range toolsResult.Tools { + toolAdapter := NewMcpToolAdapter(serverName, mcpTool, mcpClient) + allTools = append(allTools, toolAdapter) } - - // Add the tools to our collection - allTools = append(allTools, mcpTools...) } return allTools, nil diff --git a/cli/azd/internal/agent/tools/mcp/tool_adapter.go b/cli/azd/internal/agent/tools/mcp/tool_adapter.go new file mode 100644 index 00000000000..04131d2743d --- /dev/null +++ b/cli/azd/internal/agent/tools/mcp/tool_adapter.go @@ -0,0 +1,104 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package mcp + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + "github.com/mark3labs/mcp-go/client" + "github.com/mark3labs/mcp-go/mcp" +) + +// McpToolAdapter wraps an MCP tool with full schema fidelity preservation +type McpToolAdapter struct { + server string + tool mcp.Tool + mcpClient client.MCPClient +} + +// Ensure McpToolAdapter implements AnnotatedTool interface +var _ common.AnnotatedTool = (*McpToolAdapter)(nil) + +// NewMcpToolAdapter creates a new adapter that preserves full MCP tool schema fidelity +func NewMcpToolAdapter(server string, tool mcp.Tool, mcpClient client.MCPClient) *McpToolAdapter { + return &McpToolAdapter{ + server: server, + tool: tool, + mcpClient: mcpClient, + } +} + +// Name implements tools.Tool interface +func (m *McpToolAdapter) Name() string { + return m.tool.Name +} + +func (m *McpToolAdapter) Server() string { + return m.server +} + +func (m *McpToolAdapter) Description() string { + return m.tool.Description +} + +// GetAnnotations returns tool behavior annotations +func (m *McpToolAdapter) Annotations() mcp.ToolAnnotation { + return m.tool.Annotations +} + +// Call implements tools.Tool interface +func (m *McpToolAdapter) Call(ctx context.Context, input string) (string, error) { + // Parse input JSON + var args map[string]interface{} + if err := json.Unmarshal([]byte(input), &args); err != nil { + return "", fmt.Errorf("invalid JSON input: %w", err) + } + + // Create MCP call request + req := mcp.CallToolRequest{ + Request: mcp.Request{ + Method: "tools/call", + }, + } + req.Params.Name = m.tool.Name + req.Params.Arguments = args + + // Call the MCP tool + result, err := m.mcpClient.CallTool(ctx, req) + if err != nil { + return "", fmt.Errorf("MCP tool call failed: %w", err) + } + + // Handle different content types in result + if len(result.Content) == 0 { + return "", fmt.Errorf("empty result from MCP tool") + } + + // Extract text from various content types + var response string + for _, content := range result.Content { + switch c := content.(type) { + case mcp.TextContent: + response += c.Text + case mcp.ImageContent: + response += fmt.Sprintf("[Image: %s]", c.Data) + case mcp.EmbeddedResource: + if textResource, ok := c.Resource.(mcp.TextResourceContents); ok { + response += textResource.Text + } else { + response += fmt.Sprintf("[Resource: %s]", c.Resource) + } + default: + // Try to marshal unknown content as JSON + if jsonBytes, err := json.Marshal(content); err == nil { + response += string(jsonBytes) + } + } + } + + return response, nil +} diff --git a/cli/azd/pkg/input/console.go b/cli/azd/pkg/input/console.go index 8f115ae89e0..3d107721b9b 100644 --- a/cli/azd/pkg/input/console.go +++ b/cli/azd/pkg/input/console.go @@ -131,6 +131,8 @@ type Console interface { GetWriter() io.Writer // Gets the standard input, output and error stream Handles() ConsoleHandles + // Executes an interactive action, managing spinner state + DoInteraction(action func() error) error ConsoleShim } @@ -1069,3 +1071,25 @@ func (c *AskerConsole) doInteraction(promptFn func(c *AskerConsole) error) error // Execute the interactive prompt return promptFn(c) } + +func (c *AskerConsole) DoInteraction(action func() error) error { + if c.spinner.Status() == yacspin.SpinnerRunning { + _ = c.spinner.Pause() + + // Ensure the spinner is always resumed + defer func() { + _ = c.spinner.Unpause() + }() + } + + // Track total time for promptFn. 
+ // It includes the time spent in rendering the prompt (likely <1ms) + // before the user has a chance to interact with the prompt. + start := time.Now() + defer func() { + tracing.InteractTimeMs.Add(time.Since(start).Milliseconds()) + }() + + // Execute the interactive prompt + return action() +} From b17be7cf349c92a02b827b9d84989bf1c31b461f Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Mon, 11 Aug 2025 13:19:10 -0700 Subject: [PATCH 060/116] Adds more consent validation --- cli/azd/cmd/mcp.go | 266 ++++++++++++++++++ cli/azd/internal/agent/agent.go | 10 +- cli/azd/internal/agent/agent_factory.go | 4 +- cli/azd/internal/agent/consent/checker.go | 71 ++++- .../agent/consent/consent_wrapper_tool.go | 45 ++- cli/azd/internal/agent/consent/manager.go | 237 ++++++++-------- cli/azd/internal/agent/consent/types.go | 38 ++- .../internal/agent/conversational_agent.go | 6 +- cli/azd/internal/agent/one_shot_agent.go | 6 +- cli/azd/internal/agent/tools/common/types.go | 6 +- cli/azd/internal/agent/tools/common/utils.go | 18 ++ .../agent/tools/dev/command_executor.go | 13 +- cli/azd/internal/agent/tools/dev/loader.go | 4 +- .../internal/agent/tools/http/http_fetcher.go | 42 ++- cli/azd/internal/agent/tools/http/loader.go | 2 +- .../agent/tools/io/change_directory.go | 13 +- cli/azd/internal/agent/tools/io/copy_file.go | 65 ++++- .../agent/tools/io/create_directory.go | 13 +- .../agent/tools/io/current_directory.go | 17 +- .../agent/tools/io/delete_directory.go | 13 +- .../internal/agent/tools/io/delete_file.go | 13 +- .../internal/agent/tools/io/directory_list.go | 13 +- cli/azd/internal/agent/tools/io/file_info.go | 13 +- .../internal/agent/tools/io/file_search.go | 13 +- cli/azd/internal/agent/tools/io/loader.go | 4 +- cli/azd/internal/agent/tools/io/move_file.go | 13 +- cli/azd/internal/agent/tools/io/read_file.go | 13 +- cli/azd/internal/agent/tools/io/write_file.go | 13 +- cli/azd/internal/agent/tools/loader.go | 6 +- cli/azd/internal/agent/tools/mcp/loader.go | 4 +- cli/azd/test/mocks/mockinput/mock_console.go | 5 + 31 files changed, 759 insertions(+), 240 deletions(-) create mode 100644 cli/azd/internal/agent/tools/common/utils.go diff --git a/cli/azd/cmd/mcp.go b/cli/azd/cmd/mcp.go index 9c59a8a5244..dd878e16dec 100644 --- a/cli/azd/cmd/mcp.go +++ b/cli/azd/cmd/mcp.go @@ -85,6 +85,34 @@ azd functionality through the Model Context Protocol interface.`, FlagsResolver: newMcpConsentFlags, }) + // azd mcp consent grant + consentGroup.Add("grant", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "grant", + Short: "Grant consent trust rules.", + Long: "Grant trust rules for MCP tools and servers.", + Args: cobra.NoArgs, + }, + OutputFormats: []output.Format{output.NoneFormat}, + DefaultFormat: output.NoneFormat, + ActionResolver: newMcpConsentGrantAction, + FlagsResolver: newMcpConsentGrantFlags, + }) + + // azd mcp consent revoke + consentGroup.Add("revoke", &actions.ActionDescriptorOptions{ + Command: &cobra.Command{ + Use: "revoke", + Short: "Revoke consent trust rules.", + Long: "Revoke specific consent rules for MCP tools and servers.", + Args: cobra.NoArgs, + }, + OutputFormats: []output.Format{output.NoneFormat}, + DefaultFormat: output.NoneFormat, + ActionResolver: newMcpConsentRevokeAction, + FlagsResolver: newMcpConsentRevokeFlags, + }) + return group } @@ -165,6 +193,29 @@ func (f *mcpConsentFlags) Bind(local *pflag.FlagSet, global *internal.GlobalComm local.StringVar(&f.toolID, "tool-id", "", "Specific tool ID to operate on") } +// Flags for MCP consent grant 
command +type mcpConsentGrantFlags struct { + globalOptions *internal.GlobalCommandOptions + tool string + server string + globalFlag bool + scope string +} + +func newMcpConsentGrantFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpConsentGrantFlags { + flags := &mcpConsentGrantFlags{} + flags.Bind(cmd.Flags(), global) + return flags +} + +func (f *mcpConsentGrantFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + f.globalOptions = global + local.StringVar(&f.tool, "tool", "", "Specific tool name (requires --server)") + local.StringVar(&f.server, "server", "", "Server name") + local.BoolVar(&f.globalFlag, "global", false, "Apply globally to all servers") + local.StringVar(&f.scope, "scope", "all", "Scope of the rule: 'all' or 'read-only'") +} + // Action for MCP consent list command type mcpConsentListAction struct { flags *mcpConsentFlags @@ -301,3 +352,218 @@ func (a *mcpConsentClearAction) Run(ctx context.Context) (*actions.ActionResult, return nil, nil } + +// Action for MCP consent grant command +type mcpConsentGrantAction struct { + flags *mcpConsentGrantFlags + console input.Console + userConfigManager config.UserConfigManager + consentManager consent.ConsentManager +} + +func newMcpConsentGrantAction( + flags *mcpConsentGrantFlags, + console input.Console, + userConfigManager config.UserConfigManager, + consentManager consent.ConsentManager, +) actions.Action { + return &mcpConsentGrantAction{ + flags: flags, + console: console, + userConfigManager: userConfigManager, + consentManager: consentManager, + } +} + +func (a *mcpConsentGrantAction) Run(ctx context.Context) (*actions.ActionResult, error) { + // Validate flag combinations + if a.flags.tool != "" && a.flags.server == "" { + return nil, fmt.Errorf("--tool requires --server") + } + + if a.flags.globalFlag && (a.flags.server != "" || a.flags.tool != "") { + return nil, fmt.Errorf("--global cannot be combined with --server or --tool") + } + + if !a.flags.globalFlag && a.flags.server == "" { + return nil, fmt.Errorf("specify either --global or --server") + } + + // Validate scope + if a.flags.scope != "all" && a.flags.scope != "read-only" { + return nil, fmt.Errorf("--scope must be 'all' or 'read-only'") + } + + // Build rule + var toolID string + var ruleScope consent.RuleScope + var description string + + if a.flags.scope == "read-only" { + ruleScope = consent.RuleScopeReadOnly + } else { + ruleScope = consent.RuleScopeAll + } + + if a.flags.globalFlag { + toolID = "*" + if a.flags.scope == "read-only" { + description = "all read-only tools globally" + } else { + description = "all tools globally" + } + } else if a.flags.tool != "" { + toolID = fmt.Sprintf("%s/%s", a.flags.server, a.flags.tool) + if a.flags.scope == "read-only" { + description = fmt.Sprintf("read-only tool %s from server %s", a.flags.tool, a.flags.server) + } else { + description = fmt.Sprintf("tool %s from server %s", a.flags.tool, a.flags.server) + } + } else { + toolID = fmt.Sprintf("%s/*", a.flags.server) + if a.flags.scope == "read-only" { + description = fmt.Sprintf("read-only tools from server %s", a.flags.server) + } else { + description = fmt.Sprintf("all tools from server %s", a.flags.server) + } + } + + rule := consent.ConsentRule{ + ToolID: toolID, + Permission: consent.ConsentAlways, + RuleScope: ruleScope, + } + + if err := a.consentManager.GrantConsent(ctx, rule, consent.ScopeGlobal); err != nil { + return nil, fmt.Errorf("failed to grant consent: %w", err) + } + + 
fmt.Fprintf(a.console.Handles().Stdout, "Granted trust for %s\n", description) + + return nil, nil +} + +// Flags for MCP consent revoke command +type mcpConsentRevokeFlags struct { + globalOptions *internal.GlobalCommandOptions + tool string + server string + globalFlag bool + scope string + toolPattern string +} + +func newMcpConsentRevokeFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpConsentRevokeFlags { + flags := &mcpConsentRevokeFlags{} + flags.Bind(cmd.Flags(), global) + return flags +} + +func (f *mcpConsentRevokeFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + f.globalOptions = global + local.StringVar(&f.tool, "tool", "", "Specific tool name (requires --server)") + local.StringVar(&f.server, "server", "", "Server name") + local.BoolVar(&f.globalFlag, "global", false, "Apply globally to all servers") + local.StringVar(&f.scope, "scope", "all", "Scope of the rule: 'all' or 'read-only'") + local.StringVar( + &f.toolPattern, + "rule-pattern", + "", + "Revoke trust for a specific rule pattern (e.g., 'server/tool' or 'server/*')", + ) +} + +// Action for MCP consent revoke command +type mcpConsentRevokeAction struct { + flags *mcpConsentRevokeFlags + console input.Console + userConfigManager config.UserConfigManager + consentManager consent.ConsentManager +} + +func newMcpConsentRevokeAction( + flags *mcpConsentRevokeFlags, + console input.Console, + userConfigManager config.UserConfigManager, + consentManager consent.ConsentManager, +) actions.Action { + return &mcpConsentRevokeAction{ + flags: flags, + console: console, + userConfigManager: userConfigManager, + consentManager: consentManager, + } +} + +func (a *mcpConsentRevokeAction) Run(ctx context.Context) (*actions.ActionResult, error) { + // Count options set + optionsSet := 0 + if a.flags.globalFlag { + optionsSet++ + } + if a.flags.server != "" { + optionsSet++ + } + if a.flags.toolPattern != "" { + optionsSet++ + } + + if optionsSet == 0 { + return nil, fmt.Errorf("specify one of: --global, --server, or --rule-pattern") + } + + if optionsSet > 1 { + return nil, fmt.Errorf("specify only one option at a time") + } + + // Validate flag combinations for new structure + if a.flags.tool != "" && a.flags.server == "" { + return nil, fmt.Errorf("--tool requires --server") + } + + if a.flags.globalFlag && (a.flags.server != "" || a.flags.tool != "") { + return nil, fmt.Errorf("--global cannot be combined with --server or --tool") + } + + // Validate scope + if a.flags.scope != "all" && a.flags.scope != "read-only" { + return nil, fmt.Errorf("--scope must be 'all' or 'read-only'") + } + + var toolID string + var description string + + if a.flags.toolPattern != "" { + toolID = a.flags.toolPattern + description = fmt.Sprintf("trust for pattern: %s", a.flags.toolPattern) + } else if a.flags.globalFlag { + toolID = "*" + if a.flags.scope == "read-only" { + description = "global read-only trust" + } else { + description = "global trust" + } + } else if a.flags.tool != "" { + toolID = fmt.Sprintf("%s/%s", a.flags.server, a.flags.tool) + if a.flags.scope == "read-only" { + description = fmt.Sprintf("read-only trust for tool %s from server %s", a.flags.tool, a.flags.server) + } else { + description = fmt.Sprintf("trust for tool %s from server %s", a.flags.tool, a.flags.server) + } + } else { + toolID = fmt.Sprintf("%s/*", a.flags.server) + if a.flags.scope == "read-only" { + description = fmt.Sprintf("read-only trust for server: %s", a.flags.server) + } else { + description = fmt.Sprintf("trust 
for server: %s", a.flags.server) + } + } + + if err := a.consentManager.ClearConsentByToolID(ctx, toolID, consent.ScopeGlobal); err != nil { + return nil, fmt.Errorf("failed to revoke consent: %w", err) + } + + fmt.Fprintf(a.console.Handles().Stdout, "Revoked %s\n", description) + + return nil, nil +} diff --git a/cli/azd/internal/agent/agent.go b/cli/azd/internal/agent/agent.go index b4a653c92b9..f11c11c6dc0 100644 --- a/cli/azd/internal/agent/agent.go +++ b/cli/azd/internal/agent/agent.go @@ -8,10 +8,10 @@ import ( "fmt" "strings" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" "github.com/tmc/langchaingo/agents" "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/llms" - "github.com/tmc/langchaingo/tools" ) // agentBase represents an AI agent that can execute tools and interact with language models. @@ -20,7 +20,7 @@ type agentBase struct { debug bool defaultModel llms.Model executor *agents.Executor - tools []tools.Tool + tools []common.AnnotatedTool callbacksHandler callbacks.Handler } @@ -46,7 +46,7 @@ func WithDefaultModel(model llms.Model) AgentOption { } // WithTools returns an option that adds the specified tools to the agent's toolkit -func WithTools(tools ...tools.Tool) AgentOption { +func WithTools(tools ...common.AnnotatedTool) AgentOption { return func(agent *agentBase) { agent.tools = tools } @@ -60,7 +60,7 @@ func WithCallbacksHandler(handler callbacks.Handler) AgentOption { } // toolNames returns a comma-separated string of all tool names in the provided slice -func toolNames(tools []tools.Tool) string { +func toolNames(tools []common.AnnotatedTool) string { var tn strings.Builder for i, tool := range tools { if i > 0 { @@ -74,7 +74,7 @@ func toolNames(tools []tools.Tool) string { // toolDescriptions returns a formatted string containing the name and description // of each tool in the provided slice, with each tool on a separate line -func toolDescriptions(tools []tools.Tool) string { +func toolDescriptions(tools []common.AnnotatedTool) string { var ts strings.Builder for _, tool := range tools { ts.WriteString(fmt.Sprintf("- %s: %s\n", tool.Name(), tool.Description())) diff --git a/cli/azd/internal/agent/agent_factory.go b/cli/azd/internal/agent/agent_factory.go index 6cb27cc605a..1b704930513 100644 --- a/cli/azd/internal/agent/agent_factory.go +++ b/cli/azd/internal/agent/agent_factory.go @@ -4,9 +4,9 @@ import ( "github.com/azure/azure-dev/cli/azd/internal/agent/consent" "github.com/azure/azure-dev/cli/azd/internal/agent/logging" localtools "github.com/azure/azure-dev/cli/azd/internal/agent/tools" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" mcptools "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" "github.com/azure/azure-dev/cli/azd/pkg/llm" - "github.com/tmc/langchaingo/tools" ) type AgentFactory struct { @@ -58,7 +58,7 @@ func (f *AgentFactory) Create(opts ...AgentOption) (Agent, error) { // Add more excluded tools here as needed } - allTools := []tools.Tool{} + allTools := []common.AnnotatedTool{} for _, toolLoader := range toolLoaders { categoryTools, err := toolLoader.LoadTools() diff --git a/cli/azd/internal/agent/consent/checker.go b/cli/azd/internal/agent/consent/checker.go index 4dfccf5f3af..07149c74f79 100644 --- a/cli/azd/internal/agent/consent/checker.go +++ b/cli/azd/internal/agent/consent/checker.go @@ -29,13 +29,12 @@ func NewConsentChecker( } } -// CheckToolConsent checks if a tool execution should be allowed -func (cc *ConsentChecker) CheckToolConsent(ctx context.Context, toolName, 
toolDesc string) (*ConsentDecision, error) { - return cc.CheckToolConsentWithAnnotations(ctx, toolName, toolDesc, nil) -} - // CheckToolConsentWithAnnotations checks tool consent with optional MCP annotations -func (cc *ConsentChecker) CheckToolConsentWithAnnotations(ctx context.Context, toolName, toolDesc string, annotations *mcp.ToolAnnotation) (*ConsentDecision, error) { +func (cc *ConsentChecker) CheckToolConsent( + ctx context.Context, + toolName, toolDesc string, + annotations mcp.ToolAnnotation, +) (*ConsentDecision, error) { toolID := fmt.Sprintf("%s/%s", cc.serverName, toolName) // Create consent request @@ -50,10 +49,14 @@ func (cc *ConsentChecker) CheckToolConsentWithAnnotations(ctx context.Context, t } // PromptAndGrantConsent shows consent prompt and grants permission based on user choice -func (cc *ConsentChecker) PromptAndGrantConsent(ctx context.Context, toolName, toolDesc string) error { +func (cc *ConsentChecker) PromptAndGrantConsent( + ctx context.Context, + toolName, toolDesc string, + annotations mcp.ToolAnnotation, +) error { toolID := fmt.Sprintf("%s/%s", cc.serverName, toolName) - choice, err := cc.promptForConsent(ctx, toolName, toolDesc) + choice, err := cc.promptForConsent(ctx, toolName, toolDesc, annotations) if err != nil { return fmt.Errorf("consent prompt failed: %w", err) } @@ -67,7 +70,11 @@ func (cc *ConsentChecker) PromptAndGrantConsent(ctx context.Context, toolName, t } // promptForConsent shows an interactive consent prompt and returns the user's choice -func (cc *ConsentChecker) promptForConsent(ctx context.Context, toolName, toolDesc string) (string, error) { +func (cc *ConsentChecker) promptForConsent( + ctx context.Context, + toolName, toolDesc string, + annotations mcp.ToolAnnotation, +) (string, error) { message := fmt.Sprintf( "Tool %s from server %s requires consent.\n\nHow would you like to proceed?", output.WithHighLightFormat(toolName), @@ -103,7 +110,21 @@ func (cc *ConsentChecker) promptForConsent(ctx context.Context, toolName, toolDe if !cc.isServerAlreadyTrusted(ctx) { choices = append(choices, &ux.SelectChoice{ Value: "server", - Label: fmt.Sprintf("Trust server '%s' - Allow all tools from this server", cc.serverName), + Label: "Allow all tools from this server", + }) + } + + // Add readonly trust options if this is a readonly tool + isReadOnlyTool := annotations.ReadOnlyHint != nil && *annotations.ReadOnlyHint + if isReadOnlyTool { + choices = append(choices, &ux.SelectChoice{ + Value: "readonly_server", + Label: "Allow all read-only tools from this server", + }) + + choices = append(choices, &ux.SelectChoice{ + Value: "readonly_global", + Label: "Allow all read-only tools from any server", }) } @@ -112,6 +133,7 @@ func (cc *ConsentChecker) promptForConsent(ctx context.Context, toolName, toolDe HelpMessage: helpMessage, Choices: choices, EnableFiltering: ux.Ptr(false), + DisplayCount: 10, }) choiceIndex, err := selector.Ask(ctx) @@ -128,18 +150,22 @@ func (cc *ConsentChecker) promptForConsent(ctx context.Context, toolName, toolDe // isServerAlreadyTrusted checks if the server is already trusted func (cc *ConsentChecker) isServerAlreadyTrusted(ctx context.Context) bool { + // Create a mock tool request to check if server has full trust request := ConsentRequest{ - ServerName: cc.serverName, - SessionID: "", // Not needed since each manager represents one session + ToolID: fmt.Sprintf("%s/test-tool", cc.serverName), + ServerName: cc.serverName, + SessionID: "", // Not needed since each manager represents one session + Annotations: 
mcp.ToolAnnotation{}, // No readonly hint } - // Create a mock consent request to check if server is trusted + // Check if server has full trust (not readonly-only) decision, err := cc.consentMgr.CheckConsent(ctx, request) if err != nil { return false } - return decision.Allowed && decision.Reason == "trusted server" + // Server is trusted if it's allowed and the reason indicates server-level trust + return decision.Allowed && (decision.Reason == "server trusted" || decision.Reason == "server_always") } // grantConsentFromChoice processes the user's consent choice and saves the appropriate rule @@ -177,6 +203,23 @@ func (cc *ConsentChecker) grantConsentFromChoice(ctx context.Context, toolID str rule = ConsentRule{ ToolID: fmt.Sprintf("%s/*", cc.serverName), Permission: ConsentServerAlways, + RuleScope: RuleScopeAll, + } + scope = ScopeGlobal + case "readonly_server": + // Grant trust to readonly tools from this server + rule = ConsentRule{ + ToolID: fmt.Sprintf("%s/*", cc.serverName), + Permission: ConsentAlways, + RuleScope: RuleScopeReadOnly, + } + scope = ScopeGlobal + case "readonly_global": + // Grant trust to all readonly tools globally + rule = ConsentRule{ + ToolID: "*", + Permission: ConsentAlways, + RuleScope: RuleScopeReadOnly, } scope = ScopeGlobal default: diff --git a/cli/azd/internal/agent/consent/consent_wrapper_tool.go b/cli/azd/internal/agent/consent/consent_wrapper_tool.go index de0ab9ba5ee..52e8bcf56fa 100644 --- a/cli/azd/internal/agent/consent/consent_wrapper_tool.go +++ b/cli/azd/internal/agent/consent/consent_wrapper_tool.go @@ -19,9 +19,9 @@ var _ tools.Tool = (*ConsentWrapperTool)(nil) // ConsentWrapperTool wraps a langchaingo tool with consent protection type ConsentWrapperTool struct { console input.Console - tool tools.Tool + tool common.AnnotatedTool consentChecker *ConsentChecker - annotations *mcp.ToolAnnotation + annotations mcp.ToolAnnotation } // Name returns the name of the tool @@ -29,6 +29,16 @@ func (c *ConsentWrapperTool) Name() string { return c.tool.Name() } +// Server returns the server of the tool +func (c *ConsentWrapperTool) Server() string { + return c.tool.Server() +} + +// Annotations returns the annotations of the tool +func (c *ConsentWrapperTool) Annotations() mcp.ToolAnnotation { + return c.annotations +} + // Description returns the description of the tool func (c *ConsentWrapperTool) Description() string { return c.tool.Description() @@ -37,7 +47,7 @@ func (c *ConsentWrapperTool) Description() string { // Call executes the tool with consent protection func (c *ConsentWrapperTool) Call(ctx context.Context, input string) (string, error) { // Check consent using enhanced checker with annotations - decision, err := c.consentChecker.CheckToolConsentWithAnnotations(ctx, c.Name(), c.Description(), c.annotations) + decision, err := c.consentChecker.CheckToolConsent(ctx, c.Name(), c.Description(), c.annotations) if err != nil { return "", fmt.Errorf("consent check failed: %w", err) } @@ -45,8 +55,8 @@ func (c *ConsentWrapperTool) Call(ctx context.Context, input string) (string, er if !decision.Allowed { if decision.RequiresPrompt { if err := c.console.DoInteraction(func() error { - // Show interactive consent prompt using shared checker - promptErr := c.consentChecker.PromptAndGrantConsent(ctx, c.Name(), c.Description()) + // Show interactive consent prompt using shared checker with annotations + promptErr := c.consentChecker.PromptAndGrantConsent(ctx, c.Name(), c.Description(), c.annotations) c.console.Message(ctx, "") return promptErr @@ 
-64,31 +74,14 @@ func (c *ConsentWrapperTool) Call(ctx context.Context, input string) (string, er // newConsentWrapperTool wraps a langchaingo tool with consent protection func newConsentWrapperTool( - tool tools.Tool, + tool common.AnnotatedTool, console input.Console, consentManager ConsentManager, -) tools.Tool { - var server string - var annotations *mcp.ToolAnnotation - - if annotatedTool, ok := tool.(common.AnnotatedTool); ok { - toolAnnotations := annotatedTool.Annotations() - annotations = &toolAnnotations - server = annotatedTool.Server() - } - - if commonTool, ok := tool.(common.Tool); ok { - server = commonTool.Server() - } - - if server == "" { - server = "unknown" - } - +) common.AnnotatedTool { return &ConsentWrapperTool{ tool: tool, console: console, - consentChecker: NewConsentChecker(consentManager, server), - annotations: annotations, + consentChecker: NewConsentChecker(consentManager, tool.Server()), + annotations: tool.Annotations(), } } diff --git a/cli/azd/internal/agent/consent/manager.go b/cli/azd/internal/agent/consent/manager.go index 28a0cd1d19d..93eee617e31 100644 --- a/cli/azd/internal/agent/consent/manager.go +++ b/cli/azd/internal/agent/consent/manager.go @@ -9,9 +9,9 @@ import ( "sync" "time" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" "github.com/azure/azure-dev/cli/azd/pkg/config" "github.com/azure/azure-dev/cli/azd/pkg/input" - "github.com/tmc/langchaingo/tools" ) const ( @@ -40,25 +40,8 @@ func NewConsentManager( // CheckConsent checks if a tool execution should be allowed func (cm *consentManager) CheckConsent(ctx context.Context, request ConsentRequest) (*ConsentDecision, error) { - // Check for explicit deny rules first - if decision := cm.checkExplicitRules(ctx, request); decision != nil && !decision.Allowed { - return decision, nil - } - - // Check if server is trusted - if cm.isServerTrusted(ctx, request.ServerName) { - return &ConsentDecision{Allowed: true, Reason: "trusted server"}, nil - } - - // Check if read-only tools are globally allowed - if request.Annotations != nil && request.Annotations.ReadOnlyHint != nil && *request.Annotations.ReadOnlyHint { - if cm.isReadOnlyToolsAllowed(ctx) { - return &ConsentDecision{Allowed: true, Reason: "read-only tool allowed"}, nil - } - } - - // Check existing consent rules - if decision := cm.checkExplicitRules(ctx, request); decision != nil && decision.Allowed { + // Check explicit rules across all scopes with unified logic + if decision := cm.checkUnifiedRules(ctx, request); decision != nil { return decision, nil } @@ -74,6 +57,11 @@ func (cm *consentManager) CheckConsent(ctx context.Context, request ConsentReque func (cm *consentManager) GrantConsent(ctx context.Context, rule ConsentRule, scope ConsentScope) error { rule.GrantedAt = time.Now() + // Set default RuleScope if not specified (backward compatibility) + if rule.RuleScope == "" { + rule.RuleScope = RuleScopeAll + } + switch scope { case ScopeSession: return cm.addSessionRule(rule) @@ -129,82 +117,21 @@ func (cm *consentManager) ClearConsentByToolID(ctx context.Context, toolID strin } // WrapTool wraps a single langchaingo tool with consent protection -func (cm *consentManager) WrapTool(tool tools.Tool) tools.Tool { +func (cm *consentManager) WrapTool(tool common.AnnotatedTool) common.AnnotatedTool { return newConsentWrapperTool(tool, cm.console, cm) } // WrapTools wraps multiple langchaingo tools with consent protection -func (cm *consentManager) WrapTools(langchainTools []tools.Tool) []tools.Tool { - wrappedTools := 
make([]tools.Tool, len(langchainTools)) +func (cm *consentManager) WrapTools(tools []common.AnnotatedTool) []common.AnnotatedTool { + wrappedTools := make([]common.AnnotatedTool, len(tools)) - for i, tool := range langchainTools { + for i, tool := range tools { wrappedTools[i] = cm.WrapTool(tool) } return wrappedTools } -// checkExplicitRules checks for explicit consent rules across all scopes -func (cm *consentManager) checkExplicitRules(ctx context.Context, request ConsentRequest) *ConsentDecision { - // Check session rules first - cm.sessionMutex.RLock() - sessionRules := cm.sessionRules - cm.sessionMutex.RUnlock() - - if len(sessionRules) > 0 { - if decision := cm.findMatchingRule(sessionRules, request); decision != nil { - return decision - } - } - - // Check project rules - if request.ProjectPath != "" { - if projectRules, err := cm.getProjectRules(ctx, request.ProjectPath); err == nil { - if decision := cm.findMatchingRule(projectRules, request); decision != nil { - return decision - } - } - } - - // Check global rules - if globalRules, err := cm.getGlobalRules(ctx); err == nil { - if decision := cm.findMatchingRule(globalRules, request); decision != nil { - return decision - } - } - - return nil -} - -// findMatchingRule finds a matching rule for the request -func (cm *consentManager) findMatchingRule(rules []ConsentRule, request ConsentRequest) *ConsentDecision { - serverName := request.ServerName - - for i, rule := range rules { - // Check for exact tool match - if rule.ToolID == request.ToolID { - decision := cm.evaluateRule(rule) - - // If this is a one-time consent rule, remove it after evaluation - if decision.Allowed && rule.Permission == ConsentOnce { - // Clean up the one-time rule from session rules - go func(ruleIndex int) { - cm.removeSessionRuleByIndex(ruleIndex) - }(i) - } - - return decision - } - - // Check for server-wide consent - if rule.Permission == ConsentServerAlways && rule.ToolID == fmt.Sprintf("%s/*", serverName) { - return &ConsentDecision{Allowed: true, Reason: "server trusted"} - } - } - - return nil -} - // evaluateRule evaluates a consent rule and returns a decision func (cm *consentManager) evaluateRule(rule ConsentRule) *ConsentDecision { switch rule.Permission { @@ -221,32 +148,6 @@ func (cm *consentManager) evaluateRule(rule ConsentRule) *ConsentDecision { } } -// isServerTrusted checks if a server is in the trusted servers list -func (cm *consentManager) isServerTrusted(ctx context.Context, serverName string) bool { - config, err := cm.getGlobalConsentConfig(ctx) - if err != nil { - return false - } - - for _, trustedServer := range config.TrustedServers { - if trustedServer == serverName { - return true - } - } - - return false -} - -// isReadOnlyToolsAllowed checks if read-only tools are globally allowed -func (cm *consentManager) isReadOnlyToolsAllowed(ctx context.Context) bool { - config, err := cm.getGlobalConsentConfig(ctx) - if err != nil { - return false - } - - return config.AllowReadOnlyTools -} - // addSessionRule adds a rule to the session rules func (cm *consentManager) addSessionRule(rule ConsentRule) error { cm.sessionMutex.Lock() @@ -362,9 +263,7 @@ func (cm *consentManager) clearGlobalRules(ctx context.Context) error { } consentConfig := ConsentConfig{ - Rules: []ConsentRule{}, - AllowReadOnlyTools: false, - TrustedServers: []string{}, + Rules: []ConsentRule{}, } if err := userConfig.Set(ConfigKeyMCPConsent, consentConfig); err != nil { @@ -435,3 +334,113 @@ func (cm *consentManager) removeGlobalRule(ctx context.Context, toolID 
string) e return cm.userConfigManager.Save(userConfig) } + +// checkUnifiedRules checks rules using the new unified matching logic +func (cm *consentManager) checkUnifiedRules(ctx context.Context, request ConsentRequest) *ConsentDecision { + isReadOnlyTool := request.Annotations.ReadOnlyHint != nil && *request.Annotations.ReadOnlyHint + + // Check session rules first + cm.sessionMutex.RLock() + sessionRules := cm.sessionRules + cm.sessionMutex.RUnlock() + + if decision := cm.findMatchingUnifiedRule(sessionRules, request, isReadOnlyTool); decision != nil { + return decision + } + + // Check project rules + if request.ProjectPath != "" { + if projectRules, err := cm.getProjectRules(ctx, request.ProjectPath); err == nil { + if decision := cm.findMatchingUnifiedRule(projectRules, request, isReadOnlyTool); decision != nil { + return decision + } + } + } + + // Check global rules + if globalRules, err := cm.getGlobalRules(ctx); err == nil { + if decision := cm.findMatchingUnifiedRule(globalRules, request, isReadOnlyTool); decision != nil { + return decision + } + } + + return nil +} + +// findMatchingUnifiedRule finds a matching rule using unified pattern and scope matching +func (cm *consentManager) findMatchingUnifiedRule( + rules []ConsentRule, + request ConsentRequest, + isReadOnlyTool bool, +) *ConsentDecision { + // Process rules in order: deny rules first, then allow rules + // This implements: Explicit deny > Global scope > Server scope > Tool scope precedence + + // First pass: Check for deny rules + for _, rule := range rules { + if rule.Permission == ConsentDeny && cm.ruleMatches(rule, request, isReadOnlyTool) { + return &ConsentDecision{Allowed: false, Reason: "explicitly denied"} + } + } + + // Second pass: Check for allow rules in precedence order + // Global patterns first (* pattern) + for i, rule := range rules { + if rule.Permission != ConsentDeny && rule.ToolID == "*" && cm.ruleMatches(rule, request, isReadOnlyTool) { + return cm.evaluateAllowRule(rule, i) + } + } + + // Server patterns next (server/* pattern) + serverPattern := fmt.Sprintf("%s/*", request.ServerName) + for i, rule := range rules { + if rule.Permission != ConsentDeny && rule.ToolID == serverPattern && cm.ruleMatches(rule, request, isReadOnlyTool) { + return cm.evaluateAllowRule(rule, i) + } + } + + // Specific tool patterns last (exact match) + for i, rule := range rules { + if rule.Permission != ConsentDeny && rule.ToolID == request.ToolID && cm.ruleMatches(rule, request, isReadOnlyTool) { + return cm.evaluateAllowRule(rule, i) + } + } + + return nil +} + +// ruleMatches checks if a rule matches the request considering scope restrictions +func (cm *consentManager) ruleMatches(rule ConsentRule, request ConsentRequest, isReadOnlyTool bool) bool { + // Default to "all" scope for backward compatibility + ruleScope := rule.RuleScope + if ruleScope == "" { + ruleScope = RuleScopeAll + } + + // Check scope restrictions + switch ruleScope { + case RuleScopeReadOnly: + // Rule only applies to read-only tools + return isReadOnlyTool + case RuleScopeAll: + // Rule applies to all tools + return true + default: + // Unknown scope, default to not matching + return false + } +} + +// evaluateAllowRule evaluates an allow rule and handles one-time cleanup +func (cm *consentManager) evaluateAllowRule(rule ConsentRule, ruleIndex int) *ConsentDecision { + decision := cm.evaluateRule(rule) + + // If this is a one-time consent rule, remove it after evaluation + if decision.Allowed && rule.Permission == ConsentOnce { + go 
func(index int) { + cm.removeSessionRuleByIndex(index) + }(ruleIndex) + } + + return decision +} diff --git a/cli/azd/internal/agent/consent/types.go b/cli/azd/internal/agent/consent/types.go index b745398d61e..fcb1dae1bf1 100644 --- a/cli/azd/internal/agent/consent/types.go +++ b/cli/azd/internal/agent/consent/types.go @@ -7,8 +7,8 @@ import ( "context" "time" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" "github.com/mark3labs/mcp-go/mcp" - "github.com/tmc/langchaingo/tools" ) // ConsentLevel represents the level of consent granted for a tool @@ -17,14 +17,18 @@ type ConsentLevel string // ConsentScope represents where consent rules are stored type ConsentScope string +// RuleScope represents what types of tools a rule applies to +type RuleScope string + const ( - ConsentDeny ConsentLevel = "deny" - ConsentPrompt ConsentLevel = "prompt" - ConsentOnce ConsentLevel = "once" - ConsentSession ConsentLevel = "session" - ConsentProject ConsentLevel = "project" - ConsentAlways ConsentLevel = "always" - ConsentServerAlways ConsentLevel = "server_always" // All tools from server + ConsentDeny ConsentLevel = "deny" + ConsentPrompt ConsentLevel = "prompt" + ConsentOnce ConsentLevel = "once" + ConsentSession ConsentLevel = "session" + ConsentProject ConsentLevel = "project" + ConsentAlways ConsentLevel = "always" + ConsentServerAlways ConsentLevel = "server_always" // All tools from server + ConsentServerReadOnly ConsentLevel = "server_readonly" // Read-only tools from server ) const ( @@ -33,18 +37,22 @@ const ( ScopeSession ConsentScope = "session" ) +const ( + RuleScopeAll RuleScope = "all" // All tools matching the pattern + RuleScopeReadOnly RuleScope = "readonly" // Only read-only tools matching the pattern +) + // ConsentRule represents a single consent rule for a tool type ConsentRule struct { - ToolID string `json:"tool_id"` + ToolID string `json:"toolId"` Permission ConsentLevel `json:"permission"` + RuleScope RuleScope `json:"scope,omitempty"` // Defaults to "all" for backward compatibility GrantedAt time.Time `json:"granted_at"` } // ConsentConfig represents the MCP consent configuration type ConsentConfig struct { - Rules []ConsentRule `json:"rules,omitempty"` - AllowReadOnlyTools bool `json:"allow_readonly_tools,omitempty"` - TrustedServers []string `json:"trusted_servers,omitempty"` + Rules []ConsentRule `json:"rules,omitempty"` } // ConsentRequest represents a request to check consent for a tool @@ -54,7 +62,7 @@ type ConsentRequest struct { Parameters map[string]interface{} SessionID string ProjectPath string - Annotations *mcp.ToolAnnotation + Annotations mcp.ToolAnnotation } // ConsentDecision represents the result of a consent check @@ -73,6 +81,6 @@ type ConsentManager interface { ClearConsentByToolID(ctx context.Context, toolID string, scope ConsentScope) error // Tool wrapping methods - WrapTool(tool tools.Tool) tools.Tool - WrapTools(tools []tools.Tool) []tools.Tool + WrapTool(tool common.AnnotatedTool) common.AnnotatedTool + WrapTools(tools []common.AnnotatedTool) []common.AnnotatedTool } diff --git a/cli/azd/internal/agent/conversational_agent.go b/cli/azd/internal/agent/conversational_agent.go index 103b3663308..9115e2fe719 100644 --- a/cli/azd/internal/agent/conversational_agent.go +++ b/cli/azd/internal/agent/conversational_agent.go @@ -11,13 +11,13 @@ import ( "os" "strings" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" "github.com/fatih/color" "github.com/tmc/langchaingo/agents" "github.com/tmc/langchaingo/chains" 
"github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/memory" "github.com/tmc/langchaingo/prompts" - "github.com/tmc/langchaingo/tools" ) //go:embed prompts/conversational.txt @@ -36,7 +36,7 @@ func NewConversationalAzdAiAgent(llm llms.Model, opts ...AgentOption) (*Conversa azdAgent := &ConversationalAzdAiAgent{ agentBase: &agentBase{ defaultModel: llm, - tools: []tools.Tool{}, + tools: []common.AnnotatedTool{}, }, } @@ -63,7 +63,7 @@ func NewConversationalAzdAiAgent(llm llms.Model, opts ...AgentOption) (*Conversa } // 4. Create agent with memory directly integrated - conversationAgent := agents.NewConversationalAgent(llm, azdAgent.tools, + conversationAgent := agents.NewConversationalAgent(llm, common.ToLangChainTools(azdAgent.tools), agents.WithPrompt(promptTemplate), agents.WithMemory(smartMemory), agents.WithCallbacksHandler(azdAgent.callbacksHandler), diff --git a/cli/azd/internal/agent/one_shot_agent.go b/cli/azd/internal/agent/one_shot_agent.go index 42587b4f440..40742518945 100644 --- a/cli/azd/internal/agent/one_shot_agent.go +++ b/cli/azd/internal/agent/one_shot_agent.go @@ -8,11 +8,11 @@ import ( _ "embed" "strings" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" "github.com/tmc/langchaingo/agents" "github.com/tmc/langchaingo/chains" "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/prompts" - "github.com/tmc/langchaingo/tools" ) // OneShotAzdAiAgent represents an AZD Copilot agent designed for single-request processing @@ -31,7 +31,7 @@ func NewOneShotAzdAiAgent(llm llms.Model, opts ...AgentOption) (*OneShotAzdAiAge azdAgent := &OneShotAzdAiAgent{ agentBase: &agentBase{ defaultModel: llm, - tools: []tools.Tool{}, + tools: []common.AnnotatedTool{}, }, } @@ -50,7 +50,7 @@ func NewOneShotAzdAiAgent(llm llms.Model, opts ...AgentOption) (*OneShotAzdAiAge } // 4. 
Create agent with memory directly integrated - oneShotAgent := agents.NewOneShotAgent(llm, azdAgent.tools, + oneShotAgent := agents.NewOneShotAgent(llm, common.ToLangChainTools(azdAgent.tools), agents.WithPrompt(promptTemplate), agents.WithCallbacksHandler(azdAgent.callbacksHandler), agents.WithReturnIntermediateSteps(), diff --git a/cli/azd/internal/agent/tools/common/types.go b/cli/azd/internal/agent/tools/common/types.go index a5d1b565826..bc9299a234d 100644 --- a/cli/azd/internal/agent/tools/common/types.go +++ b/cli/azd/internal/agent/tools/common/types.go @@ -29,9 +29,9 @@ type AnnotatedTool interface { Annotations() mcp.ToolAnnotation } -type LocalTool struct { +type BuiltInTool struct { } -func (t *LocalTool) Server() string { - return "local" +func (t *BuiltInTool) Server() string { + return "built-in" } diff --git a/cli/azd/internal/agent/tools/common/utils.go b/cli/azd/internal/agent/tools/common/utils.go new file mode 100644 index 00000000000..352ab27d5be --- /dev/null +++ b/cli/azd/internal/agent/tools/common/utils.go @@ -0,0 +1,18 @@ +package common + +import "github.com/tmc/langchaingo/tools" + +// ToPtr converts a value to a pointer +func ToPtr[T any](value T) *T { + return &value +} + +// ToLangChainTools converts a slice of AnnotatedTool to a slice of tools.Tool +func ToLangChainTools(annotatedTools []AnnotatedTool) []tools.Tool { + var rawTools []tools.Tool + for _, tool := range annotatedTools { + rawTools = append(rawTools, tool) + } + + return rawTools +} diff --git a/cli/azd/internal/agent/tools/dev/command_executor.go b/cli/azd/internal/agent/tools/dev/command_executor.go index fb73f63b246..4c71fea4a43 100644 --- a/cli/azd/internal/agent/tools/dev/command_executor.go +++ b/cli/azd/internal/agent/tools/dev/command_executor.go @@ -14,17 +14,28 @@ import ( "strings" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + "github.com/mark3labs/mcp-go/mcp" ) // CommandExecutorTool implements the Tool interface for executing commands and scripts type CommandExecutorTool struct { - common.LocalTool + common.BuiltInTool } func (t CommandExecutorTool) Name() string { return "execute_command" } +func (t CommandExecutorTool) Annotations() mcp.ToolAnnotation { + return mcp.ToolAnnotation{ + Title: "Execute Terminal Command", + ReadOnlyHint: common.ToPtr(false), + DestructiveHint: common.ToPtr(true), + IdempotentHint: common.ToPtr(false), + OpenWorldHint: common.ToPtr(true), + } +} + func (t CommandExecutorTool) Description() string { return `Execute any command with arguments through the system shell for better compatibility. 
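
A minimal sketch (not part of the patch) of the annotation pattern this commit applies to every built-in tool: embed common.BuiltInTool to report the "built-in" server and expose mcp.ToolAnnotation hints so the consent layer can tell read-only tools from destructive ones. EchoTool and its behavior are hypothetical; only the interface shape and the common/mcp helpers come from the patch.

package example

import (
	"context"

	"github.com/azure/azure-dev/cli/azd/internal/agent/tools/common"
	"github.com/mark3labs/mcp-go/mcp"
)

// EchoTool is a hypothetical read-only tool following the same conventions as the
// built-in tools changed in this commit.
type EchoTool struct {
	common.BuiltInTool // supplies Server() == "built-in"
}

func (t EchoTool) Name() string        { return "echo" }
func (t EchoTool) Description() string { return "Echo the input back to the caller." }

// Annotations advertises behavior hints; ReadOnlyHint is what consent rules with the
// read-only scope match against.
func (t EchoTool) Annotations() mcp.ToolAnnotation {
	return mcp.ToolAnnotation{
		Title:           "Echo Input",
		ReadOnlyHint:    common.ToPtr(true),
		DestructiveHint: common.ToPtr(false),
		IdempotentHint:  common.ToPtr(true),
		OpenWorldHint:   common.ToPtr(false),
	}
}

func (t EchoTool) Call(ctx context.Context, input string) (string, error) {
	return input, nil
}

// The embedded BuiltInTool uses a pointer receiver for Server(), so the pointer type
// implements common.AnnotatedTool, matching how the loaders register &SomeTool{} values.
var _ common.AnnotatedTool = &EchoTool{}
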
diff --git a/cli/azd/internal/agent/tools/dev/loader.go b/cli/azd/internal/agent/tools/dev/loader.go index bbeeaa41ab3..430ff9cc090 100644 --- a/cli/azd/internal/agent/tools/dev/loader.go +++ b/cli/azd/internal/agent/tools/dev/loader.go @@ -12,8 +12,8 @@ func NewDevToolsLoader() *DevToolsLoader { return &DevToolsLoader{} } -func (l *DevToolsLoader) LoadTools() ([]common.Tool, error) { - return []common.Tool{ +func (l *DevToolsLoader) LoadTools() ([]common.AnnotatedTool, error) { + return []common.AnnotatedTool{ &CommandExecutorTool{}, }, nil } diff --git a/cli/azd/internal/agent/tools/http/http_fetcher.go b/cli/azd/internal/agent/tools/http/http_fetcher.go index 7a4cb1c9c24..882a7f401ea 100644 --- a/cli/azd/internal/agent/tools/http/http_fetcher.go +++ b/cli/azd/internal/agent/tools/http/http_fetcher.go @@ -9,53 +9,49 @@ import ( "io" "net/http" - "github.com/tmc/langchaingo/callbacks" + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + "github.com/mark3labs/mcp-go/mcp" ) // HTTPFetcherTool implements the Tool interface for making HTTP requests type HTTPFetcherTool struct { - CallbacksHandler callbacks.Handler + common.BuiltInTool } func (t HTTPFetcherTool) Name() string { return "http_fetcher" } +func (t HTTPFetcherTool) Annotations() mcp.ToolAnnotation { + return mcp.ToolAnnotation{ + Title: "Fetch HTTP Endpoint", + ReadOnlyHint: common.ToPtr(true), + DestructiveHint: common.ToPtr(false), + IdempotentHint: common.ToPtr(true), + OpenWorldHint: common.ToPtr(true), + } +} + func (t HTTPFetcherTool) Description() string { return "Make HTTP GET requests to fetch content from URLs. Input should be a valid URL." } func (t HTTPFetcherTool) Call(ctx context.Context, input string) (string, error) { - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolStart(ctx, fmt.Sprintf("http_fetcher: %s", input)) - } - // #nosec G107 - HTTP requests with variable URLs are the intended functionality of this tool resp, err := http.Get(input) if err != nil { - toolErr := fmt.Errorf("failed to fetch URL %s: %w", input, err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return "", fmt.Errorf("failed to fetch URL %s: %w", input, err) } + defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - err := fmt.Errorf("HTTP request failed with status: %s", resp.Status) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, err) - } - return "", err + return "", fmt.Errorf("HTTP request failed with status: %s", resp.Status) } body, err := io.ReadAll(resp.Body) if err != nil { - toolErr := fmt.Errorf("failed to read response body: %w", err) - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolError(ctx, toolErr) - } - return "", toolErr + return "", fmt.Errorf("failed to read response body: %w", err) } var output string @@ -67,9 +63,5 @@ func (t HTTPFetcherTool) Call(ctx context.Context, input string) (string, error) output += "\n" } - if t.CallbacksHandler != nil { - t.CallbacksHandler.HandleToolEnd(ctx, output) - } - return output, nil } diff --git a/cli/azd/internal/agent/tools/http/loader.go b/cli/azd/internal/agent/tools/http/loader.go index 32a1ce4dbc8..b2b5f3f4482 100644 --- a/cli/azd/internal/agent/tools/http/loader.go +++ b/cli/azd/internal/agent/tools/http/loader.go @@ -21,6 +21,6 @@ func NewHttpToolsLoader(callbackHandler callbacks.Handler) *HttpToolsLoader { func (l *HttpToolsLoader) LoadTools() ([]tools.Tool, error) { return []tools.Tool{ - &HTTPFetcherTool{CallbacksHandler: 
l.callbackHandler}, + &HTTPFetcherTool{}, }, nil } diff --git a/cli/azd/internal/agent/tools/io/change_directory.go b/cli/azd/internal/agent/tools/io/change_directory.go index 7ee8d358adf..b02c70daf8e 100644 --- a/cli/azd/internal/agent/tools/io/change_directory.go +++ b/cli/azd/internal/agent/tools/io/change_directory.go @@ -12,17 +12,28 @@ import ( "strings" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + "github.com/mark3labs/mcp-go/mcp" ) // ChangeDirectoryTool implements the Tool interface for changing the current working directory type ChangeDirectoryTool struct { - common.LocalTool + common.BuiltInTool } func (t ChangeDirectoryTool) Name() string { return "change_directory" } +func (t ChangeDirectoryTool) Annotations() mcp.ToolAnnotation { + return mcp.ToolAnnotation{ + Title: "Change Directory", + ReadOnlyHint: common.ToPtr(false), + DestructiveHint: common.ToPtr(false), + IdempotentHint: common.ToPtr(true), + OpenWorldHint: common.ToPtr(false), + } +} + func (t ChangeDirectoryTool) Description() string { return "Change the current working directory. " + "Input: directory path (e.g., '../parent' or './subfolder' or absolute path)" diff --git a/cli/azd/internal/agent/tools/io/copy_file.go b/cli/azd/internal/agent/tools/io/copy_file.go index 47478bea7f2..91b0d2edd96 100644 --- a/cli/azd/internal/agent/tools/io/copy_file.go +++ b/cli/azd/internal/agent/tools/io/copy_file.go @@ -12,22 +12,44 @@ import ( "strings" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + "github.com/mark3labs/mcp-go/mcp" ) // CopyFileTool implements the Tool interface for copying files type CopyFileTool struct { - common.LocalTool + common.BuiltInTool } func (t CopyFileTool) Name() string { return "copy_file" } +func (t CopyFileTool) Annotations() mcp.ToolAnnotation { + return mcp.ToolAnnotation{ + Title: "Copy File", + ReadOnlyHint: common.ToPtr(false), + DestructiveHint: common.ToPtr(false), + IdempotentHint: common.ToPtr(true), + OpenWorldHint: common.ToPtr(false), + } +} + func (t CopyFileTool) Description() string { - return `Copy a file to a new location. -Input: JSON object with required 'source' and 'destination' fields: {"source": "file.txt", "destination": "backup.txt"} + return `Copy a file to a new location. By default, fails if destination already exists. +Input: JSON object with required 'source' and 'destination' fields, optional 'overwrite' field: +{"source": "file.txt", "destination": "backup.txt", "overwrite": false} + +Fields: +- source: Path to the source file (required) +- destination: Path where the file should be copied (required) +- overwrite: If true, allows overwriting existing destination file (optional, default: false) + Returns: JSON with copy operation details or error information. -The input must be formatted as a single line valid JSON string.` +The input must be formatted as a single line valid JSON string. 
+ +Examples: +- Safe copy: {"source": "data.txt", "destination": "backup.txt"} +- Copy with overwrite: {"source": "data.txt", "destination": "backup.txt", "overwrite": true}` } // createErrorResponse creates a JSON error response @@ -56,6 +78,7 @@ func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { type InputParams struct { Source string `json:"source"` Destination string `json:"destination"` + Overwrite bool `json:"overwrite,omitempty"` // Optional: allow overwriting existing files } var params InputParams @@ -68,7 +91,7 @@ func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { return t.createErrorResponse( err, fmt.Sprintf( - "Invalid JSON input: %s. Expected format: {\"source\": \"file.txt\", \"destination\": \"backup.txt\"}", + "Invalid JSON input: %s. Expected format: {\"source\": \"file.txt\", \"destination\": \"backup.txt\", \"overwrite\": false}", err.Error(), ), ) @@ -97,6 +120,19 @@ func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { ) } + // Check if destination exists and handle overwrite logic + destinationExisted := false + if _, err := os.Stat(destination); err == nil { + // Destination file exists + destinationExisted = true + if !params.Overwrite { + return t.createErrorResponse( + fmt.Errorf("destination file %s already exists", destination), + fmt.Sprintf("Destination file %s already exists. Set \"overwrite\": true to allow overwriting", destination), + ) + } + } + // Open source file sourceFile, err := os.Open(source) if err != nil { @@ -123,15 +159,32 @@ func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { Source string `json:"source"` Destination string `json:"destination"` BytesCopied int64 `json:"bytesCopied"` + Overwritten bool `json:"overwritten"` // Indicates if an existing file was overwritten Message string `json:"message"` } + // Determine if this was an overwrite operation + wasOverwrite := destinationExisted && params.Overwrite + + var message string + if wasOverwrite { + message = fmt.Sprintf( + "Successfully copied %s to %s (%d bytes) - overwrote existing file", + source, + destination, + bytesWritten, + ) + } else { + message = fmt.Sprintf("Successfully copied %s to %s (%d bytes)", source, destination, bytesWritten) + } + response := CopyResponse{ Success: true, Source: source, Destination: destination, BytesCopied: bytesWritten, - Message: fmt.Sprintf("Successfully copied %s to %s (%d bytes)", source, destination, bytesWritten), + Overwritten: wasOverwrite, + Message: message, } // Convert to JSON diff --git a/cli/azd/internal/agent/tools/io/create_directory.go b/cli/azd/internal/agent/tools/io/create_directory.go index 81df0811bb8..369b284d3b0 100644 --- a/cli/azd/internal/agent/tools/io/create_directory.go +++ b/cli/azd/internal/agent/tools/io/create_directory.go @@ -11,17 +11,28 @@ import ( "strings" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + "github.com/mark3labs/mcp-go/mcp" ) // CreateDirectoryTool implements the Tool interface for creating directories type CreateDirectoryTool struct { - common.LocalTool + common.BuiltInTool } func (t CreateDirectoryTool) Name() string { return "create_directory" } +func (t CreateDirectoryTool) Annotations() mcp.ToolAnnotation { + return mcp.ToolAnnotation{ + Title: "Create Directory", + ReadOnlyHint: common.ToPtr(false), + DestructiveHint: common.ToPtr(false), + IdempotentHint: common.ToPtr(true), + OpenWorldHint: common.ToPtr(false), + } +} + func (t CreateDirectoryTool) Description() 
string { return "Create a directory (and any necessary parent directories). " + "Input: directory path (e.g., 'docs' or './src/components')" diff --git a/cli/azd/internal/agent/tools/io/current_directory.go b/cli/azd/internal/agent/tools/io/current_directory.go index 34a9bdf31f0..30ffb5d60fc 100644 --- a/cli/azd/internal/agent/tools/io/current_directory.go +++ b/cli/azd/internal/agent/tools/io/current_directory.go @@ -10,19 +10,30 @@ import ( "os" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + "github.com/mark3labs/mcp-go/mcp" ) // CurrentDirectoryTool implements the Tool interface for getting current directory type CurrentDirectoryTool struct { - common.LocalTool + common.BuiltInTool } func (t CurrentDirectoryTool) Name() string { - return "cwd" + return "current_directory" +} + +func (t CurrentDirectoryTool) Annotations() mcp.ToolAnnotation { + return mcp.ToolAnnotation{ + Title: "Get Current Directory", + ReadOnlyHint: common.ToPtr(true), + DestructiveHint: common.ToPtr(false), + IdempotentHint: common.ToPtr(true), + OpenWorldHint: common.ToPtr(false), + } } func (t CurrentDirectoryTool) Description() string { - return "Get the current working directory to understand the project context. " + + return "Get the current working directory for the project workspace " + "Input: use 'current' or '.' (any input works)" } diff --git a/cli/azd/internal/agent/tools/io/delete_directory.go b/cli/azd/internal/agent/tools/io/delete_directory.go index 0ee3bb9be80..b4402065320 100644 --- a/cli/azd/internal/agent/tools/io/delete_directory.go +++ b/cli/azd/internal/agent/tools/io/delete_directory.go @@ -11,17 +11,28 @@ import ( "strings" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + "github.com/mark3labs/mcp-go/mcp" ) // DeleteDirectoryTool implements the Tool interface for deleting directories type DeleteDirectoryTool struct { - common.LocalTool + common.BuiltInTool } func (t DeleteDirectoryTool) Name() string { return "delete_directory" } +func (t DeleteDirectoryTool) Annotations() mcp.ToolAnnotation { + return mcp.ToolAnnotation{ + Title: "Delete Directory", + ReadOnlyHint: common.ToPtr(false), + DestructiveHint: common.ToPtr(true), + IdempotentHint: common.ToPtr(false), + OpenWorldHint: common.ToPtr(false), + } +} + func (t DeleteDirectoryTool) Description() string { return "Delete a directory and all its contents. Input: directory path (e.g., 'temp-folder' or './old-docs')" } diff --git a/cli/azd/internal/agent/tools/io/delete_file.go b/cli/azd/internal/agent/tools/io/delete_file.go index 86251bd9bce..6b73a1b09bf 100644 --- a/cli/azd/internal/agent/tools/io/delete_file.go +++ b/cli/azd/internal/agent/tools/io/delete_file.go @@ -11,17 +11,28 @@ import ( "strings" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + "github.com/mark3labs/mcp-go/mcp" ) // DeleteFileTool implements the Tool interface for deleting files type DeleteFileTool struct { - common.LocalTool + common.BuiltInTool } func (t DeleteFileTool) Name() string { return "delete_file" } +func (t DeleteFileTool) Annotations() mcp.ToolAnnotation { + return mcp.ToolAnnotation{ + Title: "Delete File", + ReadOnlyHint: common.ToPtr(false), + DestructiveHint: common.ToPtr(true), + IdempotentHint: common.ToPtr(false), + OpenWorldHint: common.ToPtr(false), + } +} + func (t DeleteFileTool) Description() string { return "Delete a file. 
Input: file path (e.g., 'temp.txt' or './docs/old-file.md')" } diff --git a/cli/azd/internal/agent/tools/io/directory_list.go b/cli/azd/internal/agent/tools/io/directory_list.go index 0d5fc60c538..4028c53627a 100644 --- a/cli/azd/internal/agent/tools/io/directory_list.go +++ b/cli/azd/internal/agent/tools/io/directory_list.go @@ -12,17 +12,28 @@ import ( "strings" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + "github.com/mark3labs/mcp-go/mcp" ) // DirectoryListTool implements the Tool interface for listing directory contents type DirectoryListTool struct { - common.LocalTool + common.BuiltInTool } func (t DirectoryListTool) Name() string { return "list_directory" } +func (t DirectoryListTool) Annotations() mcp.ToolAnnotation { + return mcp.ToolAnnotation{ + Title: "List Directory Contents", + ReadOnlyHint: common.ToPtr(true), + DestructiveHint: common.ToPtr(false), + IdempotentHint: common.ToPtr(true), + OpenWorldHint: common.ToPtr(false), + } +} + func (t DirectoryListTool) Description() string { return `List files and folders in a directory. Input: JSON object with required 'path' field: {"path": ".", "includeHidden": false} diff --git a/cli/azd/internal/agent/tools/io/file_info.go b/cli/azd/internal/agent/tools/io/file_info.go index 8c4ebb77952..092cd8a1af1 100644 --- a/cli/azd/internal/agent/tools/io/file_info.go +++ b/cli/azd/internal/agent/tools/io/file_info.go @@ -12,17 +12,28 @@ import ( "time" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + "github.com/mark3labs/mcp-go/mcp" ) // FileInfoTool implements the Tool interface for getting file information type FileInfoTool struct { - common.LocalTool + common.BuiltInTool } func (t FileInfoTool) Name() string { return "file_info" } +func (t FileInfoTool) Annotations() mcp.ToolAnnotation { + return mcp.ToolAnnotation{ + Title: "Get File Information", + ReadOnlyHint: common.ToPtr(true), + DestructiveHint: common.ToPtr(false), + IdempotentHint: common.ToPtr(true), + OpenWorldHint: common.ToPtr(false), + } +} + func (t FileInfoTool) Description() string { return "Get information about a file (size, modification time, permissions). " + "Input: file path (e.g., 'data.txt' or './docs/readme.md'). Returns JSON with file information." diff --git a/cli/azd/internal/agent/tools/io/file_search.go b/cli/azd/internal/agent/tools/io/file_search.go index 6fa5b6e1d0b..8e3e79fa8fc 100644 --- a/cli/azd/internal/agent/tools/io/file_search.go +++ b/cli/azd/internal/agent/tools/io/file_search.go @@ -11,11 +11,12 @@ import ( "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" "github.com/bmatcuk/doublestar/v4" + "github.com/mark3labs/mcp-go/mcp" ) // FileSearchTool implements a tool for searching files using glob patterns type FileSearchTool struct { - common.LocalTool + common.BuiltInTool } // FileSearchRequest represents the JSON payload for file search requests @@ -28,6 +29,16 @@ func (t FileSearchTool) Name() string { return "file_search" } +func (t FileSearchTool) Annotations() mcp.ToolAnnotation { + return mcp.ToolAnnotation{ + Title: "Search Files by Pattern", + ReadOnlyHint: common.ToPtr(true), + DestructiveHint: common.ToPtr(false), + IdempotentHint: common.ToPtr(true), + OpenWorldHint: common.ToPtr(false), + } +} + func (t FileSearchTool) Description() string { return `Searches for files matching a glob pattern in the current working directory using the doublestar library for full glob support. 
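A note on the doublestar dependency referenced in the FileSearchTool description above: the snippet below is an illustrative sketch, not part of this patch, showing how the v4 Match and Glob calls behave. The pattern strings and file names are made up for the example; only the doublestar.Match and doublestar.Glob signatures are taken from the library.

package main

import (
	"fmt"
	"os"

	"github.com/bmatcuk/doublestar/v4"
)

func main() {
	// "**" matches across directory separators, which the standard
	// path.Match / filepath.Match patterns do not support.
	ok, err := doublestar.Match("**/*.go", "internal/agent/tools/io/file_search.go")
	if err != nil {
		fmt.Println("bad pattern:", err)
		return
	}
	fmt.Println("matches:", ok) // true

	// Glob walks an fs.FS; rooting it at "." searches the current working
	// directory, similar to what FileSearchTool does with its pattern input.
	files, err := doublestar.Glob(os.DirFS("."), "**/*_test.go")
	if err != nil {
		fmt.Println("glob failed:", err)
		return
	}
	fmt.Println(files)
}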
diff --git a/cli/azd/internal/agent/tools/io/loader.go b/cli/azd/internal/agent/tools/io/loader.go index d5ed2967d1d..2045725143c 100644 --- a/cli/azd/internal/agent/tools/io/loader.go +++ b/cli/azd/internal/agent/tools/io/loader.go @@ -14,8 +14,8 @@ func NewIoToolsLoader() *IoToolsLoader { return &IoToolsLoader{} } -func (l *IoToolsLoader) LoadTools() ([]common.Tool, error) { - return []common.Tool{ +func (l *IoToolsLoader) LoadTools() ([]common.AnnotatedTool, error) { + return []common.AnnotatedTool{ &CurrentDirectoryTool{}, &ChangeDirectoryTool{}, &DirectoryListTool{}, diff --git a/cli/azd/internal/agent/tools/io/move_file.go b/cli/azd/internal/agent/tools/io/move_file.go index 7ae9cbf364f..66583fb23fb 100644 --- a/cli/azd/internal/agent/tools/io/move_file.go +++ b/cli/azd/internal/agent/tools/io/move_file.go @@ -11,17 +11,28 @@ import ( "strings" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + "github.com/mark3labs/mcp-go/mcp" ) // MoveFileTool implements the Tool interface for moving/renaming files type MoveFileTool struct { - common.LocalTool + common.BuiltInTool } func (t MoveFileTool) Name() string { return "move_file" } +func (t MoveFileTool) Annotations() mcp.ToolAnnotation { + return mcp.ToolAnnotation{ + Title: "Move or Rename File", + ReadOnlyHint: common.ToPtr(false), + DestructiveHint: common.ToPtr(true), + IdempotentHint: common.ToPtr(false), + OpenWorldHint: common.ToPtr(false), + } +} + func (t MoveFileTool) Description() string { return "Move or rename a file.\n" + "Input format: 'source|destination' (e.g., 'old.txt|new.txt' or './file.txt|./folder/file.txt')" diff --git a/cli/azd/internal/agent/tools/io/read_file.go b/cli/azd/internal/agent/tools/io/read_file.go index 822a3d3761f..0eb707dbf75 100644 --- a/cli/azd/internal/agent/tools/io/read_file.go +++ b/cli/azd/internal/agent/tools/io/read_file.go @@ -13,11 +13,12 @@ import ( "time" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + "github.com/mark3labs/mcp-go/mcp" ) // ReadFileTool implements the Tool interface for reading file contents type ReadFileTool struct { - common.LocalTool + common.BuiltInTool } // ReadFileRequest represents the JSON payload for file read requests @@ -58,6 +59,16 @@ func (t ReadFileTool) Name() string { return "read_file" } +func (t ReadFileTool) Annotations() mcp.ToolAnnotation { + return mcp.ToolAnnotation{ + Title: "Read File Contents", + ReadOnlyHint: common.ToPtr(true), + DestructiveHint: common.ToPtr(false), + IdempotentHint: common.ToPtr(true), + OpenWorldHint: common.ToPtr(false), + } +} + func (t ReadFileTool) Description() string { return `Read file contents with intelligent handling for different file sizes and partial reads. Returns JSON response with file content and metadata. 
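All of the Annotations() methods added in this change pass their hint values through common.ToPtr because the mcp.ToolAnnotation hint fields are pointers. The helper itself is not shown in this diff; the sketch below is an assumption about its shape rather than the committed implementation.

package common

// ToPtr returns a pointer to v. The mcp.ToolAnnotation hint fields
// (ReadOnlyHint, DestructiveHint, IdempotentHint, OpenWorldHint) are *bool,
// presumably so an unset hint can be told apart from an explicit false,
// hence the pointer helper used in every Annotations() method above.
func ToPtr[T any](v T) *T {
	return &v
}

With a helper like this, ReadOnlyHint: common.ToPtr(true) marks a tool as non-mutating, which appears to be the signal the server-readonly consent level introduced later in this series relies on.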
diff --git a/cli/azd/internal/agent/tools/io/write_file.go b/cli/azd/internal/agent/tools/io/write_file.go index 7c7f98460c7..33d856a8e89 100644 --- a/cli/azd/internal/agent/tools/io/write_file.go +++ b/cli/azd/internal/agent/tools/io/write_file.go @@ -13,11 +13,12 @@ import ( "time" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + "github.com/mark3labs/mcp-go/mcp" ) // WriteFileTool implements a comprehensive file writing tool that handles all scenarios type WriteFileTool struct { - common.LocalTool + common.BuiltInTool } // WriteFileRequest represents the JSON input for the write_file tool @@ -59,6 +60,16 @@ func (t WriteFileTool) Name() string { return "write_file" } +func (t WriteFileTool) Annotations() mcp.ToolAnnotation { + return mcp.ToolAnnotation{ + Title: "Write File Contents", + ReadOnlyHint: common.ToPtr(false), + DestructiveHint: common.ToPtr(true), + IdempotentHint: common.ToPtr(false), + OpenWorldHint: common.ToPtr(false), + } +} + func (t WriteFileTool) Description() string { return `Comprehensive file writing tool that handles full file writes, appends, and line-based partial updates. Returns JSON response with operation details. diff --git a/cli/azd/internal/agent/tools/loader.go b/cli/azd/internal/agent/tools/loader.go index 5b8f1461f1b..4d5ff902d84 100644 --- a/cli/azd/internal/agent/tools/loader.go +++ b/cli/azd/internal/agent/tools/loader.go @@ -11,7 +11,7 @@ import ( // ToolLoader provides an interface for loading tools from different categories type ToolLoader interface { - LoadTools() ([]common.Tool, error) + LoadTools() ([]common.AnnotatedTool, error) } // LocalToolsLoader manages loading tools from multiple local tool categories @@ -31,8 +31,8 @@ func NewLocalToolsLoader() *LocalToolsLoader { // LoadTools loads and returns all tools from all registered tool loaders. // Returns an error if any individual loader fails to load its tools. -func (l *LocalToolsLoader) LoadTools() ([]common.Tool, error) { - var allTools []common.Tool +func (l *LocalToolsLoader) LoadTools() ([]common.AnnotatedTool, error) { + var allTools []common.AnnotatedTool for _, loader := range l.loaders { categoryTools, err := loader.LoadTools() diff --git a/cli/azd/internal/agent/tools/mcp/loader.go b/cli/azd/internal/agent/tools/mcp/loader.go index 36428bb7ef9..91e16220962 100644 --- a/cli/azd/internal/agent/tools/mcp/loader.go +++ b/cli/azd/internal/agent/tools/mcp/loader.go @@ -56,14 +56,14 @@ func NewMcpToolsLoader(samplingHandler client.SamplingHandler) *McpToolsLoader { // and collects all tools from each successfully connected server. // Returns an error if the configuration cannot be parsed, but continues // processing other servers if individual server connections fail. 
-func (l *McpToolsLoader) LoadTools() ([]common.Tool, error) { +func (l *McpToolsLoader) LoadTools() ([]common.AnnotatedTool, error) { // Deserialize the embedded mcp.json configuration var config McpConfig if err := json.Unmarshal([]byte(_mcpJson), &config); err != nil { return nil, fmt.Errorf("failed to parse mcp.json: %w", err) } - var allTools []common.Tool + var allTools []common.AnnotatedTool // Iterate through each server configuration for serverName, serverConfig := range config.Servers { diff --git a/cli/azd/test/mocks/mockinput/mock_console.go b/cli/azd/test/mocks/mockinput/mock_console.go index 64eede8242f..861cfdbe90a 100644 --- a/cli/azd/test/mocks/mockinput/mock_console.go +++ b/cli/azd/test/mocks/mockinput/mock_console.go @@ -177,6 +177,11 @@ func (c *MockConsole) MultiSelect(ctx context.Context, options input.ConsoleOpti func (c *MockConsole) Flush() { } +// DoInteraction executes the provided action function and returns any error encountered +func (c *MockConsole) DoInteraction(action func() error) error { + return action() +} + // Finds a matching mock expression and returns the configured value func (c *MockConsole) respond(command string, options input.ConsoleOptions) (any, error) { var match *MockConsoleExpression From cac0351c2897f7797bf686a7fd19c83213c09f67 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Mon, 11 Aug 2025 18:17:38 -0700 Subject: [PATCH 061/116] Adds unit tests for reading files, LLM multi-partial write scenarios. --- .../tools/io/file_io_integration_test.go | 536 ++++++++++++++ .../internal/agent/tools/io/read_file_test.go | 690 ++++++++++++++++++ 2 files changed, 1226 insertions(+) create mode 100644 cli/azd/internal/agent/tools/io/file_io_integration_test.go create mode 100644 cli/azd/internal/agent/tools/io/read_file_test.go diff --git a/cli/azd/internal/agent/tools/io/file_io_integration_test.go b/cli/azd/internal/agent/tools/io/file_io_integration_test.go new file mode 100644 index 00000000000..b4d5aefc0eb --- /dev/null +++ b/cli/azd/internal/agent/tools/io/file_io_integration_test.go @@ -0,0 +1,536 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package io + +import ( + "context" + "encoding/json" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Real-world LLM workflow integration tests +// These simulate actual scenarios where LLMs read multiple sections, +// make edits via WriteFileTool, then read again to verify changes +func TestReadFileTool_LLMWorkflow_CodeAnalysisAndEdit(t *testing.T) { + // Simulate LLM analyzing a simple function and making changes + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "calculator.go") + + // Simple initial Go code + initialContent := `package main + +import "fmt" + +func add(a, b int) int { + return a + b +} + +func main() { + result := add(5, 3) + fmt.Println(result) +}` + + err := os.WriteFile(testFile, []byte(initialContent), 0600) + require.NoError(t, err) + + readTool := ReadFileTool{} + writeTool := WriteFileTool{} + + // Step 1: LLM reads the entire file to understand structure + readRequest1 := ReadFileRequest{ + FilePath: testFile, + } + result1, err := readTool.Call(context.Background(), mustMarshalJSON(readRequest1)) + assert.NoError(t, err) + + var response1 ReadFileResponse + err = json.Unmarshal([]byte(result1), &response1) + require.NoError(t, err) + assert.True(t, response1.Success) + assert.Contains(t, response1.Content, "func add") + assert.Contains(t, response1.Content, "func main") + + // Step 2: LLM reads just the add function (lines 5-7) + readRequest2 := ReadFileRequest{ + FilePath: testFile, + StartLine: 5, + EndLine: 7, + } + result2, err := readTool.Call(context.Background(), mustMarshalJSON(readRequest2)) + assert.NoError(t, err) + + var response2 ReadFileResponse + err = json.Unmarshal([]byte(result2), &response2) + require.NoError(t, err) + assert.True(t, response2.Success) + assert.Contains(t, response2.Content, "func add(a, b int) int") + assert.Equal(t, 3, response2.LineRange.LinesRead) + + // Step 3: LLM replaces the add function with a more robust version + newFunction := `func add(a, b int) int { + fmt.Printf("Adding %d + %d\n", a, b) + return a + b +}` + + writeRequest := WriteFileRequest{ + Filename: testFile, + Content: newFunction, + StartLine: 5, + EndLine: 7, + } + writeResult, err := writeTool.Call(context.Background(), mustMarshalJSON(writeRequest)) + assert.NoError(t, err) + assert.Contains(t, writeResult, `"success": true`) + + // Step 4: LLM reads the updated function to verify change + readRequest3 := ReadFileRequest{ + FilePath: testFile, + StartLine: 5, + EndLine: 8, + } + result3, err := readTool.Call(context.Background(), mustMarshalJSON(readRequest3)) + assert.NoError(t, err) + + var response3 ReadFileResponse + err = json.Unmarshal([]byte(result3), &response3) + require.NoError(t, err) + assert.True(t, response3.Success) + assert.Contains(t, response3.Content, "Printf") + assert.Contains(t, response3.Content, "Adding %d + %d") + + // Step 5: LLM reads main function (which may have shifted) + readRequest4 := ReadFileRequest{ + FilePath: testFile, + StartLine: 9, + EndLine: 12, + } + result4, err := readTool.Call(context.Background(), mustMarshalJSON(readRequest4)) + assert.NoError(t, err) + + var response4 ReadFileResponse + err = json.Unmarshal([]byte(result4), &response4) + require.NoError(t, err) + assert.True(t, response4.Success) + assert.Contains(t, response4.Content, "func main") +} + +func TestReadFileTool_LLMWorkflow_MultiplePartialReadsAndWrites(t *testing.T) { + // Simulate LLM working on a complex configuration file + tempDir := 
t.TempDir() + configFile := filepath.Join(tempDir, "config.yaml") + + initialConfig := `# Application Configuration +app: + name: "MyApp" + version: "1.0.0" + debug: false + +database: + host: "localhost" + port: 5432 + name: "myapp_db" + ssl: false + +redis: + host: "localhost" + port: 6379 + db: 0 + +logging: + level: "info" + format: "json" + output: "stdout" +` + + err := os.WriteFile(configFile, []byte(initialConfig), 0600) + require.NoError(t, err) + + readTool := ReadFileTool{} + writeTool := WriteFileTool{} + + // Step 1: LLM scans file structure (first 10 lines) + readRequest1 := ReadFileRequest{ + FilePath: configFile, + StartLine: 1, + EndLine: 10, + } + result1, err := readTool.Call(context.Background(), mustMarshalJSON(readRequest1)) + assert.NoError(t, err) + + var response1 ReadFileResponse + err = json.Unmarshal([]byte(result1), &response1) + require.NoError(t, err) + assert.True(t, response1.Success) + assert.Contains(t, response1.Content, "app:") + assert.Contains(t, response1.Content, "database:") + + // Step 2: LLM focuses on database section + readRequest2 := ReadFileRequest{ + FilePath: configFile, + StartLine: 7, + EndLine: 12, + } + result2, err := readTool.Call(context.Background(), mustMarshalJSON(readRequest2)) + assert.NoError(t, err) + + var response2 ReadFileResponse + err = json.Unmarshal([]byte(result2), &response2) + require.NoError(t, err) + assert.True(t, response2.Success) + assert.Contains(t, response2.Content, "host: \"localhost\"") + assert.Contains(t, response2.Content, "ssl: false") + + // Step 3: LLM updates database config for production + newDbConfig := `database: + host: "prod-db.example.com" + port: 5432 + name: "myapp_production" + ssl: true + pool_size: 20` + + writeRequest1 := WriteFileRequest{ + Filename: configFile, + Content: newDbConfig, + StartLine: 7, + EndLine: 11, + } + writeResult1, err := writeTool.Call(context.Background(), mustMarshalJSON(writeRequest1)) + assert.NoError(t, err) + assert.Contains(t, writeResult1, `"success": true`) + + // Step 4: LLM reads redis section (which should have moved due to previous edit) + readRequest3 := ReadFileRequest{ + FilePath: configFile, + StartLine: 13, + EndLine: 16, + } + result3, err := readTool.Call(context.Background(), mustMarshalJSON(readRequest3)) + assert.NoError(t, err) + + var response3 ReadFileResponse + err = json.Unmarshal([]byte(result3), &response3) + require.NoError(t, err) + assert.True(t, response3.Success) + assert.Contains(t, response3.Content, "redis:") + + // Step 5: LLM reads logging section to update it + readRequest4 := ReadFileRequest{ + FilePath: configFile, + StartLine: 17, + EndLine: 21, + } + result4, err := readTool.Call(context.Background(), mustMarshalJSON(readRequest4)) + assert.NoError(t, err) + + var response4 ReadFileResponse + err = json.Unmarshal([]byte(result4), &response4) + require.NoError(t, err) + assert.True(t, response4.Success) + assert.Contains(t, response4.Content, "logging:") + + // Step 6: LLM updates logging for production + newLoggingConfig := `logging: + level: "warn" + format: "structured" + output: "file" + file: "/var/log/myapp.log" + rotation: "daily"` + + writeRequest2 := WriteFileRequest{ + Filename: configFile, + Content: newLoggingConfig, + StartLine: 17, + EndLine: 20, + } + writeResult2, err := writeTool.Call(context.Background(), mustMarshalJSON(writeRequest2)) + assert.NoError(t, err) + assert.Contains(t, writeResult2, `"success": true`) + + // Step 7: LLM does final validation read of entire file + readRequestFinal := 
ReadFileRequest{ + FilePath: configFile, + } + resultFinal, err := readTool.Call(context.Background(), mustMarshalJSON(readRequestFinal)) + assert.NoError(t, err) + + var responseFinal ReadFileResponse + err = json.Unmarshal([]byte(resultFinal), &responseFinal) + require.NoError(t, err) + assert.True(t, responseFinal.Success) + assert.Contains(t, responseFinal.Content, "prod-db.example.com") + assert.Contains(t, responseFinal.Content, "ssl: true") + assert.Contains(t, responseFinal.Content, "level: \"warn\"") + assert.Contains(t, responseFinal.Content, "rotation: \"daily\"") +} + +func TestReadFileTool_LLMWorkflow_RefactoringWithContext(t *testing.T) { + // Simulate LLM refactoring a class by reading context around methods + tempDir := t.TempDir() + classFile := filepath.Join(tempDir, "user_service.py") + + initialPython := `"""User service for managing user operations.""" + +import logging +from typing import Optional, List +from database import Database + +class UserService: + """Service class for user management.""" + + def __init__(self, db: Database): + self.db = db + self.logger = logging.getLogger(__name__) + + def create_user(self, username: str, email: str) -> bool: + """Create a new user.""" + try: + self.logger.info(f"Creating user: {username}") + query = "INSERT INTO users (username, email) VALUES (?, ?)" + self.db.execute(query, (username, email)) + return True + except Exception as e: + self.logger.error(f"Failed to create user: {e}") + return False + + def get_user(self, user_id: int) -> Optional[dict]: + """Get user by ID.""" + try: + query = "SELECT * FROM users WHERE id = ?" + result = self.db.fetch_one(query, (user_id,)) + return result + except Exception as e: + self.logger.error(f"Failed to get user: {e}") + return None + + def delete_user(self, user_id: int) -> bool: + """Delete user by ID.""" + try: + query = "DELETE FROM users WHERE id = ?" 
+ self.db.execute(query, (user_id,)) + return True + except Exception as e: + self.logger.error(f"Failed to delete user: {e}") + return False +` + + err := os.WriteFile(classFile, []byte(initialPython), 0600) + require.NoError(t, err) + + readTool := ReadFileTool{} + writeTool := WriteFileTool{} + + // Step 1: LLM reads class definition and constructor + readRequest1 := ReadFileRequest{ + FilePath: classFile, + StartLine: 7, + EndLine: 12, + } + result1, err := readTool.Call(context.Background(), mustMarshalJSON(readRequest1)) + assert.NoError(t, err) + + var response1 ReadFileResponse + err = json.Unmarshal([]byte(result1), &response1) + require.NoError(t, err) + assert.True(t, response1.Success) + assert.Contains(t, response1.Content, "class UserService:") + assert.Contains(t, response1.Content, "__init__") + + // Step 2: LLM reads create_user method with some context + readRequest2 := ReadFileRequest{ + FilePath: classFile, + StartLine: 14, + EndLine: 22, + } + result2, err := readTool.Call(context.Background(), mustMarshalJSON(readRequest2)) + assert.NoError(t, err) + + var response2 ReadFileResponse + err = json.Unmarshal([]byte(result2), &response2) + require.NoError(t, err) + assert.True(t, response2.Success) + assert.Contains(t, response2.Content, "create_user") + assert.Contains(t, response2.Content, "INSERT INTO users") + + // Step 3: LLM refactors create_user method to add validation + improvedCreateUser := ` def create_user(self, username: str, email: str) -> bool: + """Create a new user with validation.""" + if not username or not email: + self.logger.warning("Username and email are required") + return False + + if "@" not in email: + self.logger.warning(f"Invalid email format: {email}") + return False + + try: + self.logger.info(f"Creating user: {username}") + query = "INSERT INTO users (username, email) VALUES (?, ?)" + self.db.execute(query, (username, email)) + return True + except Exception as e: + self.logger.error(f"Failed to create user: {e}") + return False` + + writeRequest1 := WriteFileRequest{ + Filename: classFile, + Content: improvedCreateUser, + StartLine: 14, + EndLine: 22, + } + writeResult1, err := writeTool.Call(context.Background(), mustMarshalJSON(writeRequest1)) + assert.NoError(t, err) + assert.Contains(t, writeResult1, `"success": true`) + + // Step 4: LLM reads get_user method (line numbers shifted due to edit) + readRequest3 := ReadFileRequest{ + FilePath: classFile, + StartLine: 31, + EndLine: 38, + } + result3, err := readTool.Call(context.Background(), mustMarshalJSON(readRequest3)) + assert.NoError(t, err) + + var response3 ReadFileResponse + err = json.Unmarshal([]byte(result3), &response3) + require.NoError(t, err) + assert.True(t, response3.Success) + assert.Contains(t, response3.Content, "get_user") + + // Step 5: LLM reads context around delete_user to understand the pattern + readRequest4 := ReadFileRequest{ + FilePath: classFile, + StartLine: 40, + EndLine: 47, + } + result4, err := readTool.Call(context.Background(), mustMarshalJSON(readRequest4)) + assert.NoError(t, err) + + var response4 ReadFileResponse + err = json.Unmarshal([]byte(result4), &response4) + require.NoError(t, err) + assert.True(t, response4.Success) + assert.Contains(t, response4.Content, "delete_user") + + // Step 6: LLM verifies the refactoring by reading the updated create_user method + readRequest5 := ReadFileRequest{ + FilePath: classFile, + StartLine: 14, + EndLine: 30, + } + result5, err := readTool.Call(context.Background(), mustMarshalJSON(readRequest5)) + 
assert.NoError(t, err) + + var response5 ReadFileResponse + err = json.Unmarshal([]byte(result5), &response5) + require.NoError(t, err) + assert.True(t, response5.Success) + assert.Contains(t, response5.Content, "if not username or not email:") + assert.Contains(t, response5.Content, "Invalid email format") + assert.True(t, response5.IsPartial) +} + +func TestReadFileTool_LLMWorkflow_HandleLineShifts(t *testing.T) { + // Test that reads work correctly after writes that change line counts + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "shifts.txt") + + initialContent := `Line 1 +Line 2 +Line 3 +Line 4 +Line 5 +Line 6 +Line 7 +Line 8 +Line 9 +Line 10` + + err := os.WriteFile(testFile, []byte(initialContent), 0600) + require.NoError(t, err) + + readTool := ReadFileTool{} + writeTool := WriteFileTool{} + + // Step 1: Read lines 3-5 + readRequest1 := ReadFileRequest{ + FilePath: testFile, + StartLine: 3, + EndLine: 5, + } + result1, err := readTool.Call(context.Background(), mustMarshalJSON(readRequest1)) + assert.NoError(t, err) + + var response1 ReadFileResponse + err = json.Unmarshal([]byte(result1), &response1) + require.NoError(t, err) + assert.True(t, response1.Success) + assert.Equal(t, "Line 3\nLine 4\nLine 5", response1.Content) + + // Step 2: Insert multiple lines at line 4, shifting everything down + insertContent := `Line 3 +New Line A +New Line B +New Line C +Line 4` + + writeRequest := WriteFileRequest{ + Filename: testFile, + Content: insertContent, + StartLine: 3, + EndLine: 4, + } + writeResult, err := writeTool.Call(context.Background(), mustMarshalJSON(writeRequest)) + assert.NoError(t, err) + assert.Contains(t, writeResult, `"success": true`) + + // Step 3: Try to read what was originally line 5 (now line 8) + readRequest2 := ReadFileRequest{ + FilePath: testFile, + StartLine: 8, + EndLine: 8, + } + result2, err := readTool.Call(context.Background(), mustMarshalJSON(readRequest2)) + assert.NoError(t, err) + + var response2 ReadFileResponse + err = json.Unmarshal([]byte(result2), &response2) + require.NoError(t, err) + assert.True(t, response2.Success) + assert.Equal(t, "Line 5", response2.Content) + + // Step 4: Read the new inserted content + readRequest3 := ReadFileRequest{ + FilePath: testFile, + StartLine: 4, + EndLine: 6, + } + result3, err := readTool.Call(context.Background(), mustMarshalJSON(readRequest3)) + assert.NoError(t, err) + + var response3 ReadFileResponse + err = json.Unmarshal([]byte(result3), &response3) + require.NoError(t, err) + assert.True(t, response3.Success) + assert.Contains(t, response3.Content, "New Line A") + assert.Contains(t, response3.Content, "New Line B") + assert.Contains(t, response3.Content, "New Line C") + + // Step 5: Verify total line count changed correctly + readRequestFull := ReadFileRequest{ + FilePath: testFile, + } + resultFull, err := readTool.Call(context.Background(), mustMarshalJSON(readRequestFull)) + assert.NoError(t, err) + + var responseFull ReadFileResponse + err = json.Unmarshal([]byte(resultFull), &responseFull) + require.NoError(t, err) + assert.True(t, responseFull.Success) + assert.Contains(t, responseFull.Message, "13 lines") // Originally 10, added 3, removed 1 +} diff --git a/cli/azd/internal/agent/tools/io/read_file_test.go b/cli/azd/internal/agent/tools/io/read_file_test.go new file mode 100644 index 00000000000..a5c6885837b --- /dev/null +++ b/cli/azd/internal/agent/tools/io/read_file_test.go @@ -0,0 +1,690 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. + +package io + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Helper function to marshal request structs to JSON strings +func mustMarshalJSON(v interface{}) string { + data, err := json.Marshal(v) + if err != nil { + panic(fmt.Sprintf("Failed to marshal JSON: %v", err)) + } + return string(data) +} + +func TestReadFileTool_Name(t *testing.T) { + tool := ReadFileTool{} + assert.Equal(t, "read_file", tool.Name()) +} + +func TestReadFileTool_Description(t *testing.T) { + tool := ReadFileTool{} + desc := tool.Description() + assert.Contains(t, desc, "Read file contents") + assert.Contains(t, desc, "startLine") + assert.Contains(t, desc, "endLine") + assert.Contains(t, desc, "JSON") +} + +func TestReadFileTool_Annotations(t *testing.T) { + tool := ReadFileTool{} + annotations := tool.Annotations() + assert.Equal(t, "Read File Contents", annotations.Title) + assert.NotNil(t, annotations.ReadOnlyHint) + assert.True(t, *annotations.ReadOnlyHint) + assert.NotNil(t, annotations.DestructiveHint) + assert.False(t, *annotations.DestructiveHint) + assert.NotNil(t, annotations.IdempotentHint) + assert.True(t, *annotations.IdempotentHint) + assert.NotNil(t, annotations.OpenWorldHint) + assert.False(t, *annotations.OpenWorldHint) +} + +func TestReadFileTool_Call_EmptyInput(t *testing.T) { + tool := ReadFileTool{} + result, err := tool.Call(context.Background(), "") + + assert.NoError(t, err) + + var errorResp common.ErrorResponse + err = json.Unmarshal([]byte(result), &errorResp) + require.NoError(t, err) + + assert.True(t, errorResp.Error) + assert.Contains(t, errorResp.Message, "No input provided") + assert.Contains(t, errorResp.Message, "JSON format") +} + +func TestReadFileTool_Call_InvalidJSON(t *testing.T) { + tool := ReadFileTool{} + result, err := tool.Call(context.Background(), "invalid json") + + assert.NoError(t, err) + + var errorResp common.ErrorResponse + err = json.Unmarshal([]byte(result), &errorResp) + require.NoError(t, err) + + assert.True(t, errorResp.Error) + assert.Contains(t, errorResp.Message, "Invalid JSON input") +} + +func TestReadFileTool_Call_MalformedJSON(t *testing.T) { + tool := ReadFileTool{} + result, err := tool.Call(context.Background(), `{"filePath": "test.txt", "unclosed": "value}`) + + assert.NoError(t, err) + + var errorResp common.ErrorResponse + err = json.Unmarshal([]byte(result), &errorResp) + require.NoError(t, err) + + assert.True(t, errorResp.Error) + assert.Contains(t, errorResp.Message, "Invalid JSON input") +} + +func TestReadFileTool_Call_MissingFilePath(t *testing.T) { + tool := ReadFileTool{} + input := `{"startLine": 1, "endLine": 10}` + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var errorResp common.ErrorResponse + err = json.Unmarshal([]byte(result), &errorResp) + require.NoError(t, err) + + assert.True(t, errorResp.Error) + assert.Contains(t, errorResp.Message, "filePath cannot be empty") +} + +func TestReadFileTool_Call_EmptyFilePath(t *testing.T) { + tool := ReadFileTool{} + input := `{"filePath": "", "startLine": 1}` + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var errorResp common.ErrorResponse + err = json.Unmarshal([]byte(result), &errorResp) + require.NoError(t, err) + + assert.True(t, errorResp.Error) + 
assert.Contains(t, errorResp.Message, "filePath cannot be empty") +} + +func TestReadFileTool_Call_FileNotFound(t *testing.T) { + tool := ReadFileTool{} + input := `{"filePath": "/nonexistent/file.txt"}` + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var errorResp common.ErrorResponse + err = json.Unmarshal([]byte(result), &errorResp) + require.NoError(t, err) + + assert.True(t, errorResp.Error) + assert.Contains(t, errorResp.Message, "File does not exist") + assert.Contains(t, errorResp.Message, "check file path spelling") +} + +func TestReadFileTool_Call_DirectoryInsteadOfFile(t *testing.T) { + tempDir := t.TempDir() + + tool := ReadFileTool{} + input := fmt.Sprintf(`{"filePath": "%s"}`, strings.ReplaceAll(tempDir, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var errorResp common.ErrorResponse + err = json.Unmarshal([]byte(result), &errorResp) + require.NoError(t, err) + + assert.True(t, errorResp.Error) + assert.Contains(t, errorResp.Message, "is a directory") + assert.Contains(t, errorResp.Message, "directory_list tool") +} + +func TestReadFileTool_ReadEntireSmallFile(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + testContent := "Line 1\nLine 2\nLine 3\nLine 4\nLine 5" + + err := os.WriteFile(testFile, []byte(testContent), 0600) + require.NoError(t, err) + + tool := ReadFileTool{} + input := fmt.Sprintf(`{"filePath": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var response ReadFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, testFile, response.FilePath) + assert.Equal(t, testContent, response.Content) + assert.False(t, response.IsTruncated) + assert.False(t, response.IsPartial) + assert.Nil(t, response.LineRange) + assert.Contains(t, response.Message, "Successfully read entire file (5 lines)") + assert.Greater(t, response.FileInfo.Size, int64(0)) + assert.False(t, response.FileInfo.ModifiedTime.IsZero()) + assert.NotEmpty(t, response.FileInfo.Permissions) +} + +func TestReadFileTool_ReadSingleLine(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + testContent := "Line 1\nLine 2\nLine 3\nLine 4\nLine 5" + + err := os.WriteFile(testFile, []byte(testContent), 0600) + require.NoError(t, err) + + tool := ReadFileTool{} + input := fmt.Sprintf(`{"filePath": "%s", "startLine": 3, "endLine": 3}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var response ReadFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, "Line 3", response.Content) + assert.False(t, response.IsTruncated) + assert.True(t, response.IsPartial) + require.NotNil(t, response.LineRange) + assert.Equal(t, 3, response.LineRange.StartLine) + assert.Equal(t, 3, response.LineRange.EndLine) + assert.Equal(t, 5, response.LineRange.TotalLines) + assert.Equal(t, 1, response.LineRange.LinesRead) + assert.Contains(t, response.Message, "Successfully read 1 lines (3-3)") +} + +func TestReadFileTool_ReadMultipleLines(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + testContent := "Line 1\nLine 2\nLine 3\nLine 4\nLine 5" + + err := os.WriteFile(testFile, 
[]byte(testContent), 0600) + require.NoError(t, err) + + tool := ReadFileTool{} + input := fmt.Sprintf(`{"filePath": "%s", "startLine": 2, "endLine": 4}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var response ReadFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, "Line 2\nLine 3\nLine 4", response.Content) + assert.False(t, response.IsTruncated) + assert.True(t, response.IsPartial) + require.NotNil(t, response.LineRange) + assert.Equal(t, 2, response.LineRange.StartLine) + assert.Equal(t, 4, response.LineRange.EndLine) + assert.Equal(t, 5, response.LineRange.TotalLines) + assert.Equal(t, 3, response.LineRange.LinesRead) + assert.Contains(t, response.Message, "Successfully read 3 lines (2-4)") +} + +func TestReadFileTool_ReadFromStartToLine(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + testContent := "Line 1\nLine 2\nLine 3\nLine 4\nLine 5" + + err := os.WriteFile(testFile, []byte(testContent), 0600) + require.NoError(t, err) + + tool := ReadFileTool{} + input := fmt.Sprintf(`{"filePath": "%s", "endLine": 3}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var response ReadFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, "Line 1\nLine 2\nLine 3", response.Content) + assert.True(t, response.IsPartial) + require.NotNil(t, response.LineRange) + assert.Equal(t, 1, response.LineRange.StartLine) + assert.Equal(t, 3, response.LineRange.EndLine) + assert.Equal(t, 3, response.LineRange.LinesRead) +} + +func TestReadFileTool_ReadFromLineToEnd(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + testContent := "Line 1\nLine 2\nLine 3\nLine 4\nLine 5" + + err := os.WriteFile(testFile, []byte(testContent), 0600) + require.NoError(t, err) + + tool := ReadFileTool{} + input := fmt.Sprintf(`{"filePath": "%s", "startLine": 3}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var response ReadFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, "Line 3\nLine 4\nLine 5", response.Content) + assert.True(t, response.IsPartial) + require.NotNil(t, response.LineRange) + assert.Equal(t, 3, response.LineRange.StartLine) + assert.Equal(t, 5, response.LineRange.EndLine) + assert.Equal(t, 3, response.LineRange.LinesRead) +} + +func TestReadFileTool_StartLineOutOfRange(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + testContent := "Line 1\nLine 2\nLine 3" + + err := os.WriteFile(testFile, []byte(testContent), 0600) + require.NoError(t, err) + + tool := ReadFileTool{} + input := fmt.Sprintf(`{"filePath": "%s", "startLine": 10, "endLine": 15}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var errorResp common.ErrorResponse + err = json.Unmarshal([]byte(result), &errorResp) + require.NoError(t, err) + + assert.True(t, errorResp.Error) + assert.Contains(t, errorResp.Message, "Start line 10 is greater than total lines 3") +} + +func TestReadFileTool_InvalidLineRange_StartGreaterThanEnd(t 
*testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + testContent := "Line 1\nLine 2\nLine 3\nLine 4\nLine 5" + + err := os.WriteFile(testFile, []byte(testContent), 0600) + require.NoError(t, err) + + tool := ReadFileTool{} + input := fmt.Sprintf(`{"filePath": "%s", "startLine": 4, "endLine": 2}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var errorResp common.ErrorResponse + err = json.Unmarshal([]byte(result), &errorResp) + require.NoError(t, err) + + assert.True(t, errorResp.Error) + assert.Contains(t, errorResp.Message, "Start line 4 is greater than end line 2") +} + +func TestReadFileTool_EndLineExceedsTotalLines(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + testContent := "Line 1\nLine 2\nLine 3" + + err := os.WriteFile(testFile, []byte(testContent), 0600) + require.NoError(t, err) + + tool := ReadFileTool{} + input := fmt.Sprintf(`{"filePath": "%s", "startLine": 2, "endLine": 10}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var response ReadFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, "Line 2\nLine 3", response.Content) + assert.True(t, response.IsPartial) + require.NotNil(t, response.LineRange) + assert.Equal(t, 2, response.LineRange.StartLine) + assert.Equal(t, 3, response.LineRange.EndLine) // Adjusted to total lines + assert.Equal(t, 2, response.LineRange.LinesRead) +} + +func TestReadFileTool_EmptyFile(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "empty.txt") + + err := os.WriteFile(testFile, []byte(""), 0600) + require.NoError(t, err) + + tool := ReadFileTool{} + input := fmt.Sprintf(`{"filePath": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var response ReadFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, "", response.Content) + assert.False(t, response.IsTruncated) + assert.False(t, response.IsPartial) + assert.Contains(t, response.Message, "Successfully read entire file (0 lines)") +} + +func TestReadFileTool_SingleLineFile(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "single.txt") + testContent := "Only one line" + + err := os.WriteFile(testFile, []byte(testContent), 0600) + require.NoError(t, err) + + tool := ReadFileTool{} + input := fmt.Sprintf(`{"filePath": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var response ReadFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, testContent, response.Content) + assert.False(t, response.IsTruncated) + assert.False(t, response.IsPartial) + assert.Contains(t, response.Message, "Successfully read entire file (1 lines)") +} + +func TestReadFileTool_FileWithOnlyNewlines(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "newlines.txt") + testContent := "\n\n\n" + + err := os.WriteFile(testFile, []byte(testContent), 0600) + require.NoError(t, err) + + tool := ReadFileTool{} + input := fmt.Sprintf(`{"filePath": "%s"}`, 
strings.ReplaceAll(testFile, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var response ReadFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, "\n\n", response.Content) // 3 empty lines joined with newlines = 2 newlines + assert.False(t, response.IsTruncated) + assert.False(t, response.IsPartial) + assert.Contains(t, response.Message, "Successfully read entire file (3 lines)") +} + +func TestReadFileTool_LargeFileWithoutLineRange(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "large.txt") + + // Create a file larger than 1MB + largeContent := strings.Repeat("This is a line that will be repeated many times to create a large file.\n", 20000) + err := os.WriteFile(testFile, []byte(largeContent), 0600) + require.NoError(t, err) + + // Verify file is actually large + fileInfo, err := os.Stat(testFile) + require.NoError(t, err) + require.Greater(t, fileInfo.Size(), int64(1024*1024)) // Greater than 1MB + + tool := ReadFileTool{} + input := fmt.Sprintf(`{"filePath": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var errorResp common.ErrorResponse + err = json.Unmarshal([]byte(result), &errorResp) + require.NoError(t, err) + + assert.True(t, errorResp.Error) + assert.Contains(t, errorResp.Message, "File") + assert.Contains(t, errorResp.Message, "is too large") + assert.Contains(t, errorResp.Message, "specify startLine and endLine") +} + +func TestReadFileTool_LargeFileWithLineRange(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "large.txt") + + // Create a file larger than 1MB + largeContent := strings.Repeat("This is line content that will be repeated many times.\n", 20000) + err := os.WriteFile(testFile, []byte(largeContent), 0600) + require.NoError(t, err) + + // Verify file is actually large + fileInfo, err := os.Stat(testFile) + require.NoError(t, err) + require.Greater(t, fileInfo.Size(), int64(1024*1024)) // Greater than 1MB + + tool := ReadFileTool{} + input := fmt.Sprintf(`{"filePath": "%s", "startLine": 100, "endLine": 102}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var response ReadFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.True(t, response.IsPartial) + require.NotNil(t, response.LineRange) + assert.Equal(t, 100, response.LineRange.StartLine) + assert.Equal(t, 102, response.LineRange.EndLine) + assert.Equal(t, 3, response.LineRange.LinesRead) + assert.Contains(t, response.Content, "This is line content") +} + +func TestReadFileTool_ContentTruncation(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "medium.txt") + + // Create content that exceeds 100KB (truncation threshold) + lineContent := strings.Repeat("A", 1000) // 1KB per line + lines := make([]string, 150) // 150KB total + for i := range lines { + lines[i] = fmt.Sprintf("Line %d: %s", i+1, lineContent) + } + content := strings.Join(lines, "\n") + + err := os.WriteFile(testFile, []byte(content), 0600) + require.NoError(t, err) + + tool := ReadFileTool{} + input := fmt.Sprintf(`{"filePath": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + 
var response ReadFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.True(t, response.IsTruncated) + assert.False(t, response.IsPartial) + assert.Contains(t, response.Content, "[content truncated]") + assert.Contains(t, response.Message, "content truncated due to size") + assert.Less(t, len(response.Content), len(content)) // Should be shorter than original +} + +func TestReadFileTool_SpecialCharacters(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "special.txt") + testContent := "Line with émojis 😀🎉\nLine with unicode: ñáéíóú\nLine with symbols: @#$%^&*()\nLine with tabs:\t\tand\tspaces" + + err := os.WriteFile(testFile, []byte(testContent), 0600) + require.NoError(t, err) + + tool := ReadFileTool{} + input := fmt.Sprintf(`{"filePath": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var response ReadFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, testContent, response.Content) + assert.Contains(t, response.Content, "😀🎉") + assert.Contains(t, response.Content, "ñáéíóú") + assert.Contains(t, response.Content, "\t") +} + +func TestReadFileTool_WindowsLineEndings(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "windows.txt") + // Use Windows line endings (CRLF) + testContent := "Line 1\r\nLine 2\r\nLine 3\r\n" + + err := os.WriteFile(testFile, []byte(testContent), 0600) + require.NoError(t, err) + + tool := ReadFileTool{} + input := fmt.Sprintf(`{"filePath": "%s", "startLine": 2, "endLine": 2}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var response ReadFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + // The scanner should handle CRLF properly and return just "Line 2" + assert.Equal(t, "Line 2", response.Content) + assert.Equal(t, 3, response.LineRange.TotalLines) +} + +func TestReadFileTool_FileInfoMetadata(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "metadata.txt") + testContent := "Test content for metadata" + + err := os.WriteFile(testFile, []byte(testContent), 0644) + require.NoError(t, err) + + // Get file info for comparison + expectedInfo, err := os.Stat(testFile) + require.NoError(t, err) + + tool := ReadFileTool{} + input := fmt.Sprintf(`{"filePath": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var response ReadFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, expectedInfo.Size(), response.FileInfo.Size) + assert.Equal(t, expectedInfo.Mode().String(), response.FileInfo.Permissions) + + // Check that modification time is within a reasonable range (within 1 minute) + timeDiff := response.FileInfo.ModifiedTime.Sub(expectedInfo.ModTime()) + assert.Less(t, timeDiff, time.Minute) + assert.Greater(t, timeDiff, -time.Minute) +} + +func TestReadFileTool_JSONResponseStructure(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "json_test.txt") + testContent := "Line 1\nLine 2" + + err := os.WriteFile(testFile, []byte(testContent), 0600) + require.NoError(t, 
err) + + tool := ReadFileTool{} + input := fmt.Sprintf(`{"filePath": "%s", "startLine": 1, "endLine": 1}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + // Test that result is valid JSON + var jsonResult map[string]interface{} + err = json.Unmarshal([]byte(result), &jsonResult) + require.NoError(t, err) + + // Check required fields exist + assert.Contains(t, jsonResult, "success") + assert.Contains(t, jsonResult, "filePath") + assert.Contains(t, jsonResult, "content") + assert.Contains(t, jsonResult, "isTruncated") + assert.Contains(t, jsonResult, "isPartial") + assert.Contains(t, jsonResult, "lineRange") + assert.Contains(t, jsonResult, "fileInfo") + assert.Contains(t, jsonResult, "message") + + // Verify JSON formatting (should be indented) + assert.Contains(t, result, "\n") // Should have newlines for formatting + assert.Contains(t, result, " ") // Should have indentation +} + +func TestReadFileTool_ZeroBasedToOneBasedConversion(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "indexing.txt") + testContent := "Line 1\nLine 2\nLine 3\nLine 4\nLine 5" + + err := os.WriteFile(testFile, []byte(testContent), 0600) + require.NoError(t, err) + + tool := ReadFileTool{} + + // Test reading line 1 (should be "Line 1", not "Line 2") + input := fmt.Sprintf(`{"filePath": "%s", "startLine": 1, "endLine": 1}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + result, err := tool.Call(context.Background(), input) + + assert.NoError(t, err) + + var response ReadFileResponse + err = json.Unmarshal([]byte(result), &response) + require.NoError(t, err) + + assert.True(t, response.Success) + assert.Equal(t, "Line 1", response.Content) + assert.Equal(t, 1, response.LineRange.StartLine) + assert.Equal(t, 1, response.LineRange.EndLine) +} From b24e242e11465d36e442e244ebd902e5a7ea7903 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Mon, 11 Aug 2025 18:30:45 -0700 Subject: [PATCH 062/116] Updates write tool description about partial writes --- cli/azd/internal/agent/tools/io/write_file.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/cli/azd/internal/agent/tools/io/write_file.go b/cli/azd/internal/agent/tools/io/write_file.go index 33d856a8e89..c9715b7c66a 100644 --- a/cli/azd/internal/agent/tools/io/write_file.go +++ b/cli/azd/internal/agent/tools/io/write_file.go @@ -74,6 +74,10 @@ func (t WriteFileTool) Description() string { return `Comprehensive file writing tool that handles full file writes, appends, and line-based partial updates. Returns JSON response with operation details. +CRITICAL SAFETY GUIDANCE FOR PARTIAL WRITES: +When making multiple partial writes to the same file, ALWAYS re-read the file between writes! +Line numbers shift when you insert/delete lines, causing corruption if you use stale line numbers. + Input: JSON payload with the following structure: { "filename": "path/to/file.txt", @@ -93,13 +97,12 @@ MODES: - "append": Add content to end of existing file - "create": Create file only if it doesn't exist -PARTIAL WRITES (line-based editing): -⚠️ IMPORTANT: Partial writes REQUIRE an existing file. Cannot create new files with line positioning. 
Add startLine and endLine to any "write" operation to replace specific lines in EXISTING files: - Both are 1-based and inclusive - startLine=5, endLine=8 replaces lines 5, 6, 7, and 8 - If endLine > file length, content is appended - File MUST exist for partial writes - use regular write mode for new files +- ALWAYS re-read file after writes that change line counts to get accurate line positions EXAMPLES: @@ -112,6 +115,12 @@ Append to file: Partial write (replace specific lines in EXISTING file): {"filename": "./config.json", "content": " \"newSetting\": true,\n \"version\": \"2.0\"", "startLine": 3, "endLine": 4} +Safe multi-step partial editing workflow: +1. {"filename": "file.py", "startLine": 1, "endLine": 50} // read_file to understand structure +2. {"filename": "file.py", "content": "new function", "startLine": 5, "endLine": 8} // first write +3. {"filename": "file.py", "startLine": 1, "endLine": 50} // RE-READ to get updated line numbers +4. {"filename": "file.py", "content": "updated content", "startLine": 12, "endLine": 15} // use fresh line numbers + Create only if doesn't exist: {"filename": "./new-file.txt", "content": "Initial content", "mode": "create"} From 79c048555d8f6258926d7185fe608c8f20e93101 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Tue, 12 Aug 2025 12:40:29 -0700 Subject: [PATCH 063/116] Fixes MCP tools --- cli/azd/internal/agent/consent/types.go | 6 +++--- cli/azd/internal/agent/tools/mcp/loader.go | 14 ++++++++++++++ 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/cli/azd/internal/agent/consent/types.go b/cli/azd/internal/agent/consent/types.go index fcb1dae1bf1..ef1b56881d6 100644 --- a/cli/azd/internal/agent/consent/types.go +++ b/cli/azd/internal/agent/consent/types.go @@ -27,8 +27,8 @@ const ( ConsentSession ConsentLevel = "session" ConsentProject ConsentLevel = "project" ConsentAlways ConsentLevel = "always" - ConsentServerAlways ConsentLevel = "server_always" // All tools from server - ConsentServerReadOnly ConsentLevel = "server_readonly" // Read-only tools from server + ConsentServerAlways ConsentLevel = "server-always" // All tools from server + ConsentServerReadOnly ConsentLevel = "server-readonly" // Read-only tools from server ) const ( @@ -47,7 +47,7 @@ type ConsentRule struct { ToolID string `json:"toolId"` Permission ConsentLevel `json:"permission"` RuleScope RuleScope `json:"scope,omitempty"` // Defaults to "all" for backward compatibility - GrantedAt time.Time `json:"granted_at"` + GrantedAt time.Time `json:"grantedAt"` } // ConsentConfig represents the MCP consent configuration diff --git a/cli/azd/internal/agent/tools/mcp/loader.go b/cli/azd/internal/agent/tools/mcp/loader.go index 91e16220962..6d614d3516b 100644 --- a/cli/azd/internal/agent/tools/mcp/loader.go +++ b/cli/azd/internal/agent/tools/mcp/loader.go @@ -78,6 +78,20 @@ func (l *McpToolsLoader) LoadTools() ([]common.AnnotatedTool, error) { continue } + initRequest := mcp.InitializeRequest{} + initRequest.Params.ProtocolVersion = mcp.LATEST_PROTOCOL_VERSION + initRequest.Params.ClientInfo = mcp.Implementation{ + Name: "Azure Developer CLI (azd)", + Version: "1.0.1", + } + + initResult, err := mcpClient.Initialize(context.Background(), initRequest) + if err != nil { + return nil, fmt.Errorf("initialize: %w", err) + } + + log.Printf("Initialized MCP client for server %s (%s)", initResult.ServerInfo.Name, initResult.ServerInfo.Version) + // Get tools directly from MCP client toolsRequest := mcp.ListToolsRequest{} toolsResult, err := mcpClient.ListTools(ctx, 
toolsRequest) From f498b4c8c8d9893f0372e112296cef8b8f458c1d Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Tue, 12 Aug 2025 16:12:28 -0700 Subject: [PATCH 064/116] Adds consent system for sampling requests --- cli/azd/cmd/init.go | 4 +- cli/azd/cmd/mcp.go | 254 +++++++++++++++--- cli/azd/internal/agent/agent_factory.go | 25 +- cli/azd/internal/agent/consent/checker.go | 247 +++++++++++++++-- .../agent/consent/consent_wrapper_tool.go | 4 + cli/azd/internal/agent/consent/manager.go | 97 ++++++- cli/azd/internal/agent/consent/types.go | 44 ++- cli/azd/internal/agent/tools/io/copy_file.go | 3 +- .../internal/agent/tools/io/read_file_test.go | 5 +- .../agent/tools/mcp/sampling_handler.go | 83 +++++- cli/azd/internal/mcp/tools/azd_sample.go | 48 ++++ 11 files changed, 725 insertions(+), 89 deletions(-) create mode 100644 cli/azd/internal/mcp/tools/azd_sample.go diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go index 705506d21b9..23b0451176c 100644 --- a/cli/azd/cmd/init.go +++ b/cli/azd/cmd/init.go @@ -376,13 +376,15 @@ func (i *initAction) initAppWithCopilot(ctx context.Context) error { // Warn user that this is an alpha feature i.console.WarnForFeature(ctx, llm.FeatureLlm) - azdAgent, err := i.agentFactory.Create( + azdAgent, cleanup, err := i.agentFactory.Create( agent.WithDebug(i.flags.global.EnableDebugLogging), ) if err != nil { return err } + defer cleanup() + type initStep struct { Name string Description string diff --git a/cli/azd/cmd/mcp.go b/cli/azd/cmd/mcp.go index dd878e16dec..49a96f4ad0a 100644 --- a/cli/azd/cmd/mcp.go +++ b/cli/azd/cmd/mcp.go @@ -162,6 +162,7 @@ func (a *mcpStartAction) Run(ctx context.Context) (*actions.ActionResult, error) tools.NewAzdIacGenerationRulesTool(), tools.NewAzdProjectValidationTool(), tools.NewAzdYamlSchemaTool(), + tools.NewSamplingTool(), } s.AddTools(allTools...) 
@@ -176,9 +177,10 @@ func (a *mcpStartAction) Run(ctx context.Context) (*actions.ActionResult, error) // Flags for MCP consent commands type mcpConsentFlags struct { - global *internal.GlobalCommandOptions - scope string - toolID string + global *internal.GlobalCommandOptions + scope string + toolID string + ruleType string } func newMcpConsentFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpConsentFlags { @@ -189,8 +191,9 @@ func newMcpConsentFlags(cmd *cobra.Command, global *internal.GlobalCommandOption func (f *mcpConsentFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { f.global = global - local.StringVar(&f.scope, "scope", "global", "Consent scope (global, project, session)") + local.StringVar(&f.scope, "scope", "global", "Consent scope (global, project)") local.StringVar(&f.toolID, "tool-id", "", "Specific tool ID to operate on") + local.StringVar(&f.ruleType, "type", "", "Rule type to filter by (tool, sampling)") } // Flags for MCP consent grant command @@ -200,6 +203,7 @@ type mcpConsentGrantFlags struct { server string globalFlag bool scope string + ruleType string } func newMcpConsentGrantFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpConsentGrantFlags { @@ -214,6 +218,7 @@ func (f *mcpConsentGrantFlags) Bind(local *pflag.FlagSet, global *internal.Globa local.StringVar(&f.server, "server", "", "Server name") local.BoolVar(&f.globalFlag, "global", false, "Apply globally to all servers") local.StringVar(&f.scope, "scope", "all", "Scope of the rule: 'all' or 'read-only'") + local.StringVar(&f.ruleType, "type", "tool", "Type of rule: 'tool' or 'sampling'") } // Action for MCP consent list command @@ -221,6 +226,7 @@ type mcpConsentListAction struct { flags *mcpConsentFlags formatter output.Formatter writer io.Writer + console input.Console userConfigManager config.UserConfigManager consentManager consent.ConsentManager } @@ -229,6 +235,7 @@ func newMcpConsentListAction( flags *mcpConsentFlags, formatter output.Formatter, writer io.Writer, + console input.Console, userConfigManager config.UserConfigManager, consentManager consent.ConsentManager, ) actions.Action { @@ -236,31 +243,50 @@ func newMcpConsentListAction( flags: flags, formatter: formatter, writer: writer, + console: console, userConfigManager: userConfigManager, consentManager: consentManager, } } func (a *mcpConsentListAction) Run(ctx context.Context) (*actions.ActionResult, error) { + // Command heading + fmt.Fprintf(a.writer, "Listing MCP consent rules...\n\n") + var scope consent.ConsentScope switch a.flags.scope { case "global": scope = consent.ScopeGlobal case "project": scope = consent.ScopeProject - case "session": - scope = consent.ScopeSession default: - return nil, fmt.Errorf("invalid scope: %s", a.flags.scope) + return nil, fmt.Errorf("invalid scope: %s (allowed: global, project)", a.flags.scope) + } + + var rules []consent.ConsentRule + var err error + + // Use type-filtered method if type is specified + if a.flags.ruleType != "" { + ruleType, parseErr := consent.ParseConsentRuleType(a.flags.ruleType) + if parseErr != nil { + return nil, parseErr + } + rules, err = a.consentManager.ListConsentsByType(ctx, scope, ruleType) + } else { + rules, err = a.consentManager.ListConsents(ctx, scope) } - rules, err := a.consentManager.ListConsents(ctx, scope) if err != nil { return nil, fmt.Errorf("failed to list consent rules: %w", err) } if len(rules) == 0 { - fmt.Fprintf(a.writer, "No consent rules found for scope: %s\n", a.flags.scope) + typeInfo := "" + 
if a.flags.ruleType != "" { + typeInfo = fmt.Sprintf(" of type '%s'", a.flags.ruleType) + } + fmt.Fprintf(a.writer, "No consent rules found for scope: %s%s\n", a.flags.scope, typeInfo) return nil, nil } @@ -271,16 +297,19 @@ func (a *mcpConsentListAction) Run(ctx context.Context) (*actions.ActionResult, // Table format fmt.Fprintf(a.writer, "Consent Rules (%s scope):\n", a.flags.scope) - fmt.Fprintf(a.writer, "%-40s %-15s %-20s\n", "Tool ID", "Permission", "Granted At") - fmt.Fprintf(a.writer, "%s\n", strings.Repeat("-", 75)) + fmt.Fprintf(a.writer, "%-10s %-35s %-15s %-20s\n", "Type", "Tool ID", "Permission", "Granted At") + fmt.Fprintf(a.writer, "%s\n", strings.Repeat("-", 80)) for _, rule := range rules { - fmt.Fprintf(a.writer, "%-40s %-15s %-20s\n", + fmt.Fprintf(a.writer, "%-10s %-35s %-15s %-20s\n", + rule.Type, rule.ToolID, rule.Permission, rule.GrantedAt.Format("2006-01-02 15:04:05")) } + fmt.Fprintf(a.writer, "\nListed %d consent rule(s)\n", len(rules)) + return nil, nil } @@ -307,16 +336,17 @@ func newMcpConsentClearAction( } func (a *mcpConsentClearAction) Run(ctx context.Context) (*actions.ActionResult, error) { + // Command heading + fmt.Fprintf(a.console.Handles().Stdout, "Clearing MCP consent rules...\n\n") + var scope consent.ConsentScope switch a.flags.scope { case "global": scope = consent.ScopeGlobal case "project": scope = consent.ScopeProject - case "session": - scope = consent.ScopeSession default: - return nil, fmt.Errorf("invalid scope: %s", a.flags.scope) + return nil, fmt.Errorf("invalid scope: %s (allowed: global, project)", a.flags.scope) } var err error @@ -327,9 +357,19 @@ func (a *mcpConsentClearAction) Run(ctx context.Context) (*actions.ActionResult, fmt.Fprintf(a.console.Handles().Stdout, "Cleared consent for tool: %s\n", a.flags.toolID) } } else { - // Clear all rules for scope + // Get confirmation message based on type filter + confirmMessage := fmt.Sprintf("Are you sure you want to clear all consent rules for scope '%s'?", a.flags.scope) + if a.flags.ruleType != "" { + confirmMessage = fmt.Sprintf( + "Are you sure you want to clear all %s consent rules for scope '%s'?", + a.flags.ruleType, + a.flags.scope, + ) + } + + // Clear all rules for scope (with optional type filtering) confirmed, confirmErr := a.console.Confirm(ctx, input.ConsoleOptions{ - Message: fmt.Sprintf("Are you sure you want to clear all consent rules for scope '%s'?", a.flags.scope), + Message: confirmMessage, }) if confirmErr != nil { return nil, confirmErr @@ -340,9 +380,28 @@ func (a *mcpConsentClearAction) Run(ctx context.Context) (*actions.ActionResult, return nil, nil } - err = a.consentManager.ClearConsents(ctx, scope) - if err == nil { - fmt.Fprintf(a.console.Handles().Stdout, "Cleared all consent rules for scope: %s\n", a.flags.scope) + if a.flags.ruleType != "" { + // Type-specific clearing using the new consent manager method + ruleType, parseErr := consent.ParseConsentRuleType(a.flags.ruleType) + if parseErr != nil { + return nil, parseErr + } + + err = a.consentManager.ClearConsentsByType(ctx, scope, ruleType) + if err == nil { + fmt.Fprintf( + a.console.Handles().Stdout, + "Cleared all %s consent rules for scope: %s\n", + a.flags.ruleType, + a.flags.scope, + ) + } + } else { + // Clear all rules for scope + err = a.consentManager.ClearConsents(ctx, scope) + if err == nil { + fmt.Fprintf(a.console.Handles().Stdout, "Cleared all consent rules for scope: %s\n", a.flags.scope) + } } } @@ -376,6 +435,9 @@ func newMcpConsentGrantAction( } func (a *mcpConsentGrantAction) Run(ctx 
context.Context) (*actions.ActionResult, error) { + // Command heading + fmt.Fprintf(a.console.Handles().Stdout, "Granting MCP consent rules...\n\n") + // Validate flag combinations if a.flags.tool != "" && a.flags.server == "" { return nil, fmt.Errorf("--tool requires --server") @@ -394,6 +456,17 @@ func (a *mcpConsentGrantAction) Run(ctx context.Context) (*actions.ActionResult, return nil, fmt.Errorf("--scope must be 'all' or 'read-only'") } + // Validate type using the new parser + ruleType, err := consent.ParseConsentRuleType(a.flags.ruleType) + if err != nil { + return nil, err + } + + // For sampling type, tool-specific grants are not supported + if ruleType == consent.ConsentRuleTypeSampling && a.flags.tool != "" { + return nil, fmt.Errorf("--tool is not supported for sampling rules") + } + // Build rule var toolID string var ruleScope consent.RuleScope @@ -407,10 +480,18 @@ func (a *mcpConsentGrantAction) Run(ctx context.Context) (*actions.ActionResult, if a.flags.globalFlag { toolID = "*" - if a.flags.scope == "read-only" { - description = "all read-only tools globally" + if ruleType == consent.ConsentRuleTypeSampling { + if a.flags.scope == "read-only" { + description = "all read-only sampling globally" + } else { + description = "all sampling globally" + } } else { - description = "all tools globally" + if a.flags.scope == "read-only" { + description = "all read-only tools globally" + } else { + description = "all tools globally" + } } } else if a.flags.tool != "" { toolID = fmt.Sprintf("%s/%s", a.flags.server, a.flags.tool) @@ -421,14 +502,23 @@ func (a *mcpConsentGrantAction) Run(ctx context.Context) (*actions.ActionResult, } } else { toolID = fmt.Sprintf("%s/*", a.flags.server) - if a.flags.scope == "read-only" { - description = fmt.Sprintf("read-only tools from server %s", a.flags.server) + if ruleType == consent.ConsentRuleTypeSampling { + if a.flags.scope == "read-only" { + description = fmt.Sprintf("read-only sampling from server %s", a.flags.server) + } else { + description = fmt.Sprintf("all sampling from server %s", a.flags.server) + } } else { - description = fmt.Sprintf("all tools from server %s", a.flags.server) + if a.flags.scope == "read-only" { + description = fmt.Sprintf("read-only tools from server %s", a.flags.server) + } else { + description = fmt.Sprintf("all tools from server %s", a.flags.server) + } } } rule := consent.ConsentRule{ + Type: ruleType, ToolID: toolID, Permission: consent.ConsentAlways, RuleScope: ruleScope, @@ -451,6 +541,7 @@ type mcpConsentRevokeFlags struct { globalFlag bool scope string toolPattern string + ruleType string } func newMcpConsentRevokeFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpConsentRevokeFlags { @@ -471,6 +562,7 @@ func (f *mcpConsentRevokeFlags) Bind(local *pflag.FlagSet, global *internal.Glob "", "Revoke trust for a specific rule pattern (e.g., 'server/tool' or 'server/*')", ) + local.StringVar(&f.ruleType, "type", "", "Type of rule to revoke: 'tool' or 'sampling' (default: all types)") } // Action for MCP consent revoke command @@ -496,6 +588,9 @@ func newMcpConsentRevokeAction( } func (a *mcpConsentRevokeAction) Run(ctx context.Context) (*actions.ActionResult, error) { + // Command heading + fmt.Fprintf(a.console.Handles().Stdout, "Revoking MCP consent rules...\n\n") + // Count options set optionsSet := 0 if a.flags.globalFlag { @@ -530,18 +625,51 @@ func (a *mcpConsentRevokeAction) Run(ctx context.Context) (*actions.ActionResult return nil, fmt.Errorf("--scope must be 'all' or 'read-only'") } 
+ // Validate type if specified + var ruleType consent.ConsentRuleType + if a.flags.ruleType != "" { + var err error + ruleType, err = consent.ParseConsentRuleType(a.flags.ruleType) + if err != nil { + return nil, err + } + + // For sampling type, tool-specific revocations are not supported + if ruleType == consent.ConsentRuleTypeSampling && a.flags.tool != "" { + return nil, fmt.Errorf("--tool is not supported for sampling rules") + } + } + var toolID string var description string if a.flags.toolPattern != "" { toolID = a.flags.toolPattern - description = fmt.Sprintf("trust for pattern: %s", a.flags.toolPattern) + if a.flags.ruleType != "" { + description = fmt.Sprintf("%s trust for pattern: %s", a.flags.ruleType, a.flags.toolPattern) + } else { + description = fmt.Sprintf("trust for pattern: %s", a.flags.toolPattern) + } } else if a.flags.globalFlag { toolID = "*" - if a.flags.scope == "read-only" { - description = "global read-only trust" + if a.flags.ruleType == "sampling" { + if a.flags.scope == "read-only" { + description = "global read-only sampling trust" + } else { + description = "global sampling trust" + } + } else if a.flags.ruleType == "tool" { + if a.flags.scope == "read-only" { + description = "global read-only tool trust" + } else { + description = "global tool trust" + } } else { - description = "global trust" + if a.flags.scope == "read-only" { + description = "global read-only trust" + } else { + description = "global trust" + } } } else if a.flags.tool != "" { toolID = fmt.Sprintf("%s/%s", a.flags.server, a.flags.tool) @@ -552,18 +680,74 @@ func (a *mcpConsentRevokeAction) Run(ctx context.Context) (*actions.ActionResult } } else { toolID = fmt.Sprintf("%s/*", a.flags.server) - if a.flags.scope == "read-only" { - description = fmt.Sprintf("read-only trust for server: %s", a.flags.server) + if a.flags.ruleType == "sampling" { + if a.flags.scope == "read-only" { + description = fmt.Sprintf("read-only sampling trust for server: %s", a.flags.server) + } else { + description = fmt.Sprintf("sampling trust for server: %s", a.flags.server) + } + } else if a.flags.ruleType == "tool" { + if a.flags.scope == "read-only" { + description = fmt.Sprintf("read-only tool trust for server: %s", a.flags.server) + } else { + description = fmt.Sprintf("tool trust for server: %s", a.flags.server) + } } else { - description = fmt.Sprintf("trust for server: %s", a.flags.server) + if a.flags.scope == "read-only" { + description = fmt.Sprintf("read-only trust for server: %s", a.flags.server) + } else { + description = fmt.Sprintf("trust for server: %s", a.flags.server) + } } } - if err := a.consentManager.ClearConsentByToolID(ctx, toolID, consent.ScopeGlobal); err != nil { - return nil, fmt.Errorf("failed to revoke consent: %w", err) + // If type filtering is requested, use the new consent manager method + if a.flags.ruleType != "" { + rules, err := a.consentManager.ListConsentsByType(ctx, consent.ScopeGlobal, ruleType) + if err != nil { + return nil, fmt.Errorf("failed to list consent rules: %w", err) + } + + rulesToClear := make([]consent.ConsentRule, 0) + for _, rule := range rules { + if ruleMatchesPattern(rule.ToolID, toolID) { + rulesToClear = append(rulesToClear, rule) + } + } + + if len(rulesToClear) == 0 { + fmt.Fprintf(a.console.Handles().Stdout, "No matching %s rules found\n", a.flags.ruleType) + return nil, nil + } + + for _, rule := range rulesToClear { + if err := a.consentManager.ClearConsentByToolID(ctx, rule.ToolID, consent.ScopeGlobal); err != nil { + return nil, 
fmt.Errorf("failed to revoke consent for %s: %w", rule.ToolID, err) + } + } + } else { + if err := a.consentManager.ClearConsentByToolID(ctx, toolID, consent.ScopeGlobal); err != nil { + return nil, fmt.Errorf("failed to revoke consent: %w", err) + } } fmt.Fprintf(a.console.Handles().Stdout, "Revoked %s\n", description) return nil, nil } + +// ruleMatchesPattern checks if a rule's toolID matches the given pattern +func ruleMatchesPattern(ruleToolID, pattern string) bool { + if pattern == "*" { + return true + } + if pattern == ruleToolID { + return true + } + // Handle server/* patterns + if strings.HasSuffix(pattern, "/*") { + serverPattern := strings.TrimSuffix(pattern, "/*") + return strings.HasPrefix(ruleToolID, serverPattern+"/") || ruleToolID == serverPattern+"/*" + } + return false +} diff --git a/cli/azd/internal/agent/agent_factory.go b/cli/azd/internal/agent/agent_factory.go index 1b704930513..864a85caee0 100644 --- a/cli/azd/internal/agent/agent_factory.go +++ b/cli/azd/internal/agent/agent_factory.go @@ -6,44 +6,49 @@ import ( localtools "github.com/azure/azure-dev/cli/azd/internal/agent/tools" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" mcptools "github.com/azure/azure-dev/cli/azd/internal/agent/tools/mcp" + "github.com/azure/azure-dev/cli/azd/pkg/input" "github.com/azure/azure-dev/cli/azd/pkg/llm" ) type AgentFactory struct { consentManager consent.ConsentManager llmManager *llm.Manager + console input.Console } func NewAgentFactory( consentManager consent.ConsentManager, + console input.Console, llmManager *llm.Manager, ) *AgentFactory { return &AgentFactory{ consentManager: consentManager, llmManager: llmManager, + console: console, } } -func (f *AgentFactory) Create(opts ...AgentOption) (Agent, error) { +func (f *AgentFactory) Create(opts ...AgentOption) (Agent, func() error, error) { fileLogger, cleanup, err := logging.NewFileLoggerDefault() if err != nil { - return nil, err + return nil, cleanup, err } - defer cleanup() defaultModelContainer, err := f.llmManager.GetDefaultModel(llm.WithLogger(fileLogger)) if err != nil { - return nil, err + return nil, cleanup, err } - samplingModelContainer, err := f.llmManager.GetDefaultModel() + samplingModelContainer, err := f.llmManager.GetDefaultModel(llm.WithLogger(fileLogger)) if err != nil { - return nil, err + return nil, cleanup, err } // Create sampling handler for MCP samplingHandler := mcptools.NewMcpSamplingHandler( - samplingModelContainer.Model, + f.consentManager, + f.console, + samplingModelContainer, ) toolLoaders := []localtools.ToolLoader{ @@ -63,7 +68,7 @@ func (f *AgentFactory) Create(opts ...AgentOption) (Agent, error) { for _, toolLoader := range toolLoaders { categoryTools, err := toolLoader.LoadTools() if err != nil { - return nil, err + return nil, cleanup, err } // Filter out excluded tools @@ -82,8 +87,8 @@ func (f *AgentFactory) Create(opts ...AgentOption) (Agent, error) { azdAgent, err := NewConversationalAzdAiAgent(defaultModelContainer.Model, allOptions...) 
if err != nil { - return nil, err + return nil, cleanup, err } - return azdAgent, nil + return azdAgent, cleanup, nil } diff --git a/cli/azd/internal/agent/consent/checker.go b/cli/azd/internal/agent/consent/checker.go index 07149c74f79..de0eeb4f10e 100644 --- a/cli/azd/internal/agent/consent/checker.go +++ b/cli/azd/internal/agent/consent/checker.go @@ -41,13 +41,94 @@ func (cc *ConsentChecker) CheckToolConsent( consentRequest := ConsentRequest{ ToolID: toolID, ServerName: cc.serverName, - SessionID: "", // Not needed since each manager represents one session + Type: ConsentRuleTypeTool, // This is a tool execution request + SessionID: "", // Not needed since each manager represents one session Annotations: annotations, } return cc.consentMgr.CheckConsent(ctx, consentRequest) } +// CheckSamplingConsent checks sampling consent for a specific tool +func (cc *ConsentChecker) CheckSamplingConsent( + ctx context.Context, + toolName string, +) (*ConsentDecision, error) { + toolID := fmt.Sprintf("%s/%s", cc.serverName, toolName) + + // Create consent request for sampling + consentRequest := ConsentRequest{ + ToolID: toolID, + ServerName: cc.serverName, + Type: ConsentRuleTypeSampling, // This is a sampling request + SessionID: "", // Not needed since each manager represents one session + } + + return cc.consentMgr.CheckConsent(ctx, consentRequest) +} + +// formatToolDescriptionWithAnnotations creates a formatted description with tool annotations as bullet points +func (cc *ConsentChecker) formatToolDescriptionWithAnnotations( + toolDesc string, + annotations mcp.ToolAnnotation, +) string { + if toolDesc == "" { + toolDesc = "No description available" + } + + // Start with the base description + description := toolDesc + + // Collect annotation information + var annotationBullets []string + + if annotations.Title != "" { + annotationBullets = append(annotationBullets, fmt.Sprintf("• Title: %s", annotations.Title)) + } + + if annotations.ReadOnlyHint != nil { + if *annotations.ReadOnlyHint { + annotationBullets = append(annotationBullets, "• Read-only operation") + } else { + annotationBullets = append(annotationBullets, "• May modify data") + } + } + + if annotations.DestructiveHint != nil { + if *annotations.DestructiveHint { + annotationBullets = append(annotationBullets, "• Potentially destructive operation") + } else { + annotationBullets = append(annotationBullets, "• Non-destructive operation") + } + } + + if annotations.IdempotentHint != nil { + if *annotations.IdempotentHint { + annotationBullets = append(annotationBullets, "• Idempotent (safe to retry)") + } else { + annotationBullets = append(annotationBullets, "• Not idempotent (may have side effects on retry)") + } + } + + if annotations.OpenWorldHint != nil { + if *annotations.OpenWorldHint { + annotationBullets = append(annotationBullets, "• May access external resources") + } else { + annotationBullets = append(annotationBullets, "• Operates on local resources only") + } + } + + // Append annotations as bullet list if any exist + if len(annotationBullets) > 0 { + description += "\n\nTool characteristics:" + for _, bullet := range annotationBullets { + description += "\n" + bullet + } + } + + return description +} + // PromptAndGrantConsent shows consent prompt and grants permission based on user choice func (cc *ConsentChecker) PromptAndGrantConsent( ctx context.Context, @@ -56,7 +137,7 @@ func (cc *ConsentChecker) PromptAndGrantConsent( ) error { toolID := fmt.Sprintf("%s/%s", cc.serverName, toolName) - choice, err := 
cc.promptForConsent(ctx, toolName, toolDesc, annotations) + choice, err := cc.promptForToolConsent(ctx, toolName, toolDesc, annotations) if err != nil { return fmt.Errorf("consent prompt failed: %w", err) } @@ -66,22 +147,22 @@ func (cc *ConsentChecker) PromptAndGrantConsent( } // Grant consent based on user choice - return cc.grantConsentFromChoice(ctx, toolID, choice) + return cc.grantConsentFromChoice(ctx, toolID, choice, ConsentRuleTypeTool) } -// promptForConsent shows an interactive consent prompt and returns the user's choice -func (cc *ConsentChecker) promptForConsent( +// promptForToolConsent shows an interactive consent prompt and returns the user's choice +func (cc *ConsentChecker) promptForToolConsent( ctx context.Context, toolName, toolDesc string, annotations mcp.ToolAnnotation, ) (string, error) { message := fmt.Sprintf( - "Tool %s from server %s requires consent.\n\nHow would you like to proceed?", + "Tool %s (%s) requires consent.\n\nHow would you like to proceed?", output.WithHighLightFormat(toolName), output.WithHighLightFormat(cc.serverName), ) - helpMessage := toolDesc + helpMessage := cc.formatToolDescriptionWithAnnotations(toolDesc, annotations) choices := []*ux.SelectChoice{ { @@ -107,7 +188,7 @@ func (cc *ConsentChecker) promptForConsent( } // Add server trust option if not already trusted - if !cc.isServerAlreadyTrusted(ctx) { + if !cc.isServerAlreadyTrusted(ctx, ConsentRuleTypeTool) { choices = append(choices, &ux.SelectChoice{ Value: "server", Label: "Allow all tools from this server", @@ -128,6 +209,12 @@ func (cc *ConsentChecker) promptForConsent( }) } + // Add global sampling trust option + choices = append(choices, &ux.SelectChoice{ + Value: "global", + Label: "Allow all tools from any server", + }) + selector := ux.NewSelect(&ux.SelectOptions{ Message: message, HelpMessage: helpMessage, @@ -148,17 +235,22 @@ func (cc *ConsentChecker) promptForConsent( return choices[*choiceIndex].Value, nil } -// isServerAlreadyTrusted checks if the server is already trusted -func (cc *ConsentChecker) isServerAlreadyTrusted(ctx context.Context) bool { - // Create a mock tool request to check if server has full trust +// isServerAlreadyTrusted checks if the server is already trusted for the specified rule type +func (cc *ConsentChecker) isServerAlreadyTrusted(ctx context.Context, ruleType ConsentRuleType) bool { + // Create a mock request to check if server has trust for the specified rule type request := ConsentRequest{ - ToolID: fmt.Sprintf("%s/test-tool", cc.serverName), - ServerName: cc.serverName, - SessionID: "", // Not needed since each manager represents one session - Annotations: mcp.ToolAnnotation{}, // No readonly hint + ToolID: fmt.Sprintf("%s/test-tool", cc.serverName), + ServerName: cc.serverName, + Type: ruleType, + SessionID: "", } - // Check if server has full trust (not readonly-only) + // For tool requests, add annotations to avoid readonly-only matches + if ruleType == ConsentRuleTypeTool { + request.Annotations = mcp.ToolAnnotation{} // No readonly hint + } + + // Check if server has trust for this rule type decision, err := cc.consentMgr.CheckConsent(ctx, request) if err != nil { return false @@ -169,31 +261,40 @@ func (cc *ConsentChecker) isServerAlreadyTrusted(ctx context.Context) bool { } // grantConsentFromChoice processes the user's consent choice and saves the appropriate rule -func (cc *ConsentChecker) grantConsentFromChoice(ctx context.Context, toolID string, choice string) error { +func (cc *ConsentChecker) grantConsentFromChoice( + ctx 
context.Context, + toolID string, + choice string, + ruleType ConsentRuleType, +) error { var rule ConsentRule var scope ConsentScope switch choice { case "once": rule = ConsentRule{ + Type: ruleType, ToolID: toolID, Permission: ConsentOnce, } scope = ScopeSession case "session": rule = ConsentRule{ + Type: ruleType, ToolID: toolID, Permission: ConsentSession, } scope = ScopeSession case "project": rule = ConsentRule{ + Type: ruleType, ToolID: toolID, Permission: ConsentProject, } scope = ScopeProject case "always": rule = ConsentRule{ + Type: ruleType, ToolID: toolID, Permission: ConsentAlways, } @@ -201,22 +302,39 @@ func (cc *ConsentChecker) grantConsentFromChoice(ctx context.Context, toolID str case "server": // Grant trust to entire server rule = ConsentRule{ + Type: ruleType, ToolID: fmt.Sprintf("%s/*", cc.serverName), Permission: ConsentServerAlways, RuleScope: RuleScopeAll, } scope = ScopeGlobal + case "global": + rule = ConsentRule{ + Type: ruleType, + ToolID: "*", + Permission: ConsentAlways, + RuleScope: RuleScopeAll, + } + scope = ScopeGlobal case "readonly_server": - // Grant trust to readonly tools from this server + // Grant trust to readonly tools from this server (only for tool rules) + if ruleType != ConsentRuleTypeTool { + return fmt.Errorf("readonly server option only available for tool consent") + } rule = ConsentRule{ + Type: ruleType, ToolID: fmt.Sprintf("%s/*", cc.serverName), Permission: ConsentAlways, RuleScope: RuleScopeReadOnly, } scope = ScopeGlobal case "readonly_global": - // Grant trust to all readonly tools globally + // Grant trust to all readonly tools globally (only for tool rules) + if ruleType != ConsentRuleTypeTool { + return fmt.Errorf("readonly global option only available for tool consent") + } rule = ConsentRule{ + Type: ruleType, ToolID: "*", Permission: ConsentAlways, RuleScope: RuleScopeReadOnly, @@ -228,3 +346,94 @@ func (cc *ConsentChecker) grantConsentFromChoice(ctx context.Context, toolID str return cc.consentMgr.GrantConsent(ctx, rule, scope) } + +// PromptAndGrantSamplingConsent shows sampling consent prompt and grants permission based on user choice +func (cc *ConsentChecker) PromptAndGrantSamplingConsent( + ctx context.Context, + toolName, toolDesc string, +) error { + toolID := fmt.Sprintf("%s/%s", cc.serverName, toolName) + + choice, err := cc.promptForSamplingConsent(ctx, toolName, toolDesc) + if err != nil { + return fmt.Errorf("sampling consent prompt failed: %w", err) + } + + if choice == "deny" { + return fmt.Errorf("sampling denied by user") + } + + // Grant sampling consent based on user choice + return cc.grantConsentFromChoice(ctx, toolID, choice, ConsentRuleTypeSampling) +} + +// promptForSamplingConsent shows an interactive sampling consent prompt and returns the user's choice +func (cc *ConsentChecker) promptForSamplingConsent( + ctx context.Context, + toolName, toolDesc string, +) (string, error) { + message := fmt.Sprintf( + "Tool %s (%s) wants to send data to an external language model for processing.\n\n"+ + "How would you like to proceed?", + output.WithHighLightFormat(toolName), + output.WithHighLightFormat(cc.serverName), + ) + + helpMessage := fmt.Sprintf("This will allow the tool to send data to an LLM for analysis or generation. 
%s", toolDesc) + + choices := []*ux.SelectChoice{ + { + Value: "deny", + Label: "Deny - Block this sampling request", + }, + { + Value: "once", + Label: "Allow once - Allow this sampling request only", + }, + { + Value: "session", + Label: "Allow for session - Allow sampling until restart", + }, + { + Value: "project", + Label: "Allow for project - Remember for this project", + }, + { + Value: "always", + Label: "Allow always - Remember globally for this tool", + }, + } + + // Add server trust option if not already trusted for sampling + if !cc.isServerAlreadyTrusted(ctx, ConsentRuleTypeSampling) { + choices = append(choices, &ux.SelectChoice{ + Value: "server", + Label: "Allow sampling for all tools from this server", + }) + } + + // Add global sampling trust option + choices = append(choices, &ux.SelectChoice{ + Value: "global", + Label: "Allow sampling for all tools from any server", + }) + + selector := ux.NewSelect(&ux.SelectOptions{ + Message: message, + HelpMessage: helpMessage, + Choices: choices, + EnableFiltering: ux.Ptr(false), + DisplayCount: 10, + }) + + choiceIndex, err := selector.Ask(ctx) + if err != nil { + return "", err + } + + if choiceIndex == nil || *choiceIndex < 0 || *choiceIndex >= len(choices) { + return "", fmt.Errorf("invalid choice selected") + } + + return choices[*choiceIndex].Value, nil +} diff --git a/cli/azd/internal/agent/consent/consent_wrapper_tool.go b/cli/azd/internal/agent/consent/consent_wrapper_tool.go index 52e8bcf56fa..4bde700e958 100644 --- a/cli/azd/internal/agent/consent/consent_wrapper_tool.go +++ b/cli/azd/internal/agent/consent/consent_wrapper_tool.go @@ -46,6 +46,10 @@ func (c *ConsentWrapperTool) Description() string { // Call executes the tool with consent protection func (c *ConsentWrapperTool) Call(ctx context.Context, input string) (string, error) { + // Set current executing tool for tracking (used by sampling handler) + SetCurrentExecutingTool(c.Name(), c.Server()) + defer ClearCurrentExecutingTool() + // Check consent using enhanced checker with annotations decision, err := c.consentChecker.CheckToolConsent(ctx, c.Name(), c.Description(), c.annotations) if err != nil { diff --git a/cli/azd/internal/agent/consent/manager.go b/cli/azd/internal/agent/consent/manager.go index 93eee617e31..d38a3da3f95 100644 --- a/cli/azd/internal/agent/consent/manager.go +++ b/cli/azd/internal/agent/consent/manager.go @@ -18,6 +18,45 @@ const ( ConfigKeyMCPConsent = "mcp.consent" ) +// Global state for tracking current executing tool +var ( + executingTool = &ExecutingTool{} +) + +// SetCurrentExecutingTool sets the currently executing tool (thread-safe) +func SetCurrentExecutingTool(name, server string) { + executingTool.Lock() + defer executingTool.Unlock() + executingTool.Name = name + executingTool.Server = server +} + +// ClearCurrentExecutingTool clears the currently executing tool (thread-safe) +func ClearCurrentExecutingTool() { + executingTool.Lock() + defer executingTool.Unlock() + executingTool.Name = "" + executingTool.Server = "" +} + +// GetCurrentExecutingTool gets the currently executing tool (thread-safe) +// Returns nil if no tool is currently executing +func GetCurrentExecutingTool() *ExecutingTool { + executingTool.RLock() + defer executingTool.RUnlock() + + // Return nil if no tool is currently executing + if executingTool.Name == "" && executingTool.Server == "" { + return nil + } + + // Return a copy to avoid exposing the mutex + return &ExecutingTool{ + Name: executingTool.Name, + Server: executingTool.Server, + } +} + // 
consentManager implements the ConsentManager interface type consentManager struct { console input.Console @@ -116,6 +155,43 @@ func (cm *consentManager) ClearConsentByToolID(ctx context.Context, toolID strin } } +// ListConsentsByType lists consent rules filtered by type for a given scope +func (cm *consentManager) ListConsentsByType( + ctx context.Context, + scope ConsentScope, + ruleType ConsentRuleType, +) ([]ConsentRule, error) { + allRules, err := cm.ListConsents(ctx, scope) + if err != nil { + return nil, err + } + + filteredRules := make([]ConsentRule, 0) + for _, rule := range allRules { + if rule.Type == ruleType { + filteredRules = append(filteredRules, rule) + } + } + + return filteredRules, nil +} + +// ClearConsentsByType clears all consent rules of a specific type for a given scope +func (cm *consentManager) ClearConsentsByType(ctx context.Context, scope ConsentScope, ruleType ConsentRuleType) error { + rules, err := cm.ListConsentsByType(ctx, scope, ruleType) + if err != nil { + return fmt.Errorf("failed to list consent rules: %w", err) + } + + for _, rule := range rules { + if err := cm.ClearConsentByToolID(ctx, rule.ToolID, scope); err != nil { + return fmt.Errorf("failed to clear consent for tool %s: %w", rule.ToolID, err) + } + } + + return nil +} + // WrapTool wraps a single langchaingo tool with consent protection func (cm *consentManager) WrapTool(tool common.AnnotatedTool) common.AnnotatedTool { return newConsentWrapperTool(tool, cm.console, cm) @@ -137,6 +213,8 @@ func (cm *consentManager) evaluateRule(rule ConsentRule) *ConsentDecision { switch rule.Permission { case ConsentDeny: return &ConsentDecision{Allowed: false, Reason: "explicitly denied"} + case ConsentPrompt: + return &ConsentDecision{Allowed: false, RequiresPrompt: true, Reason: "requires prompt"} case ConsentOnce: // For one-time consent, we allow it but mark it for removal // The caller should handle removing this rule after use @@ -378,7 +456,7 @@ func (cm *consentManager) findMatchingUnifiedRule( // First pass: Check for deny rules for _, rule := range rules { - if rule.Permission == ConsentDeny && cm.ruleMatches(rule, request, isReadOnlyTool) { + if rule.Permission == ConsentDeny && rule.Type == request.Type && cm.ruleMatches(rule, request, isReadOnlyTool) { return &ConsentDecision{Allowed: false, Reason: "explicitly denied"} } } @@ -386,23 +464,26 @@ func (cm *consentManager) findMatchingUnifiedRule( // Second pass: Check for allow rules in precedence order // Global patterns first (* pattern) for i, rule := range rules { - if rule.Permission != ConsentDeny && rule.ToolID == "*" && cm.ruleMatches(rule, request, isReadOnlyTool) { - return cm.evaluateAllowRule(rule, i) + if rule.Permission != ConsentDeny && rule.Type == request.Type && rule.ToolID == "*" && + cm.ruleMatches(rule, request, isReadOnlyTool) { + return cm.evaluateAllowRule(rule, request, i) } } // Server patterns next (server/* pattern) serverPattern := fmt.Sprintf("%s/*", request.ServerName) for i, rule := range rules { - if rule.Permission != ConsentDeny && rule.ToolID == serverPattern && cm.ruleMatches(rule, request, isReadOnlyTool) { - return cm.evaluateAllowRule(rule, i) + if rule.Permission != ConsentDeny && rule.Type == request.Type && rule.ToolID == serverPattern && + cm.ruleMatches(rule, request, isReadOnlyTool) { + return cm.evaluateAllowRule(rule, request, i) } } // Specific tool patterns last (exact match) for i, rule := range rules { - if rule.Permission != ConsentDeny && rule.ToolID == request.ToolID && cm.ruleMatches(rule, 
request, isReadOnlyTool) { - return cm.evaluateAllowRule(rule, i) + if rule.Permission != ConsentDeny && rule.Type == request.Type && rule.ToolID == request.ToolID && + cm.ruleMatches(rule, request, isReadOnlyTool) { + return cm.evaluateAllowRule(rule, request, i) } } @@ -432,7 +513,7 @@ func (cm *consentManager) ruleMatches(rule ConsentRule, request ConsentRequest, } // evaluateAllowRule evaluates an allow rule and handles one-time cleanup -func (cm *consentManager) evaluateAllowRule(rule ConsentRule, ruleIndex int) *ConsentDecision { +func (cm *consentManager) evaluateAllowRule(rule ConsentRule, request ConsentRequest, ruleIndex int) *ConsentDecision { decision := cm.evaluateRule(rule) // If this is a one-time consent rule, remove it after evaluation diff --git a/cli/azd/internal/agent/consent/types.go b/cli/azd/internal/agent/consent/types.go index ef1b56881d6..7a95f3419ce 100644 --- a/cli/azd/internal/agent/consent/types.go +++ b/cli/azd/internal/agent/consent/types.go @@ -5,6 +5,8 @@ package consent import ( "context" + "fmt" + "sync" "time" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" @@ -20,6 +22,9 @@ type ConsentScope string // RuleScope represents what types of tools a rule applies to type RuleScope string +// ConsentRuleType represents the type of consent rule +type ConsentRuleType string + const ( ConsentDeny ConsentLevel = "deny" ConsentPrompt ConsentLevel = "prompt" @@ -42,12 +47,34 @@ const ( RuleScopeReadOnly RuleScope = "readonly" // Only read-only tools matching the pattern ) +const ( + ConsentRuleTypeTool ConsentRuleType = "tool" // Tool execution consent + ConsentRuleTypeSampling ConsentRuleType = "sampling" // LLM sampling consent +) + +// AllowedRuleTypes contains the valid rule types for command validation +var AllowedRuleTypes = []string{ + string(ConsentRuleTypeTool), + string(ConsentRuleTypeSampling), +} + +// ParseConsentRuleType converts a string to ConsentRuleType with validation +func ParseConsentRuleType(ruleTypeStr string) (ConsentRuleType, error) { + for _, allowedType := range AllowedRuleTypes { + if ruleTypeStr == allowedType { + return ConsentRuleType(ruleTypeStr), nil + } + } + return "", fmt.Errorf("invalid rule type: %s (allowed: %v)", ruleTypeStr, AllowedRuleTypes) +} + // ConsentRule represents a single consent rule for a tool type ConsentRule struct { - ToolID string `json:"toolId"` - Permission ConsentLevel `json:"permission"` - RuleScope RuleScope `json:"scope,omitempty"` // Defaults to "all" for backward compatibility - GrantedAt time.Time `json:"grantedAt"` + ToolID string `json:"toolId"` + Type ConsentRuleType `json:"type"` // Type of consent rule (tool, sampling, etc.) + Permission ConsentLevel `json:"permission"` // Permission level for this rule type + RuleScope RuleScope `json:"scope,omitempty"` // Defaults to "all" for backward compatibility + GrantedAt time.Time `json:"grantedAt"` } // ConsentConfig represents the MCP consent configuration @@ -59,6 +86,7 @@ type ConsentConfig struct { type ConsentRequest struct { ToolID string ServerName string + Type ConsentRuleType // Type of consent being requested (tool, sampling, etc.) 
Parameters map[string]interface{} SessionID string ProjectPath string @@ -77,10 +105,18 @@ type ConsentManager interface { CheckConsent(ctx context.Context, request ConsentRequest) (*ConsentDecision, error) GrantConsent(ctx context.Context, rule ConsentRule, scope ConsentScope) error ListConsents(ctx context.Context, scope ConsentScope) ([]ConsentRule, error) + ListConsentsByType(ctx context.Context, scope ConsentScope, ruleType ConsentRuleType) ([]ConsentRule, error) ClearConsents(ctx context.Context, scope ConsentScope) error + ClearConsentsByType(ctx context.Context, scope ConsentScope, ruleType ConsentRuleType) error ClearConsentByToolID(ctx context.Context, toolID string, scope ConsentScope) error // Tool wrapping methods WrapTool(tool common.AnnotatedTool) common.AnnotatedTool WrapTools(tools []common.AnnotatedTool) []common.AnnotatedTool } + +type ExecutingTool struct { + sync.RWMutex + Name string + Server string +} diff --git a/cli/azd/internal/agent/tools/io/copy_file.go b/cli/azd/internal/agent/tools/io/copy_file.go index 91b0d2edd96..b61e5c88d3c 100644 --- a/cli/azd/internal/agent/tools/io/copy_file.go +++ b/cli/azd/internal/agent/tools/io/copy_file.go @@ -91,7 +91,8 @@ func (t CopyFileTool) Call(ctx context.Context, input string) (string, error) { return t.createErrorResponse( err, fmt.Sprintf( - "Invalid JSON input: %s. Expected format: {\"source\": \"file.txt\", \"destination\": \"backup.txt\", \"overwrite\": false}", + "Invalid JSON input: %s. Expected format: "+ + `{"source": "file.txt", "destination": "backup.txt", "overwrite": false}`, err.Error(), ), ) diff --git a/cli/azd/internal/agent/tools/io/read_file_test.go b/cli/azd/internal/agent/tools/io/read_file_test.go index a5c6885837b..94395555933 100644 --- a/cli/azd/internal/agent/tools/io/read_file_test.go +++ b/cli/azd/internal/agent/tools/io/read_file_test.go @@ -550,7 +550,8 @@ func TestReadFileTool_ContentTruncation(t *testing.T) { func TestReadFileTool_SpecialCharacters(t *testing.T) { tempDir := t.TempDir() testFile := filepath.Join(tempDir, "special.txt") - testContent := "Line with émojis 😀🎉\nLine with unicode: ñáéíóú\nLine with symbols: @#$%^&*()\nLine with tabs:\t\tand\tspaces" + testContent := "Line with émojis 😀🎉\nLine with unicode: ñáéíóú\n" + + "Line with symbols: @#$%^&*()\nLine with tabs:\t\tand\tspaces" err := os.WriteFile(testFile, []byte(testContent), 0600) require.NoError(t, err) @@ -602,7 +603,7 @@ func TestReadFileTool_FileInfoMetadata(t *testing.T) { testFile := filepath.Join(tempDir, "metadata.txt") testContent := "Test content for metadata" - err := os.WriteFile(testFile, []byte(testContent), 0644) + err := os.WriteFile(testFile, []byte(testContent), 0600) require.NoError(t, err) // Get file info for comparison diff --git a/cli/azd/internal/agent/tools/mcp/sampling_handler.go b/cli/azd/internal/agent/tools/mcp/sampling_handler.go index 0c0b3f7df0d..ce465b3a6b9 100644 --- a/cli/azd/internal/agent/tools/mcp/sampling_handler.go +++ b/cli/azd/internal/agent/tools/mcp/sampling_handler.go @@ -9,6 +9,9 @@ import ( "fmt" "strings" + "github.com/azure/azure-dev/cli/azd/internal/agent/consent" + "github.com/azure/azure-dev/cli/azd/pkg/input" + "github.com/azure/azure-dev/cli/azd/pkg/llm" "github.com/fatih/color" "github.com/mark3labs/mcp-go/mcp" "github.com/tmc/langchaingo/llms" @@ -17,8 +20,10 @@ import ( // McpSamplingHandler handles sampling requests from MCP clients by delegating // to an underlying language model and converting responses to MCP format type McpSamplingHandler struct { - llm 
llms.Model - debug bool + llm *llm.ModelContainer + debug bool + consentManager consent.ConsentManager + console input.Console } // SamplingHandlerOption is a functional option for configuring McpSamplingHandler @@ -33,9 +38,16 @@ func WithDebug(debug bool) SamplingHandlerOption { // NewMcpSamplingHandler creates a new MCP sampling handler with the specified // language model and applies any provided options -func NewMcpSamplingHandler(llm llms.Model, opts ...SamplingHandlerOption) *McpSamplingHandler { +func NewMcpSamplingHandler( + consentManager consent.ConsentManager, + console input.Console, + llm *llm.ModelContainer, + opts ...SamplingHandlerOption, +) *McpSamplingHandler { handler := &McpSamplingHandler{ - llm: llm, + consentManager: consentManager, + console: console, + llm: llm, } for _, opt := range opts { @@ -53,13 +65,31 @@ func (h *McpSamplingHandler) CreateMessage( ctx context.Context, request mcp.CreateMessageRequest, ) (*mcp.CreateMessageResult, error) { + // Get current executing tool for context (package-level tracking) + currentTool := consent.GetCurrentExecutingTool() + if currentTool == nil { + return nil, fmt.Errorf("no current tool executing") + } + + // Check consent for sampling if consent manager is available + if err := h.checkSamplingConsent(ctx, currentTool, request); err != nil { + return &mcp.CreateMessageResult{ + SamplingMessage: mcp.SamplingMessage{ + Role: mcp.RoleAssistant, + Content: llms.TextPart(fmt.Sprintf("Sampling request denied: %v", err)), + }, + Model: "consent-denied", + StopReason: "consent_denied", + }, nil + } + if h.debug { requestJson, err := json.MarshalIndent(request, "", " ") if err != nil { return nil, err } - color.HiBlack("\nSamplingStart\n%s\n", requestJson) + color.HiBlack("\nSamplingStart (Tool: %s/%s)\n%s\n", currentTool.Server, currentTool.Name, requestJson) } messages := []llms.MessageContent{} @@ -108,14 +138,14 @@ func (h *McpSamplingHandler) CreateMessage( color.HiBlack("\nSamplingLLMContent\n%s\n", inputJson) } - res, err := h.llm.GenerateContent(ctx, messages) + res, err := h.llm.Model.GenerateContent(ctx, messages) if err != nil { return &mcp.CreateMessageResult{ SamplingMessage: mcp.SamplingMessage{ Role: mcp.RoleAssistant, Content: llms.TextPart(err.Error()), }, - Model: "llm-delegated", + Model: h.llm.Metadata.Name, StopReason: "error", }, nil } @@ -128,7 +158,7 @@ func (h *McpSamplingHandler) CreateMessage( Role: mcp.RoleAssistant, Content: llms.TextPart(""), }, - Model: "llm-delegated", + Model: h.llm.Metadata.Name, StopReason: "no_choices", } } else { @@ -140,7 +170,7 @@ func (h *McpSamplingHandler) CreateMessage( Role: mcp.RoleAssistant, Content: llms.TextPart(choice.Content), }, - Model: "llm-delegated", + Model: h.llm.Metadata.Name, StopReason: "endTurn", } } @@ -167,3 +197,38 @@ func (h *McpSamplingHandler) cleanContent(content string) string { content = strings.ReplaceAll(content, "\\r", "\r") return content } + +// checkSamplingConsent checks consent for sampling requests using the current executing tool +func (h *McpSamplingHandler) checkSamplingConsent( + ctx context.Context, + currentTool *consent.ExecutingTool, + request mcp.CreateMessageRequest, +) error { + // Create a consent checker for this specific server + consentChecker := consent.NewConsentChecker(h.consentManager, currentTool.Server) + + // Check sampling consent using the consent checker + decision, err := consentChecker.CheckSamplingConsent(ctx, currentTool.Name) + if err != nil { + return fmt.Errorf("consent check failed: %w", err) + } + + if 
!decision.Allowed { + if decision.RequiresPrompt { + // Use console.DoInteraction to show consent prompt + if err := h.console.DoInteraction(func() error { + return consentChecker.PromptAndGrantSamplingConsent( + ctx, + currentTool.Name, + "Allows sending data to external language models for processing", + ) + }); err != nil { + return err + } + } else { + return fmt.Errorf("sampling denied: %s", decision.Reason) + } + } + + return nil +} diff --git a/cli/azd/internal/mcp/tools/azd_sample.go b/cli/azd/internal/mcp/tools/azd_sample.go new file mode 100644 index 00000000000..c380812346b --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_sample.go @@ -0,0 +1,48 @@ +package tools + +import ( + "context" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +func NewSamplingTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_sample_test", + mcp.WithDescription("Runs MCP sampling to test sampling behavior"), + ), + Handler: func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + serverFromCtx := server.ServerFromContext(ctx) + samplingRequest := mcp.CreateMessageRequest{ + CreateMessageParams: mcp.CreateMessageParams{ + Messages: []mcp.SamplingMessage{ + { + Role: mcp.RoleUser, + Content: mcp.TextContent{ + Type: "text", + Text: "What is 10 plus 10?", + }, + }, + }, + SystemPrompt: "You are a helpful assistant", + MaxTokens: 1000, + Temperature: 0.7, + }, + } + + samplingResult, err := serverFromCtx.RequestSampling(ctx, samplingRequest) + if err != nil { + return mcp.NewToolResultErrorFromErr("Error during sampling", err), nil + } + + if textContent, ok := samplingResult.Content.(mcp.TextContent); ok { + return mcp.NewToolResultText(textContent.Text), nil + } + + return mcp.NewToolResultText(fmt.Sprintf("%v", samplingResult.Content)), nil + }, + } +} From 106c55d9eb06db7548869b60ebad713c31917af4 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Tue, 12 Aug 2025 17:23:03 -0700 Subject: [PATCH 065/116] Revision of consent system --- cli/azd/cmd/mcp.go | 537 +++++++++------------- cli/azd/internal/agent/consent/checker.go | 137 +++--- cli/azd/internal/agent/consent/manager.go | 193 ++++---- cli/azd/internal/agent/consent/types.go | 210 ++++++--- 4 files changed, 536 insertions(+), 541 deletions(-) diff --git a/cli/azd/cmd/mcp.go b/cli/azd/cmd/mcp.go index 49a96f4ad0a..722657f130a 100644 --- a/cli/azd/cmd/mcp.go +++ b/cli/azd/cmd/mcp.go @@ -7,7 +7,6 @@ import ( "context" "fmt" "io" - "strings" "github.com/azure/azure-dev/cli/azd/cmd/actions" "github.com/azure/azure-dev/cli/azd/internal" @@ -90,8 +89,22 @@ azd functionality through the Model Context Protocol interface.`, Command: &cobra.Command{ Use: "grant", Short: "Grant consent trust rules.", - Long: "Grant trust rules for MCP tools and servers.", - Args: cobra.NoArgs, + Long: `Grant trust rules for MCP tools and servers. + +This command creates consent rules that allow MCP tools to execute +without prompting for permission. You can specify different permission +levels and scopes for the rules. 
+ +Examples: + # Grant always permission to all tools globally + azd mcp consent grant --global --permission always + + # Grant session permission to a specific server + azd mcp consent grant --server my-server --permission session + + # Grant project permission to a specific tool with read-only scope + azd mcp consent grant --server my-server --tool my-tool --permission project --scope read-only`, + Args: cobra.NoArgs, }, OutputFormats: []output.Format{output.NoneFormat}, DefaultFormat: output.NoneFormat, @@ -99,19 +112,20 @@ azd functionality through the Model Context Protocol interface.`, FlagsResolver: newMcpConsentGrantFlags, }) + // TODO: Re-implement revoke command with new structure // azd mcp consent revoke - consentGroup.Add("revoke", &actions.ActionDescriptorOptions{ - Command: &cobra.Command{ - Use: "revoke", - Short: "Revoke consent trust rules.", - Long: "Revoke specific consent rules for MCP tools and servers.", - Args: cobra.NoArgs, - }, - OutputFormats: []output.Format{output.NoneFormat}, - DefaultFormat: output.NoneFormat, - ActionResolver: newMcpConsentRevokeAction, - FlagsResolver: newMcpConsentRevokeFlags, - }) + // consentGroup.Add("revoke", &actions.ActionDescriptorOptions{ + // Command: &cobra.Command{ + // Use: "revoke", + // Short: "Revoke consent trust rules.", + // Long: "Revoke specific consent rules for MCP tools and servers.", + // Args: cobra.NoArgs, + // }, + // OutputFormats: []output.Format{output.NoneFormat}, + // DefaultFormat: output.NoneFormat, + // ActionResolver: newMcpConsentRevokeAction, + // FlagsResolver: newMcpConsentRevokeFlags, + // }) return group } @@ -177,10 +191,10 @@ func (a *mcpStartAction) Run(ctx context.Context) (*actions.ActionResult, error) // Flags for MCP consent commands type mcpConsentFlags struct { - global *internal.GlobalCommandOptions - scope string - toolID string - ruleType string + global *internal.GlobalCommandOptions + scope string + target string + operationContext string } func newMcpConsentFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpConsentFlags { @@ -191,9 +205,9 @@ func newMcpConsentFlags(cmd *cobra.Command, global *internal.GlobalCommandOption func (f *mcpConsentFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { f.global = global - local.StringVar(&f.scope, "scope", "global", "Consent scope (global, project)") - local.StringVar(&f.toolID, "tool-id", "", "Specific tool ID to operate on") - local.StringVar(&f.ruleType, "type", "", "Rule type to filter by (tool, sampling)") + local.StringVar(&f.scope, "scope", "global", "Consent scope (global, project, session)") + local.StringVar(&f.target, "target", "", "Specific target to operate on (server/tool format)") + local.StringVar(&f.operationContext, "context", "", "Operation context to filter by (tool, sampling)") } // Flags for MCP consent grant command @@ -202,8 +216,10 @@ type mcpConsentGrantFlags struct { tool string server string globalFlag bool - scope string - ruleType string + action string + operation string + permission string + ruleScope string } func newMcpConsentGrantFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpConsentGrantFlags { @@ -217,8 +233,10 @@ func (f *mcpConsentGrantFlags) Bind(local *pflag.FlagSet, global *internal.Globa local.StringVar(&f.tool, "tool", "", "Specific tool name (requires --server)") local.StringVar(&f.server, "server", "", "Server name") local.BoolVar(&f.globalFlag, "global", false, "Apply globally to all servers") - local.StringVar(&f.scope, "scope", "all", 
"Scope of the rule: 'all' or 'read-only'") - local.StringVar(&f.ruleType, "type", "tool", "Type of rule: 'tool' or 'sampling'") + local.StringVar(&f.action, "action", "all", "Action type: 'all' or 'readonly'") + local.StringVar(&f.operation, "operation", "tool", "Operation type: 'tool' or 'sampling'") + local.StringVar(&f.permission, "permission", "allow", "Permission: 'allow', 'deny', or 'prompt'") + local.StringVar(&f.ruleScope, "scope", "global", "Rule scope: 'session', 'project', or 'global'") } // Action for MCP consent list command @@ -250,29 +268,28 @@ func newMcpConsentListAction( } func (a *mcpConsentListAction) Run(ctx context.Context) (*actions.ActionResult, error) { - // Command heading - fmt.Fprintf(a.writer, "Listing MCP consent rules...\n\n") - - var scope consent.ConsentScope + var scope consent.Scope switch a.flags.scope { case "global": scope = consent.ScopeGlobal case "project": scope = consent.ScopeProject + case "session": + scope = consent.ScopeSession default: - return nil, fmt.Errorf("invalid scope: %s (allowed: global, project)", a.flags.scope) + return nil, fmt.Errorf("invalid scope: %s (allowed: global, project, session)", a.flags.scope) } var rules []consent.ConsentRule var err error - // Use type-filtered method if type is specified - if a.flags.ruleType != "" { - ruleType, parseErr := consent.ParseConsentRuleType(a.flags.ruleType) + // Use operation context-filtered method if context is specified + if a.flags.operationContext != "" { + operationContext, parseErr := consent.ParseOperationContext(a.flags.operationContext) if parseErr != nil { return nil, parseErr } - rules, err = a.consentManager.ListConsentsByType(ctx, scope, ruleType) + rules, err = a.consentManager.ListConsentsByOperationContext(ctx, scope, operationContext) } else { rules, err = a.consentManager.ListConsents(ctx, scope) } @@ -282,35 +299,76 @@ func (a *mcpConsentListAction) Run(ctx context.Context) (*actions.ActionResult, } if len(rules) == 0 { - typeInfo := "" - if a.flags.ruleType != "" { - typeInfo = fmt.Sprintf(" of type '%s'", a.flags.ruleType) + var typeInfo string + if a.flags.operationContext != "" { + typeInfo = fmt.Sprintf(" of context '%s'", a.flags.operationContext) } - fmt.Fprintf(a.writer, "No consent rules found for scope: %s%s\n", a.flags.scope, typeInfo) + fmt.Fprintf(a.writer, "No consent rules found%s.\n", typeInfo) return nil, nil } - // Format output - if a.formatter.Kind() == output.JsonFormat { - return nil, a.formatter.Format(rules, a.writer, nil) + // Convert rules to display format + type ruleDisplay struct { + Target string `json:"target"` + Context string `json:"context"` + Action string `json:"action"` + Permission string `json:"permission"` + Scope string `json:"scope"` + GrantedAt string `json:"grantedAt"` } - // Table format - fmt.Fprintf(a.writer, "Consent Rules (%s scope):\n", a.flags.scope) - fmt.Fprintf(a.writer, "%-10s %-35s %-15s %-20s\n", "Type", "Tool ID", "Permission", "Granted At") - fmt.Fprintf(a.writer, "%s\n", strings.Repeat("-", 80)) - + var displayRules []ruleDisplay for _, rule := range rules { - fmt.Fprintf(a.writer, "%-10s %-35s %-15s %-20s\n", - rule.Type, - rule.ToolID, - rule.Permission, - rule.GrantedAt.Format("2006-01-02 15:04:05")) + displayRules = append(displayRules, ruleDisplay{ + Target: string(rule.Target), + Context: string(rule.Operation), + Action: string(rule.Action), + Permission: string(rule.Permission), + Scope: string(rule.Scope), + GrantedAt: rule.GrantedAt.Format("2006-01-02 15:04:05"), + }) } - fmt.Fprintf(a.writer, 
"\nListed %d consent rule(s)\n", len(rules)) + if a.formatter.Kind() == output.JsonFormat { + return nil, a.formatter.Format(displayRules, a.writer, nil) + } + + // Use table formatter for better output + if a.formatter.Kind() == output.TableFormat { + columns := []output.Column{ + { + Heading: "Target", + ValueTemplate: "{{.Target}}", + }, + { + Heading: "Context", + ValueTemplate: "{{.Context}}", + }, + { + Heading: "Action", + ValueTemplate: "{{.Action}}", + }, + { + Heading: "Permission", + ValueTemplate: "{{.Permission}}", + }, + { + Heading: "Scope", + ValueTemplate: "{{.Scope}}", + }, + { + Heading: "Granted At", + ValueTemplate: "{{.GrantedAt}}", + }, + } + + return nil, a.formatter.Format(displayRules, a.writer, output.TableFormatterOptions{ + Columns: columns, + }) + } - return nil, nil + // Fallback to simple formatting + return nil, a.formatter.Format(displayRules, a.writer, nil) } // Action for MCP consent clear command @@ -339,35 +397,38 @@ func (a *mcpConsentClearAction) Run(ctx context.Context) (*actions.ActionResult, // Command heading fmt.Fprintf(a.console.Handles().Stdout, "Clearing MCP consent rules...\n\n") - var scope consent.ConsentScope + var scope consent.Scope switch a.flags.scope { case "global": scope = consent.ScopeGlobal case "project": scope = consent.ScopeProject + case "session": + scope = consent.ScopeSession default: - return nil, fmt.Errorf("invalid scope: %s (allowed: global, project)", a.flags.scope) + return nil, fmt.Errorf("invalid scope: %s (allowed: global, project, session)", a.flags.scope) } var err error - if a.flags.toolID != "" { - // Clear specific tool - err = a.consentManager.ClearConsentByToolID(ctx, a.flags.toolID, scope) + if a.flags.target != "" { + // Clear specific target + target := consent.Target(a.flags.target) + err = a.consentManager.ClearConsentByTarget(ctx, target, scope) if err == nil { - fmt.Fprintf(a.console.Handles().Stdout, "Cleared consent for tool: %s\n", a.flags.toolID) + fmt.Fprintf(a.console.Handles().Stdout, "Cleared consent for target: %s\n", a.flags.target) } } else { - // Get confirmation message based on type filter + // Get confirmation message based on context filter confirmMessage := fmt.Sprintf("Are you sure you want to clear all consent rules for scope '%s'?", a.flags.scope) - if a.flags.ruleType != "" { + if a.flags.operationContext != "" { confirmMessage = fmt.Sprintf( "Are you sure you want to clear all %s consent rules for scope '%s'?", - a.flags.ruleType, + a.flags.operationContext, a.flags.scope, ) } - // Clear all rules for scope (with optional type filtering) + // Clear all rules for scope (with optional context filtering) confirmed, confirmErr := a.console.Confirm(ctx, input.ConsoleOptions{ Message: confirmMessage, }) @@ -380,19 +441,19 @@ func (a *mcpConsentClearAction) Run(ctx context.Context) (*actions.ActionResult, return nil, nil } - if a.flags.ruleType != "" { - // Type-specific clearing using the new consent manager method - ruleType, parseErr := consent.ParseConsentRuleType(a.flags.ruleType) + if a.flags.operationContext != "" { + // Context-specific clearing using the new consent manager method + operationContext, parseErr := consent.ParseOperationContext(a.flags.operationContext) if parseErr != nil { return nil, parseErr } - err = a.consentManager.ClearConsentsByType(ctx, scope, ruleType) + err = a.consentManager.ClearConsentsByOperationContext(ctx, scope, operationContext) if err == nil { fmt.Fprintf( a.console.Handles().Stdout, "Cleared all %s consent rules for scope: %s\n", - 
a.flags.ruleType, + a.flags.operationContext, a.flags.scope, ) } @@ -451,303 +512,115 @@ func (a *mcpConsentGrantAction) Run(ctx context.Context) (*actions.ActionResult, return nil, fmt.Errorf("specify either --global or --server") } - // Validate scope - if a.flags.scope != "all" && a.flags.scope != "read-only" { - return nil, fmt.Errorf("--scope must be 'all' or 'read-only'") + // Validate action type + var actionType consent.ActionType + switch a.flags.action { + case "readonly": + actionType = consent.ActionReadOnly + case "all": + actionType = consent.ActionAny + default: + return nil, fmt.Errorf("--action must be 'readonly' or 'all'") } - // Validate type using the new parser - ruleType, err := consent.ParseConsentRuleType(a.flags.ruleType) - if err != nil { - return nil, err + // Validate operation context + var operationContext consent.OperationType + switch a.flags.operation { + case "tool": + operationContext = consent.OperationTypeTool + case "sampling": + operationContext = consent.OperationTypeSampling + default: + return nil, fmt.Errorf("--context must be 'tool' or 'sampling'") + } + + // Validate permission + var permission consent.Permission + switch a.flags.permission { + case "allow": + permission = consent.PermissionAllow + case "deny": + permission = consent.PermissionDeny + case "prompt": + permission = consent.PermissionPrompt + default: + return nil, fmt.Errorf("--decision must be 'allow', 'deny', or 'prompt'") } - // For sampling type, tool-specific grants are not supported - if ruleType == consent.ConsentRuleTypeSampling && a.flags.tool != "" { + // Validate rule scope + var ruleScope consent.Scope + switch a.flags.ruleScope { + case "session": + ruleScope = consent.ScopeSession + case "project": + ruleScope = consent.ScopeProject + case "global": + ruleScope = consent.ScopeGlobal + default: + return nil, fmt.Errorf("--scope must be 'session', 'project', or 'global'") + } + + // For sampling context, tool-specific grants are not supported + if operationContext == consent.OperationTypeSampling && a.flags.tool != "" { return nil, fmt.Errorf("--tool is not supported for sampling rules") } - // Build rule - var toolID string - var ruleScope consent.RuleScope + // Build target + var target consent.Target var description string - if a.flags.scope == "read-only" { - ruleScope = consent.RuleScopeReadOnly - } else { - ruleScope = consent.RuleScopeAll - } - if a.flags.globalFlag { - toolID = "*" - if ruleType == consent.ConsentRuleTypeSampling { - if a.flags.scope == "read-only" { - description = "all read-only sampling globally" + target = consent.NewGlobalTarget() + if operationContext == consent.OperationTypeSampling { + if actionType == consent.ActionReadOnly { + description = fmt.Sprintf("all read-only sampling globally (%s)", permission) } else { - description = "all sampling globally" + description = fmt.Sprintf("all sampling globally (%s)", permission) } } else { - if a.flags.scope == "read-only" { - description = "all read-only tools globally" + if actionType == consent.ActionReadOnly { + description = fmt.Sprintf("all read-only tools globally (%s)", permission) } else { - description = "all tools globally" + description = fmt.Sprintf("all tools globally (%s)", permission) } } } else if a.flags.tool != "" { - toolID = fmt.Sprintf("%s/%s", a.flags.server, a.flags.tool) - if a.flags.scope == "read-only" { - description = fmt.Sprintf("read-only tool %s from server %s", a.flags.tool, a.flags.server) + target = consent.NewToolTarget(a.flags.server, a.flags.tool) + if 
actionType == consent.ActionReadOnly { + description = fmt.Sprintf("read-only tool %s from server %s (%s)", a.flags.tool, a.flags.server, permission) } else { - description = fmt.Sprintf("tool %s from server %s", a.flags.tool, a.flags.server) + description = fmt.Sprintf("tool %s from server %s (%s)", a.flags.tool, a.flags.server, permission) } } else { - toolID = fmt.Sprintf("%s/*", a.flags.server) - if ruleType == consent.ConsentRuleTypeSampling { - if a.flags.scope == "read-only" { - description = fmt.Sprintf("read-only sampling from server %s", a.flags.server) + target = consent.NewServerTarget(a.flags.server) + if operationContext == consent.OperationTypeSampling { + if actionType == consent.ActionReadOnly { + description = fmt.Sprintf("read-only sampling from server %s (%s)", a.flags.server, permission) } else { - description = fmt.Sprintf("all sampling from server %s", a.flags.server) + description = fmt.Sprintf("all sampling from server %s (%s)", a.flags.server, permission) } } else { - if a.flags.scope == "read-only" { - description = fmt.Sprintf("read-only tools from server %s", a.flags.server) + if actionType == consent.ActionReadOnly { + description = fmt.Sprintf("read-only tools from server %s (%s)", a.flags.server, permission) } else { - description = fmt.Sprintf("all tools from server %s", a.flags.server) + description = fmt.Sprintf("all tools from server %s (%s)", a.flags.server, permission) } } } rule := consent.ConsentRule{ - Type: ruleType, - ToolID: toolID, - Permission: consent.ConsentAlways, - RuleScope: ruleScope, + Scope: ruleScope, + Target: target, + Action: actionType, + Operation: operationContext, + Permission: permission, } - if err := a.consentManager.GrantConsent(ctx, rule, consent.ScopeGlobal); err != nil { + if err := a.consentManager.GrantConsent(ctx, rule, ruleScope); err != nil { return nil, fmt.Errorf("failed to grant consent: %w", err) } - fmt.Fprintf(a.console.Handles().Stdout, "Granted trust for %s\n", description) + fmt.Fprintf(a.console.Handles().Stdout, "Granted rule for %s\n", description) return nil, nil } - -// Flags for MCP consent revoke command -type mcpConsentRevokeFlags struct { - globalOptions *internal.GlobalCommandOptions - tool string - server string - globalFlag bool - scope string - toolPattern string - ruleType string -} - -func newMcpConsentRevokeFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpConsentRevokeFlags { - flags := &mcpConsentRevokeFlags{} - flags.Bind(cmd.Flags(), global) - return flags -} - -func (f *mcpConsentRevokeFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { - f.globalOptions = global - local.StringVar(&f.tool, "tool", "", "Specific tool name (requires --server)") - local.StringVar(&f.server, "server", "", "Server name") - local.BoolVar(&f.globalFlag, "global", false, "Apply globally to all servers") - local.StringVar(&f.scope, "scope", "all", "Scope of the rule: 'all' or 'read-only'") - local.StringVar( - &f.toolPattern, - "rule-pattern", - "", - "Revoke trust for a specific rule pattern (e.g., 'server/tool' or 'server/*')", - ) - local.StringVar(&f.ruleType, "type", "", "Type of rule to revoke: 'tool' or 'sampling' (default: all types)") -} - -// Action for MCP consent revoke command -type mcpConsentRevokeAction struct { - flags *mcpConsentRevokeFlags - console input.Console - userConfigManager config.UserConfigManager - consentManager consent.ConsentManager -} - -func newMcpConsentRevokeAction( - flags *mcpConsentRevokeFlags, - console input.Console, - 
userConfigManager config.UserConfigManager, - consentManager consent.ConsentManager, -) actions.Action { - return &mcpConsentRevokeAction{ - flags: flags, - console: console, - userConfigManager: userConfigManager, - consentManager: consentManager, - } -} - -func (a *mcpConsentRevokeAction) Run(ctx context.Context) (*actions.ActionResult, error) { - // Command heading - fmt.Fprintf(a.console.Handles().Stdout, "Revoking MCP consent rules...\n\n") - - // Count options set - optionsSet := 0 - if a.flags.globalFlag { - optionsSet++ - } - if a.flags.server != "" { - optionsSet++ - } - if a.flags.toolPattern != "" { - optionsSet++ - } - - if optionsSet == 0 { - return nil, fmt.Errorf("specify one of: --global, --server, or --rule-pattern") - } - - if optionsSet > 1 { - return nil, fmt.Errorf("specify only one option at a time") - } - - // Validate flag combinations for new structure - if a.flags.tool != "" && a.flags.server == "" { - return nil, fmt.Errorf("--tool requires --server") - } - - if a.flags.globalFlag && (a.flags.server != "" || a.flags.tool != "") { - return nil, fmt.Errorf("--global cannot be combined with --server or --tool") - } - - // Validate scope - if a.flags.scope != "all" && a.flags.scope != "read-only" { - return nil, fmt.Errorf("--scope must be 'all' or 'read-only'") - } - - // Validate type if specified - var ruleType consent.ConsentRuleType - if a.flags.ruleType != "" { - var err error - ruleType, err = consent.ParseConsentRuleType(a.flags.ruleType) - if err != nil { - return nil, err - } - - // For sampling type, tool-specific revocations are not supported - if ruleType == consent.ConsentRuleTypeSampling && a.flags.tool != "" { - return nil, fmt.Errorf("--tool is not supported for sampling rules") - } - } - - var toolID string - var description string - - if a.flags.toolPattern != "" { - toolID = a.flags.toolPattern - if a.flags.ruleType != "" { - description = fmt.Sprintf("%s trust for pattern: %s", a.flags.ruleType, a.flags.toolPattern) - } else { - description = fmt.Sprintf("trust for pattern: %s", a.flags.toolPattern) - } - } else if a.flags.globalFlag { - toolID = "*" - if a.flags.ruleType == "sampling" { - if a.flags.scope == "read-only" { - description = "global read-only sampling trust" - } else { - description = "global sampling trust" - } - } else if a.flags.ruleType == "tool" { - if a.flags.scope == "read-only" { - description = "global read-only tool trust" - } else { - description = "global tool trust" - } - } else { - if a.flags.scope == "read-only" { - description = "global read-only trust" - } else { - description = "global trust" - } - } - } else if a.flags.tool != "" { - toolID = fmt.Sprintf("%s/%s", a.flags.server, a.flags.tool) - if a.flags.scope == "read-only" { - description = fmt.Sprintf("read-only trust for tool %s from server %s", a.flags.tool, a.flags.server) - } else { - description = fmt.Sprintf("trust for tool %s from server %s", a.flags.tool, a.flags.server) - } - } else { - toolID = fmt.Sprintf("%s/*", a.flags.server) - if a.flags.ruleType == "sampling" { - if a.flags.scope == "read-only" { - description = fmt.Sprintf("read-only sampling trust for server: %s", a.flags.server) - } else { - description = fmt.Sprintf("sampling trust for server: %s", a.flags.server) - } - } else if a.flags.ruleType == "tool" { - if a.flags.scope == "read-only" { - description = fmt.Sprintf("read-only tool trust for server: %s", a.flags.server) - } else { - description = fmt.Sprintf("tool trust for server: %s", a.flags.server) - } - } else { - if a.flags.scope 
== "read-only" { - description = fmt.Sprintf("read-only trust for server: %s", a.flags.server) - } else { - description = fmt.Sprintf("trust for server: %s", a.flags.server) - } - } - } - - // If type filtering is requested, use the new consent manager method - if a.flags.ruleType != "" { - rules, err := a.consentManager.ListConsentsByType(ctx, consent.ScopeGlobal, ruleType) - if err != nil { - return nil, fmt.Errorf("failed to list consent rules: %w", err) - } - - rulesToClear := make([]consent.ConsentRule, 0) - for _, rule := range rules { - if ruleMatchesPattern(rule.ToolID, toolID) { - rulesToClear = append(rulesToClear, rule) - } - } - - if len(rulesToClear) == 0 { - fmt.Fprintf(a.console.Handles().Stdout, "No matching %s rules found\n", a.flags.ruleType) - return nil, nil - } - - for _, rule := range rulesToClear { - if err := a.consentManager.ClearConsentByToolID(ctx, rule.ToolID, consent.ScopeGlobal); err != nil { - return nil, fmt.Errorf("failed to revoke consent for %s: %w", rule.ToolID, err) - } - } - } else { - if err := a.consentManager.ClearConsentByToolID(ctx, toolID, consent.ScopeGlobal); err != nil { - return nil, fmt.Errorf("failed to revoke consent: %w", err) - } - } - - fmt.Fprintf(a.console.Handles().Stdout, "Revoked %s\n", description) - - return nil, nil -} - -// ruleMatchesPattern checks if a rule's toolID matches the given pattern -func ruleMatchesPattern(ruleToolID, pattern string) bool { - if pattern == "*" { - return true - } - if pattern == ruleToolID { - return true - } - // Handle server/* patterns - if strings.HasSuffix(pattern, "/*") { - serverPattern := strings.TrimSuffix(pattern, "/*") - return strings.HasPrefix(ruleToolID, serverPattern+"/") || ruleToolID == serverPattern+"/*" - } - return false -} diff --git a/cli/azd/internal/agent/consent/checker.go b/cli/azd/internal/agent/consent/checker.go index de0eeb4f10e..eee347a9a94 100644 --- a/cli/azd/internal/agent/consent/checker.go +++ b/cli/azd/internal/agent/consent/checker.go @@ -6,6 +6,7 @@ package consent import ( "context" "fmt" + "strings" "github.com/azure/azure-dev/cli/azd/pkg/output" "github.com/azure/azure-dev/cli/azd/pkg/ux" @@ -39,11 +40,11 @@ func (cc *ConsentChecker) CheckToolConsent( // Create consent request consentRequest := ConsentRequest{ - ToolID: toolID, - ServerName: cc.serverName, - Type: ConsentRuleTypeTool, // This is a tool execution request - SessionID: "", // Not needed since each manager represents one session - Annotations: annotations, + ToolID: toolID, + ServerName: cc.serverName, + OperationContext: OperationTypeTool, // This is a tool execution request + SessionID: "", // Not needed since each manager represents one session + Annotations: annotations, } return cc.consentMgr.CheckConsent(ctx, consentRequest) @@ -58,10 +59,10 @@ func (cc *ConsentChecker) CheckSamplingConsent( // Create consent request for sampling consentRequest := ConsentRequest{ - ToolID: toolID, - ServerName: cc.serverName, - Type: ConsentRuleTypeSampling, // This is a sampling request - SessionID: "", // Not needed since each manager represents one session + ToolID: toolID, + ServerName: cc.serverName, + OperationContext: OperationTypeSampling, // This is a sampling request + SessionID: "", // Not needed since each manager represents one session } return cc.consentMgr.CheckConsent(ctx, consentRequest) @@ -147,7 +148,7 @@ func (cc *ConsentChecker) PromptAndGrantConsent( } // Grant consent based on user choice - return cc.grantConsentFromChoice(ctx, toolID, choice, ConsentRuleTypeTool) + return 
cc.grantConsentFromChoice(ctx, toolID, choice, OperationTypeTool) } // promptForToolConsent shows an interactive consent prompt and returns the user's choice @@ -188,7 +189,7 @@ func (cc *ConsentChecker) promptForToolConsent( } // Add server trust option if not already trusted - if !cc.isServerAlreadyTrusted(ctx, ConsentRuleTypeTool) { + if !cc.isServerAlreadyTrusted(ctx, OperationTypeTool) { choices = append(choices, &ux.SelectChoice{ Value: "server", Label: "Allow all tools from this server", @@ -235,29 +236,29 @@ func (cc *ConsentChecker) promptForToolConsent( return choices[*choiceIndex].Value, nil } -// isServerAlreadyTrusted checks if the server is already trusted for the specified rule type -func (cc *ConsentChecker) isServerAlreadyTrusted(ctx context.Context, ruleType ConsentRuleType) bool { - // Create a mock request to check if server has trust for the specified rule type +// isServerAlreadyTrusted checks if the server is already trusted for the specified operation context +func (cc *ConsentChecker) isServerAlreadyTrusted(ctx context.Context, operationContext OperationType) bool { + // Create a mock request to check if server has trust for the specified operation context request := ConsentRequest{ - ToolID: fmt.Sprintf("%s/test-tool", cc.serverName), - ServerName: cc.serverName, - Type: ruleType, - SessionID: "", + ToolID: fmt.Sprintf("%s/test-tool", cc.serverName), + ServerName: cc.serverName, + OperationContext: operationContext, + SessionID: "", } // For tool requests, add annotations to avoid readonly-only matches - if ruleType == ConsentRuleTypeTool { + if operationContext == OperationTypeTool { request.Annotations = mcp.ToolAnnotation{} // No readonly hint } - // Check if server has trust for this rule type + // Check if server has trust for this operation context decision, err := cc.consentMgr.CheckConsent(ctx, request) if err != nil { return false } - // Server is trusted if it's allowed and the reason indicates server-level trust - return decision.Allowed && (decision.Reason == "server trusted" || decision.Reason == "server_always") + // Server is trusted if it's allowed + return decision.Allowed } // grantConsentFromChoice processes the user's consent choice and saves the appropriate rule @@ -265,79 +266,99 @@ func (cc *ConsentChecker) grantConsentFromChoice( ctx context.Context, toolID string, choice string, - ruleType ConsentRuleType, + operationContext OperationType, ) error { var rule ConsentRule - var scope ConsentScope + var scope Scope + + // Parse server and tool from toolID + parts := strings.Split(toolID, "/") + if len(parts) != 2 { + return fmt.Errorf("invalid toolID format: %s", toolID) + } + serverName := parts[0] + toolName := parts[1] switch choice { case "once": rule = ConsentRule{ - Type: ruleType, - ToolID: toolID, - Permission: ConsentOnce, + Scope: ScopeSession, + Target: NewToolTarget(serverName, toolName), + Action: ActionAny, + Operation: operationContext, + Permission: PermissionAllow, } scope = ScopeSession case "session": rule = ConsentRule{ - Type: ruleType, - ToolID: toolID, - Permission: ConsentSession, + Scope: ScopeSession, + Target: NewToolTarget(serverName, toolName), + Action: ActionAny, + Operation: operationContext, + Permission: PermissionAllow, } scope = ScopeSession case "project": rule = ConsentRule{ - Type: ruleType, - ToolID: toolID, - Permission: ConsentProject, + Scope: ScopeProject, + Target: NewToolTarget(serverName, toolName), + Action: ActionAny, + Operation: operationContext, + Permission: PermissionAllow, } scope = 
ScopeProject case "always": rule = ConsentRule{ - Type: ruleType, - ToolID: toolID, - Permission: ConsentAlways, + Scope: ScopeGlobal, + Target: NewToolTarget(serverName, toolName), + Action: ActionAny, + Operation: operationContext, + Permission: PermissionAllow, } scope = ScopeGlobal case "server": // Grant trust to entire server rule = ConsentRule{ - Type: ruleType, - ToolID: fmt.Sprintf("%s/*", cc.serverName), - Permission: ConsentServerAlways, - RuleScope: RuleScopeAll, + Scope: ScopeGlobal, + Target: NewServerTarget(serverName), + Action: ActionAny, + Operation: operationContext, + Permission: PermissionAllow, } scope = ScopeGlobal case "global": rule = ConsentRule{ - Type: ruleType, - ToolID: "*", - Permission: ConsentAlways, - RuleScope: RuleScopeAll, + Scope: ScopeGlobal, + Target: NewGlobalTarget(), + Action: ActionAny, + Operation: operationContext, + Permission: PermissionAllow, } scope = ScopeGlobal case "readonly_server": - // Grant trust to readonly tools from this server (only for tool rules) - if ruleType != ConsentRuleTypeTool { + // Grant trust to readonly tools from this server (only for tool context) + if operationContext != OperationTypeTool { return fmt.Errorf("readonly server option only available for tool consent") } rule = ConsentRule{ - Type: ruleType, - ToolID: fmt.Sprintf("%s/*", cc.serverName), - Permission: ConsentAlways, - RuleScope: RuleScopeReadOnly, + Scope: ScopeGlobal, + Target: NewServerTarget(serverName), + Action: ActionReadOnly, + Operation: operationContext, + Permission: PermissionAllow, } scope = ScopeGlobal case "readonly_global": - // Grant trust to all readonly tools globally (only for tool rules) - if ruleType != ConsentRuleTypeTool { + // Grant trust to all readonly tools globally (only for tool context) + if operationContext != OperationTypeTool { return fmt.Errorf("readonly global option only available for tool consent") } rule = ConsentRule{ - Type: ruleType, - ToolID: "*", - Permission: ConsentAlways, - RuleScope: RuleScopeReadOnly, + Scope: ScopeGlobal, + Target: NewGlobalTarget(), + Action: ActionReadOnly, + Operation: operationContext, + Permission: PermissionAllow, } scope = ScopeGlobal default: @@ -364,7 +385,7 @@ func (cc *ConsentChecker) PromptAndGrantSamplingConsent( } // Grant sampling consent based on user choice - return cc.grantConsentFromChoice(ctx, toolID, choice, ConsentRuleTypeSampling) + return cc.grantConsentFromChoice(ctx, toolID, choice, OperationTypeSampling) } // promptForSamplingConsent shows an interactive sampling consent prompt and returns the user's choice @@ -405,7 +426,7 @@ func (cc *ConsentChecker) promptForSamplingConsent( } // Add server trust option if not already trusted for sampling - if !cc.isServerAlreadyTrusted(ctx, ConsentRuleTypeSampling) { + if !cc.isServerAlreadyTrusted(ctx, OperationTypeSampling) { choices = append(choices, &ux.SelectChoice{ Value: "server", Label: "Allow sampling for all tools from this server", diff --git a/cli/azd/internal/agent/consent/manager.go b/cli/azd/internal/agent/consent/manager.go index d38a3da3f95..4f2ac498c55 100644 --- a/cli/azd/internal/agent/consent/manager.go +++ b/cli/azd/internal/agent/consent/manager.go @@ -6,6 +6,7 @@ package consent import ( "context" "fmt" + "strings" "sync" "time" @@ -93,12 +94,12 @@ func (cm *consentManager) CheckConsent(ctx context.Context, request ConsentReque } // GrantConsent grants consent for a tool -func (cm *consentManager) GrantConsent(ctx context.Context, rule ConsentRule, scope ConsentScope) error { +func (cm 
*consentManager) GrantConsent(ctx context.Context, rule ConsentRule, scope Scope) error { rule.GrantedAt = time.Now() - // Set default RuleScope if not specified (backward compatibility) - if rule.RuleScope == "" { - rule.RuleScope = RuleScopeAll + // Validate the rule + if err := rule.Validate(); err != nil { + return fmt.Errorf("invalid consent rule: %w", err) } switch scope { @@ -114,7 +115,7 @@ func (cm *consentManager) GrantConsent(ctx context.Context, rule ConsentRule, sc } // ListConsents lists consent rules for a given scope -func (cm *consentManager) ListConsents(ctx context.Context, scope ConsentScope) ([]ConsentRule, error) { +func (cm *consentManager) ListConsents(ctx context.Context, scope Scope) ([]ConsentRule, error) { switch scope { case ScopeSession: return cm.getSessionRules(), nil @@ -128,7 +129,7 @@ func (cm *consentManager) ListConsents(ctx context.Context, scope ConsentScope) } // ClearConsents clears all consent rules for a given scope -func (cm *consentManager) ClearConsents(ctx context.Context, scope ConsentScope) error { +func (cm *consentManager) ClearConsents(ctx context.Context, scope Scope) error { switch scope { case ScopeSession: return cm.clearSessionRules() @@ -141,25 +142,25 @@ func (cm *consentManager) ClearConsents(ctx context.Context, scope ConsentScope) } } -// ClearConsentByToolID clears consent for a specific tool -func (cm *consentManager) ClearConsentByToolID(ctx context.Context, toolID string, scope ConsentScope) error { +// ClearConsentByTarget clears consent for a specific target +func (cm *consentManager) ClearConsentByTarget(ctx context.Context, target Target, scope Scope) error { switch scope { case ScopeSession: - return cm.removeSessionRule(toolID) + return cm.removeSessionRule(target) case ScopeProject: return fmt.Errorf("project-level consent removal not yet implemented") case ScopeGlobal: - return cm.removeGlobalRule(ctx, toolID) + return cm.removeGlobalRule(ctx, target) default: return fmt.Errorf("unknown consent scope: %s", scope) } } -// ListConsentsByType lists consent rules filtered by type for a given scope -func (cm *consentManager) ListConsentsByType( +// ListConsentsByOperationContext lists consent rules filtered by operation context for a given scope +func (cm *consentManager) ListConsentsByOperationContext( ctx context.Context, - scope ConsentScope, - ruleType ConsentRuleType, + scope Scope, + operationContext OperationType, ) ([]ConsentRule, error) { allRules, err := cm.ListConsents(ctx, scope) if err != nil { @@ -168,7 +169,7 @@ func (cm *consentManager) ListConsentsByType( filteredRules := make([]ConsentRule, 0) for _, rule := range allRules { - if rule.Type == ruleType { + if rule.Operation == operationContext { filteredRules = append(filteredRules, rule) } } @@ -176,16 +177,20 @@ func (cm *consentManager) ListConsentsByType( return filteredRules, nil } -// ClearConsentsByType clears all consent rules of a specific type for a given scope -func (cm *consentManager) ClearConsentsByType(ctx context.Context, scope ConsentScope, ruleType ConsentRuleType) error { - rules, err := cm.ListConsentsByType(ctx, scope, ruleType) +// ClearConsentsByOperationContext clears all consent rules of a specific operation context for a given scope +func (cm *consentManager) ClearConsentsByOperationContext( + ctx context.Context, + scope Scope, + operationContext OperationType, +) error { + rules, err := cm.ListConsentsByOperationContext(ctx, scope, operationContext) if err != nil { return fmt.Errorf("failed to list consent rules: %w", err) } 
for _, rule := range rules { - if err := cm.ClearConsentByToolID(ctx, rule.ToolID, scope); err != nil { - return fmt.Errorf("failed to clear consent for tool %s: %w", rule.ToolID, err) + if err := cm.ClearConsentByTarget(ctx, rule.Target, scope); err != nil { + return fmt.Errorf("failed to clear consent for target %s: %w", rule.Target, err) } } @@ -211,18 +216,14 @@ func (cm *consentManager) WrapTools(tools []common.AnnotatedTool) []common.Annot // evaluateRule evaluates a consent rule and returns a decision func (cm *consentManager) evaluateRule(rule ConsentRule) *ConsentDecision { switch rule.Permission { - case ConsentDeny: + case PermissionDeny: return &ConsentDecision{Allowed: false, Reason: "explicitly denied"} - case ConsentPrompt: + case PermissionPrompt: return &ConsentDecision{Allowed: false, RequiresPrompt: true, Reason: "requires prompt"} - case ConsentOnce: - // For one-time consent, we allow it but mark it for removal - // The caller should handle removing this rule after use - return &ConsentDecision{Allowed: true, Reason: "one-time consent"} - case ConsentSession, ConsentProject, ConsentAlways, ConsentServerAlways: - return &ConsentDecision{Allowed: true, Reason: string(rule.Permission)} + case PermissionAllow: + return &ConsentDecision{Allowed: true, Reason: "allowed"} default: - return &ConsentDecision{Allowed: false, RequiresPrompt: true, Reason: "unknown permission level"} + return &ConsentDecision{Allowed: false, RequiresPrompt: true, Reason: "unknown decision"} } } @@ -270,7 +271,8 @@ func (cm *consentManager) addGlobalRule(ctx context.Context, rule ConsentRule) e func (cm *consentManager) addOrUpdateRule(rules []ConsentRule, newRule ConsentRule) []ConsentRule { // Check if rule already exists and update it for i, rule := range rules { - if rule.ToolID == newRule.ToolID { + if rule.Target == newRule.Target && rule.Operation == newRule.Operation && + rule.Action == newRule.Action { rules[i] = newRule return rules } @@ -352,14 +354,14 @@ func (cm *consentManager) clearGlobalRules(ctx context.Context) error { } // removeSessionRule removes a specific rule from session rules -func (cm *consentManager) removeSessionRule(toolID string) error { +func (cm *consentManager) removeSessionRule(target Target) error { cm.sessionMutex.Lock() defer cm.sessionMutex.Unlock() // Filter out the rule to remove filtered := make([]ConsentRule, 0, len(cm.sessionRules)) for _, rule := range cm.sessionRules { - if rule.ToolID != toolID { + if rule.Target != target { filtered = append(filtered, rule) } } @@ -368,22 +370,8 @@ func (cm *consentManager) removeSessionRule(toolID string) error { return nil } -// removeSessionRuleByIndex removes a rule by its index (for cleanup after one-time use) -func (cm *consentManager) removeSessionRuleByIndex(index int) error { - cm.sessionMutex.Lock() - defer cm.sessionMutex.Unlock() - - if index < 0 || index >= len(cm.sessionRules) { - return nil // Index out of bounds, nothing to remove - } - - // Remove the rule at the specified index - cm.sessionRules = append(cm.sessionRules[:index], cm.sessionRules[index+1:]...) 
- return nil -} - // removeGlobalRule removes a specific rule from global configuration -func (cm *consentManager) removeGlobalRule(ctx context.Context, toolID string) error { +func (cm *consentManager) removeGlobalRule(ctx context.Context, target Target) error { userConfig, err := cm.userConfigManager.Load() if err != nil { return fmt.Errorf("failed to load user config: %w", err) @@ -399,7 +387,7 @@ func (cm *consentManager) removeGlobalRule(ctx context.Context, toolID string) e // Filter out the rule to remove filtered := make([]ConsentRule, 0, len(consentConfig.Rules)) for _, rule := range consentConfig.Rules { - if rule.ToolID != toolID { + if rule.Target != target { filtered = append(filtered, rule) } } @@ -417,19 +405,26 @@ func (cm *consentManager) removeGlobalRule(ctx context.Context, toolID string) e func (cm *consentManager) checkUnifiedRules(ctx context.Context, request ConsentRequest) *ConsentDecision { isReadOnlyTool := request.Annotations.ReadOnlyHint != nil && *request.Annotations.ReadOnlyHint + // Build the target for this request + requestTarget := NewToolTarget(request.ServerName, request.ToolID) + // Check session rules first cm.sessionMutex.RLock() sessionRules := cm.sessionRules cm.sessionMutex.RUnlock() - if decision := cm.findMatchingUnifiedRule(sessionRules, request, isReadOnlyTool); decision != nil { + if decision := cm.findMatchingRule( + sessionRules, requestTarget, request.OperationContext, isReadOnlyTool, + ); decision != nil { return decision } // Check project rules if request.ProjectPath != "" { if projectRules, err := cm.getProjectRules(ctx, request.ProjectPath); err == nil { - if decision := cm.findMatchingUnifiedRule(projectRules, request, isReadOnlyTool); decision != nil { + if decision := cm.findMatchingRule( + projectRules, requestTarget, request.OperationContext, isReadOnlyTool, + ); decision != nil { return decision } } @@ -437,7 +432,9 @@ func (cm *consentManager) checkUnifiedRules(ctx context.Context, request Consent // Check global rules if globalRules, err := cm.getGlobalRules(ctx); err == nil { - if decision := cm.findMatchingUnifiedRule(globalRules, request, isReadOnlyTool); decision != nil { + if decision := cm.findMatchingRule( + globalRules, requestTarget, request.OperationContext, isReadOnlyTool, + ); decision != nil { return decision } } @@ -445,83 +442,87 @@ func (cm *consentManager) checkUnifiedRules(ctx context.Context, request Consent return nil } -// findMatchingUnifiedRule finds a matching rule using unified pattern and scope matching -func (cm *consentManager) findMatchingUnifiedRule( +// findMatchingRule finds a matching rule using target pattern matching +func (cm *consentManager) findMatchingRule( rules []ConsentRule, - request ConsentRequest, + requestTarget Target, + operationContext OperationType, isReadOnlyTool bool, ) *ConsentDecision { - // Process rules in order: deny rules first, then allow rules - // This implements: Explicit deny > Global scope > Server scope > Tool scope precedence + // Process rules in precedence order: deny rules first, then allow rules + // Precedence: Explicit deny > Global scope > Server scope > Tool scope // First pass: Check for deny rules for _, rule := range rules { - if rule.Permission == ConsentDeny && rule.Type == request.Type && cm.ruleMatches(rule, request, isReadOnlyTool) { + if rule.Permission == PermissionDeny && rule.Operation == operationContext && + cm.targetMatches(rule.Target, requestTarget) && cm.actionMatches(rule.Action, isReadOnlyTool) { return &ConsentDecision{Allowed: false, 
Reason: "explicitly denied"} } } - // Second pass: Check for allow rules in precedence order + // Second pass: Check for allow/prompt rules in precedence order // Global patterns first (* pattern) - for i, rule := range rules { - if rule.Permission != ConsentDeny && rule.Type == request.Type && rule.ToolID == "*" && - cm.ruleMatches(rule, request, isReadOnlyTool) { - return cm.evaluateAllowRule(rule, request, i) + for _, rule := range rules { + if rule.Permission != PermissionDeny && rule.Operation == operationContext && + (rule.Target == "*" || rule.Target == "*/*") && + cm.actionMatches(rule.Action, isReadOnlyTool) { + return cm.evaluateRule(rule) } } // Server patterns next (server/* pattern) - serverPattern := fmt.Sprintf("%s/*", request.ServerName) - for i, rule := range rules { - if rule.Permission != ConsentDeny && rule.Type == request.Type && rule.ToolID == serverPattern && - cm.ruleMatches(rule, request, isReadOnlyTool) { - return cm.evaluateAllowRule(rule, request, i) + serverPattern := NewServerTarget(string(requestTarget[:strings.Index(string(requestTarget), "/")])) + for _, rule := range rules { + if rule.Permission != PermissionDeny && rule.Operation == operationContext && + rule.Target == serverPattern && + cm.actionMatches(rule.Action, isReadOnlyTool) { + return cm.evaluateRule(rule) } } // Specific tool patterns last (exact match) - for i, rule := range rules { - if rule.Permission != ConsentDeny && rule.Type == request.Type && rule.ToolID == request.ToolID && - cm.ruleMatches(rule, request, isReadOnlyTool) { - return cm.evaluateAllowRule(rule, request, i) + for _, rule := range rules { + if rule.Permission != PermissionDeny && rule.Operation == operationContext && + rule.Target == requestTarget && + cm.actionMatches(rule.Action, isReadOnlyTool) { + return cm.evaluateRule(rule) } } return nil } -// ruleMatches checks if a rule matches the request considering scope restrictions -func (cm *consentManager) ruleMatches(rule ConsentRule, request ConsentRequest, isReadOnlyTool bool) bool { - // Default to "all" scope for backward compatibility - ruleScope := rule.RuleScope - if ruleScope == "" { - ruleScope = RuleScopeAll +// targetMatches checks if a rule target matches the request target +func (cm *consentManager) targetMatches(ruleTarget, requestTarget Target) bool { + ruleStr := string(ruleTarget) + requestStr := string(requestTarget) + + // Global wildcards + if ruleStr == "*" || ruleStr == "*/*" { + return true } - // Check scope restrictions - switch ruleScope { - case RuleScopeReadOnly: + // Server wildcard + if strings.HasSuffix(ruleStr, "/*") { + serverName := ruleStr[:len(ruleStr)-2] + return strings.HasPrefix(requestStr, serverName+"/") + } + + // Exact match + return ruleStr == requestStr +} + +// actionMatches checks if a rule action matches the request (considering readonly restrictions) +func (cm *consentManager) actionMatches(ruleAction ActionType, isReadOnlyTool bool) bool { + switch ruleAction { + case ActionReadOnly: // Rule only applies to read-only tools return isReadOnlyTool - case RuleScopeAll: + case ActionAny: // Rule applies to all tools return true default: - // Unknown scope, default to not matching + // Unknown action, default to not matching return false } } - -// evaluateAllowRule evaluates an allow rule and handles one-time cleanup -func (cm *consentManager) evaluateAllowRule(rule ConsentRule, request ConsentRequest, ruleIndex int) *ConsentDecision { - decision := cm.evaluateRule(rule) - - // If this is a one-time consent rule, remove it after 
evaluation - if decision.Allowed && rule.Permission == ConsentOnce { - go func(index int) { - cm.removeSessionRuleByIndex(index) - }(ruleIndex) - } - - return decision -} diff --git a/cli/azd/internal/agent/consent/types.go b/cli/azd/internal/agent/consent/types.go index 7a95f3419ce..2aead9c9866 100644 --- a/cli/azd/internal/agent/consent/types.go +++ b/cli/azd/internal/agent/consent/types.go @@ -6,6 +6,7 @@ package consent import ( "context" "fmt" + "strings" "sync" "time" @@ -13,68 +14,163 @@ import ( "github.com/mark3labs/mcp-go/mcp" ) -// ConsentLevel represents the level of consent granted for a tool -type ConsentLevel string - -// ConsentScope represents where consent rules are stored -type ConsentScope string - -// RuleScope represents what types of tools a rule applies to -type RuleScope string - -// ConsentRuleType represents the type of consent rule -type ConsentRuleType string +// Scope defines the rule applicability level +type Scope string const ( - ConsentDeny ConsentLevel = "deny" - ConsentPrompt ConsentLevel = "prompt" - ConsentOnce ConsentLevel = "once" - ConsentSession ConsentLevel = "session" - ConsentProject ConsentLevel = "project" - ConsentAlways ConsentLevel = "always" - ConsentServerAlways ConsentLevel = "server-always" // All tools from server - ConsentServerReadOnly ConsentLevel = "server-readonly" // Read-only tools from server + ScopeSession Scope = "session" + ScopeProject Scope = "project" + ScopeGlobal Scope = "global" ) +// ActionType defines the kind of action the rule controls +type ActionType string + const ( - ScopeGlobal ConsentScope = "global" - ScopeProject ConsentScope = "project" - ScopeSession ConsentScope = "session" + ActionReadOnly ActionType = "readonly" + ActionAny ActionType = "any" ) +// OperationType defines the feature or context for the rule +type OperationType string + const ( - RuleScopeAll RuleScope = "all" // All tools matching the pattern - RuleScopeReadOnly RuleScope = "readonly" // Only read-only tools matching the pattern + OperationTypeTool OperationType = "tool" // running tools + OperationTypeSampling OperationType = "sampling" // sampling requests ) +// Permission is the consent outcome for a rule +type Permission string + const ( - ConsentRuleTypeTool ConsentRuleType = "tool" // Tool execution consent - ConsentRuleTypeSampling ConsentRuleType = "sampling" // LLM sampling consent + PermissionAllow Permission = "allow" + PermissionDeny Permission = "deny" + PermissionPrompt Permission = "prompt" ) -// AllowedRuleTypes contains the valid rule types for command validation -var AllowedRuleTypes = []string{ - string(ConsentRuleTypeTool), - string(ConsentRuleTypeSampling), +// Target is a consolidated string combining server and tool in the form "server/tool" +// Wildcards supported, e.g., "server/*" means all tools in that server, "*" or "*/*" means all servers/tools +type Target string + +// NewToolTarget creates a target for a specific tool +func NewToolTarget(server, tool string) Target { + return Target(fmt.Sprintf("%s/%s", server, tool)) +} + +// NewServerTarget creates a target for all tools in a server +func NewServerTarget(server string) Target { + return Target(fmt.Sprintf("%s/*", server)) +} + +// NewGlobalTarget creates a target for all servers and tools +func NewGlobalTarget() Target { + return Target("*/*") +} + +// Validate checks if the target format is valid +func (t Target) Validate() error { + str := string(t) + if str == "" { + return fmt.Errorf("target cannot be empty") + } + if str == "*" || str == "*/*" { + 
return nil // Global wildcards are valid + } + parts := strings.Split(str, "/") + if len(parts) != 2 { + return fmt.Errorf("target must be in format 'server/tool', 'server/*', or '*'") + } + if parts[0] == "" { + return fmt.Errorf("server part of target cannot be empty") + } + if parts[1] == "" { + return fmt.Errorf("tool part of target cannot be empty") + } + return nil +} + +// AllowedOperationContexts contains the valid operation contexts for command validation +var AllowedOperationContexts = []string{ + string(OperationTypeTool), + string(OperationTypeSampling), } -// ParseConsentRuleType converts a string to ConsentRuleType with validation -func ParseConsentRuleType(ruleTypeStr string) (ConsentRuleType, error) { - for _, allowedType := range AllowedRuleTypes { - if ruleTypeStr == allowedType { - return ConsentRuleType(ruleTypeStr), nil +// ParseOperationContext converts a string to OperationContext with validation +func ParseOperationContext(contextStr string) (OperationType, error) { + for _, allowedContext := range AllowedOperationContexts { + if contextStr == allowedContext { + return OperationType(contextStr), nil } } - return "", fmt.Errorf("invalid rule type: %s (allowed: %v)", ruleTypeStr, AllowedRuleTypes) + return "", fmt.Errorf("invalid operation context: %s (allowed: %v)", contextStr, AllowedOperationContexts) } -// ConsentRule represents a single consent rule for a tool +// ConsentRule represents a single consent rule entry type ConsentRule struct { - ToolID string `json:"toolId"` - Type ConsentRuleType `json:"type"` // Type of consent rule (tool, sampling, etc.) - Permission ConsentLevel `json:"permission"` // Permission level for this rule type - RuleScope RuleScope `json:"scope,omitempty"` // Defaults to "all" for backward compatibility - GrantedAt time.Time `json:"grantedAt"` + Scope Scope `json:"scope"` + Target Target `json:"target"` // e.g. 
"myServer/myTool", "myServer/*", "*" + Action ActionType `json:"action"` + Operation OperationType `json:"operation"` + Permission Permission `json:"permission"` + GrantedAt time.Time `json:"grantedAt"` +} + +// Validate checks if the consent rule is valid +func (r ConsentRule) Validate() error { + if err := r.Target.Validate(); err != nil { + return fmt.Errorf("invalid target: %w", err) + } + + // Validate enums have valid values + validScopes := []Scope{ScopeSession, ScopeProject, ScopeGlobal} + validScope := false + for _, scope := range validScopes { + if r.Scope == scope { + validScope = true + break + } + } + if !validScope { + return fmt.Errorf("invalid scope: %s", r.Scope) + } + + validActions := []ActionType{ActionReadOnly, ActionAny} + validAction := false + for _, action := range validActions { + if r.Action == action { + validAction = true + break + } + } + if !validAction { + return fmt.Errorf("invalid action: %s", r.Action) + } + + validContexts := []OperationType{OperationTypeTool, OperationTypeSampling} + validContext := false + for _, context := range validContexts { + if r.Operation == context { + validContext = true + break + } + } + if !validContext { + return fmt.Errorf("invalid operation context: %s", r.Operation) + } + + validDecisions := []Permission{PermissionAllow, PermissionDeny, PermissionPrompt} + validDecision := false + for _, decision := range validDecisions { + if r.Permission == decision { + validDecision = true + break + } + } + if !validDecision { + return fmt.Errorf("invalid decision: %s", r.Permission) + } + + return nil } // ConsentConfig represents the MCP consent configuration @@ -84,13 +180,13 @@ type ConsentConfig struct { // ConsentRequest represents a request to check consent for a tool type ConsentRequest struct { - ToolID string - ServerName string - Type ConsentRuleType // Type of consent being requested (tool, sampling, etc.) - Parameters map[string]interface{} - SessionID string - ProjectPath string - Annotations mcp.ToolAnnotation + ToolID string + ServerName string + OperationContext OperationType // Type of consent being requested (tool, sampling, etc.) 
+ Parameters map[string]interface{} + SessionID string + ProjectPath string + Annotations mcp.ToolAnnotation } // ConsentDecision represents the result of a consent check @@ -103,12 +199,16 @@ type ConsentDecision struct { // ConsentManager manages consent rules and decisions type ConsentManager interface { CheckConsent(ctx context.Context, request ConsentRequest) (*ConsentDecision, error) - GrantConsent(ctx context.Context, rule ConsentRule, scope ConsentScope) error - ListConsents(ctx context.Context, scope ConsentScope) ([]ConsentRule, error) - ListConsentsByType(ctx context.Context, scope ConsentScope, ruleType ConsentRuleType) ([]ConsentRule, error) - ClearConsents(ctx context.Context, scope ConsentScope) error - ClearConsentsByType(ctx context.Context, scope ConsentScope, ruleType ConsentRuleType) error - ClearConsentByToolID(ctx context.Context, toolID string, scope ConsentScope) error + GrantConsent(ctx context.Context, rule ConsentRule, scope Scope) error + ListConsents(ctx context.Context, scope Scope) ([]ConsentRule, error) + ListConsentsByOperationContext( + ctx context.Context, + scope Scope, + operationContext OperationType, + ) ([]ConsentRule, error) + ClearConsents(ctx context.Context, scope Scope) error + ClearConsentsByOperationContext(ctx context.Context, scope Scope, operationContext OperationType) error + ClearConsentByTarget(ctx context.Context, target Target, scope Scope) error // Tool wrapping methods WrapTool(tool common.AnnotatedTool) common.AnnotatedTool From 31805e3f22dbc485c371e143b908d8e0fbea4d1f Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Tue, 12 Aug 2025 17:44:59 -0700 Subject: [PATCH 066/116] Revises tool prompts and adds annotations to azd tools --- cli/azd/internal/agent/consent/checker.go | 29 +++++++++---------- .../mcp/tools/azd_architecture_planning.go | 4 +++ .../mcp/tools/azd_azure_yaml_generation.go | 4 +++ .../mcp/tools/azd_discovery_analysis.go | 4 +++ .../mcp/tools/azd_docker_generation.go | 4 +++ .../mcp/tools/azd_iac_generation_rules.go | 4 +++ .../tools/azd_infrastructure_generation.go | 4 +++ cli/azd/internal/mcp/tools/azd_plan_init.go | 4 +++ .../mcp/tools/azd_project_validation.go | 4 +++ cli/azd/internal/mcp/tools/azd_sample.go | 4 +++ cli/azd/internal/mcp/tools/azd_yaml_schema.go | 4 +++ 11 files changed, 54 insertions(+), 15 deletions(-) diff --git a/cli/azd/internal/agent/consent/checker.go b/cli/azd/internal/agent/consent/checker.go index eee347a9a94..c4a06f550c7 100644 --- a/cli/azd/internal/agent/consent/checker.go +++ b/cli/azd/internal/agent/consent/checker.go @@ -158,7 +158,7 @@ func (cc *ConsentChecker) promptForToolConsent( annotations mcp.ToolAnnotation, ) (string, error) { message := fmt.Sprintf( - "Tool %s (%s) requires consent.\n\nHow would you like to proceed?", + "The tool %s from %s wants to run.\n\nWhat would you like to do?", output.WithHighLightFormat(toolName), output.WithHighLightFormat(cc.serverName), ) @@ -168,23 +168,23 @@ func (cc *ConsentChecker) promptForToolConsent( choices := []*ux.SelectChoice{ { Value: "deny", - Label: "Deny - Block this tool execution", + Label: "No - Block this tool", }, { Value: "once", - Label: "Allow once - Execute this time only", + Label: "Yes, just this time", }, { Value: "session", - Label: "Allow for session - Allow until restart", + Label: "Yes, until I restart azd", }, { Value: "project", - Label: "Allow for project - Remember for this project", + Label: "Yes, remember for this project", }, { Value: "always", - Label: "Allow always - Remember globally", + Label: "Yes, 
always allow this tool", }, } @@ -221,7 +221,7 @@ func (cc *ConsentChecker) promptForToolConsent( HelpMessage: helpMessage, Choices: choices, EnableFiltering: ux.Ptr(false), - DisplayCount: 10, + DisplayCount: 5, }) choiceIndex, err := selector.Ask(ctx) @@ -394,8 +394,7 @@ func (cc *ConsentChecker) promptForSamplingConsent( toolName, toolDesc string, ) (string, error) { message := fmt.Sprintf( - "Tool %s (%s) wants to send data to an external language model for processing.\n\n"+ - "How would you like to proceed?", + "The tool %s from %s wants to send data to an AI service.\n\nThis helps improve responses but shares information externally.\n\nWhat would you like to do?", output.WithHighLightFormat(toolName), output.WithHighLightFormat(cc.serverName), ) @@ -405,23 +404,23 @@ func (cc *ConsentChecker) promptForSamplingConsent( choices := []*ux.SelectChoice{ { Value: "deny", - Label: "Deny - Block this sampling request", + Label: "No - Don't send data", }, { Value: "once", - Label: "Allow once - Allow this sampling request only", + Label: "Yes, just this time", }, { Value: "session", - Label: "Allow for session - Allow sampling until restart", + Label: "Yes, until I restart azd", }, { Value: "project", - Label: "Allow for project - Remember for this project", + Label: "Yes, remember for this project", }, { Value: "always", - Label: "Allow always - Remember globally for this tool", + Label: "Yes, always allow this tool", }, } @@ -444,7 +443,7 @@ func (cc *ConsentChecker) promptForSamplingConsent( HelpMessage: helpMessage, Choices: choices, EnableFiltering: ux.Ptr(false), - DisplayCount: 10, + DisplayCount: 5, }) choiceIndex, err := selector.Ask(ctx) diff --git a/cli/azd/internal/mcp/tools/azd_architecture_planning.go b/cli/azd/internal/mcp/tools/azd_architecture_planning.go index 960c74d0cce..0f48a2dfe57 100644 --- a/cli/azd/internal/mcp/tools/azd_architecture_planning.go +++ b/cli/azd/internal/mcp/tools/azd_architecture_planning.go @@ -16,6 +16,10 @@ func NewAzdArchitecturePlanningTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_architecture_planning", + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(false), mcp.WithDescription( `Returns instructions for selecting appropriate Azure services for discovered application components and designing infrastructure architecture. diff --git a/cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go b/cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go index eaa0d51d705..1fb8f334bc4 100644 --- a/cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go +++ b/cli/azd/internal/mcp/tools/azd_azure_yaml_generation.go @@ -16,6 +16,10 @@ func NewAzdAzureYamlGenerationTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_azure_yaml_generation", + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(false), mcp.WithDescription( `Returns instructions for generating the azure.yaml configuration file with proper service hosting, build, and deployment settings for AZD projects. 
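Note: every prompt-style azd tool touched in this commit receives the same four hints — read-only, idempotent, non-destructive, and closed-world. A minimal sketch of that shared pattern, using the mcp-go functional options exactly as they appear in the hunks above (the newReadOnlyPromptTool helper itself is hypothetical and not part of this patch):

package tools

import "github.com/mark3labs/mcp-go/mcp"

// newReadOnlyPromptTool is a hypothetical helper capturing the annotation set
// repeated across the azd_* prompt tools in this commit: the tool only returns
// instructions, so it is read-only, idempotent, non-destructive, and does not
// reach outside the local environment (open-world = false).
func newReadOnlyPromptTool(name, description string) mcp.Tool {
	return mcp.NewTool(
		name,
		mcp.WithReadOnlyHintAnnotation(true),
		mcp.WithIdempotentHintAnnotation(true),
		mcp.WithDestructiveHintAnnotation(false),
		mcp.WithOpenWorldHintAnnotation(false),
		mcp.WithDescription(description),
	)
}

The patch keeps the options inline per tool rather than introducing such a helper; the sketch is only meant to make the repeated annotation block easy to scan.
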
diff --git a/cli/azd/internal/mcp/tools/azd_discovery_analysis.go b/cli/azd/internal/mcp/tools/azd_discovery_analysis.go index 1f4f6bc5087..7d36397cf9d 100644 --- a/cli/azd/internal/mcp/tools/azd_discovery_analysis.go +++ b/cli/azd/internal/mcp/tools/azd_discovery_analysis.go @@ -16,6 +16,10 @@ func NewAzdDiscoveryAnalysisTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_discovery_analysis", + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(false), mcp.WithDescription( `Returns instructions for performing comprehensive discovery and analysis of application components to prepare for Azure Developer CLI (AZD) initialization. diff --git a/cli/azd/internal/mcp/tools/azd_docker_generation.go b/cli/azd/internal/mcp/tools/azd_docker_generation.go index c784cfe61cb..a63825d69c3 100644 --- a/cli/azd/internal/mcp/tools/azd_docker_generation.go +++ b/cli/azd/internal/mcp/tools/azd_docker_generation.go @@ -16,6 +16,10 @@ func NewAzdDockerGenerationTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_docker_generation", + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(false), mcp.WithDescription( `Returns instructions for generating optimized Dockerfiles and container configurations for containerizable services in AZD projects. diff --git a/cli/azd/internal/mcp/tools/azd_iac_generation_rules.go b/cli/azd/internal/mcp/tools/azd_iac_generation_rules.go index 8e2c3f1199b..028e5555839 100644 --- a/cli/azd/internal/mcp/tools/azd_iac_generation_rules.go +++ b/cli/azd/internal/mcp/tools/azd_iac_generation_rules.go @@ -16,6 +16,10 @@ func NewAzdIacGenerationRulesTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_iac_generation_rules", + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(false), mcp.WithDescription( `Returns comprehensive rules and guidelines for generating Bicep Infrastructure as Code files and modules for AZD projects. diff --git a/cli/azd/internal/mcp/tools/azd_infrastructure_generation.go b/cli/azd/internal/mcp/tools/azd_infrastructure_generation.go index bab4cfd9cf6..c29eff36f5c 100644 --- a/cli/azd/internal/mcp/tools/azd_infrastructure_generation.go +++ b/cli/azd/internal/mcp/tools/azd_infrastructure_generation.go @@ -16,6 +16,10 @@ func NewAzdInfrastructureGenerationTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_infrastructure_generation", + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(false), mcp.WithDescription( `Returns instructions for generating modular Bicep infrastructure templates following Azure security and operational best practices for AZD projects. 
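These ReadOnlyHint annotations are what the consent layer's readonly rules key off: consentManager.actionMatches (earlier in this series) lets an ActionReadOnly rule apply only when the tool advertises the hint, while ActionAny rules always apply. A self-contained sketch of that interaction — the ActionType constants and actionMatches are mirrored locally so the example compiles on its own; only mcp.ToolAnnotation is the real mcp-go type:

package main

import (
	"fmt"

	"github.com/mark3labs/mcp-go/mcp"
)

// ActionType mirrors consent.ActionType from this series; redefined here only
// for a standalone example.
type ActionType string

const (
	ActionReadOnly ActionType = "readonly"
	ActionAny      ActionType = "any"
)

// actionMatches mirrors consentManager.actionMatches: a readonly-scoped rule
// applies only when the tool advertises ReadOnlyHint; an "any" rule always applies.
func actionMatches(ruleAction ActionType, annotations mcp.ToolAnnotation) bool {
	isReadOnly := annotations.ReadOnlyHint != nil && *annotations.ReadOnlyHint
	switch ruleAction {
	case ActionReadOnly:
		return isReadOnly
	case ActionAny:
		return true
	default:
		return false
	}
}

func main() {
	readOnly := true
	annotated := mcp.ToolAnnotation{ReadOnlyHint: &readOnly}

	// A rule granted with `--action readonly` matches the hinted azd_* tools above,
	// but not a tool that omits the hint.
	fmt.Println(actionMatches(ActionReadOnly, annotated))            // true
	fmt.Println(actionMatches(ActionReadOnly, mcp.ToolAnnotation{})) // false
	fmt.Println(actionMatches(ActionAny, mcp.ToolAnnotation{}))      // true
}
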
diff --git a/cli/azd/internal/mcp/tools/azd_plan_init.go b/cli/azd/internal/mcp/tools/azd_plan_init.go index 051d233295f..23a78e823eb 100644 --- a/cli/azd/internal/mcp/tools/azd_plan_init.go +++ b/cli/azd/internal/mcp/tools/azd_plan_init.go @@ -16,6 +16,10 @@ func NewAzdPlanInitTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_plan_init", + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(false), mcp.WithDescription( `Returns instructions for orchestrating complete AZD application initialization using structured phases with specialized tools. diff --git a/cli/azd/internal/mcp/tools/azd_project_validation.go b/cli/azd/internal/mcp/tools/azd_project_validation.go index af913410b72..e8b62641298 100644 --- a/cli/azd/internal/mcp/tools/azd_project_validation.go +++ b/cli/azd/internal/mcp/tools/azd_project_validation.go @@ -16,6 +16,10 @@ func NewAzdProjectValidationTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_project_validation", + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(false), mcp.WithDescription( `Returns instructions for validating AZD project by running comprehensive checks on azure.yaml schema, Bicep templates, environment setup, packaging, and deployment preview. diff --git a/cli/azd/internal/mcp/tools/azd_sample.go b/cli/azd/internal/mcp/tools/azd_sample.go index c380812346b..61defeb8cf7 100644 --- a/cli/azd/internal/mcp/tools/azd_sample.go +++ b/cli/azd/internal/mcp/tools/azd_sample.go @@ -12,6 +12,10 @@ func NewSamplingTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_sample_test", + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(false), mcp.WithDescription("Runs MCP sampling to test sampling behavior"), ), Handler: func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { diff --git a/cli/azd/internal/mcp/tools/azd_yaml_schema.go b/cli/azd/internal/mcp/tools/azd_yaml_schema.go index 8b65faaddbe..b132078bb4f 100644 --- a/cli/azd/internal/mcp/tools/azd_yaml_schema.go +++ b/cli/azd/internal/mcp/tools/azd_yaml_schema.go @@ -16,6 +16,10 @@ func NewAzdYamlSchemaTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_yaml_schema", + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(false), mcp.WithDescription( `Gets the Azure YAML JSON schema file specification and structure for azure.yaml `+ `configuration files used in AZD.`, From d5dfe2b5258b0e6635185619e463a916d669f302 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Wed, 13 Aug 2025 09:55:39 -0700 Subject: [PATCH 067/116] Moves flag parsing to consent package --- cli/azd/cmd/mcp.go | 183 +++++++++---------- cli/azd/internal/agent/consent/checker.go | 70 ++++---- cli/azd/internal/agent/consent/manager.go | 204 ++++++++++++++++++---- cli/azd/internal/agent/consent/types.go | 87 +++++++-- 4 files changed, 360 insertions(+), 184 deletions(-) diff --git a/cli/azd/cmd/mcp.go b/cli/azd/cmd/mcp.go index 722657f130a..af790a2b1f3 100644 --- a/cli/azd/cmd/mcp.go +++ b/cli/azd/cmd/mcp.go @@ -67,7 +67,7 @@ azd functionality through the Model Context Protocol interface.`, 
OutputFormats: []output.Format{output.JsonFormat, output.TableFormat}, DefaultFormat: output.TableFormat, ActionResolver: newMcpConsentListAction, - FlagsResolver: newMcpConsentFlags, + FlagsResolver: newMcpConsentListFlags, }) // azd mcp consent clear @@ -81,7 +81,7 @@ azd functionality through the Model Context Protocol interface.`, OutputFormats: []output.Format{output.NoneFormat}, DefaultFormat: output.NoneFormat, ActionResolver: newMcpConsentClearAction, - FlagsResolver: newMcpConsentFlags, + FlagsResolver: newMcpConsentClearFlags, }) // azd mcp consent grant @@ -189,37 +189,37 @@ func (a *mcpStartAction) Run(ctx context.Context) (*actions.ActionResult, error) return nil, nil } -// Flags for MCP consent commands -type mcpConsentFlags struct { - global *internal.GlobalCommandOptions - scope string - target string - operationContext string +// Flags for MCP consent list command +type mcpConsentListFlags struct { + global *internal.GlobalCommandOptions + scope string + target string + operation string } -func newMcpConsentFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpConsentFlags { - flags := &mcpConsentFlags{} +func newMcpConsentListFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpConsentListFlags { + flags := &mcpConsentListFlags{} flags.Bind(cmd.Flags(), global) return flags } -func (f *mcpConsentFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { +func (f *mcpConsentListFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { f.global = global - local.StringVar(&f.scope, "scope", "global", "Consent scope (global, project, session)") + local.StringVar(&f.scope, "scope", "global", "Consent scope (global, project)") local.StringVar(&f.target, "target", "", "Specific target to operate on (server/tool format)") - local.StringVar(&f.operationContext, "context", "", "Operation context to filter by (tool, sampling)") + local.StringVar(&f.operation, "operation", "", "Operation to filter by (tool, sampling)") } // Flags for MCP consent grant command type mcpConsentGrantFlags struct { - globalOptions *internal.GlobalCommandOptions - tool string - server string - globalFlag bool - action string - operation string - permission string - ruleScope string + global *internal.GlobalCommandOptions + tool string + server string + globalFlag bool + action string + operation string + permission string + ruleScope string } func newMcpConsentGrantFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpConsentGrantFlags { @@ -229,19 +229,40 @@ func newMcpConsentGrantFlags(cmd *cobra.Command, global *internal.GlobalCommandO } func (f *mcpConsentGrantFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { - f.globalOptions = global + f.global = global local.StringVar(&f.tool, "tool", "", "Specific tool name (requires --server)") local.StringVar(&f.server, "server", "", "Server name") local.BoolVar(&f.globalFlag, "global", false, "Apply globally to all servers") local.StringVar(&f.action, "action", "all", "Action type: 'all' or 'readonly'") local.StringVar(&f.operation, "operation", "tool", "Operation type: 'tool' or 'sampling'") local.StringVar(&f.permission, "permission", "allow", "Permission: 'allow', 'deny', or 'prompt'") - local.StringVar(&f.ruleScope, "scope", "global", "Rule scope: 'session', 'project', or 'global'") + local.StringVar(&f.ruleScope, "scope", "global", "Rule scope: 'global', or 'project'") +} + +// Flags for MCP consent clear command +type mcpConsentClearFlags struct { + 
global *internal.GlobalCommandOptions + scope string + target string + operation string +} + +func newMcpConsentClearFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpConsentClearFlags { + flags := &mcpConsentClearFlags{} + flags.Bind(cmd.Flags(), global) + return flags +} + +func (f *mcpConsentClearFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { + f.global = global + local.StringVar(&f.scope, "scope", "global", "Consent scope (global, project)") + local.StringVar(&f.target, "target", "", "Specific target to operate on (server/tool format)") + local.StringVar(&f.operation, "operation", "", "Operation to filter by (tool, sampling)") } // Action for MCP consent list command type mcpConsentListAction struct { - flags *mcpConsentFlags + flags *mcpConsentListFlags formatter output.Formatter writer io.Writer console input.Console @@ -250,7 +271,7 @@ type mcpConsentListAction struct { } func newMcpConsentListAction( - flags *mcpConsentFlags, + flags *mcpConsentListFlags, formatter output.Formatter, writer io.Writer, console input.Console, @@ -268,30 +289,22 @@ func newMcpConsentListAction( } func (a *mcpConsentListAction) Run(ctx context.Context) (*actions.ActionResult, error) { - var scope consent.Scope - switch a.flags.scope { - case "global": - scope = consent.ScopeGlobal - case "project": - scope = consent.ScopeProject - case "session": - scope = consent.ScopeSession - default: - return nil, fmt.Errorf("invalid scope: %s (allowed: global, project, session)", a.flags.scope) + scope, err := consent.ParseScope(a.flags.scope) + if err != nil { + return nil, err } var rules []consent.ConsentRule - var err error // Use operation context-filtered method if context is specified - if a.flags.operationContext != "" { - operationContext, parseErr := consent.ParseOperationContext(a.flags.operationContext) + if a.flags.operation != "" { + operation, parseErr := consent.ParseOperationType(a.flags.operation) if parseErr != nil { return nil, parseErr } - rules, err = a.consentManager.ListConsentsByOperationContext(ctx, scope, operationContext) + rules, err = a.consentManager.ListConsentsByOperationType(ctx, scope, operation) } else { - rules, err = a.consentManager.ListConsents(ctx, scope) + rules, err = a.consentManager.ListConsentRules(ctx, scope) } if err != nil { @@ -300,8 +313,8 @@ func (a *mcpConsentListAction) Run(ctx context.Context) (*actions.ActionResult, if len(rules) == 0 { var typeInfo string - if a.flags.operationContext != "" { - typeInfo = fmt.Sprintf(" of context '%s'", a.flags.operationContext) + if a.flags.operation != "" { + typeInfo = fmt.Sprintf(" of context '%s'", a.flags.operation) } fmt.Fprintf(a.writer, "No consent rules found%s.\n", typeInfo) return nil, nil @@ -373,14 +386,14 @@ func (a *mcpConsentListAction) Run(ctx context.Context) (*actions.ActionResult, // Action for MCP consent clear command type mcpConsentClearAction struct { - flags *mcpConsentFlags + flags *mcpConsentClearFlags console input.Console userConfigManager config.UserConfigManager consentManager consent.ConsentManager } func newMcpConsentClearAction( - flags *mcpConsentFlags, + flags *mcpConsentClearFlags, console input.Console, userConfigManager config.UserConfigManager, consentManager consent.ConsentManager, @@ -397,19 +410,11 @@ func (a *mcpConsentClearAction) Run(ctx context.Context) (*actions.ActionResult, // Command heading fmt.Fprintf(a.console.Handles().Stdout, "Clearing MCP consent rules...\n\n") - var scope consent.Scope - switch a.flags.scope { - case 
"global": - scope = consent.ScopeGlobal - case "project": - scope = consent.ScopeProject - case "session": - scope = consent.ScopeSession - default: - return nil, fmt.Errorf("invalid scope: %s (allowed: global, project, session)", a.flags.scope) + scope, err := consent.ParseScope(a.flags.scope) + if err != nil { + return nil, err } - var err error if a.flags.target != "" { // Clear specific target target := consent.Target(a.flags.target) @@ -420,10 +425,10 @@ func (a *mcpConsentClearAction) Run(ctx context.Context) (*actions.ActionResult, } else { // Get confirmation message based on context filter confirmMessage := fmt.Sprintf("Are you sure you want to clear all consent rules for scope '%s'?", a.flags.scope) - if a.flags.operationContext != "" { + if a.flags.operation != "" { confirmMessage = fmt.Sprintf( "Are you sure you want to clear all %s consent rules for scope '%s'?", - a.flags.operationContext, + a.flags.operation, a.flags.scope, ) } @@ -441,19 +446,19 @@ func (a *mcpConsentClearAction) Run(ctx context.Context) (*actions.ActionResult, return nil, nil } - if a.flags.operationContext != "" { + if a.flags.operation != "" { // Context-specific clearing using the new consent manager method - operationContext, parseErr := consent.ParseOperationContext(a.flags.operationContext) + operation, parseErr := consent.ParseOperationType(a.flags.operation) if parseErr != nil { return nil, parseErr } - err = a.consentManager.ClearConsentsByOperationContext(ctx, scope, operationContext) + err = a.consentManager.ClearConsentsByOperationType(ctx, scope, operation) if err == nil { fmt.Fprintf( a.console.Handles().Stdout, "Cleared all %s consent rules for scope: %s\n", - a.flags.operationContext, + a.flags.operation, a.flags.scope, ) } @@ -513,55 +518,31 @@ func (a *mcpConsentGrantAction) Run(ctx context.Context) (*actions.ActionResult, } // Validate action type - var actionType consent.ActionType - switch a.flags.action { - case "readonly": - actionType = consent.ActionReadOnly - case "all": - actionType = consent.ActionAny - default: - return nil, fmt.Errorf("--action must be 'readonly' or 'all'") + actionType, err := consent.ParseActionType(a.flags.action) + if err != nil { + return nil, err } // Validate operation context - var operationContext consent.OperationType - switch a.flags.operation { - case "tool": - operationContext = consent.OperationTypeTool - case "sampling": - operationContext = consent.OperationTypeSampling - default: - return nil, fmt.Errorf("--context must be 'tool' or 'sampling'") + operation, err := consent.ParseOperationType(a.flags.operation) + if err != nil { + return nil, err } // Validate permission - var permission consent.Permission - switch a.flags.permission { - case "allow": - permission = consent.PermissionAllow - case "deny": - permission = consent.PermissionDeny - case "prompt": - permission = consent.PermissionPrompt - default: - return nil, fmt.Errorf("--decision must be 'allow', 'deny', or 'prompt'") + permission, err := consent.ParsePermission(a.flags.permission) + if err != nil { + return nil, err } // Validate rule scope - var ruleScope consent.Scope - switch a.flags.ruleScope { - case "session": - ruleScope = consent.ScopeSession - case "project": - ruleScope = consent.ScopeProject - case "global": - ruleScope = consent.ScopeGlobal - default: - return nil, fmt.Errorf("--scope must be 'session', 'project', or 'global'") + ruleScope, err := consent.ParseScope(a.flags.ruleScope) + if err != nil { + return nil, err } // For sampling context, tool-specific grants 
are not supported - if operationContext == consent.OperationTypeSampling && a.flags.tool != "" { + if operation == consent.OperationTypeSampling && a.flags.tool != "" { return nil, fmt.Errorf("--tool is not supported for sampling rules") } @@ -571,7 +552,7 @@ func (a *mcpConsentGrantAction) Run(ctx context.Context) (*actions.ActionResult, if a.flags.globalFlag { target = consent.NewGlobalTarget() - if operationContext == consent.OperationTypeSampling { + if operation == consent.OperationTypeSampling { if actionType == consent.ActionReadOnly { description = fmt.Sprintf("all read-only sampling globally (%s)", permission) } else { @@ -593,7 +574,7 @@ func (a *mcpConsentGrantAction) Run(ctx context.Context) (*actions.ActionResult, } } else { target = consent.NewServerTarget(a.flags.server) - if operationContext == consent.OperationTypeSampling { + if operation == consent.OperationTypeSampling { if actionType == consent.ActionReadOnly { description = fmt.Sprintf("read-only sampling from server %s (%s)", a.flags.server, permission) } else { @@ -612,7 +593,7 @@ func (a *mcpConsentGrantAction) Run(ctx context.Context) (*actions.ActionResult, Scope: ruleScope, Target: target, Action: actionType, - Operation: operationContext, + Operation: operation, Permission: permission, } diff --git a/cli/azd/internal/agent/consent/checker.go b/cli/azd/internal/agent/consent/checker.go index c4a06f550c7..8ce16523264 100644 --- a/cli/azd/internal/agent/consent/checker.go +++ b/cli/azd/internal/agent/consent/checker.go @@ -40,11 +40,10 @@ func (cc *ConsentChecker) CheckToolConsent( // Create consent request consentRequest := ConsentRequest{ - ToolID: toolID, - ServerName: cc.serverName, - OperationContext: OperationTypeTool, // This is a tool execution request - SessionID: "", // Not needed since each manager represents one session - Annotations: annotations, + ToolID: toolID, + ServerName: cc.serverName, + Operation: OperationTypeTool, // This is a tool execution request + Annotations: annotations, } return cc.consentMgr.CheckConsent(ctx, consentRequest) @@ -59,10 +58,9 @@ func (cc *ConsentChecker) CheckSamplingConsent( // Create consent request for sampling consentRequest := ConsentRequest{ - ToolID: toolID, - ServerName: cc.serverName, - OperationContext: OperationTypeSampling, // This is a sampling request - SessionID: "", // Not needed since each manager represents one session + ToolID: toolID, + ServerName: cc.serverName, + Operation: OperationTypeSampling, // This is a sampling request } return cc.consentMgr.CheckConsent(ctx, consentRequest) @@ -178,16 +176,21 @@ func (cc *ConsentChecker) promptForToolConsent( Value: "session", Label: "Yes, until I restart azd", }, - { + } + + // Add project option only if we have an environment context + if cc.consentMgr.IsProjectScopeAvailable(ctx) { + choices = append(choices, &ux.SelectChoice{ Value: "project", Label: "Yes, remember for this project", - }, - { - Value: "always", - Label: "Yes, always allow this tool", - }, + }) } + choices = append(choices, &ux.SelectChoice{ + Value: "always", + Label: "Yes, always allow this tool", + }) + // Add server trust option if not already trusted if !cc.isServerAlreadyTrusted(ctx, OperationTypeTool) { choices = append(choices, &ux.SelectChoice{ @@ -237,17 +240,16 @@ func (cc *ConsentChecker) promptForToolConsent( } // isServerAlreadyTrusted checks if the server is already trusted for the specified operation context -func (cc *ConsentChecker) isServerAlreadyTrusted(ctx context.Context, operationContext OperationType) bool { 
+func (cc *ConsentChecker) isServerAlreadyTrusted(ctx context.Context, operation OperationType) bool { // Create a mock request to check if server has trust for the specified operation context request := ConsentRequest{ - ToolID: fmt.Sprintf("%s/test-tool", cc.serverName), - ServerName: cc.serverName, - OperationContext: operationContext, - SessionID: "", + ToolID: fmt.Sprintf("%s/test-tool", cc.serverName), + ServerName: cc.serverName, + Operation: operation, } // For tool requests, add annotations to avoid readonly-only matches - if operationContext == OperationTypeTool { + if operation == OperationTypeTool { request.Annotations = mcp.ToolAnnotation{} // No readonly hint } @@ -266,7 +268,7 @@ func (cc *ConsentChecker) grantConsentFromChoice( ctx context.Context, toolID string, choice string, - operationContext OperationType, + operation OperationType, ) error { var rule ConsentRule var scope Scope @@ -285,7 +287,7 @@ func (cc *ConsentChecker) grantConsentFromChoice( Scope: ScopeSession, Target: NewToolTarget(serverName, toolName), Action: ActionAny, - Operation: operationContext, + Operation: operation, Permission: PermissionAllow, } scope = ScopeSession @@ -294,7 +296,7 @@ func (cc *ConsentChecker) grantConsentFromChoice( Scope: ScopeSession, Target: NewToolTarget(serverName, toolName), Action: ActionAny, - Operation: operationContext, + Operation: operation, Permission: PermissionAllow, } scope = ScopeSession @@ -303,7 +305,7 @@ func (cc *ConsentChecker) grantConsentFromChoice( Scope: ScopeProject, Target: NewToolTarget(serverName, toolName), Action: ActionAny, - Operation: operationContext, + Operation: operation, Permission: PermissionAllow, } scope = ScopeProject @@ -312,7 +314,7 @@ func (cc *ConsentChecker) grantConsentFromChoice( Scope: ScopeGlobal, Target: NewToolTarget(serverName, toolName), Action: ActionAny, - Operation: operationContext, + Operation: operation, Permission: PermissionAllow, } scope = ScopeGlobal @@ -322,7 +324,7 @@ func (cc *ConsentChecker) grantConsentFromChoice( Scope: ScopeGlobal, Target: NewServerTarget(serverName), Action: ActionAny, - Operation: operationContext, + Operation: operation, Permission: PermissionAllow, } scope = ScopeGlobal @@ -331,33 +333,33 @@ func (cc *ConsentChecker) grantConsentFromChoice( Scope: ScopeGlobal, Target: NewGlobalTarget(), Action: ActionAny, - Operation: operationContext, + Operation: operation, Permission: PermissionAllow, } scope = ScopeGlobal case "readonly_server": // Grant trust to readonly tools from this server (only for tool context) - if operationContext != OperationTypeTool { + if operation != OperationTypeTool { return fmt.Errorf("readonly server option only available for tool consent") } rule = ConsentRule{ Scope: ScopeGlobal, Target: NewServerTarget(serverName), Action: ActionReadOnly, - Operation: operationContext, + Operation: operation, Permission: PermissionAllow, } scope = ScopeGlobal case "readonly_global": // Grant trust to all readonly tools globally (only for tool context) - if operationContext != OperationTypeTool { + if operation != OperationTypeTool { return fmt.Errorf("readonly global option only available for tool consent") } rule = ConsentRule{ Scope: ScopeGlobal, Target: NewGlobalTarget(), Action: ActionReadOnly, - Operation: operationContext, + Operation: operation, Permission: PermissionAllow, } scope = ScopeGlobal @@ -394,7 +396,9 @@ func (cc *ConsentChecker) promptForSamplingConsent( toolName, toolDesc string, ) (string, error) { message := fmt.Sprintf( - "The tool %s from %s wants to send 
data to an AI service.\n\nThis helps improve responses but shares information externally.\n\nWhat would you like to do?", + "The tool %s from %s wants to send data to an AI service.\n\n"+ + "This helps improve responses but shares information externally.\n\n"+ + "What would you like to do?", output.WithHighLightFormat(toolName), output.WithHighLightFormat(cc.serverName), ) diff --git a/cli/azd/internal/agent/consent/manager.go b/cli/azd/internal/agent/consent/manager.go index 4f2ac498c55..96aa91fa2ce 100644 --- a/cli/azd/internal/agent/consent/manager.go +++ b/cli/azd/internal/agent/consent/manager.go @@ -12,7 +12,9 @@ import ( "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" "github.com/azure/azure-dev/cli/azd/pkg/config" + "github.com/azure/azure-dev/cli/azd/pkg/environment" "github.com/azure/azure-dev/cli/azd/pkg/input" + "github.com/azure/azure-dev/cli/azd/pkg/lazy" ) const ( @@ -20,6 +22,8 @@ const ( ) // Global state for tracking current executing tool +// This is a work around right now since the MCP protocol does not contain enough information in the sampling requests +// Specifically, the tool name and server are not included in the request context var ( executingTool = &ExecutingTool{} ) @@ -60,6 +64,7 @@ func GetCurrentExecutingTool() *ExecutingTool { // consentManager implements the ConsentManager interface type consentManager struct { + lazyEnvManager *lazy.Lazy[environment.Manager] console input.Console userConfigManager config.UserConfigManager sessionRules []ConsentRule // Rules for this session @@ -68,10 +73,12 @@ type consentManager struct { // NewConsentManager creates a new consent manager func NewConsentManager( + lazyEnvManager *lazy.Lazy[environment.Manager], console input.Console, userConfigManager config.UserConfigManager, ) ConsentManager { return &consentManager{ + lazyEnvManager: lazyEnvManager, console: console, userConfigManager: userConfigManager, sessionRules: make([]ConsentRule, 0), @@ -114,13 +121,13 @@ func (cm *consentManager) GrantConsent(ctx context.Context, rule ConsentRule, sc } } -// ListConsents lists consent rules for a given scope -func (cm *consentManager) ListConsents(ctx context.Context, scope Scope) ([]ConsentRule, error) { +// ListConsentRules lists consent rules for a given scope +func (cm *consentManager) ListConsentRules(ctx context.Context, scope Scope) ([]ConsentRule, error) { switch scope { case ScopeSession: return cm.getSessionRules(), nil case ScopeProject: - return cm.getProjectRules(ctx, "") + return cm.getProjectRules(ctx) case ScopeGlobal: return cm.getGlobalRules(ctx) default: @@ -134,7 +141,7 @@ func (cm *consentManager) ClearConsents(ctx context.Context, scope Scope) error case ScopeSession: return cm.clearSessionRules() case ScopeProject: - return fmt.Errorf("project-level consent clearing not yet implemented") + return cm.clearProjectRules(ctx) case ScopeGlobal: return cm.clearGlobalRules(ctx) default: @@ -148,7 +155,7 @@ func (cm *consentManager) ClearConsentByTarget(ctx context.Context, target Targe case ScopeSession: return cm.removeSessionRule(target) case ScopeProject: - return fmt.Errorf("project-level consent removal not yet implemented") + return cm.removeProjectRule(ctx, target) case ScopeGlobal: return cm.removeGlobalRule(ctx, target) default: @@ -156,20 +163,20 @@ func (cm *consentManager) ClearConsentByTarget(ctx context.Context, target Targe } } -// ListConsentsByOperationContext lists consent rules filtered by operation context for a given scope -func (cm *consentManager) 
ListConsentsByOperationContext( +// ListConsentsByOperationType lists consent rules filtered by operation context for a given scope +func (cm *consentManager) ListConsentsByOperationType( ctx context.Context, scope Scope, - operationContext OperationType, + operation OperationType, ) ([]ConsentRule, error) { - allRules, err := cm.ListConsents(ctx, scope) + allRules, err := cm.ListConsentRules(ctx, scope) if err != nil { return nil, err } filteredRules := make([]ConsentRule, 0) for _, rule := range allRules { - if rule.Operation == operationContext { + if rule.Operation == operation { filteredRules = append(filteredRules, rule) } } @@ -177,13 +184,13 @@ func (cm *consentManager) ListConsentsByOperationContext( return filteredRules, nil } -// ClearConsentsByOperationContext clears all consent rules of a specific operation context for a given scope -func (cm *consentManager) ClearConsentsByOperationContext( +// ClearConsentsByOperationType clears all consent rules of a specific operation context for a given scope +func (cm *consentManager) ClearConsentsByOperationType( ctx context.Context, scope Scope, - operationContext OperationType, + operation OperationType, ) error { - rules, err := cm.ListConsentsByOperationContext(ctx, scope, operationContext) + rules, err := cm.ListConsentsByOperationType(ctx, scope, operation) if err != nil { return fmt.Errorf("failed to list consent rules: %w", err) } @@ -197,6 +204,18 @@ func (cm *consentManager) ClearConsentsByOperationContext( return nil } +// IsProjectScopeAvailable checks if project scope is available (i.e., we have an environment context) +func (cm *consentManager) IsProjectScopeAvailable(ctx context.Context) bool { + envManager, err := cm.lazyEnvManager.GetValue() + if err != nil { + return false + } + + // Try to get the current environment + _, err = envManager.Get(ctx, "") + return err == nil +} + // WrapTool wraps a single langchaingo tool with consent protection func (cm *consentManager) WrapTool(tool common.AnnotatedTool) common.AnnotatedTool { return newConsentWrapperTool(tool, cm.console, cm) @@ -238,9 +257,36 @@ func (cm *consentManager) addSessionRule(rule ConsentRule) error { // addProjectRule adds a rule to the project configuration func (cm *consentManager) addProjectRule(ctx context.Context, rule ConsentRule) error { - // This would need to be implemented with the environment manager - // For now, return an error to indicate it's not implemented - return fmt.Errorf("project-level consent not yet implemented") + if !cm.IsProjectScopeAvailable(ctx) { + return fmt.Errorf("project scope is not available (no environment context)") + } + + envManager, err := cm.lazyEnvManager.GetValue() + if err != nil { + return fmt.Errorf("no environment available for project-level consent: %w", err) + } + + // Get the current environment - this will be the active environment + env, err := envManager.Get(ctx, "") + if err != nil { + return fmt.Errorf("failed to get current environment: %w", err) + } + + var consentConfig ConsentConfig + if exists, err := env.Config.GetSection(ConfigKeyMCPConsent, &consentConfig); err != nil { + return fmt.Errorf("failed to get consent config from environment: %w", err) + } else if !exists { + consentConfig = ConsentConfig{} + } + + // Add or update the rule + consentConfig.Rules = cm.addOrUpdateRule(consentConfig.Rules, rule) + + if err := env.Config.Set(ConfigKeyMCPConsent, consentConfig); err != nil { + return fmt.Errorf("failed to set consent config in environment: %w", err) + } + + return envManager.Save(ctx, 
env) } // addGlobalRule adds a rule to the global configuration @@ -294,9 +340,32 @@ func (cm *consentManager) getSessionRules() []ConsentRule { } // getProjectRules returns project-level consent rules -func (cm *consentManager) getProjectRules(ctx context.Context, projectPath string) ([]ConsentRule, error) { - // TODO: Implement project-level consent rules - return []ConsentRule{}, nil +func (cm *consentManager) getProjectRules(ctx context.Context) ([]ConsentRule, error) { + if !cm.IsProjectScopeAvailable(ctx) { + return nil, fmt.Errorf("project scope is not available (no environment context)") + } + + envManager, err := cm.lazyEnvManager.GetValue() + if err != nil { + // No environment available - return empty rules without error + return []ConsentRule{}, nil + } + + // Get the current environment - this will be the active environment + env, err := envManager.Get(ctx, "") + if err != nil { + // Environment not found - return empty rules without error + return []ConsentRule{}, nil + } + + var consentConfig ConsentConfig + if exists, err := env.Config.GetSection(ConfigKeyMCPConsent, &consentConfig); err != nil { + return nil, fmt.Errorf("failed to get consent config from environment: %w", err) + } else if !exists { + return []ConsentRule{}, nil + } + + return consentConfig.Rules, nil } // getGlobalRules returns global consent rules @@ -353,6 +422,34 @@ func (cm *consentManager) clearGlobalRules(ctx context.Context) error { return cm.userConfigManager.Save(userConfig) } +// clearProjectRules clears all project-level consent rules +func (cm *consentManager) clearProjectRules(ctx context.Context) error { + if !cm.IsProjectScopeAvailable(ctx) { + return fmt.Errorf("project scope is not available (no environment context)") + } + + envManager, err := cm.lazyEnvManager.GetValue() + if err != nil { + return fmt.Errorf("no environment available for project-level consent: %w", err) + } + + // Get the current environment + env, err := envManager.Get(ctx, "") + if err != nil { + return fmt.Errorf("failed to get current environment: %w", err) + } + + consentConfig := ConsentConfig{ + Rules: []ConsentRule{}, + } + + if err := env.Config.Set(ConfigKeyMCPConsent, consentConfig); err != nil { + return fmt.Errorf("failed to clear consent config in environment: %w", err) + } + + return envManager.Save(ctx, env) +} + // removeSessionRule removes a specific rule from session rules func (cm *consentManager) removeSessionRule(target Target) error { cm.sessionMutex.Lock() @@ -370,6 +467,47 @@ func (cm *consentManager) removeSessionRule(target Target) error { return nil } +// removeProjectRule removes a specific rule from project configuration +func (cm *consentManager) removeProjectRule(ctx context.Context, target Target) error { + if !cm.IsProjectScopeAvailable(ctx) { + return fmt.Errorf("project scope is not available (no environment context)") + } + + envManager, err := cm.lazyEnvManager.GetValue() + if err != nil { + return fmt.Errorf("no environment available for project-level consent: %w", err) + } + + // Get the current environment + env, err := envManager.Get(ctx, "") + if err != nil { + return fmt.Errorf("failed to get current environment: %w", err) + } + + var consentConfig ConsentConfig + if exists, err := env.Config.GetSection(ConfigKeyMCPConsent, &consentConfig); err != nil { + return fmt.Errorf("failed to get consent config from environment: %w", err) + } else if !exists { + return nil // Nothing to remove + } + + // Filter out the rule to remove + filtered := make([]ConsentRule, 0, 
len(consentConfig.Rules)) + for _, rule := range consentConfig.Rules { + if rule.Target != target { + filtered = append(filtered, rule) + } + } + + consentConfig.Rules = filtered + + if err := env.Config.Set(ConfigKeyMCPConsent, consentConfig); err != nil { + return fmt.Errorf("failed to update consent config in environment: %w", err) + } + + return envManager.Save(ctx, env) +} + // removeGlobalRule removes a specific rule from global configuration func (cm *consentManager) removeGlobalRule(ctx context.Context, target Target) error { userConfig, err := cm.userConfigManager.Load() @@ -405,8 +543,8 @@ func (cm *consentManager) removeGlobalRule(ctx context.Context, target Target) e func (cm *consentManager) checkUnifiedRules(ctx context.Context, request ConsentRequest) *ConsentDecision { isReadOnlyTool := request.Annotations.ReadOnlyHint != nil && *request.Annotations.ReadOnlyHint - // Build the target for this request - requestTarget := NewToolTarget(request.ServerName, request.ToolID) + // Build the target for this request - ToolID is already in "server/tool" format + requestTarget := Target(request.ToolID) // Check session rules first cm.sessionMutex.RLock() @@ -414,16 +552,16 @@ func (cm *consentManager) checkUnifiedRules(ctx context.Context, request Consent cm.sessionMutex.RUnlock() if decision := cm.findMatchingRule( - sessionRules, requestTarget, request.OperationContext, isReadOnlyTool, + sessionRules, requestTarget, request.Operation, isReadOnlyTool, ); decision != nil { return decision } - // Check project rules - if request.ProjectPath != "" { - if projectRules, err := cm.getProjectRules(ctx, request.ProjectPath); err == nil { + // Check project rules if environment is available + if cm.IsProjectScopeAvailable(ctx) { + if projectRules, err := cm.getProjectRules(ctx); err == nil { if decision := cm.findMatchingRule( - projectRules, requestTarget, request.OperationContext, isReadOnlyTool, + projectRules, requestTarget, request.Operation, isReadOnlyTool, ); decision != nil { return decision } @@ -433,7 +571,7 @@ func (cm *consentManager) checkUnifiedRules(ctx context.Context, request Consent // Check global rules if globalRules, err := cm.getGlobalRules(ctx); err == nil { if decision := cm.findMatchingRule( - globalRules, requestTarget, request.OperationContext, isReadOnlyTool, + globalRules, requestTarget, request.Operation, isReadOnlyTool, ); decision != nil { return decision } @@ -446,7 +584,7 @@ func (cm *consentManager) checkUnifiedRules(ctx context.Context, request Consent func (cm *consentManager) findMatchingRule( rules []ConsentRule, requestTarget Target, - operationContext OperationType, + operation OperationType, isReadOnlyTool bool, ) *ConsentDecision { // Process rules in precedence order: deny rules first, then allow rules @@ -454,7 +592,7 @@ func (cm *consentManager) findMatchingRule( // First pass: Check for deny rules for _, rule := range rules { - if rule.Permission == PermissionDeny && rule.Operation == operationContext && + if rule.Permission == PermissionDeny && rule.Operation == operation && cm.targetMatches(rule.Target, requestTarget) && cm.actionMatches(rule.Action, isReadOnlyTool) { return &ConsentDecision{Allowed: false, Reason: "explicitly denied"} } @@ -463,7 +601,7 @@ func (cm *consentManager) findMatchingRule( // Second pass: Check for allow/prompt rules in precedence order // Global patterns first (* pattern) for _, rule := range rules { - if rule.Permission != PermissionDeny && rule.Operation == operationContext && + if rule.Permission != PermissionDeny 
&& rule.Operation == operation && (rule.Target == "*" || rule.Target == "*/*") && cm.actionMatches(rule.Action, isReadOnlyTool) { return cm.evaluateRule(rule) @@ -473,7 +611,7 @@ func (cm *consentManager) findMatchingRule( // Server patterns next (server/* pattern) serverPattern := NewServerTarget(string(requestTarget[:strings.Index(string(requestTarget), "/")])) for _, rule := range rules { - if rule.Permission != PermissionDeny && rule.Operation == operationContext && + if rule.Permission != PermissionDeny && rule.Operation == operation && rule.Target == serverPattern && cm.actionMatches(rule.Action, isReadOnlyTool) { return cm.evaluateRule(rule) @@ -482,7 +620,7 @@ func (cm *consentManager) findMatchingRule( // Specific tool patterns last (exact match) for _, rule := range rules { - if rule.Permission != PermissionDeny && rule.Operation == operationContext && + if rule.Permission != PermissionDeny && rule.Operation == operation && rule.Target == requestTarget && cm.actionMatches(rule.Action, isReadOnlyTool) { return cm.evaluateRule(rule) diff --git a/cli/azd/internal/agent/consent/types.go b/cli/azd/internal/agent/consent/types.go index 2aead9c9866..6f05719d1e1 100644 --- a/cli/azd/internal/agent/consent/types.go +++ b/cli/azd/internal/agent/consent/types.go @@ -89,20 +89,72 @@ func (t Target) Validate() error { return nil } -// AllowedOperationContexts contains the valid operation contexts for command validation -var AllowedOperationContexts = []string{ +// AllowedOperationTypes contains the valid operation contexts for command validation +var AllowedOperationTypes = []string{ string(OperationTypeTool), string(OperationTypeSampling), } -// ParseOperationContext converts a string to OperationContext with validation -func ParseOperationContext(contextStr string) (OperationType, error) { - for _, allowedContext := range AllowedOperationContexts { +// ParseOperationType converts a string to OperationType with validation +func ParseOperationType(contextStr string) (OperationType, error) { + for _, allowedContext := range AllowedOperationTypes { if contextStr == allowedContext { return OperationType(contextStr), nil } } - return "", fmt.Errorf("invalid operation context: %s (allowed: %v)", contextStr, AllowedOperationContexts) + return "", fmt.Errorf("invalid operation context: %s (allowed: %v)", contextStr, AllowedOperationTypes) +} + +// AllowedScopes contains the valid scopes for command validation +var AllowedScopes = []string{ + string(ScopeGlobal), + string(ScopeProject), + string(ScopeSession), +} + +// ParseScope converts a string to Scope with validation +func ParseScope(scopeStr string) (Scope, error) { + for _, allowedScope := range AllowedScopes { + if scopeStr == allowedScope { + return Scope(scopeStr), nil + } + } + return "", fmt.Errorf("invalid scope: %s (allowed: %v)", scopeStr, AllowedScopes) +} + +// AllowedActionTypes contains the valid action types for command validation +var AllowedActionTypes = []string{ + "readonly", + "all", +} + +// ParseActionType converts a string to ActionType with validation +func ParseActionType(actionStr string) (ActionType, error) { + switch actionStr { + case "readonly": + return ActionReadOnly, nil + case "all": + return ActionAny, nil + default: + return "", fmt.Errorf("invalid action type: %s (allowed: %v)", actionStr, AllowedActionTypes) + } +} + +// AllowedPermissions contains the valid permissions for command validation +var AllowedPermissions = []string{ + string(PermissionAllow), + string(PermissionDeny), + string(PermissionPrompt), 
+} + +// ParsePermission converts a string to Permission with validation +func ParsePermission(permissionStr string) (Permission, error) { + for _, allowedPermission := range AllowedPermissions { + if permissionStr == allowedPermission { + return Permission(permissionStr), nil + } + } + return "", fmt.Errorf("invalid permission: %s (allowed: %v)", permissionStr, AllowedPermissions) } // ConsentRule represents a single consent rule entry @@ -180,13 +232,11 @@ type ConsentConfig struct { // ConsentRequest represents a request to check consent for a tool type ConsentRequest struct { - ToolID string - ServerName string - OperationContext OperationType // Type of consent being requested (tool, sampling, etc.) - Parameters map[string]interface{} - SessionID string - ProjectPath string - Annotations mcp.ToolAnnotation + ToolID string + ServerName string + Operation OperationType // Type of consent being requested (tool, sampling, etc.) + Parameters map[string]interface{} + Annotations mcp.ToolAnnotation } // ConsentDecision represents the result of a consent check @@ -200,16 +250,19 @@ type ConsentDecision struct { type ConsentManager interface { CheckConsent(ctx context.Context, request ConsentRequest) (*ConsentDecision, error) GrantConsent(ctx context.Context, rule ConsentRule, scope Scope) error - ListConsents(ctx context.Context, scope Scope) ([]ConsentRule, error) - ListConsentsByOperationContext( + ListConsentRules(ctx context.Context, scope Scope) ([]ConsentRule, error) + ListConsentsByOperationType( ctx context.Context, scope Scope, - operationContext OperationType, + operation OperationType, ) ([]ConsentRule, error) ClearConsents(ctx context.Context, scope Scope) error - ClearConsentsByOperationContext(ctx context.Context, scope Scope, operationContext OperationType) error + ClearConsentsByOperationType(ctx context.Context, scope Scope, operation OperationType) error ClearConsentByTarget(ctx context.Context, target Target, scope Scope) error + // Environment context methods + IsProjectScopeAvailable(ctx context.Context) bool + // Tool wrapping methods WrapTool(tool common.AnnotatedTool) common.AnnotatedTool WrapTools(tools []common.AnnotatedTool) []common.AnnotatedTool From 73aab2954b4f089e0aac8bd6838f79b7e7e7377c Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Wed, 13 Aug 2025 11:51:08 -0700 Subject: [PATCH 068/116] Made updates to consent filtering APIs --- cli/azd/cmd/mcp.go | 325 ++++++++++++++-------- cli/azd/internal/agent/consent/checker.go | 11 +- cli/azd/internal/agent/consent/manager.go | 214 +++++++------- cli/azd/internal/agent/consent/types.go | 60 +++- 4 files changed, 358 insertions(+), 252 deletions(-) diff --git a/cli/azd/cmd/mcp.go b/cli/azd/cmd/mcp.go index af790a2b1f3..17394894508 100644 --- a/cli/azd/cmd/mcp.go +++ b/cli/azd/cmd/mcp.go @@ -7,6 +7,7 @@ import ( "context" "fmt" "io" + "strings" "github.com/azure/azure-dev/cli/azd/cmd/actions" "github.com/azure/azure-dev/cli/azd/internal" @@ -15,6 +16,7 @@ import ( "github.com/azure/azure-dev/cli/azd/pkg/config" "github.com/azure/azure-dev/cli/azd/pkg/input" "github.com/azure/azure-dev/cli/azd/pkg/output" + "github.com/azure/azure-dev/cli/azd/pkg/output/ux" "github.com/mark3labs/mcp-go/server" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -99,9 +101,6 @@ Examples: # Grant always permission to all tools globally azd mcp consent grant --global --permission always - # Grant session permission to a specific server - azd mcp consent grant --server my-server --permission session - # Grant project permission to a 
specific tool with read-only scope azd mcp consent grant --server my-server --tool my-tool --permission project --scope read-only`, Args: cobra.NoArgs, @@ -191,10 +190,12 @@ func (a *mcpStartAction) Run(ctx context.Context) (*actions.ActionResult, error) // Flags for MCP consent list command type mcpConsentListFlags struct { - global *internal.GlobalCommandOptions - scope string - target string - operation string + global *internal.GlobalCommandOptions + scope string + target string + operation string + action string + permission string } func newMcpConsentListFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpConsentListFlags { @@ -205,9 +206,16 @@ func newMcpConsentListFlags(cmd *cobra.Command, global *internal.GlobalCommandOp func (f *mcpConsentListFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { f.global = global - local.StringVar(&f.scope, "scope", "global", "Consent scope (global, project)") + local.StringVar( + &f.scope, + "scope", + "", + "Consent scope to filter by (global, project). If not specified, lists rules from all scopes.", + ) local.StringVar(&f.target, "target", "", "Specific target to operate on (server/tool format)") local.StringVar(&f.operation, "operation", "", "Operation to filter by (tool, sampling)") + local.StringVar(&f.action, "action", "", "Action type to filter by (readonly, any)") + local.StringVar(&f.permission, "permission", "", "Permission to filter by (allow, deny, prompt)") } // Flags for MCP consent grant command @@ -219,7 +227,7 @@ type mcpConsentGrantFlags struct { action string operation string permission string - ruleScope string + scope string } func newMcpConsentGrantFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpConsentGrantFlags { @@ -236,15 +244,17 @@ func (f *mcpConsentGrantFlags) Bind(local *pflag.FlagSet, global *internal.Globa local.StringVar(&f.action, "action", "all", "Action type: 'all' or 'readonly'") local.StringVar(&f.operation, "operation", "tool", "Operation type: 'tool' or 'sampling'") local.StringVar(&f.permission, "permission", "allow", "Permission: 'allow', 'deny', or 'prompt'") - local.StringVar(&f.ruleScope, "scope", "global", "Rule scope: 'global', or 'project'") + local.StringVar(&f.scope, "scope", "global", "Rule scope: 'global', or 'project'") } // Flags for MCP consent clear command type mcpConsentClearFlags struct { - global *internal.GlobalCommandOptions - scope string - target string - operation string + global *internal.GlobalCommandOptions + scope string + target string + operation string + action string + permission string } func newMcpConsentClearFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpConsentClearFlags { @@ -255,9 +265,16 @@ func newMcpConsentClearFlags(cmd *cobra.Command, global *internal.GlobalCommandO func (f *mcpConsentClearFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { f.global = global - local.StringVar(&f.scope, "scope", "global", "Consent scope (global, project)") + local.StringVar( + &f.scope, + "scope", + "", + "Consent scope to filter by (global, project). 
If not specified, clears rules from all scopes.", + ) local.StringVar(&f.target, "target", "", "Specific target to operate on (server/tool format)") local.StringVar(&f.operation, "operation", "", "Operation to filter by (tool, sampling)") + local.StringVar(&f.action, "action", "", "Action type to filter by (readonly, any)") + local.StringVar(&f.permission, "permission", "", "Permission to filter by (allow, deny, prompt)") } // Action for MCP consent list command @@ -289,34 +306,71 @@ func newMcpConsentListAction( } func (a *mcpConsentListAction) Run(ctx context.Context) (*actions.ActionResult, error) { - scope, err := consent.ParseScope(a.flags.scope) - if err != nil { - return nil, err + // Build filter options based on provided flags + var filterOptions []consent.FilterOption + + // Add scope filter if provided + if a.flags.scope != "" { + scope, err := consent.ParseScope(a.flags.scope) + if err != nil { + return nil, err + } + filterOptions = append(filterOptions, consent.WithScope(scope)) } - var rules []consent.ConsentRule - - // Use operation context-filtered method if context is specified + // Add operation filter if provided if a.flags.operation != "" { operation, parseErr := consent.ParseOperationType(a.flags.operation) if parseErr != nil { return nil, parseErr } - rules, err = a.consentManager.ListConsentsByOperationType(ctx, scope, operation) - } else { - rules, err = a.consentManager.ListConsentRules(ctx, scope) + filterOptions = append(filterOptions, consent.WithOperation(operation)) + } + + // Add target filter if provided + if a.flags.target != "" { + target := consent.Target(a.flags.target) + filterOptions = append(filterOptions, consent.WithTarget(target)) } + // Add action filter if provided + if a.flags.action != "" { + action, parseErr := consent.ParseActionType(a.flags.action) + if parseErr != nil { + return nil, parseErr + } + filterOptions = append(filterOptions, consent.WithAction(action)) + } + + // Add permission filter if provided + if a.flags.permission != "" { + permission, parseErr := consent.ParsePermission(a.flags.permission) + if parseErr != nil { + return nil, parseErr + } + filterOptions = append(filterOptions, consent.WithPermission(permission)) + } + + // Get rules with filters + rules, err := a.consentManager.ListConsentRules(ctx, filterOptions...) 
if err != nil { return nil, fmt.Errorf("failed to list consent rules: %w", err) } if len(rules) == 0 { - var typeInfo string - if a.flags.operation != "" { - typeInfo = fmt.Sprintf(" of context '%s'", a.flags.operation) + filterDesc := formatConsentDescription( + a.flags.scope, + a.flags.action, + a.flags.operation, + a.flags.target, + a.flags.permission, + ) + + if filterDesc != "" { + fmt.Fprintf(a.writer, "No consent rules found matching filters: %s.\n", filterDesc) + } else { + fmt.Fprintf(a.writer, "No consent rules found.\n") } - fmt.Fprintf(a.writer, "No consent rules found%s.\n", typeInfo) return nil, nil } @@ -408,74 +462,105 @@ func newMcpConsentClearAction( func (a *mcpConsentClearAction) Run(ctx context.Context) (*actions.ActionResult, error) { // Command heading - fmt.Fprintf(a.console.Handles().Stdout, "Clearing MCP consent rules...\n\n") + a.console.MessageUxItem(ctx, &ux.MessageTitle{ + Title: "Clear MCP consent rules (azd mcp consent clear)", + TitleNote: "Removes consent rules for MCP tools and servers", + }) - scope, err := consent.ParseScope(a.flags.scope) - if err != nil { - return nil, err + a.console.Message(ctx, "") + + // Build filter options based on provided flags + var filterOptions []consent.FilterOption + + // Add scope filter if provided + if a.flags.scope != "" { + scope, err := consent.ParseScope(a.flags.scope) + if err != nil { + return nil, err + } + filterOptions = append(filterOptions, consent.WithScope(scope)) } + // Add operation filter if provided + if a.flags.operation != "" { + operation, parseErr := consent.ParseOperationType(a.flags.operation) + if parseErr != nil { + return nil, parseErr + } + filterOptions = append(filterOptions, consent.WithOperation(operation)) + } + + // Add target filter if provided if a.flags.target != "" { - // Clear specific target target := consent.Target(a.flags.target) - err = a.consentManager.ClearConsentByTarget(ctx, target, scope) - if err == nil { - fmt.Fprintf(a.console.Handles().Stdout, "Cleared consent for target: %s\n", a.flags.target) - } - } else { - // Get confirmation message based on context filter - confirmMessage := fmt.Sprintf("Are you sure you want to clear all consent rules for scope '%s'?", a.flags.scope) - if a.flags.operation != "" { - confirmMessage = fmt.Sprintf( - "Are you sure you want to clear all %s consent rules for scope '%s'?", - a.flags.operation, - a.flags.scope, - ) - } + filterOptions = append(filterOptions, consent.WithTarget(target)) + } - // Clear all rules for scope (with optional context filtering) - confirmed, confirmErr := a.console.Confirm(ctx, input.ConsoleOptions{ - Message: confirmMessage, - }) - if confirmErr != nil { - return nil, confirmErr + // Add action filter if provided + if a.flags.action != "" { + action, parseErr := consent.ParseActionType(a.flags.action) + if parseErr != nil { + return nil, parseErr } + filterOptions = append(filterOptions, consent.WithAction(action)) + } - if !confirmed { - fmt.Fprintf(a.console.Handles().Stdout, "Operation cancelled.\n") - return nil, nil + // Add permission filter if provided + if a.flags.permission != "" { + permission, parseErr := consent.ParsePermission(a.flags.permission) + if parseErr != nil { + return nil, parseErr } + filterOptions = append(filterOptions, consent.WithPermission(permission)) + } - if a.flags.operation != "" { - // Context-specific clearing using the new consent manager method - operation, parseErr := consent.ParseOperationType(a.flags.operation) - if parseErr != nil { - return nil, parseErr - } - - err 
= a.consentManager.ClearConsentsByOperationType(ctx, scope, operation) - if err == nil { - fmt.Fprintf( - a.console.Handles().Stdout, - "Cleared all %s consent rules for scope: %s\n", - a.flags.operation, - a.flags.scope, - ) - } - } else { - // Clear all rules for scope - err = a.consentManager.ClearConsents(ctx, scope) - if err == nil { - fmt.Fprintf(a.console.Handles().Stdout, "Cleared all consent rules for scope: %s\n", a.flags.scope) - } - } + // Build confirmation message based on filters + filterDesc := formatConsentDescription( + a.flags.scope, + a.flags.action, + a.flags.operation, + a.flags.target, + a.flags.permission, + ) + + var confirmMessage string + if filterDesc != "" { + confirmMessage = fmt.Sprintf("Are you sure you want to clear consent rules for %s?", filterDesc) + } else { + confirmMessage = "Are you sure you want to clear all consent rules?" + } + + // Get confirmation + confirmed, confirmErr := a.console.Confirm(ctx, input.ConsoleOptions{ + Message: confirmMessage, + }) + if confirmErr != nil { + return nil, confirmErr } + if !confirmed { + fmt.Fprintf(a.console.Handles().Stdout, "Operation cancelled.\n") + return nil, nil + } + + // Clear rules with filters + err := a.consentManager.ClearConsentRules(ctx, filterOptions...) if err != nil { return nil, fmt.Errorf("failed to clear consent rules: %w", err) } - return nil, nil + // Success message + if filterDesc != "" { + fmt.Fprintf(a.console.Handles().Stdout, "Cleared consent rules for %s.\n", filterDesc) + } else { + fmt.Fprintf(a.console.Handles().Stdout, "Cleared all consent rules.\n") + } + + return &actions.ActionResult{ + Message: &actions.ResultMessage{ + Header: "Consent rules cleared successfully", + }, + }, nil } // Action for MCP consent grant command @@ -502,7 +587,12 @@ func newMcpConsentGrantAction( func (a *mcpConsentGrantAction) Run(ctx context.Context) (*actions.ActionResult, error) { // Command heading - fmt.Fprintf(a.console.Handles().Stdout, "Granting MCP consent rules...\n\n") + a.console.MessageUxItem(ctx, &ux.MessageTitle{ + Title: "Grant MCP consent rules (azd mcp consent grant)", + TitleNote: "Creates consent rules that allow MCP tools to execute without prompting", + }) + + a.console.Message(ctx, "") // Validate flag combinations if a.flags.tool != "" && a.flags.server == "" { @@ -536,7 +626,7 @@ func (a *mcpConsentGrantAction) Run(ctx context.Context) (*actions.ActionResult, } // Validate rule scope - ruleScope, err := consent.ParseScope(a.flags.ruleScope) + ruleScope, err := consent.ParseScope(a.flags.scope) if err != nil { return nil, err } @@ -548,45 +638,13 @@ func (a *mcpConsentGrantAction) Run(ctx context.Context) (*actions.ActionResult, // Build target var target consent.Target - var description string if a.flags.globalFlag { target = consent.NewGlobalTarget() - if operation == consent.OperationTypeSampling { - if actionType == consent.ActionReadOnly { - description = fmt.Sprintf("all read-only sampling globally (%s)", permission) - } else { - description = fmt.Sprintf("all sampling globally (%s)", permission) - } - } else { - if actionType == consent.ActionReadOnly { - description = fmt.Sprintf("all read-only tools globally (%s)", permission) - } else { - description = fmt.Sprintf("all tools globally (%s)", permission) - } - } } else if a.flags.tool != "" { target = consent.NewToolTarget(a.flags.server, a.flags.tool) - if actionType == consent.ActionReadOnly { - description = fmt.Sprintf("read-only tool %s from server %s (%s)", a.flags.tool, a.flags.server, permission) - } else { - 
description = fmt.Sprintf("tool %s from server %s (%s)", a.flags.tool, a.flags.server, permission) - } } else { target = consent.NewServerTarget(a.flags.server) - if operation == consent.OperationTypeSampling { - if actionType == consent.ActionReadOnly { - description = fmt.Sprintf("read-only sampling from server %s (%s)", a.flags.server, permission) - } else { - description = fmt.Sprintf("all sampling from server %s (%s)", a.flags.server, permission) - } - } else { - if actionType == consent.ActionReadOnly { - description = fmt.Sprintf("read-only tools from server %s (%s)", a.flags.server, permission) - } else { - description = fmt.Sprintf("all tools from server %s (%s)", a.flags.server, permission) - } - } } rule := consent.ConsentRule{ @@ -597,11 +655,46 @@ func (a *mcpConsentGrantAction) Run(ctx context.Context) (*actions.ActionResult, Permission: permission, } - if err := a.consentManager.GrantConsent(ctx, rule, ruleScope); err != nil { + // Generate description using helper function + description := formatConsentRuleDescription(rule) + + if err := a.consentManager.GrantConsent(ctx, rule); err != nil { return nil, fmt.Errorf("failed to grant consent: %w", err) } fmt.Fprintf(a.console.Handles().Stdout, "Granted rule for %s\n", description) - return nil, nil + return &actions.ActionResult{ + Message: &actions.ResultMessage{ + Header: "Consent rule granted successfully", + }, + }, nil +} + +// formatConsentDescription creates a simple description with whatever parts exist +func formatConsentDescription(scope, action, operation, target, permission string) string { + var parts []string + + if scope != "" { + parts = append(parts, fmt.Sprintf("Scope: %s", scope)) + } + if target != "" { + parts = append(parts, fmt.Sprintf("Target: %s", target)) + } + if operation != "" { + parts = append(parts, fmt.Sprintf("Context: %s", operation)) + } + if action != "" { + parts = append(parts, fmt.Sprintf("Action: %s", action)) + } + if permission != "" { + parts = append(parts, fmt.Sprintf("Permission: %s", permission)) + } + + return strings.Join(parts, ", ") +} + +// Legacy wrapper for backward compatibility +func formatConsentRuleDescription(rule consent.ConsentRule) string { + return formatConsentDescription(string(rule.Scope), string(rule.Action), string(rule.Operation), string(rule.Target), string(rule.Permission)) } diff --git a/cli/azd/internal/agent/consent/checker.go b/cli/azd/internal/agent/consent/checker.go index 8ce16523264..2eba1befbb5 100644 --- a/cli/azd/internal/agent/consent/checker.go +++ b/cli/azd/internal/agent/consent/checker.go @@ -271,7 +271,6 @@ func (cc *ConsentChecker) grantConsentFromChoice( operation OperationType, ) error { var rule ConsentRule - var scope Scope // Parse server and tool from toolID parts := strings.Split(toolID, "/") @@ -290,7 +289,6 @@ func (cc *ConsentChecker) grantConsentFromChoice( Operation: operation, Permission: PermissionAllow, } - scope = ScopeSession case "session": rule = ConsentRule{ Scope: ScopeSession, @@ -299,7 +297,6 @@ func (cc *ConsentChecker) grantConsentFromChoice( Operation: operation, Permission: PermissionAllow, } - scope = ScopeSession case "project": rule = ConsentRule{ Scope: ScopeProject, @@ -308,7 +305,6 @@ func (cc *ConsentChecker) grantConsentFromChoice( Operation: operation, Permission: PermissionAllow, } - scope = ScopeProject case "always": rule = ConsentRule{ Scope: ScopeGlobal, @@ -317,7 +313,6 @@ func (cc *ConsentChecker) grantConsentFromChoice( Operation: operation, Permission: PermissionAllow, } - scope = ScopeGlobal 
case "server": // Grant trust to entire server rule = ConsentRule{ @@ -327,7 +322,6 @@ func (cc *ConsentChecker) grantConsentFromChoice( Operation: operation, Permission: PermissionAllow, } - scope = ScopeGlobal case "global": rule = ConsentRule{ Scope: ScopeGlobal, @@ -336,7 +330,6 @@ func (cc *ConsentChecker) grantConsentFromChoice( Operation: operation, Permission: PermissionAllow, } - scope = ScopeGlobal case "readonly_server": // Grant trust to readonly tools from this server (only for tool context) if operation != OperationTypeTool { @@ -349,7 +342,6 @@ func (cc *ConsentChecker) grantConsentFromChoice( Operation: operation, Permission: PermissionAllow, } - scope = ScopeGlobal case "readonly_global": // Grant trust to all readonly tools globally (only for tool context) if operation != OperationTypeTool { @@ -362,12 +354,11 @@ func (cc *ConsentChecker) grantConsentFromChoice( Operation: operation, Permission: PermissionAllow, } - scope = ScopeGlobal default: return fmt.Errorf("unknown consent choice: %s", choice) } - return cc.consentMgr.GrantConsent(ctx, rule, scope) + return cc.consentMgr.GrantConsent(ctx, rule) } // PromptAndGrantSamplingConsent shows sampling consent prompt and grants permission based on user choice diff --git a/cli/azd/internal/agent/consent/manager.go b/cli/azd/internal/agent/consent/manager.go index 96aa91fa2ce..bed1e6e3d5a 100644 --- a/cli/azd/internal/agent/consent/manager.go +++ b/cli/azd/internal/agent/consent/manager.go @@ -101,7 +101,7 @@ func (cm *consentManager) CheckConsent(ctx context.Context, request ConsentReque } // GrantConsent grants consent for a tool -func (cm *consentManager) GrantConsent(ctx context.Context, rule ConsentRule, scope Scope) error { +func (cm *consentManager) GrantConsent(ctx context.Context, rule ConsentRule) error { rule.GrantedAt = time.Now() // Validate the rule @@ -109,7 +109,7 @@ func (cm *consentManager) GrantConsent(ctx context.Context, rule ConsentRule, sc return fmt.Errorf("invalid consent rule: %w", err) } - switch scope { + switch rule.Scope { case ScopeSession: return cm.addSessionRule(rule) case ScopeProject: @@ -117,66 +117,55 @@ func (cm *consentManager) GrantConsent(ctx context.Context, rule ConsentRule, sc case ScopeGlobal: return cm.addGlobalRule(ctx, rule) default: - return fmt.Errorf("unknown consent scope: %s", scope) + return fmt.Errorf("unknown consent scope: %s", rule.Scope) } } -// ListConsentRules lists consent rules for a given scope -func (cm *consentManager) ListConsentRules(ctx context.Context, scope Scope) ([]ConsentRule, error) { - switch scope { - case ScopeSession: - return cm.getSessionRules(), nil - case ScopeProject: - return cm.getProjectRules(ctx) - case ScopeGlobal: - return cm.getGlobalRules(ctx) - default: - return nil, fmt.Errorf("unknown consent scope: %s", scope) +// ListConsentRules lists consent rules across all scopes with optional filtering +func (cm *consentManager) ListConsentRules(ctx context.Context, options ...FilterOption) ([]ConsentRule, error) { + // Build filter options + var opts FilterOptions + for _, option := range options { + option(&opts) } -} -// ClearConsents clears all consent rules for a given scope -func (cm *consentManager) ClearConsents(ctx context.Context, scope Scope) error { - switch scope { - case ScopeSession: - return cm.clearSessionRules() - case ScopeProject: - return cm.clearProjectRules(ctx) - case ScopeGlobal: - return cm.clearGlobalRules(ctx) - default: - return fmt.Errorf("unknown consent scope: %s", scope) + // Always query across all scopes + 
allRules := make([]ConsentRule, 0) + + // Get session rules + sessionRules := cm.getSessionRules() + for _, rule := range sessionRules { + rule.Scope = ScopeSession // Ensure scope is set + allRules = append(allRules, rule) } -} -// ClearConsentByTarget clears consent for a specific target -func (cm *consentManager) ClearConsentByTarget(ctx context.Context, target Target, scope Scope) error { - switch scope { - case ScopeSession: - return cm.removeSessionRule(target) - case ScopeProject: - return cm.removeProjectRule(ctx, target) - case ScopeGlobal: - return cm.removeGlobalRule(ctx, target) - default: - return fmt.Errorf("unknown consent scope: %s", scope) + // Get project rules if available + if cm.IsProjectScopeAvailable(ctx) { + if projectRules, err := cm.getProjectRules(ctx); err == nil { + for _, rule := range projectRules { + rule.Scope = ScopeProject // Ensure scope is set + allRules = append(allRules, rule) + } + } } -} -// ListConsentsByOperationType lists consent rules filtered by operation context for a given scope -func (cm *consentManager) ListConsentsByOperationType( - ctx context.Context, - scope Scope, - operation OperationType, -) ([]ConsentRule, error) { - allRules, err := cm.ListConsentRules(ctx, scope) - if err != nil { - return nil, err + // Get global rules + if globalRules, err := cm.getGlobalRules(ctx); err == nil { + for _, rule := range globalRules { + rule.Scope = ScopeGlobal // Ensure scope is set + allRules = append(allRules, rule) + } + } + + // Apply filters if any options are provided + if len(options) == 0 { + return allRules, nil } + // Apply all filters filteredRules := make([]ConsentRule, 0) for _, rule := range allRules { - if rule.Operation == operation { + if cm.ruleMatchesFilters(rule, opts) { filteredRules = append(filteredRules, rule) } } @@ -184,20 +173,68 @@ func (cm *consentManager) ListConsentsByOperationType( return filteredRules, nil } -// ClearConsentsByOperationType clears all consent rules of a specific operation context for a given scope -func (cm *consentManager) ClearConsentsByOperationType( - ctx context.Context, - scope Scope, - operation OperationType, -) error { - rules, err := cm.ListConsentsByOperationType(ctx, scope, operation) - if err != nil { - return fmt.Errorf("failed to list consent rules: %w", err) +// ruleMatchesFilters checks if a rule matches the given filter options +func (cm *consentManager) ruleMatchesFilters(rule ConsentRule, opts FilterOptions) bool { + // Check scope filter + if opts.Scope != nil && rule.Scope != *opts.Scope { + return false } - for _, rule := range rules { - if err := cm.ClearConsentByTarget(ctx, rule.Target, scope); err != nil { - return fmt.Errorf("failed to clear consent for target %s: %w", rule.Target, err) + // Check operation filter + if opts.Operation != nil && rule.Operation != *opts.Operation { + return false + } + + // Check target filter + if opts.Target != nil && rule.Target != *opts.Target { + return false + } + + // Check action filter + if opts.Action != nil && rule.Action != *opts.Action { + return false + } + + // Check permission filter + if opts.Permission != nil && rule.Permission != *opts.Permission { + return false + } + + return true +} + +// ClearConsentRules clears consent rules matching the specified filter options +func (cm *consentManager) ClearConsentRules(ctx context.Context, options ...FilterOption) error { + // First, get all rules that match the filter criteria + rulesToClear, err := cm.ListConsentRules(ctx, options...) 
+ if err != nil { + return fmt.Errorf("failed to list consent rules for clearing: %w", err) + } + + // Group rules by scope for efficient clearing + rulesByScope := make(map[Scope][]ConsentRule) + for _, rule := range rulesToClear { + rulesByScope[rule.Scope] = append(rulesByScope[rule.Scope], rule) + } + + // Clear rules by scope + for scope, rules := range rulesByScope { + for _, rule := range rules { + var err error + switch scope { + case ScopeSession: + err = cm.removeSessionRule(rule.Target) + case ScopeProject: + err = cm.removeProjectRule(ctx, rule.Target) + case ScopeGlobal: + err = cm.removeGlobalRule(ctx, rule.Target) + default: + err = fmt.Errorf("unknown consent scope: %s", scope) + } + + if err != nil { + return fmt.Errorf("failed to clear consent for target %s in scope %s: %w", rule.Target, scope, err) + } } } @@ -395,61 +432,6 @@ func (cm *consentManager) getGlobalConsentConfig(ctx context.Context) (*ConsentC return &consentConfig, nil } -// clearSessionRules clears all rules for this session -func (cm *consentManager) clearSessionRules() error { - cm.sessionMutex.Lock() - defer cm.sessionMutex.Unlock() - - cm.sessionRules = make([]ConsentRule, 0) - return nil -} - -// clearGlobalRules clears all global consent rules -func (cm *consentManager) clearGlobalRules(ctx context.Context) error { - userConfig, err := cm.userConfigManager.Load() - if err != nil { - return fmt.Errorf("failed to load user config: %w", err) - } - - consentConfig := ConsentConfig{ - Rules: []ConsentRule{}, - } - - if err := userConfig.Set(ConfigKeyMCPConsent, consentConfig); err != nil { - return fmt.Errorf("failed to clear consent config: %w", err) - } - - return cm.userConfigManager.Save(userConfig) -} - -// clearProjectRules clears all project-level consent rules -func (cm *consentManager) clearProjectRules(ctx context.Context) error { - if !cm.IsProjectScopeAvailable(ctx) { - return fmt.Errorf("project scope is not available (no environment context)") - } - - envManager, err := cm.lazyEnvManager.GetValue() - if err != nil { - return fmt.Errorf("no environment available for project-level consent: %w", err) - } - - // Get the current environment - env, err := envManager.Get(ctx, "") - if err != nil { - return fmt.Errorf("failed to get current environment: %w", err) - } - - consentConfig := ConsentConfig{ - Rules: []ConsentRule{}, - } - - if err := env.Config.Set(ConfigKeyMCPConsent, consentConfig); err != nil { - return fmt.Errorf("failed to clear consent config in environment: %w", err) - } - - return envManager.Save(ctx, env) -} - // removeSessionRule removes a specific rule from session rules func (cm *consentManager) removeSessionRule(target Target) error { cm.sessionMutex.Lock() diff --git a/cli/azd/internal/agent/consent/types.go b/cli/azd/internal/agent/consent/types.go index 6f05719d1e1..eb6154cba21 100644 --- a/cli/azd/internal/agent/consent/types.go +++ b/cli/azd/internal/agent/consent/types.go @@ -157,6 +157,53 @@ func ParsePermission(permissionStr string) (Permission, error) { return "", fmt.Errorf("invalid permission: %s (allowed: %v)", permissionStr, AllowedPermissions) } +// FilterOption represents a functional option for filtering consent rules +type FilterOption func(*FilterOptions) + +// FilterOptions contains the filtering options for listing consent rules +type FilterOptions struct { + Scope *Scope + Operation *OperationType + Target *Target + Action *ActionType + Permission *Permission +} + +// WithScope filters rules by scope +func WithScope(scope Scope) FilterOption { + return 
func(opts *FilterOptions) { + opts.Scope = &scope + } +} + +// WithOperation filters rules by operation type +func WithOperation(operation OperationType) FilterOption { + return func(opts *FilterOptions) { + opts.Operation = &operation + } +} + +// WithTarget filters rules by target pattern +func WithTarget(target Target) FilterOption { + return func(opts *FilterOptions) { + opts.Target = &target + } +} + +// WithAction filters rules by action type +func WithAction(action ActionType) FilterOption { + return func(opts *FilterOptions) { + opts.Action = &action + } +} + +// WithPermission filters rules by permission type +func WithPermission(permission Permission) FilterOption { + return func(opts *FilterOptions) { + opts.Permission = &permission + } +} + // ConsentRule represents a single consent rule entry type ConsentRule struct { Scope Scope `json:"scope"` @@ -249,16 +296,9 @@ type ConsentDecision struct { // ConsentManager manages consent rules and decisions type ConsentManager interface { CheckConsent(ctx context.Context, request ConsentRequest) (*ConsentDecision, error) - GrantConsent(ctx context.Context, rule ConsentRule, scope Scope) error - ListConsentRules(ctx context.Context, scope Scope) ([]ConsentRule, error) - ListConsentsByOperationType( - ctx context.Context, - scope Scope, - operation OperationType, - ) ([]ConsentRule, error) - ClearConsents(ctx context.Context, scope Scope) error - ClearConsentsByOperationType(ctx context.Context, scope Scope, operation OperationType) error - ClearConsentByTarget(ctx context.Context, target Target, scope Scope) error + GrantConsent(ctx context.Context, rule ConsentRule) error + ListConsentRules(ctx context.Context, options ...FilterOption) ([]ConsentRule, error) + ClearConsentRules(ctx context.Context, options ...FilterOption) error // Environment context methods IsProjectScopeAvailable(ctx context.Context) bool From 22476af570a8cddb8ba37b49ef1b2fb38d644900 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Wed, 13 Aug 2025 11:57:45 -0700 Subject: [PATCH 069/116] Renamed 'clear' to 'revoke' --- cli/azd/cmd/mcp.go | 67 ++++++++++++++++++---------------------------- 1 file changed, 26 insertions(+), 41 deletions(-) diff --git a/cli/azd/cmd/mcp.go b/cli/azd/cmd/mcp.go index 17394894508..11076df92be 100644 --- a/cli/azd/cmd/mcp.go +++ b/cli/azd/cmd/mcp.go @@ -72,18 +72,18 @@ azd functionality through the Model Context Protocol interface.`, FlagsResolver: newMcpConsentListFlags, }) - // azd mcp consent clear - consentGroup.Add("clear", &actions.ActionDescriptorOptions{ + // azd mcp consent revoke + consentGroup.Add("revoke", &actions.ActionDescriptorOptions{ Command: &cobra.Command{ - Use: "clear", - Short: "Clear consent rules.", - Long: "Clear consent rules for MCP tools.", + Use: "revoke", + Short: "Revoke consent rules.", + Long: "Revoke consent rules for MCP tools.", Args: cobra.NoArgs, }, OutputFormats: []output.Format{output.NoneFormat}, DefaultFormat: output.NoneFormat, - ActionResolver: newMcpConsentClearAction, - FlagsResolver: newMcpConsentClearFlags, + ActionResolver: newMcpConsentRevokeAction, + FlagsResolver: newMcpConsentRevokeFlags, }) // azd mcp consent grant @@ -111,21 +111,6 @@ Examples: FlagsResolver: newMcpConsentGrantFlags, }) - // TODO: Re-implement revoke command with new structure - // azd mcp consent revoke - // consentGroup.Add("revoke", &actions.ActionDescriptorOptions{ - // Command: &cobra.Command{ - // Use: "revoke", - // Short: "Revoke consent trust rules.", - // Long: "Revoke specific consent rules for MCP tools 
and servers.", - // Args: cobra.NoArgs, - // }, - // OutputFormats: []output.Format{output.NoneFormat}, - // DefaultFormat: output.NoneFormat, - // ActionResolver: newMcpConsentRevokeAction, - // FlagsResolver: newMcpConsentRevokeFlags, - // }) - return group } @@ -247,8 +232,8 @@ func (f *mcpConsentGrantFlags) Bind(local *pflag.FlagSet, global *internal.Globa local.StringVar(&f.scope, "scope", "global", "Rule scope: 'global', or 'project'") } -// Flags for MCP consent clear command -type mcpConsentClearFlags struct { +// Flags for MCP consent revoke command +type mcpConsentRevokeFlags struct { global *internal.GlobalCommandOptions scope string target string @@ -257,19 +242,19 @@ type mcpConsentClearFlags struct { permission string } -func newMcpConsentClearFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpConsentClearFlags { - flags := &mcpConsentClearFlags{} +func newMcpConsentRevokeFlags(cmd *cobra.Command, global *internal.GlobalCommandOptions) *mcpConsentRevokeFlags { + flags := &mcpConsentRevokeFlags{} flags.Bind(cmd.Flags(), global) return flags } -func (f *mcpConsentClearFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { +func (f *mcpConsentRevokeFlags) Bind(local *pflag.FlagSet, global *internal.GlobalCommandOptions) { f.global = global local.StringVar( &f.scope, "scope", "", - "Consent scope to filter by (global, project). If not specified, clears rules from all scopes.", + "Consent scope to filter by (global, project). If not specified, revokes rules from all scopes.", ) local.StringVar(&f.target, "target", "", "Specific target to operate on (server/tool format)") local.StringVar(&f.operation, "operation", "", "Operation to filter by (tool, sampling)") @@ -438,21 +423,21 @@ func (a *mcpConsentListAction) Run(ctx context.Context) (*actions.ActionResult, return nil, a.formatter.Format(displayRules, a.writer, nil) } -// Action for MCP consent clear command -type mcpConsentClearAction struct { - flags *mcpConsentClearFlags +// Action for MCP consent revoke command +type mcpConsentRevokeAction struct { + flags *mcpConsentRevokeFlags console input.Console userConfigManager config.UserConfigManager consentManager consent.ConsentManager } -func newMcpConsentClearAction( - flags *mcpConsentClearFlags, +func newMcpConsentRevokeAction( + flags *mcpConsentRevokeFlags, console input.Console, userConfigManager config.UserConfigManager, consentManager consent.ConsentManager, ) actions.Action { - return &mcpConsentClearAction{ + return &mcpConsentRevokeAction{ flags: flags, console: console, userConfigManager: userConfigManager, @@ -460,10 +445,10 @@ func newMcpConsentClearAction( } } -func (a *mcpConsentClearAction) Run(ctx context.Context) (*actions.ActionResult, error) { +func (a *mcpConsentRevokeAction) Run(ctx context.Context) (*actions.ActionResult, error) { // Command heading a.console.MessageUxItem(ctx, &ux.MessageTitle{ - Title: "Clear MCP consent rules (azd mcp consent clear)", + Title: "Revoke MCP consent rules (azd mcp consent revoke)", TitleNote: "Removes consent rules for MCP tools and servers", }) @@ -525,9 +510,9 @@ func (a *mcpConsentClearAction) Run(ctx context.Context) (*actions.ActionResult, var confirmMessage string if filterDesc != "" { - confirmMessage = fmt.Sprintf("Are you sure you want to clear consent rules for %s?", filterDesc) + confirmMessage = fmt.Sprintf("Are you sure you want to revoke consent rules for %s?", filterDesc) } else { - confirmMessage = "Are you sure you want to clear all consent rules?" 
+ confirmMessage = "Are you sure you want to revoke all consent rules?" } // Get confirmation @@ -551,14 +536,14 @@ func (a *mcpConsentClearAction) Run(ctx context.Context) (*actions.ActionResult, // Success message if filterDesc != "" { - fmt.Fprintf(a.console.Handles().Stdout, "Cleared consent rules for %s.\n", filterDesc) + fmt.Fprintf(a.console.Handles().Stdout, "Revoked consent rules for %s.\n", filterDesc) } else { - fmt.Fprintf(a.console.Handles().Stdout, "Cleared all consent rules.\n") + fmt.Fprintf(a.console.Handles().Stdout, "Revoked all consent rules.\n") } return &actions.ActionResult{ Message: &actions.ResultMessage{ - Header: "Consent rules cleared successfully", + Header: "Consent rules revoked successfully", }, }, nil } From 59c56945291064c4677ce87f61105f28faf8af53 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Fri, 15 Aug 2025 11:44:37 -0700 Subject: [PATCH 070/116] Addresses linter issues --- cli/azd/cmd/mcp.go | 8 +++++++- cli/azd/internal/agent/agent_factory.go | 3 +++ cli/azd/internal/agent/tools/common/utils.go | 3 +++ cli/azd/internal/mcp/tools/azd_sample.go | 3 +++ 4 files changed, 16 insertions(+), 1 deletion(-) diff --git a/cli/azd/cmd/mcp.go b/cli/azd/cmd/mcp.go index 11076df92be..b4aba22f350 100644 --- a/cli/azd/cmd/mcp.go +++ b/cli/azd/cmd/mcp.go @@ -681,5 +681,11 @@ func formatConsentDescription(scope, action, operation, target, permission strin // Legacy wrapper for backward compatibility func formatConsentRuleDescription(rule consent.ConsentRule) string { - return formatConsentDescription(string(rule.Scope), string(rule.Action), string(rule.Operation), string(rule.Target), string(rule.Permission)) + return formatConsentDescription( + string(rule.Scope), + string(rule.Action), + string(rule.Operation), + string(rule.Target), + string(rule.Permission), + ) } diff --git a/cli/azd/internal/agent/agent_factory.go b/cli/azd/internal/agent/agent_factory.go index 864a85caee0..28de495a073 100644 --- a/cli/azd/internal/agent/agent_factory.go +++ b/cli/azd/internal/agent/agent_factory.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package agent import ( diff --git a/cli/azd/internal/agent/tools/common/utils.go b/cli/azd/internal/agent/tools/common/utils.go index 352ab27d5be..1897e3c2cb6 100644 --- a/cli/azd/internal/agent/tools/common/utils.go +++ b/cli/azd/internal/agent/tools/common/utils.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package common import "github.com/tmc/langchaingo/tools" diff --git a/cli/azd/internal/mcp/tools/azd_sample.go b/cli/azd/internal/mcp/tools/azd_sample.go index 61defeb8cf7..5b40666e502 100644 --- a/cli/azd/internal/mcp/tools/azd_sample.go +++ b/cli/azd/internal/mcp/tools/azd_sample.go @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+
 package tools
 import (

From e324121eefb299f3c7d0d8bf8e57f43704b44721 Mon Sep 17 00:00:00 2001
From: Wallace Breza
Date: Fri, 15 Aug 2025 18:23:03 -0700
Subject: [PATCH 071/116] Update azure yaml validation tool

---
 cli/azd/.vscode/cspell.yaml | 5 +
 cli/azd/cmd/init.go | 18 +-
 .../internal/agent/conversational_agent.go | 7 +-
 .../internal/agent/logging/action_logger.go | 13 +-
 cli/azd/internal/agent/one_shot_agent.go | 2 +-
 cli/azd/internal/mcp/tools/azd_yaml_schema.go | 125 +-
 .../mcp/tools/azd_yaml_schema_test.go | 191 ++
 .../tools/prompts/azd_iac_generation_rules.md | 2 -
 .../prompts/azd_infrastructure_generation.md | 15 +-
 .../mcp/tools/prompts/azure.yaml.json | 1819 -----------------
 cli/azd/internal/mcp/tools/prompts/prompts.go | 3 -
 cli/azd/internal/mcp/tools/types.go | 10 +
 cli/azd/pkg/apphost/manifest.go | 9 +-
 cli/azd/pkg/output/colors.go | 8 +
 go.mod | 2 +
 go.sum | 4 +
 16 files changed, 373 insertions(+), 1860 deletions(-)
 create mode 100644 cli/azd/internal/mcp/tools/azd_yaml_schema_test.go
 delete mode 100644 cli/azd/internal/mcp/tools/prompts/azure.yaml.json
 create mode 100644 cli/azd/internal/mcp/tools/types.go

diff --git a/cli/azd/.vscode/cspell.yaml b/cli/azd/.vscode/cspell.yaml
index 16fd4325393..46f93d37e85 100644
--- a/cli/azd/.vscode/cspell.yaml
+++ b/cli/azd/.vscode/cspell.yaml
@@ -182,6 +182,11 @@ overrides:
       - golines
       - technicalterm
       - Errorf
+  - filename: internal/mcp/tools/azd_yaml_schema.go
+    words:
+      - santhosh
+      - tekuri
+      - jsonschema
 ignorePaths:
   - "**/*_test.go"
   - "**/mock*.go"
diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go
index 23b0451176c..c3ebec7116e 100644
--- a/cli/azd/cmd/init.go
+++ b/cli/azd/cmd/init.go
@@ -352,8 +352,8 @@ func (i *initAction) Run(ctx context.Context) (*actions.ActionResult, error) {
 		header = fmt.Sprintf("Initialized environment %s.", env.Name())
 		followUp = ""
-	case initWithCopilot:
-		if err := i.initAppWithCopilot(ctx); err != nil {
+	case initWithAgent:
+		if err := i.initAppWithAgent(ctx); err != nil {
 			return nil, err
 		}
 	default:
@@ -372,7 +372,7 @@ func (i *initAction) Run(ctx context.Context) (*actions.ActionResult, error) {
 	}, nil
 }
-func (i *initAction) initAppWithCopilot(ctx context.Context) error {
+func (i *initAction) initAppWithAgent(ctx context.Context) error {
 	// Warn user that this is an alpha feature
 	i.console.WarnForFeature(ctx, llm.FeatureLlm)
@@ -455,7 +455,7 @@ Do not stop until all tasks are complete and fully resolved.
i.console.StopSpinner(ctx, step.Name, input.StepDone) i.console.Message(ctx, "") - i.console.Message(ctx, color.MagentaString("🤖 AZD Copilot:")) + i.console.Message(ctx, fmt.Sprintf("%s:", output.AzdAgentLabel())) i.console.Message(ctx, output.WithMarkdown(agentOutput)) i.console.Message(ctx, "") } @@ -519,7 +519,7 @@ func (i *initAction) collectAndApplyFeedback( i.console.StopSpinner(ctx, "Submitting feedback", input.StepDone) i.console.Message(ctx, "") - i.console.Message(ctx, color.MagentaString("🤖 AZD Copilot:")) + i.console.Message(ctx, fmt.Sprintf("%s:", output.AzdAgentLabel())) i.console.Message(ctx, output.WithMarkdown(feedbackOutput)) i.console.Message(ctx, "") } @@ -544,7 +544,7 @@ const ( initFromApp initAppTemplate initEnvironment - initWithCopilot + initWithAgent ) func promptInitType(console input.Console, ctx context.Context, featuresManager *alpha.FeatureManager) (initType, error) { @@ -553,9 +553,9 @@ func promptInitType(console input.Console, ctx context.Context, featuresManager "Select a template", } - // Only include AZD Copilot option if the LLM feature is enabled + // Only include AZD agent option if the LLM feature is enabled if featuresManager.IsEnabled(llm.FeatureLlm) { - options = append(options, fmt.Sprintf("AZD Copilot %s", color.YellowString("(Alpha)"))) + options = append(options, fmt.Sprintf("%s %s", output.AzdAgentLabel(), color.YellowString("(Alpha)"))) } selection, err := console.Select(ctx, input.ConsoleOptions{ @@ -574,7 +574,7 @@ func promptInitType(console input.Console, ctx context.Context, featuresManager case 2: // Only return initWithCopilot if the LLM feature is enabled and we have 3 options if featuresManager.IsEnabled(llm.FeatureLlm) { - return initWithCopilot, nil + return initWithAgent, nil } fallthrough default: diff --git a/cli/azd/internal/agent/conversational_agent.go b/cli/azd/internal/agent/conversational_agent.go index 9115e2fe719..5dcb2a1ae19 100644 --- a/cli/azd/internal/agent/conversational_agent.go +++ b/cli/azd/internal/agent/conversational_agent.go @@ -23,7 +23,7 @@ import ( //go:embed prompts/conversational.txt var conversational_prompt_template string -// ConversationalAzdAiAgent represents an enhanced AZD Copilot agent with conversation memory, +// ConversationalAzdAiAgent represents an enhanced `azd` agent with conversation memory, // tool filtering, and interactive capabilities type ConversationalAzdAiAgent struct { *agentBase @@ -91,9 +91,6 @@ func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, args ...st // It accepts an optional initial query and handles user input/output with proper formatting. // The conversation continues until the user types "exit" or "quit". func (aai *ConversationalAzdAiAgent) StartConversation(ctx context.Context, args ...string) (string, error) { - fmt.Println("🤖 AZD Copilot - Interactive Mode") - fmt.Println("═══════════════════════════════════════════════════════════") - // Handle initial query if provided var initialQuery string if len(args) > 0 { @@ -126,7 +123,7 @@ func (aai *ConversationalAzdAiAgent) StartConversation(ctx context.Context, args } if strings.ToLower(userInput) == "exit" || strings.ToLower(userInput) == "quit" { - fmt.Println("👋 Goodbye! Thanks for using AZD Copilot!") + fmt.Println("👋 Goodbye! 
Thanks for using azd Agent!") break } diff --git a/cli/azd/internal/agent/logging/action_logger.go b/cli/azd/internal/agent/logging/action_logger.go index 94a36192ffa..786b14d51e0 100644 --- a/cli/azd/internal/agent/logging/action_logger.go +++ b/cli/azd/internal/agent/logging/action_logger.go @@ -10,6 +10,7 @@ import ( "regexp" "strings" + "github.com/azure/azure-dev/cli/azd/pkg/output" "github.com/fatih/color" "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/llms" @@ -77,7 +78,7 @@ func (al *ActionLogger) HandleLLMGenerateContentEnd(ctx context.Context, res *ll if thought != "" { // Skip thoughts that contain "Do I need to use a tool?" if !strings.Contains(strings.ToLower(thought), "do i need to use a tool?") { - color.White("\n🤖 Agent: %s\n", thought) + color.White("\n%s: %s\n", output.AzdAgentLabel(), thought) } } } @@ -197,21 +198,21 @@ func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.Age if len(params) > 0 { paramStr = strings.Join(params, ", ") paramStr = truncateString(paramStr, 100) - output := fmt.Sprintf("\n🤖 Agent: Calling %s tool with %s\n", action.Tool, paramStr) + output := fmt.Sprintf("\n%s: Calling %s tool with %s\n", output.AzdAgentLabel(), action.Tool, paramStr) color.Green(output) } else { - output := fmt.Sprintf("\n🤖 Agent: Calling %s tool\n", action.Tool) + output := fmt.Sprintf("\n%s: Calling %s tool\n", output.AzdAgentLabel(), action.Tool) color.Green(output) } } else { // JSON parsing failed, show the input as text with truncation toolInput := strings.TrimSpace(action.ToolInput) if toolInput == "" { - output := fmt.Sprintf("\n🤖 Agent: Calling %s tool\n", action.Tool) + output := fmt.Sprintf("\n%s: Calling %s tool\n", output.AzdAgentLabel(), action.Tool) color.Green(output) } else { toolInput = truncateString(toolInput, 100) - color.Green("\n🤖 Agent: Calling %s tool with %s\n", action.Tool, toolInput) + color.Green("\n%s: Calling %s tool with %s\n", output.AzdAgentLabel(), action.Tool, toolInput) } } } @@ -231,7 +232,7 @@ func (al *ActionLogger) HandleAgentFinish(ctx context.Context, finish schema.Age if len(matches) > 1 { summary := strings.TrimSpace(matches[1]) - color.White("\n🤖 Agent: %s\n", summary) + color.White("\n%s: %s\n", output.AzdAgentLabel(), summary) } // If "AI:" not found, don't print anything } diff --git a/cli/azd/internal/agent/one_shot_agent.go b/cli/azd/internal/agent/one_shot_agent.go index 40742518945..af37bea9798 100644 --- a/cli/azd/internal/agent/one_shot_agent.go +++ b/cli/azd/internal/agent/one_shot_agent.go @@ -15,7 +15,7 @@ import ( "github.com/tmc/langchaingo/prompts" ) -// OneShotAzdAiAgent represents an AZD Copilot agent designed for single-request processing +// OneShotAzdAiAgent represents an `azd` agent designed for single-request processing // without conversation memory, optimized for one-time queries and responses type OneShotAzdAiAgent struct { *agentBase diff --git a/cli/azd/internal/mcp/tools/azd_yaml_schema.go b/cli/azd/internal/mcp/tools/azd_yaml_schema.go index b132078bb4f..a5b7e1b0732 100644 --- a/cli/azd/internal/mcp/tools/azd_yaml_schema.go +++ b/cli/azd/internal/mcp/tools/azd_yaml_schema.go @@ -5,30 +5,141 @@ package tools import ( "context" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "time" - "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" "github.com/mark3labs/mcp-go/mcp" "github.com/mark3labs/mcp-go/server" + "github.com/santhosh-tekuri/jsonschema/v6" + "gopkg.in/yaml.v3" ) // NewAzdYamlSchemaTool creates a new azd yaml schema tool func 
NewAzdYamlSchemaTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( - "azd_yaml_schema", + "azd_validate_azure_yaml", mcp.WithReadOnlyHintAnnotation(true), mcp.WithIdempotentHintAnnotation(true), mcp.WithDestructiveHintAnnotation(false), mcp.WithOpenWorldHintAnnotation(false), mcp.WithDescription( - `Gets the Azure YAML JSON schema file specification and structure for azure.yaml `+ - `configuration files used in AZD.`, + `Validates an azure.yaml against the official azure.yaml JSON schema and returns the results.`, + ), + mcp.WithString("path", + mcp.Description("Path to the azure.yaml file"), + mcp.Required(), ), ), - Handler: handleAzdYamlSchema, + Handler: HandleAzdYamlSchema, } } -func handleAzdYamlSchema(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return mcp.NewToolResultText(prompts.AzdYamlSchemaPrompt), nil +func HandleAzdYamlSchema(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + azureYamlPath, err := request.RequireString("path") + if err != nil { + return mcp.NewToolResultError(err.Error()), nil + } + + f, err := os.Open(azureYamlPath) + if err != nil { + return errorResult("azure.yaml not found: " + err.Error()), nil + } + defer f.Close() + + var yamlData interface{} + yamlBytes, err := io.ReadAll(f) + if err != nil { + return errorResult("Failed to read azure.yaml: " + err.Error()), nil + } + if err := yaml.Unmarshal(yamlBytes, &yamlData); err != nil { + return errorResult("Failed to unmarshal azure.yaml: " + err.Error()), nil + } + jsonBytes, err := json.Marshal(yamlData) + if err != nil { + return errorResult("Failed to marshal azure.yaml to JSON: " + err.Error()), nil + } + + var jsonObj interface{} + if err := json.Unmarshal(jsonBytes, &jsonObj); err != nil { + return errorResult("Failed to unmarshal JSON: " + err.Error()), nil + } + + // Attempt to validate against stable and alpha schemas + schemas := []struct { + url string + result string + }{ + { + "https://raw.githubusercontent.com/Azure/azure-dev/refs/heads/main/schemas/v1.0/azure.yaml.json", + "azure.yaml is valid against the stable schema.", + }, + { + "https://raw.githubusercontent.com/Azure/azure-dev/refs/heads/main/schemas/alpha/azure.yaml.json", + "azure.yaml is valid against the alpha schema.", + }, + } + + loader := jsonschema.SchemeURLLoader{ + "file": jsonschema.FileLoader{}, + "https": newHttpsUrlLoader(), + } + + var validationErr error + + for _, s := range schemas { + compiler := jsonschema.NewCompiler() + compiler.UseLoader(loader) + + schema, err := compiler.Compile(s.url) + if err == nil { + if err := schema.Validate(jsonObj); err == nil { + return mcp.NewToolResultText(s.result), nil + } else { + validationErr = err + } + } + } + + if validationErr != nil { + return errorResult(validationErr.Error()), nil + } + + return errorResult("an error occurred while validating azure.yaml"), nil +} + +func errorResult(msg string) *mcp.CallToolResult { + resp := ErrorResponse{Error: true, Message: msg} + jsonResp, _ := json.MarshalIndent(resp, "", " ") + return mcp.NewToolResultText(string(jsonResp)) +} + +type httpsUrlLoader http.Client + +func (l *httpsUrlLoader) Load(url string) (any, error) { + client := (*http.Client)(l) + resp, err := client.Get(url) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + _ = resp.Body.Close() + return nil, fmt.Errorf("%s returned status code %d", url, resp.StatusCode) + } + defer resp.Body.Close() + + return jsonschema.UnmarshalJSON(resp.Body) +} + 
+func newHttpsUrlLoader() *httpsUrlLoader { + httpLoader := httpsUrlLoader(http.Client{ + Timeout: 15 * time.Second, + }) + + return &httpLoader } diff --git a/cli/azd/internal/mcp/tools/azd_yaml_schema_test.go b/cli/azd/internal/mcp/tools/azd_yaml_schema_test.go new file mode 100644 index 00000000000..295d394bce0 --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_yaml_schema_test.go @@ -0,0 +1,191 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tools + +import ( + "context" + "os" + "testing" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/stretchr/testify/require" +) + +func getText(result *mcp.CallToolResult) string { + if len(result.Content) > 0 { + if txt, ok := result.Content[0].(mcp.TextContent); ok { + return txt.Text + } + } + return "" +} + +func TestHandleAzdYamlSchema_ValidYaml(t *testing.T) { + // Arrange + tmpDir := t.TempDir() + tmpFile, err := os.CreateTemp(tmpDir, "azure.yaml") + require.NoError(t, err) + + validYaml := []byte("name: testapp\n") + _, err = tmpFile.Write(validYaml) + require.NoError(t, err) + tmpFile.Close() + yamlPath := tmpFile.Name() + + oldWd, _ := os.Getwd() + os.Chdir(tmpDir) + defer os.Chdir(oldWd) + os.Rename(yamlPath, "azure.yaml") + defer os.Remove("azure.yaml") + + req := mcp.CallToolRequest{} + req.Params.Arguments = map[string]any{"path": "azure.yaml"} + + // Act + result, err := HandleAzdYamlSchema(context.Background(), req) + + // Assert + require.NoError(t, err) + text := getText(result) + require.Contains(t, text, "azure.yaml is valid against the stable schema.") +} + +func TestHandleAzdYamlSchema_MissingYaml(t *testing.T) { + // Arrange + tmpDir := t.TempDir() + oldWd, _ := os.Getwd() + os.Chdir(tmpDir) + defer os.Chdir(oldWd) + + req := mcp.CallToolRequest{} + req.Params.Arguments = map[string]any{"path": "azure.yaml"} + + // Act + result, err := HandleAzdYamlSchema(context.Background(), req) + + // Assert + require.NoError(t, err) + text := getText(result) + require.Contains(t, text, "azure.yaml not found") +} + +func TestHandleAzdYamlSchema_InvalidYaml(t *testing.T) { + // Arrange + tmpDir := t.TempDir() + tmpFile, err := os.CreateTemp(tmpDir, "azure.yaml") + require.NoError(t, err) + + invalidYaml := []byte("name: !@#$\n") + _, err = tmpFile.Write(invalidYaml) + require.NoError(t, err) + tmpFile.Close() + yamlPath := tmpFile.Name() + + oldWd, _ := os.Getwd() + os.Chdir(tmpDir) + defer os.Chdir(oldWd) + os.Rename(yamlPath, "azure.yaml") + defer os.Remove("azure.yaml") + + req := mcp.CallToolRequest{} + req.Params.Arguments = map[string]any{"path": "azure.yaml"} + + // Act + result, err := HandleAzdYamlSchema(context.Background(), req) + + // Assert + require.NoError(t, err) + text := getText(result) + require.Contains(t, text, "Failed to unmarshal azure.yaml") +} + +func TestHandleAzdYamlSchema_YamlNotValidSyntax(t *testing.T) { + // Arrange + tmpDir := t.TempDir() + tmpFile, err := os.CreateTemp(tmpDir, "azure.yaml") + require.NoError(t, err) + + invalidYaml := []byte("name: !@#$\n:bad") // not valid YAML syntax + _, err = tmpFile.Write(invalidYaml) + require.NoError(t, err) + tmpFile.Close() + yamlPath := tmpFile.Name() + + oldWd, _ := os.Getwd() + os.Chdir(tmpDir) + defer os.Chdir(oldWd) + os.Rename(yamlPath, "azure.yaml") + defer os.Remove("azure.yaml") + + req := mcp.CallToolRequest{} + req.Params.Arguments = map[string]any{"path": "azure.yaml"} + + // Act + result, err := HandleAzdYamlSchema(context.Background(), req) + + // Assert + require.NoError(t, err) + text 
:= getText(result) + require.Contains(t, text, "Failed to unmarshal azure.yaml") +} + +func TestHandleAzdYamlSchema_YamlValidButSchemaInvalid(t *testing.T) { + // Arrange + tmpDir := t.TempDir() + tmpFile, err := os.CreateTemp(tmpDir, "azure.yaml") + require.NoError(t, err) + + invalidSchemaYaml := []byte("not_a_schema_field: true\n") // valid YAML, but not valid against schema + _, err = tmpFile.Write(invalidSchemaYaml) + require.NoError(t, err) + tmpFile.Close() + yamlPath := tmpFile.Name() + + oldWd, _ := os.Getwd() + os.Chdir(tmpDir) + defer os.Chdir(oldWd) + os.Rename(yamlPath, "azure.yaml") + defer os.Remove("azure.yaml") + + req := mcp.CallToolRequest{} + req.Params.Arguments = map[string]any{"path": "azure.yaml"} + + // Act + result, err := HandleAzdYamlSchema(context.Background(), req) + + // Assert + require.NoError(t, err) + text := getText(result) + require.Contains(t, text, "additional properties 'not_a_schema_field' not allowed") +} + +func TestHandleAzdYamlSchema_InvalidYaml_Structural(t *testing.T) { + // Arrange + tmpDir := t.TempDir() + tmpFile, err := os.CreateTemp(tmpDir, "azure.yaml") + require.NoError(t, err) + + invalidYaml := []byte("name: 123\n") // valid YAML, but not valid type for schema + _, err = tmpFile.Write(invalidYaml) + require.NoError(t, err) + tmpFile.Close() + yamlPath := tmpFile.Name() + + oldWd, _ := os.Getwd() + os.Chdir(tmpDir) + defer os.Chdir(oldWd) + os.Rename(yamlPath, "azure.yaml") + defer os.Remove("azure.yaml") + + req := mcp.CallToolRequest{} + req.Params.Arguments = map[string]any{"path": "azure.yaml"} + + // Act + result, err := HandleAzdYamlSchema(context.Background(), req) + + // Assert + require.NoError(t, err) + text := getText(result) + require.Contains(t, text, "jsonschema validation failed") +} diff --git a/cli/azd/internal/mcp/tools/prompts/azd_iac_generation_rules.md b/cli/azd/internal/mcp/tools/prompts/azd_iac_generation_rules.md index 49d7bb80107..5119022b711 100644 --- a/cli/azd/internal/mcp/tools/prompts/azd_iac_generation_rules.md +++ b/cli/azd/internal/mcp/tools/prompts/azd_iac_generation_rules.md @@ -161,5 +161,3 @@ var resourceName = '${name}-${resourceSuffix}' - [ ] No hard-coded secrets, tenant IDs, or subscription IDs - [ ] Parameters have appropriate validation decorators - [ ] Security best practices followed (Key Vault, managed identities, diagnostics) -- [ ] Bicep CLI validation passes without errors (`az bicep build`) -- [ ] Deployment validation successful (`az deployment sub validate`) diff --git a/cli/azd/internal/mcp/tools/prompts/azd_infrastructure_generation.md b/cli/azd/internal/mcp/tools/prompts/azd_infrastructure_generation.md index f7b902bb239..3f35111d600 100644 --- a/cli/azd/internal/mcp/tools/prompts/azd_infrastructure_generation.md +++ b/cli/azd/internal/mcp/tools/prompts/azd_infrastructure_generation.md @@ -2,16 +2,17 @@ ✅ **Agent Task List** -1. Use `azd_iac_generation_rules` tool to get complete IaC rules and conventions -2. **Inventory existing IaC files** - scan current working directory for all `.bicep` files -3. Read `azd-arch-plan.md` to get the **IaC File Generation Checklist** -4. Create directory structure in `./infra` following IaC rules -5. For each file in the IaC checklist: +1. Strictly follow Azure and Bicep best practices in all code generation +2. Strictly follow AZD IaC generation rules during all code generation +3. **Inventory existing IaC files** - scan current working directory for all `.bicep` files +4. Read `azd-arch-plan.md` to get the **IaC File Generation Checklist** +5. 
Create directory structure in `./infra` following IaC rules +6. For each file in the IaC checklist: - **If file exists**: Intelligently update to match requirements, preserve user customizations where possible - **If file missing**: Generate new file following templates and best practices - **Flag conflicts**: Note any incompatible configurations but proceed with updates -6. Validate all generated bicep templates compile without errors or warnings -7. Update the IaC checklist section in existing `azd-arch-plan.md` by marking completed files as [x] while preserving existing content +7. Validate all generated bicep templates compile without errors or warnings +8. Update the IaC checklist section in existing `azd-arch-plan.md` by marking completed files as [x] while preserving existing content 📄 **Required Outputs** diff --git a/cli/azd/internal/mcp/tools/prompts/azure.yaml.json b/cli/azd/internal/mcp/tools/prompts/azure.yaml.json deleted file mode 100644 index 747fd7fa649..00000000000 --- a/cli/azd/internal/mcp/tools/prompts/azure.yaml.json +++ /dev/null @@ -1,1819 +0,0 @@ -{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://raw.githubusercontent.com/Azure/azure-dev/main/schemas/v1.0/azure.yaml.json", - "type": "object", - "required": [ - "name" - ], - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "minLength": 2, - "title": "Name of the application", - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$", - "description": "The application name. Only lowercase letters, numbers, and hyphens (-) are allowed. The name must start and end with a letter or number." - }, - "resourceGroup": { - "type": "string", - "minLength": 3, - "maxLength": 64, - "title": "Name of the Azure resource group", - "description": "When specified will override the resource group name used for infrastructure provisioning. Supports environment variable substitution." - }, - "metadata": { - "type": "object", - "properties": { - "template": { - "type": "string", - "title": "Identifier of the template from which the application was created. Optional.", - "examples": [ - "todo-nodejs-mongo@0.0.1-beta" - ] - } - } - }, - "infra": { - "type": "object", - "title": "The infrastructure configuration used for the application", - "description": "Optional. Provides additional configuration for Azure infrastructure provisioning.", - "additionalProperties": true, - "properties": { - "provider": { - "type": "string", - "title": "Type of infrastructure provisioning provider", - "description": "Optional. The infrastructure provisioning provider used to provision the Azure resources for the application. (Default: bicep)", - "enum": [ - "bicep", - "terraform" - ] - }, - "path": { - "type": "string", - "title": "Path to the location that contains Azure provisioning templates", - "description": "Optional. The relative folder path to the Azure provisioning templates for the specified provider. (Default: infra)" - }, - "module": { - "type": "string", - "title": "Name of the default module within the Azure provisioning templates", - "description": "Optional. The name of the Azure provisioning module used when provisioning resources. 
(Default: main)" - } - } - }, - "services": { - "type": "object", - "title": "Definition of services that comprise the application", - "minProperties": 1, - "additionalProperties": { - "type": "object", - "additionalProperties": false, - "required": [ - "host" - ], - "properties": { - "apiVersion": { - "type": "string", - "title": "Resource provider API version for deployments", - "description": "Optional. The resource provider API version to use for the service. If not specified, the default SDK API version is used. Only valid when host is 'containerapp'." - }, - "resourceGroup": { - "type": "string", - "title": "Name of the Azure resource group that contains the resource", - "description": "By default, the CLI will discover the Azure resource within the default resource group. When specified, the CLI will instead find the Azure resource within the specified resource group. Supports environment variable substitution." - }, - "resourceName": { - "type": "string", - "title": "Name of the Azure resource that implements the service", - "description": "By default, the CLI will discover the Azure resource with tag 'azd-service-name' set to the current service's name. When specified, the CLI will instead find the Azure resource with the matching resource name. Supports environment variable substitution." - }, - "project": { - "type": "string", - "title": "Path to the service source code directory" - }, - "image": { - "type": "string", - "title": "Optional. The source image to be used for the container image instead of building from source. Supports environment variable substitution.", - "description": "If omitted, container image will be built from source specified in the 'project' property. Setting both 'project' and 'image' is invalid." - }, - "host": { - "type": "string", - "title": "Required. The type of Azure resource used for service implementation", - "description": "The Azure service that will be used as the target for deployment operations for the service.", - "enum": [ - "appservice", - "containerapp", - "function", - "springapp", - "staticwebapp", - "aks", - "ai.endpoint" - ] - }, - "language": { - "type": "string", - "title": "Service implementation language", - "enum": [ - "dotnet", - "csharp", - "fsharp", - "py", - "python", - "js", - "ts", - "java", - "docker" - ] - }, - "module": { - "type": "string", - "title": "(DEPRECATED) Path of the infrastructure module used to deploy the service relative to the root infra folder", - "description": "If omitted, the CLI will assume the module name is the same as the service name. This property will be deprecated in a future release." - }, - "dist": { - "type": "string", - "title": "Relative path to service deployment artifacts" - }, - "docker": { - "$ref": "#/definitions/docker" - }, - "k8s": { - "$ref": "#/definitions/aksOptions" - }, - "config": { - "type": "object", - "additionalProperties": true - }, - "hooks": { - "type": "object", - "title": "Service level hooks", - "description": "Hooks should match `service` event names prefixed with `pre` or `post` depending on when the script should execute. 
When specifying paths they should be relative to the service path.", - "additionalProperties": false, - "properties": { - "predeploy": { - "title": "pre deploy hook", - "description": "Runs before the service is deployed to Azure", - "$ref": "#/definitions/hooks" - }, - "postdeploy": { - "title": "post deploy hook", - "description": "Runs after the service is deployed to Azure", - "$ref": "#/definitions/hooks" - }, - "prerestore": { - "title": "pre restore hook", - "description": "Runs before the service dependencies are restored", - "$ref": "#/definitions/hooks" - }, - "postrestore": { - "title": "post restore hook", - "description": "Runs after the service dependencies are restored", - "$ref": "#/definitions/hooks" - }, - "prebuild": { - "title": "pre build hook", - "description": "Runs before the service is built", - "$ref": "#/definitions/hooks" - }, - "postbuild": { - "title": "post build hook", - "description": "Runs after the service is built", - "$ref": "#/definitions/hooks" - }, - "prepackage": { - "title": "pre package hook", - "description": "Runs before the service is deployment package is created", - "$ref": "#/definitions/hooks" - }, - "postpackage": { - "title": "post package hook", - "description": "Runs after the service is deployment package is created", - "$ref": "#/definitions/hooks" - } - } - } - }, - "allOf": [ - { - "if": { - "properties": { - "host": { - "const": "containerapp" - } - } - }, - "then": { - "anyOf": [ - { - "required": [ - "image" - ], - "properties": { - "language": false - }, - "not": { - "required": [ - "project" - ] - } - }, - { - "required": [ - "project" - ], - "not": { - "required": [ - "image" - ] - } - } - ] - } - }, - { - "if": { - "not": { - "properties": { - "host": { - "const": "containerapp" - } - } - } - }, - "then": { - "properties": { - "image": false - } - } - }, - { - "if": { - "not": { - "properties": { - "host": { - "enum": [ - "containerapp", - "aks", - "ai.endpoint" - ] - } - } - } - }, - "then": { - "required": [ - "project", - "language" - ], - "properties": { - "docker": false - } - } - }, - { - "if": { - "properties": { - "host": { - "const": "ai.endpoint" - } - } - }, - "then": { - "required": [ - "config" - ], - "properties": { - "config": { - "$ref": "#/definitions/aiEndpointConfig", - "title": "The Azure AI endpoint configuration.", - "description": "Required. Provides additional configuration for Azure AI online endpoint deployment." - } - } - } - }, - { - "if": { - "not": { - "properties": { - "host": { - "enum": [ - "aks" - ] - } - } - } - }, - "then": { - "properties": { - "k8s": false - } - } - }, - { - "if": { - "properties": { - "language": { - "const": "java" - } - } - }, - "then": { - "properties": { - "dist": { - "type": "string", - "description": "Optional. The path to the directory containing a single Java archive file (.jar/.ear/.war), or the path to the specific Java archive file to be included in the deployment artifact. If omitted, the CLI will detect the output directory based on the build system in-use. For maven, the default output directory 'target' is assumed." - } - } - } - }, - { - "if": { - "not": { - "properties": { - "host": { - "const": "containerapp" - } - } - } - }, - "then": { - "properties": { - "apiVersion": false - } - } - }, - { - "properties": { - "dist": { - "type": "string", - "description": "Optional. The CLI will use files under this path to create the deployment artifact (ZIP file). If omitted, all files under service project directory will be included." 
- } - } - } - ] - } - }, - "resources": { - "type": "object", - "additionalProperties": { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "title": "Type of resource", - "description": "The type of resource to be created. (Example: db.postgres)", - "enum": [ - "db.postgres", - "db.mysql", - "db.redis", - "db.mongo", - "db.cosmos", - "ai.openai.model", - "ai.project", - "ai.search", - "host.containerapp", - "host.appservice", - "messaging.eventhubs", - "messaging.servicebus", - "storage", - "keyvault" - ] - }, - "uses": { - "type": "array", - "title": "Other resources that this resource uses", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "existing": { - "type": "boolean", - "title": "An existing resource for referencing purposes", - "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", - "default": false - } - }, - "allOf": [ - { - "if": { - "properties": { - "type": { - "const": "host.appservice" - } - } - }, - "then": { - "$ref": "#/definitions/appServiceResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "host.containerapp" - } - } - }, - "then": { - "$ref": "#/definitions/containerAppResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "ai.openai.model" - } - } - }, - "then": { - "$ref": "#/definitions/aiModelResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "ai.project" - } - } - }, - "then": { - "$ref": "#/definitions/aiProjectResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "ai.search" - } - } - }, - "then": { - "$ref": "#/definitions/aiSearchResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "db.postgres" - } - } - }, - "then": { - "$ref": "#/definitions/genericDbResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "db.mysql" - } - } - }, - "then": { - "$ref": "#/definitions/genericDbResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "db.redis" - } - } - }, - "then": { - "$ref": "#/definitions/genericDbResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "db.mongo" - } - } - }, - "then": { - "$ref": "#/definitions/genericDbResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "db.cosmos" - } - } - }, - "then": { - "$ref": "#/definitions/cosmosDbResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "messaging.eventhubs" - } - } - }, - "then": { - "$ref": "#/definitions/eventHubsResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "messaging.servicebus" - } - } - }, - "then": { - "$ref": "#/definitions/serviceBusResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "storage" - } - } - }, - "then": { - "$ref": "#/definitions/storageAccountResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "keyvault" - } - } - }, - "then": { - "$ref": "#/definitions/keyVaultResource" - } - } - ] - } - }, - "pipeline": { - "type": "object", - "title": "Definition of continuous integration pipeline", - "properties": { - "provider": { - "type": "string", - "title": "Type of pipeline provider", - "description": "Optional. The pipeline provider to be used for continuous integration. (Default: github)", - "enum": [ - "github", - "azdo" - ] - }, - "variables": { - "type": "array", - "title": "Optional. 
List of azd environment variables to be used in the pipeline as variables.", - "description": "If variable is found on azd environment, it is set as a variable for the pipeline.", - "items": { - "type": "string" - } - }, - "secrets": { - "type": "array", - "title": "Optional. List of azd environment variables to be used in the pipeline as secrets.", - "description": "If variable is found on azd environment, it is set as a secret for the pipeline.", - "items": { - "type": "string" - } - } - } - }, - "hooks": { - "type": "object", - "title": "Command level hooks", - "description": "Hooks should match `azd` command names prefixed with `pre` or `post` depending on when the script should execute. When specifying paths they should be relative to the project path.", - "additionalProperties": false, - "properties": { - "preprovision": { - "title": "pre provision hook", - "description": "Runs before the `provision` command", - "$ref": "#/definitions/hooks" - }, - "postprovision": { - "title": "post provision hook", - "description": "Runs after the `provision` command", - "$ref": "#/definitions/hooks" - }, - "preinfracreate": { - "title": "pre infra create hook", - "description": "Runs before the `infra create` or `provision` commands", - "$ref": "#/definitions/hooks" - }, - "postinfracreate": { - "title": "post infra create hook", - "description": "Runs after the `infra create` or `provision` commands", - "$ref": "#/definitions/hooks" - }, - "preinfradelete": { - "title": "pre infra delete hook", - "description": "Runs before the `infra delete` or `down` commands", - "$ref": "#/definitions/hooks" - }, - "postinfradelete": { - "title": "post infra delete hook", - "description": "Runs after the `infra delete` or `down` commands", - "$ref": "#/definitions/hooks" - }, - "predown": { - "title": "pre down hook", - "description": "Runs before the `infra delete` or `down` commands", - "$ref": "#/definitions/hooks" - }, - "postdown": { - "title": "post down hook", - "description": "Runs after the `infra delete` or `down` commands", - "$ref": "#/definitions/hooks" - }, - "preup": { - "title": "pre up hook", - "description": "Runs before the `up` command", - "$ref": "#/definitions/hooks" - }, - "postup": { - "title": "post up hook", - "description": "Runs after the `up` command", - "$ref": "#/definitions/hooks" - }, - "prepackage": { - "title": "pre package hook", - "description": "Runs before the `package` command", - "$ref": "#/definitions/hooks" - }, - "postpackage": { - "title": "post package hook", - "description": "Runs after the `package` command", - "$ref": "#/definitions/hooks" - }, - "predeploy": { - "title": "pre deploy hook", - "description": "Runs before the `deploy` command", - "$ref": "#/definitions/hooks" - }, - "postdeploy": { - "title": "post deploy hook", - "description": "Runs after the `deploy` command", - "$ref": "#/definitions/hooks" - }, - "prerestore": { - "title": "pre restore hook", - "description": "Runs before the `restore` command", - "$ref": "#/definitions/hooks" - }, - "postrestore": { - "title": "post restore hook", - "description": "Runs after the `restore` command", - "$ref": "#/definitions/hooks" - } - } - }, - "requiredVersions": { - "type": "object", - "additionalProperties": false, - "properties": { - "azd": { - "type": "string", - "title": "A range of supported versions of `azd` for this project", - "description": "A range of supported versions of `azd` for this project. If the version of `azd` is outside this range, the project will fail to load. 
Optional (allows all versions if absent).", - "examples": [ - ">= 0.6.0-beta.3" - ] - } - } - }, - "state": { - "type": "object", - "title": "The state configuration used for the project.", - "description": "Optional. Provides additional configuration for state management.", - "additionalProperties": false, - "properties": { - "remote": { - "type": "object", - "additionalProperties": false, - "title": "The remote state configuration.", - "description": "Optional. Provides additional configuration for remote state management such as Azure Blob Storage.", - "required": [ - "backend" - ], - "properties": { - "backend": { - "type": "string", - "title": "The remote state backend type.", - "description": "Optional. The remote state backend type. (Default: AzureBlobStorage)", - "default": "AzureBlobStorage", - "enum": [ - "AzureBlobStorage" - ] - }, - "config": { - "type": "object", - "additionalProperties": true - } - }, - "allOf": [ - { - "if": { - "properties": { - "backend": { - "const": "AzureBlobStorage" - } - } - }, - "then": { - "required": [ - "config" - ], - "properties": { - "config": { - "$ref": "#/definitions/azureBlobStorageConfig" - } - } - } - } - ] - } - } - }, - "platform": { - "type": "object", - "title": "The platform configuration used for the project.", - "description": "Optional. Provides additional configuration for platform specific features such as Azure Dev Center.", - "additionalProperties": false, - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "title": "The platform type.", - "description": "Required. The platform type. (Example: devcenter)", - "enum": [ - "devcenter" - ] - }, - "config": { - "type": "object", - "additionalProperties": true - } - }, - "allOf": [ - { - "if": { - "properties": { - "type": { - "const": "devcenter" - } - } - }, - "then": { - "properties": { - "config": { - "$ref": "#/definitions/azureDevCenterConfig" - } - } - } - } - ] - }, - "workflows": { - "type": "object", - "title": "The workflows configuration used for the project.", - "description": "Optional. Provides additional configuration for workflows such as override azd up behavior.", - "additionalProperties": false, - "properties": { - "up": { - "title": "The up workflow configuration", - "description": "When specified will override the default behavior for the azd up workflow. Common use cases include changing the order of the provision, package and deploy commands.", - "$ref": "#/definitions/workflow" - } - } - }, - "cloud": { - "type": "object", - "title": "The cloud configuration used for the project.", - "description": "Optional. Provides additional configuration for deploying to sovereign clouds such as Azure Government. The default cloud is AzureCloud.", - "additionalProperties": false, - "properties": { - "name": { - "enum": [ - "AzureCloud", - "AzureChinaCloud", - "AzureUSGovernment" - ] - } - } - } - }, - "definitions": { - "hooks": { - "anyOf": [ - { - "$ref": "#/definitions/hook" - }, - { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/hook" - } - } - ] - }, - "hook": { - "type": "object", - "additionalProperties": false, - "properties": { - "shell": { - "type": "string", - "title": "Type of shell to execute scripts", - "description": "Optional. The type of shell to use for the hook. (Default: sh)", - "enum": [ - "sh", - "pwsh" - ], - "default": "sh" - }, - "run": { - "type": "string", - "title": "Required. 
The inline script or relative path of your scripts from the project or service path", - "description": "When specifying an inline script you also must specify the `shell` to use. This is automatically inferred when using paths." - }, - "continueOnError": { - "type": "boolean", - "default": false, - "title": "Whether or not a script error will halt the azd command", - "description": "Optional. When set to true will continue to run the command even after a script error has occurred. (Default: false)" - }, - "interactive": { - "type": "boolean", - "default": false, - "title": "Whether the script will run in interactive mode", - "description": "Optional. When set to true will bind the script to stdin, stdout & stderr of the running console. (Default: false)" - }, - "windows": { - "title": "The hook configuration used for Windows environments", - "description": "When specified overrides the hook configuration when executed in Windows environments", - "default": null, - "$ref": "#/definitions/hook" - }, - "posix": { - "title": "The hook configuration used for POSIX (Linux & MacOS) environments", - "description": "When specified overrides the hook configuration when executed in POSIX environments", - "default": null, - "$ref": "#/definitions/hook" - }, - "secrets": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "title": "Optional. Map of azd environment variables to hook secrets.", - "description": "If variable was set as a secret in the environment, the secret value will be passed to the hook.", - "examples": [ - { - "WITH_SECRET_VALUE": "ENV_VAR_WITH_SECRET" - } - ] - } - }, - "allOf": [ - { - "if": { - "allOf": [ - { - "required": [ - "windows" - ] - }, - { - "required": [ - "posix" - ] - } - ] - }, - "then": { - "properties": { - "run": false, - "shell": false, - "interactive": false, - "continueOnError": false, - "secrets": false - } - } - }, - { - "if": { - "anyOf": [ - { - "required": [ - "interactive" - ] - }, - { - "required": [ - "continueOnError" - ] - }, - { - "required": [ - "secrets" - ] - }, - { - "required": [ - "shell" - ] - } - ] - }, - "then": { - "required": [ - "run" - ] - } - } - ] - }, - "docker": { - "type": "object", - "description": "This is only applicable when `host` is `containerapp` or `aks`", - "additionalProperties": false, - "properties": { - "path": { - "type": "string", - "title": "The path to the Dockerfile", - "description": "Path to the Dockerfile is relative to your service", - "default": "./Dockerfile" - }, - "context": { - "type": "string", - "title": "The docker build context", - "description": "When specified overrides the default context", - "default": "." - }, - "platform": { - "type": "string", - "title": "The platform target", - "default": "amd64" - }, - "registry": { - "type": "string", - "title": "Optional. The container registry to push the image to.", - "description": "If omitted, will default to value of AZURE_CONTAINER_REGISTRY_ENDPOINT environment variable. Supports environment variable substitution." - }, - "image": { - "type": "string", - "title": "Optional. The name that will be applied to the built container image.", - "description": "If omitted, will default to the '{appName}/{serviceName}-{environmentName}'. Supports environment variable substitution." - }, - "tag": { - "type": "string", - "title": "The tag that will be applied to the built container image.", - "description": "If omitted, will default to 'azd-deploy-{unix time (seconds)}'. Supports environment variable substitution. 
For example, to generate unique tags for a given release: myapp/myimage:${DOCKER_IMAGE_TAG}" - }, - "buildArgs": { - "type": "array", - "title": "Optional. Build arguments to pass to the docker build command", - "description": "Build arguments to pass to the docker build command.", - "items": { - "type": "string" - } - }, - "remoteBuild": { - "type": "boolean", - "title": "Optional. Whether to build the image remotely", - "description": "If set to true, the image will be built remotely using the Azure Container Registry remote build feature. If set to false, the image will be built locally using Docker." - } - } - }, - "aksOptions": { - "type": "object", - "title": "Optional. The Azure Kubernetes Service (AKS) configuration options", - "additionalProperties": false, - "properties": { - "deploymentPath": { - "type": "string", - "title": "Optional. The relative path from the service path to the k8s deployment manifests. (Default: manifests)", - "description": "When set it will override the default deployment path location for k8s deployment manifests.", - "default": "manifests" - }, - "namespace": { - "type": "string", - "title": "Optional. The k8s namespace of the deployed resources. (Default: Project name)", - "description": "When specified a new k8s namespace will be created if it does not already exist" - }, - "deployment": { - "type": "object", - "title": "Optional. The k8s deployment configuration", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "title": "Optional. The name of the k8s deployment resource to use during deployment. (Default: Service name)", - "description": "Used during deployment to ensure if the k8s deployment rollout has been completed. If not set will search for a deployment resource in the same namespace that contains the service name." - } - } - }, - "service": { - "type": "object", - "title": "Optional. The k8s service configuration", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "title": "Optional. The name of the k8s service resource to use as the default service endpoint. (Default: Service name)", - "description": "Used when determining endpoints for the default service resource. If not set will search for a deployment resource in the same namespace that contains the service name." - } - } - }, - "ingress": { - "type": "object", - "title": "Optional. The k8s ingress configuration", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "title": "Optional. The name of the k8s ingress resource to use as the default service endpoint. (Default: Service name)", - "description": "Used when determining endpoints for the default ingress resource. If not set will search for a deployment resource in the same namespace that contains the service name." - }, - "relativePath": { - "type": "string", - "title": "Optional. The relative path to the service from the root of your ingress controller.", - "description": "When set will be appended to the root of your ingress resource path." - } - } - }, - "helm": { - "type": "object", - "title": "Optional. The helm configuration", - "additionalProperties": false, - "properties": { - "repositories": { - "type": "array", - "title": "Optional. 
The helm repositories to add", - "description": "When set will add the helm repositories to the helm client.", - "minItems": 1, - "items": { - "type": "object", - "additionalProperties": false, - "required": [ - "name", - "url" - ], - "properties": { - "name": { - "type": "string", - "title": "The name of the helm repository", - "description": "The name of the helm repository to add." - }, - "url": { - "type": "string", - "title": "The url of the helm repository", - "description": "The url of the helm repository to add." - } - } - } - }, - "releases": { - "type": "array", - "title": "Optional. The helm releases to install", - "description": "When set will install the helm releases to the k8s cluster.", - "minItems": 1, - "items": { - "type": "object", - "additionalProperties": false, - "required": [ - "name", - "chart" - ], - "properties": { - "name": { - "type": "string", - "title": "The name of the helm release", - "description": "The name of the helm release to install." - }, - "chart": { - "type": "string", - "title": "The name of the helm chart", - "description": "The name of the helm chart to install." - }, - "version": { - "type": "string", - "title": "The version of the helm chart", - "description": "The version of the helm chart to install." - }, - "namespace": { - "type": "string", - "title": "Optional. The k8s namespace to install the helm chart", - "description": "When set will install the helm chart to the specified namespace. Defaults to the service namespace." - }, - "values": { - "type": "string", - "title": "Optional. Relative path from service to a values.yaml to pass to the helm chart", - "description": "When set will pass the values to the helm chart." - } - } - } - } - } - }, - "kustomize": { - "type": "object", - "title": "Optional. The kustomize configuration", - "additionalProperties": false, - "properties": { - "dir": { - "type": "string", - "title": "Optional. The relative path to the kustomize directory.", - "description": "When set will use the kustomize directory to deploy to the k8s cluster. Supports environment variable substitution." - }, - "edits": { - "type": "array", - "title": "Optional. The kustomize edits to apply before deployment.", - "description": "When set will apply the edits to the kustomize directory before deployment. Supports environment variable substitution.", - "items": { - "type": "string" - } - }, - "env": { - "type": "object", - "title": "Optional. The environment key/value pairs used to generate a .env file.", - "description": "When set will generate a .env file in the kustomize directory. Values support environment variable substitution.", - "additionalProperties": { - "type": [ - "string", - "boolean", - "number" - ] - } - } - } - } - } - }, - "azureBlobStorageConfig": { - "type": "object", - "title": "The Azure Blob Storage remote state backend configuration.", - "description": "Optional. Provides additional configuration for remote state management such as Azure Blob Storage.", - "additionalProperties": false, - "required": [ - "accountName" - ], - "properties": { - "accountName": { - "type": "string", - "title": "The Azure Storage account name.", - "description": "Required. The Azure Storage account name." - }, - "containerName": { - "type": "string", - "title": "The Azure Storage container name.", - "description": "Optional. The Azure Storage container name. Defaults to project name if not specified." - }, - "endpoint": { - "type": "string", - "title": "The Azure Storage endpoint.", - "description": "Optional. 
The Azure Storage endpoint. (Default: blob.core.windows.net)" - } - } - }, - "azureDevCenterConfig": { - "type": "object", - "title": "The dev center configuration used for the project.", - "description": "Optional. Provides additional project configuration for Azure Dev Center integration.", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "title": "The name of the Azure Dev Center", - "description": "Optional. Used as the default dev center for this project." - }, - "project": { - "type": "string", - "title": "The name of the Azure Dev Center project.", - "description": "Optional. Used as the default dev center project for this project." - }, - "catalog": { - "type": "string", - "title": "The name of the Azure Dev Center catalog.", - "description": "Optional. Used as the default dev center catalog for this project." - }, - "environmentDefinition": { - "type": "string", - "title": "The name of the Dev Center catalog environment definition.", - "description": "Optional. Used as the default dev center environment definition for this project." - }, - "environmentType": { - "type": "string", - "title": "The Dev Center project environment type used for the deployment environment.", - "description": "Optional. Used as the default environment type for this project." - } - } - }, - "workflow": { - "anyOf": [ - { - "type": "object", - "additionalProperties": false, - "required": [ - "steps" - ], - "properties": { - "steps": { - "type": "array", - "title": "The steps to execute in the workflow", - "description": "The steps to execute in the workflow. (Example: provision, package, deploy)", - "minItems": 1, - "items": { - "type": "object", - "$ref": "#/definitions/workflowStep" - } - } - } - }, - { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/workflowStep" - } - } - ] - }, - "workflowStep": { - "properties": { - "azd": { - "title": "The azd command command configuration", - "description": "The azd command configuration to execute. (Example: up)", - "$ref": "#/definitions/azdCommand" - } - } - }, - "azdCommand": { - "anyOf": [ - { - "type": "string", - "title": "The azd command to execute", - "description": "The name and args of the azd command to execute. (Example: deploy --all)" - }, - { - "type": "object", - "additionalProperties": false, - "required": [ - "args" - ], - "properties": { - "args": { - "type": "array", - "title": "The arguments or flags to pass to the azd command", - "description": "The arguments to pass to the azd command. (Example: --all)", - "minItems": 1 - } - } - } - ] - }, - "aiComponentConfig": { - "type": "object", - "properties": { - "name": { - "type": "string", - "title": "Name of the AI component.", - "description": "Optional. When omitted AZD will generate a name based on the component type and the service name. Supports environment variable substitution." - }, - "path": { - "type": "string", - "title": "Path to the AI component configuration file or path.", - "description": "Required. The path to the AI component configuration file or path to the AI component source code." - }, - "overrides": { - "type": "object", - "title": "A map of key value pairs used to override the AI component configuration.", - "description": "Optional. 
Supports environment variable substitution.", - "additionalProperties": { - "type": "string" - } - } - }, - "required": [ - "path" - ] - }, - "aiDeploymentConfig": { - "allOf": [ - { - "$ref": "#/definitions/aiComponentConfig" - }, - { - "type": "object", - "properties": { - "environment": { - "type": "object", - "title": "A map of key/value pairs to set as environment variables for the deployment.", - "description": "Optional. Values support OS & AZD environment variable substitution.", - "additionalProperties": { - "type": "string" - } - } - } - } - ] - }, - "aiEndpointConfig": { - "type": "object", - "additionalProperties": false, - "properties": { - "workspace": { - "type": "string", - "title": "The name of the AI Studio project workspace.", - "description": "Optional. When omitted AZD will use the value specified in the 'AZUREAI_PROJECT_NAME' environment variable. Supports environment variable substitution." - }, - "flow": { - "$ref": "#/definitions/aiComponentConfig", - "title": "The Azure AI Studio Prompt Flow configuration.", - "description": "Optional. When omitted a prompt flow will be not created." - }, - "environment": { - "$ref": "#/definitions/aiComponentConfig", - "title": "The Azure AI Studio custom environment configuration.", - "description": "Optional. When omitted a custom environment will not be created." - }, - "model": { - "$ref": "#/definitions/aiComponentConfig", - "title": "The Azure AI Studio model configuration.", - "description": "Optional. When omitted a model will not be created." - }, - "deployment": { - "$ref": "#/definitions/aiDeploymentConfig", - "title": "The Azure AI Studio online endpoint deployment configuration.", - "description": "Required. A new online endpoint deployment will be created and traffic will automatically to shifted to the new deployment upon successful completion." - } - }, - "required": [ - "deployment" - ] - }, - "appServiceResource": { - "type": "object", - "description": "An Azure App Service web app.", - "additionalProperties": false, - "required": [ - "port", - "runtime" - ], - "properties": { - "type": { - "type": "string", - "const": "host.appservice" - }, - "uses": { - "type": "array", - "title": "Other resources that this resource uses", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "port": { - "type": "integer", - "title": "Port that the web app listens on", - "description": "Optional. The port that the web app listens on. (Default: 80)" - }, - "env": { - "type": "array", - "title": "Environment variables to set for the web app", - "items": { - "type": "object", - "required": [ - "name" - ], - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "title": "Name of the environment variable" - }, - "value": { - "type": "string", - "title": "Value of the environment variable. Supports environment variable substitution." - }, - "secret": { - "type": "string", - "title": "Secret value of the environment variable. Supports environment variable substitution." - } - } - } - }, - "runtime": { - "type": "object", - "title": "Runtime stack configuration", - "description": "Required. The language runtime configuration for the App Service web app.", - "required": [ - "stack", - "version" - ], - "properties": { - "stack": { - "type": "string", - "title": "Language runtime stack", - "description": "Required. The language runtime stack.", - "enum": [ - "node", - "python" - ] - }, - "version": { - "type": "string", - "title": "Runtime stack version", - "description": "Required. 
The language runtime version. Format varies by stack. (Example: '22-lts' for Node, '3.13' for Python)" - } - } - }, - "startupCommand": { - "type": "string", - "title": "Startup command", - "description": "Optional. Startup command that will be run as part of web app startup." - } - } - }, - "containerAppResource": { - "type": "object", - "description": "A Docker-based container app.", - "additionalProperties": false, - "required": [ - "port" - ], - "properties": { - "type": { - "type": "string", - "const": "host.containerapp" - }, - "uses": { - "type": "array", - "title": "Other resources that this resource uses", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "port": { - "type": "integer", - "title": "Port that the container app listens on", - "description": "Optional. The port that the container app listens on. (Default: 80)" - }, - "env": { - "type": "array", - "title": "Environment variables to set for the container app", - "items": { - "type": "object", - "required": [ - "name" - ], - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "title": "Name of the environment variable" - }, - "value": { - "type": "string", - "title": "Value of the environment variable. Supports environment variable substitution." - }, - "secret": { - "type": "string", - "title": "Secret value of the environment variable. Supports environment variable substitution." - } - } - } - } - } - }, - "aiModelResource": { - "type": "object", - "description": "A deployed, ready-to-use AI model.", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "const": "ai.openai.model" - }, - "existing": { - "type": "boolean", - "title": "An existing resource for referencing purposes", - "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", - "default": false - }, - "model": { - "type": "object", - "description": "The underlying AI model.", - "additionalProperties": false, - "required": [ - "name", - "version" - ], - "properties": { - "name": { - "type": "string", - "title": "The name of the AI model.", - "description": "Required. The name of the AI model." - }, - "version": { - "type": "string", - "title": "The version of the AI model.", - "description": "Required. The version of the AI model." - } - } - } - }, - "allOf": [ - { - "if": { - "properties": { - "existing": { - "const": false - } - } - }, - "then": { - "required": [ - "model" - ] - } - } - ] - }, - "aiProjectResource": { - "type": "object", - "description": "An Azure AI Foundry project with models.", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "const": "ai.project" - }, - "existing": { - "type": "boolean", - "title": "An existing resource for referencing purposes", - "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", - "default": false - }, - "models": { - "type": "array", - "title": "AI models to deploy", - "description": "Optional. The AI models to be deployed as part of the AI project.", - "minItems": 1, - "items": { - "type": "object", - "additionalProperties": false, - "required": [ - "name", - "version", - "format", - "sku" - ], - "properties": { - "name": { - "type": "string", - "title": "The name of the AI model.", - "description": "Required. The name of the AI model." 
- }, - "version": { - "type": "string", - "title": "The version of the AI model.", - "description": "Required. The version of the AI model." - }, - "format": { - "type": "string", - "title": "The format of the AI model.", - "description": "Required. The format of the AI model. (Example: Microsoft, OpenAI)" - }, - "sku": { - "type": "object", - "title": "The SKU configuration for the AI model.", - "description": "Required. The SKU details for the AI model.", - "additionalProperties": false, - "required": [ - "name", - "usageName", - "capacity" - ], - "properties": { - "name": { - "type": "string", - "title": "The name of the SKU.", - "description": "Required. The name of the SKU. (Example: GlobalStandard)" - }, - "usageName": { - "type": "string", - "title": "The usage name of the SKU.", - "description": "Required. The usage name of the SKU for billing purposes. (Example: AIServices.GlobalStandard.MaaS, OpenAI.GlobalStandard.gpt-4o-mini)" - }, - "capacity": { - "type": "integer", - "title": "The capacity of the SKU.", - "description": "Required. The capacity of the SKU." - } - } - } - } - } - } - } - }, - "aiSearchResource": { - "type": "object", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "const": "ai.search" - }, - "existing": { - "type": "boolean", - "title": "An existing resource for referencing purposes", - "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", - "default": false - } - } - }, - "genericDbResource": { - "type": "object", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "title": "Type of resource", - "description": "The type of resource to be created. (Example: db.postgres)", - "enum": [ - "db.postgres", - "db.redis", - "db.mysql", - "db.mongo" - ] - } - } - }, - "cosmosDbResource": { - "type": "object", - "description": "A deployed, ready-to-use Azure Cosmos DB for NoSQL database.", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "const": "db.cosmos" - }, - "containers": { - "type": "array", - "title": "Containers", - "description": "Containers to be created to store data. Each container stores a collection of items.", - "items": { - "type": "object", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "title": "Container name.", - "description": "Required. The name of the container." - }, - "partitionKeys": { - "type": "array", - "title": "Partition keys.", - "description": "Required. The partition key(s) used to distribute data across partitions. The ordering of keys matters. By default, a single partition key '/id' is naturally a great choice for most applications.", - "minLength": 1, - "maxLength": 3, - "items": { - "type": "string" - } - } - } - } - } - } - }, - "eventHubsResource": { - "type": "object", - "description": "An Azure Event Hubs namespace.", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "const": "messaging.eventhubs" - }, - "existing": { - "type": "boolean", - "title": "An existing resource for referencing purposes", - "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. 
(Default: false)", - "default": false - }, - "hubs": { - "type": "array", - "title": "Hubs to create in the Event Hubs namespace", - "additionalProperties": false, - "items": { - "type": "string" - }, - "uniqueItems": true - } - } - }, - "serviceBusResource": { - "type": "object", - "description": "An Azure Service Bus namespace.", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "const": "messaging.servicebus" - }, - "existing": { - "type": "boolean", - "title": "An existing resource for referencing purposes", - "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", - "default": false - }, - "queues": { - "type": "array", - "title": "Queues to create in the Service Bus namespace", - "additionalProperties": false, - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "topics": { - "type": "array", - "title": "Topics to create in the Service Bus namespace", - "additionalProperties": false, - "items": { - "type": "string" - }, - "uniqueItems": true - } - } - }, - "storageAccountResource": { - "type": "object", - "description": "A deployed, ready-to-use Azure Storage Account.", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "const": "storage" - }, - "existing": { - "type": "boolean", - "title": "An existing resource for referencing purposes", - "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", - "default": false - }, - "containers": { - "type": "array", - "title": "Azure Storage Account container names.", - "description": "The container names of Azure Storage Account.", - "items": { - "type": "string", - "title": "Azure Storage Account container name", - "description": "The container name of Azure Storage Account." - } - } - } - }, - "keyVaultResource": { - "type": "object", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "const": "keyvault" - }, - "existing": { - "type": "boolean", - "title": "An existing resource for referencing purposes", - "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", - "default": false - } - } - } - } -} \ No newline at end of file diff --git a/cli/azd/internal/mcp/tools/prompts/prompts.go b/cli/azd/internal/mcp/tools/prompts/prompts.go index 7273140321a..b5166fcef63 100644 --- a/cli/azd/internal/mcp/tools/prompts/prompts.go +++ b/cli/azd/internal/mcp/tools/prompts/prompts.go @@ -13,9 +13,6 @@ var AzdPlanInitPrompt string //go:embed azd_iac_generation_rules.md var AzdIacRulesPrompt string -//go:embed azure.yaml.json -var AzdYamlSchemaPrompt string - //go:embed azd_discovery_analysis.md var AzdDiscoveryAnalysisPrompt string diff --git a/cli/azd/internal/mcp/tools/types.go b/cli/azd/internal/mcp/tools/types.go new file mode 100644 index 00000000000..31cb128d1a1 --- /dev/null +++ b/cli/azd/internal/mcp/tools/types.go @@ -0,0 +1,10 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
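The hunk above stops embedding the azure.yaml.json schema text into the prompt bundle; with the jsonschema module this series pulls into go.mod, the same contract can instead be checked programmatically before an agent relies on it. The snippet below is only a rough sketch of that idea — it assumes the santhosh-tekuri/jsonschema/v6 Compile/Validate API plus yaml.v3 decoding, and the file paths are illustrative placeholders, not the actual azd_yaml_schema tool implementation.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/santhosh-tekuri/jsonschema/v6"
	"gopkg.in/yaml.v3"
)

func main() {
	// Decode azure.yaml into a generic value the validator can walk.
	raw, err := os.ReadFile("azure.yaml")
	if err != nil {
		log.Fatal(err)
	}
	var doc any
	if err := yaml.Unmarshal(raw, &doc); err != nil {
		log.Fatal(err)
	}

	// Compile the project schema from a local copy of azure.yaml.json.
	schema, err := jsonschema.NewCompiler().Compile("azure.yaml.json")
	if err != nil {
		log.Fatal(err)
	}

	// Validate reports violations such as a hook that sets `shell` but omits the required `run`.
	if err := schema.Validate(doc); err != nil {
		fmt.Printf("azure.yaml is invalid: %v\n", err)
		return
	}
	fmt.Println("azure.yaml passes schema validation")
}
```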
+ +package tools + +// ErrorResponse represents a JSON error response structure that can be reused across all tools +type ErrorResponse struct { + Error bool `json:"error"` + Message string `json:"message"` +} diff --git a/cli/azd/pkg/apphost/manifest.go b/cli/azd/pkg/apphost/manifest.go index c9b046f8722..f70c568d3e6 100644 --- a/cli/azd/pkg/apphost/manifest.go +++ b/cli/azd/pkg/apphost/manifest.go @@ -61,7 +61,14 @@ func (m *Manifest) Warnings() string { output.WithWarningFormat("Deprecation Warning:"))) sb.WriteString(" This mode is deprecated since Aspire 9.4.") //nolint:lll - sb.WriteString(fmt.Sprintf(" See more: %s", output.WithLinkFormat("https://learn.microsoft.com/dotnet/aspire/whats-new/dotnet-aspire-9.4#-azure-container-apps-hybrid-mode-removal"))) + sb.WriteString( + fmt.Sprintf( + " See more: %s", + output.WithLinkFormat( + "https://learn.microsoft.com/dotnet/aspire/whats-new/dotnet-aspire-9.4#-azure-container-apps-hybrid-mode-removal", + ), + ), + ) } return sb.String() diff --git a/cli/azd/pkg/output/colors.go b/cli/azd/pkg/output/colors.go index c828a122bc9..d5e63dca9b4 100644 --- a/cli/azd/pkg/output/colors.go +++ b/cli/azd/pkg/output/colors.go @@ -59,6 +59,14 @@ func WithBackticks(s string) string { return fmt.Sprintf("`%s`", s) } +func AzdLabel() string { + return "[azd]" +} + +func AzdAgentLabel() string { + return color.HiMagentaString(fmt.Sprintf("🤖 %s Agent", AzdLabel())) +} + // WithMarkdown converts markdown to terminal-friendly colorized output using glamour. // This provides rich markdown rendering including bold, italic, code blocks, headers, etc. func WithMarkdown(markdownText string) string { diff --git a/go.mod b/go.mod index 4e4b44ec27b..469a46b8391 100644 --- a/go.mod +++ b/go.mod @@ -141,6 +141,8 @@ require ( github.com/pkoukk/tiktoken-go v0.1.6 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect + github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect + github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect github.com/segmentio/asm v1.2.0 // indirect github.com/segmentio/encoding v0.4.1 // indirect github.com/shopspring/decimal v1.2.0 // indirect diff --git a/go.sum b/go.sum index 42bdd6a3c6f..8b482d86308 100644 --- a/go.sum +++ b/go.sum @@ -387,6 +387,10 @@ github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4= +github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/segmentio/encoding v0.4.1 h1:KLGaLSW0jrmhB58Nn4+98spfvPvmo4Ci1P/WIQ9wn7w= From 4724432cb9b9a02c19b88de33c72cf475a4d94da Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Mon, 18 Aug 2025 10:18:06 -0700 Subject: [PATCH 072/116] Updates prompts and markdown output --- 
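The ErrorResponse type introduced above gives every tool a common JSON error shape. As a hedged illustration (not code from this patch), a handler could marshal it and hand it back through mcp.NewToolResultText, the same result constructor the new tools already use; the helper name below is hypothetical.

```go
package tools

import (
	"encoding/json"

	"github.com/mark3labs/mcp-go/mcp"
)

// newToolErrorResult sketches how the shared ErrorResponse type could be
// surfaced to an MCP client: marshal it to JSON and wrap it in a text result.
func newToolErrorResult(message string) *mcp.CallToolResult {
	payload, err := json.Marshal(ErrorResponse{
		Error:   true,
		Message: message,
	})
	if err != nil {
		// Marshalling a two-field struct should not fail; fall back to raw text.
		return mcp.NewToolResultText(message)
	}
	return mcp.NewToolResultText(string(payload))
}
```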
.../mcp/tools/prompts/azd_infrastructure_generation.md | 9 ++++----- .../internal/mcp/tools/prompts/azd_project_validation.md | 3 +-- cli/azd/pkg/output/colors.go | 4 ---- 3 files changed, 5 insertions(+), 11 deletions(-) diff --git a/cli/azd/internal/mcp/tools/prompts/azd_infrastructure_generation.md b/cli/azd/internal/mcp/tools/prompts/azd_infrastructure_generation.md index 3f35111d600..f9f251f8f3c 100644 --- a/cli/azd/internal/mcp/tools/prompts/azd_infrastructure_generation.md +++ b/cli/azd/internal/mcp/tools/prompts/azd_infrastructure_generation.md @@ -7,12 +7,13 @@ 3. **Inventory existing IaC files** - scan current working directory for all `.bicep` files 4. Read `azd-arch-plan.md` to get the **IaC File Generation Checklist** 5. Create directory structure in `./infra` following IaC rules -6. For each file in the IaC checklist: +6. During code generation always use the latest version for each resource type using the bicep schema tool +7. For each file in the IaC checklist: - **If file exists**: Intelligently update to match requirements, preserve user customizations where possible - **If file missing**: Generate new file following templates and best practices - **Flag conflicts**: Note any incompatible configurations but proceed with updates -7. Validate all generated bicep templates compile without errors or warnings -8. Update the IaC checklist section in existing `azd-arch-plan.md` by marking completed files as [x] while preserving existing content +8. Validate all generated bicep templates compile without errors or warnings +9. Update the IaC checklist section in existing `azd-arch-plan.md` by marking completed files as [x] while preserving existing content 📄 **Required Outputs** @@ -156,8 +157,6 @@ The `./infra/main.parameters.json` file is critical for AZD integration and must - [ ] Main.bicep template created/updated with subscription scope and resource group - [ ] Module templates generated/updated for all services listed in checklist - [ ] Parameter files created/updated with appropriate defaults -- [ ] All Bicep templates compile without errors or warnings (`az bicep build`) -- [ ] Templates validate successfully (`az deployment sub validate`) - [ ] Naming conventions and tagging implemented correctly - [ ] Security best practices implemented (Key Vault, managed identities) - [ ] **IaC checklist in `azd-arch-plan.md` updated** by marking completed files as [x] while preserving existing content diff --git a/cli/azd/internal/mcp/tools/prompts/azd_project_validation.md b/cli/azd/internal/mcp/tools/prompts/azd_project_validation.md index 0a28e9a7bc3..67764111bcd 100644 --- a/cli/azd/internal/mcp/tools/prompts/azd_project_validation.md +++ b/cli/azd/internal/mcp/tools/prompts/azd_project_validation.md @@ -46,8 +46,7 @@ **2. 
Bicep Template Validation:** - Scan `./infra` directory for `.bicep` files using file search -- Execute `az bicep build --file ` for each template -- Run `az deployment sub validate --template-file ./infra/main.bicep --parameters ./infra/main.parameters.json --location ` +- Execute `azd provision --preview --no-prompt` to validate infrastructure templates - Verify all templates compile without errors and dependencies are correct - **MANDATORY:** Fix ALL compilation errors before proceeding - Clean up any generated `` files generated during bicep validation diff --git a/cli/azd/pkg/output/colors.go b/cli/azd/pkg/output/colors.go index d5e63dca9b4..9cab9677bb0 100644 --- a/cli/azd/pkg/output/colors.go +++ b/cli/azd/pkg/output/colors.go @@ -70,10 +70,6 @@ func AzdAgentLabel() string { // WithMarkdown converts markdown to terminal-friendly colorized output using glamour. // This provides rich markdown rendering including bold, italic, code blocks, headers, etc. func WithMarkdown(markdownText string) string { - markdownText = strings.Trim(markdownText, "\n") - markdownText = strings.TrimPrefix(markdownText, "```markdown") - markdownText = strings.TrimSuffix(markdownText, "```") - // Get dynamic console width with fallback to 120 consoleWidth := getConsoleWidth() From 5ee0b022c1584c2635f2fc8d9f35aab3a8de77e4 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Mon, 18 Aug 2025 10:27:02 -0700 Subject: [PATCH 073/116] Updates MCP command registration after rebase --- cli/azd/cmd/mcp.go | 8 +++++--- cli/azd/cmd/root.go | 35 +++++------------------------------ cli/azd/pkg/llm/manager.go | 4 ++-- 3 files changed, 12 insertions(+), 35 deletions(-) diff --git a/cli/azd/cmd/mcp.go b/cli/azd/cmd/mcp.go index b4aba22f350..332448e1d94 100644 --- a/cli/azd/cmd/mcp.go +++ b/cli/azd/cmd/mcp.go @@ -26,9 +26,11 @@ import ( func mcpActions(root *actions.ActionDescriptor) *actions.ActionDescriptor { group := root.Add("mcp", &actions.ActionDescriptorOptions{ Command: &cobra.Command{ - Use: "mcp", - Short: "Manage Model Context Protocol (MCP) server.", - Hidden: true, + Use: "mcp", + Short: "Manage Model Context Protocol (MCP) server.", + }, + GroupingOptions: actions.CommandGroupOptions{ + RootLevelHelp: actions.CmdGroupAlpha, }, }) diff --git a/cli/azd/cmd/root.go b/cli/azd/cmd/root.go index 3ebddebb7af..c20fde61faa 100644 --- a/cli/azd/cmd/root.go +++ b/cli/azd/cmd/root.go @@ -130,7 +130,6 @@ func NewRootCmd( templatesActions(root) authActions(root) hooksActions(root) - mcpActions(root) root.Add("version", &actions.ActionDescriptorOptions{ Command: &cobra.Command{ @@ -409,6 +408,11 @@ func NewRootCmd( } } + // Enable MCP commands when LLM feature is enabled + if alphaFeatureManager.IsEnabled(llm.FeatureLlm) { + mcpActions(root) + } + return nil }) @@ -416,35 +420,6 @@ func NewRootCmd( panic(err) } - if err := rootContainer.Invoke(func(alphaFeatureManager *alpha.FeatureManager) error { - llmEnabledError := llm.IsLlmFeatureEnabled(alphaFeatureManager) - if llmEnabledError != nil { - root.Add("mcp", &actions.ActionDescriptorOptions{ - Command: &cobra.Command{ - RunE: func(cmd *cobra.Command, args []string) error { - return llmEnabledError - }, - }, - }) - } else { - root.Add("mcp", &actions.ActionDescriptorOptions{ - Command: newMcpCmd(), - FlagsResolver: newMcpFlags, - ActionResolver: newMcpAction, - HelpOptions: actions.ActionHelpOptions{ - Description: getCmdMcpHelpDescription, - Footer: getCmdMcpHelpFooter, - }, - GroupingOptions: actions.CommandGroupOptions{ - RootLevelHelp: actions.CmdGroupAlpha, - }, - }) - } 
- return nil - }); err != nil { - panic(fmt.Errorf("Failed to initialize LLM feature: %w", err)) - } - // Initialize the platform specific components for the IoC container // Only container resolution errors will return an error // Invalid configurations will fall back to default platform diff --git a/cli/azd/pkg/llm/manager.go b/cli/azd/pkg/llm/manager.go index 8ba4f88bcff..c03ac5b717b 100644 --- a/cli/azd/pkg/llm/manager.go +++ b/cli/azd/pkg/llm/manager.go @@ -18,9 +18,9 @@ func IsLlmFeatureEnabled(alphaManager *alpha.FeatureManager) error { if alphaManager == nil { panic("alphaManager cannot be nil") } - if !alphaManager.IsEnabled(featureLlm) { + if !alphaManager.IsEnabled(FeatureLlm) { return fmt.Errorf("the LLM feature is not enabled. Please enable it using the command: \"%s\"", - alpha.GetEnableCommand(featureLlm)) + alpha.GetEnableCommand(FeatureLlm)) } return nil } From 584ae77fb5e7feb54e47203373923d0762cad71e Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Mon, 18 Aug 2025 13:06:42 -0700 Subject: [PATCH 074/116] Updates tool prompts --- .../tools/prompts/azd_iac_generation_rules.md | 1 + .../prompts/azd_infrastructure_generation.md | 5 ++- .../mcp/tools/prompts/azd_plan_init.md | 15 ++++--- .../tools/prompts/azd_project_validation.md | 42 +++++++------------ 4 files changed, 31 insertions(+), 32 deletions(-) diff --git a/cli/azd/internal/mcp/tools/prompts/azd_iac_generation_rules.md b/cli/azd/internal/mcp/tools/prompts/azd_iac_generation_rules.md index 5119022b711..d472440046c 100644 --- a/cli/azd/internal/mcp/tools/prompts/azd_iac_generation_rules.md +++ b/cli/azd/internal/mcp/tools/prompts/azd_iac_generation_rules.md @@ -53,6 +53,7 @@ **Security and Compliance:** - **FORBIDDEN:** Hard-code secrets, connection strings, or sensitive values +- **REQUIRED:** Use latest API versions and schema for all bicep resource types using available tools - **REQUIRED:** Use Key Vault references for secrets - **REQUIRED:** Enable diagnostic settings and logging where applicable - **REQUIRED:** Follow principle of least privilege for managed identities diff --git a/cli/azd/internal/mcp/tools/prompts/azd_infrastructure_generation.md b/cli/azd/internal/mcp/tools/prompts/azd_infrastructure_generation.md index f9f251f8f3c..b44f63962d8 100644 --- a/cli/azd/internal/mcp/tools/prompts/azd_infrastructure_generation.md +++ b/cli/azd/internal/mcp/tools/prompts/azd_infrastructure_generation.md @@ -29,7 +29,10 @@ 🧠 **Execution Guidelines** -**CRITICAL:** Use `azd_iac_generation_rules` tool first to get complete file structure, naming conventions, and compliance requirements. +**Use Tools:** + +- Use AZD IaC generation rules tool first to get complete file structure, naming conventions, and compliance requirements. +- Use Bicep Schema tool get get the latest API version and bicep schema for each resource type **Inventory Existing IaC Files:** diff --git a/cli/azd/internal/mcp/tools/prompts/azd_plan_init.md b/cli/azd/internal/mcp/tools/prompts/azd_plan_init.md index fceea6c0f96..e8bd2e1685c 100644 --- a/cli/azd/internal/mcp/tools/prompts/azd_plan_init.md +++ b/cli/azd/internal/mcp/tools/prompts/azd_plan_init.md @@ -46,14 +46,19 @@ **Phase 4: File Generation (Execute in Sequence)** -1. **Azure.yaml Configuration:** `azd_azure_yaml_generation` (Required for all projects) -2. **Infrastructure Templates:** `azd_infrastructure_generation` (Required for all projects) -3. 
**Docker Configurations:** `azd_docker_generation` (Required for containerizable services) +Using available tools - Generate the following files: + +1. **Docker Configurations:** Generate docker files (Required for containerizable services) +2. **Infrastructure Templates:** Generate IaC infrastructure templates (Required for all projects) +3. **Azure.yaml Configuration:** Generate `azure.yaml` file (Required for all projects) **Phase 5: Project Validation and Environment Setup** -- Tool: `azd_project_validation` -- Validates azure.yaml against schema, compiles Bicep templates +Using available tools - Perform and end-to-end AZD project validation + +- Validates azure.yaml against schema +- Validate AZD environment exists +- Validate infrastructure templates - Ensures AZD environment exists, tests packaging, validates deployment preview - Provides readiness confirmation diff --git a/cli/azd/internal/mcp/tools/prompts/azd_project_validation.md b/cli/azd/internal/mcp/tools/prompts/azd_project_validation.md index 67764111bcd..6771623a0d1 100644 --- a/cli/azd/internal/mcp/tools/prompts/azd_project_validation.md +++ b/cli/azd/internal/mcp/tools/prompts/azd_project_validation.md @@ -22,20 +22,19 @@ 🧠 **Execution Guidelines** -**CRITICAL REQUIREMENT:** Resolve ALL issues found during validation before proceeding to the next step. No validation step should be considered successful until all errors, warnings, and issues have been fully addressed. +**CRITICAL REQUIREMENT:** Resolve ALL issues found during validation before proceeding to the next step. +No validation step should be considered successful until all errors, warnings, and issues have been fully addressed. -**Pre-Validation Setup:** +**Validation Execution Steps:** -**0. Load Architecture Plan:** +**1. Load Architecture Plan:** - Read existing `azd-arch-plan.md` to understand current project architecture and context - Review any previous validation results or known issues - Understand the project structure and service configurations from the plan - **MANDATORY:** Must load and review architecture plan before starting validation -**Validation Execution Steps:** - -**1. Azure.yaml Schema Validation:** +**2. Azure.yaml Schema Validation:** - Check if `azure.yaml` exists in current directory - Validate `azure.yaml` against AZD schema using available tools @@ -43,14 +42,6 @@ - Verify service definitions and configurations are correct - **MANDATORY:** Fix ALL schema violations before proceeding -**2. Bicep Template Validation:** - -- Scan `./infra` directory for `.bicep` files using file search -- Execute `azd provision --preview --no-prompt` to validate infrastructure templates -- Verify all templates compile without errors and dependencies are correct -- **MANDATORY:** Fix ALL compilation errors before proceeding -- Clean up any generated `` files generated during bicep validation - **3. AZD Environment Validation:** - Execute `azd env list` to check available environments @@ -60,7 +51,15 @@ - Ensure `AZURE_SUBSCRIPTION_ID` azd environment variable is set to the users current Azure subscription - **MANDATORY:** Fix environment issues before proceeding -**4. Package Validation:** +**4. 
Bicep Template Validation:** + +- Scan `./infra` directory for `.bicep` files using file search +- Review AZD IaC generation rules and guidelines and resolve any all issues +- Execute `azd provision --preview --no-prompt` to validate infrastructure templates +- **MANDATORY:** Fix ALL compilation errors before proceeding +- Clean up any generated `` files generated during bicep validation + +**5. Package Validation:** - Execute `azd package --no-prompt` command and monitor output - Verify all service source paths are valid @@ -68,14 +67,6 @@ - Ensure all build artifacts are created correctly - **MANDATORY:** Fix ALL packaging errors before proceeding -**5. Deployment Preview Validation:** - -- Execute `azd provision --preview --no-prompt` command -- Verify Azure authentication is working -- Check resource group creation plan and Bicep module deployment -- Ensure parameter values are properly resolved -- **MANDATORY:** Fix ALL preview errors before proceeding - **Error Resolution Requirements:** - **Azure.yaml Schema Errors:** Validate azure.yaml using available tools @@ -88,12 +79,11 @@ - [ ] `azd-arch-plan.md` loaded and reviewed for project context - [ ] `azure.yaml` passes schema validation with NO errors or warnings -- [ ] ALL Bicep templates compile without errors or warnings - [ ] AZD environment exists and is properly configured with NO issues -- [ ] `azd package` completes without errors or warnings with ALL services packaging successfully +- [ ] ALL Bicep templates compile without errors or warnings - [ ] `azd provision --preview` completes without errors or warnings with ALL resources validating correctly +- [ ] `azd package` completes without errors or warnings with ALL services packaging successfully - [ ] ALL service configurations are valid with NO missing or incorrect settings - [ ] NO missing dependencies or configuration issues remain - [ ] Validation results added to existing `azd-arch-plan.md` while preserving existing content - [ ] Project confirmed ready for deployment with `azd up` - From 6f98217e6de8c6f641a5807b3c1fc1c2d1a1ca63 Mon Sep 17 00:00:00 2001 From: hemarina Date: Mon, 18 Aug 2025 14:46:22 -0700 Subject: [PATCH 075/116] add azd error troubleshooting tool md --- cli/azd/cmd/mcp.go | 1 + cli/azd/internal/cmd/provision.go | 84 +---- .../mcp/tools/azd_provision_common_error.go | 35 ++ .../azd_provision_error_troubleshooting.go | 36 ++ .../mcp/tools/prompts/azd_common_error.md | 14 + .../prompts/azd_error_troubleshooting.md | 268 ++++++++++++++ cli/azd/internal/mcp/tools/prompts/prompts.go | 6 + cli/azd/pkg/agentRunner/agentRunner.go | 332 ------------------ cli/azd/pkg/llm/manager.go | 4 +- go.sum | 7 +- 10 files changed, 379 insertions(+), 408 deletions(-) create mode 100644 cli/azd/internal/mcp/tools/azd_provision_common_error.go create mode 100644 cli/azd/internal/mcp/tools/azd_provision_error_troubleshooting.go create mode 100644 cli/azd/internal/mcp/tools/prompts/azd_common_error.md create mode 100644 cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md delete mode 100644 cli/azd/pkg/agentRunner/agentRunner.go diff --git a/cli/azd/cmd/mcp.go b/cli/azd/cmd/mcp.go index 7e78c9e8a80..ff9e4f079f8 100644 --- a/cli/azd/cmd/mcp.go +++ b/cli/azd/cmd/mcp.go @@ -99,6 +99,7 @@ func (a *mcpStartAction) Run(ctx context.Context) (*actions.ActionResult, error) tools.NewAzdIacGenerationRulesTool(), tools.NewAzdProjectValidationTool(), tools.NewAzdYamlSchemaTool(), + tools.NewAzdErrorTroubleShootingTool(), ) // Start the server using stdio transport diff --git 
a/cli/azd/internal/cmd/provision.go b/cli/azd/internal/cmd/provision.go index 89d95d2fb3e..cc7bc5224f4 100644 --- a/cli/azd/internal/cmd/provision.go +++ b/cli/azd/internal/cmd/provision.go @@ -16,14 +16,12 @@ import ( "github.com/azure/azure-dev/cli/azd/cmd/actions" "github.com/azure/azure-dev/cli/azd/internal" "github.com/azure/azure-dev/cli/azd/pkg/account" - "github.com/azure/azure-dev/cli/azd/pkg/agentRunner" "github.com/azure/azure-dev/cli/azd/pkg/alpha" "github.com/azure/azure-dev/cli/azd/pkg/azapi" "github.com/azure/azure-dev/cli/azd/pkg/cloud" "github.com/azure/azure-dev/cli/azd/pkg/environment" "github.com/azure/azure-dev/cli/azd/pkg/infra/provisioning" "github.com/azure/azure-dev/cli/azd/pkg/input" - "github.com/azure/azure-dev/cli/azd/pkg/llm" "github.com/azure/azure-dev/cli/azd/pkg/output" "github.com/azure/azure-dev/cli/azd/pkg/output/ux" "github.com/azure/azure-dev/cli/azd/pkg/project" @@ -115,7 +113,6 @@ type ProvisionAction struct { importManager *project.ImportManager alphaFeatureManager *alpha.FeatureManager portalUrlBase string - llmManager llm.Manager } func NewProvisionAction( @@ -133,7 +130,6 @@ func NewProvisionAction( subManager *account.SubscriptionsManager, alphaFeatureManager *alpha.FeatureManager, cloud *cloud.Cloud, - llmManager llm.Manager, ) actions.Action { return &ProvisionAction{ flags: flags, @@ -150,7 +146,6 @@ func NewProvisionAction( importManager: importManager, alphaFeatureManager: alphaFeatureManager, portalUrlBase: cloud.PortalUrlBase, - llmManager: llmManager, } } @@ -163,54 +158,6 @@ func (p *ProvisionAction) SetFlags(flags *ProvisionFlags) { p.flags = flags } -func (p *ProvisionAction) errorWithSuggestion(ctx context.Context, originalError error) error { - // Show preview of the error - previewWriter := p.console.ShowPreviewer(ctx, - &input.ShowPreviewerOptions{ - Prefix: " ", - MaxLineCount: 20, - Title: "Error Preview", - }) - fmt.Fprintf(previewWriter, "%s", originalError.Error()) - - // Ask user if they want to get error suggestions from AI - selection, err := p.console.Select(ctx, input.ConsoleOptions{ - Message: "Do you want to get error suggestions from AI?", - Options: []string{ - "Yes", - "No", - }, - }) - - p.console.StopPreviewer(ctx, false) - - if err != nil { - return fmt.Errorf("prompting failed to get error suggestions: %w", err) - } - - switch selection { - case 0: // get error suggestion - // it takes around 30-60s - p.console.MessageUxItem(ctx, &ux.MessageTitle{ - Title: "Getting AI error suggestions", - TitleNote: "Getting AI error suggestions can take some time", - }) - - result, errSampling := agentRunner.Run(ctx, p.console, p.llmManager, originalError) - // If llm/sampling fails, we still want to return the original error - if errSampling != nil { - fmt.Printf("Not able to get AI error suggestions: %s\n", errSampling) - return originalError - } - - return &internal.ErrorWithSuggestion{Err: originalError, Suggestion: result} - case 1: // don't get error suggestion - return originalError - } - - return originalError -} - func (p *ProvisionAction) Run(ctx context.Context) (*actions.ActionResult, error) { if p.flags.noProgress { fmt.Fprintln( @@ -239,23 +186,23 @@ func (p *ProvisionAction) Run(ctx context.Context) (*actions.ActionResult, error startTime := time.Now() if err := p.projectManager.Initialize(ctx, p.projectConfig); err != nil { - return nil, p.errorWithSuggestion(ctx, err) + return nil, err } if err := p.projectManager.EnsureAllTools(ctx, p.projectConfig, nil); err != nil { - return nil, p.errorWithSuggestion(ctx, 
err) + return nil, err } infra, err := p.importManager.ProjectInfrastructure(ctx, p.projectConfig) if err != nil { - return nil, p.errorWithSuggestion(ctx, err) + return nil, err } defer func() { _ = infra.Cleanup() }() infraOptions := infra.Options infraOptions.IgnoreDeploymentState = p.flags.ignoreDeploymentState if err := p.provisionManager.Initialize(ctx, p.projectConfig.Path, infraOptions); err != nil { - return nil, p.errorWithSuggestion(ctx, fmt.Errorf("initializing provisioning manager: %w", err)) + return nil, fmt.Errorf("initializing provisioning manager: %w", err) } // Get Subscription to Display in Command Title Note @@ -317,18 +264,18 @@ func (p *ProvisionAction) Run(ctx context.Context) (*actions.ActionResult, error if p.formatter.Kind() == output.JsonFormat { stateResult, err := p.provisionManager.State(ctx, nil) if err != nil { - return nil, p.errorWithSuggestion(ctx, fmt.Errorf( + return nil, fmt.Errorf( "deployment failed and the deployment result is unavailable: %w", multierr.Combine(err, err), - )) + ) } if err := p.formatter.Format( provisioning.NewEnvRefreshResultFromState(stateResult.State), p.writer, nil); err != nil { - return nil, p.errorWithSuggestion(ctx, fmt.Errorf( + return nil, fmt.Errorf( "deployment failed and the deployment result could not be displayed: %w", multierr.Combine(err, err), - )) + ) } } @@ -366,8 +313,7 @@ func (p *ProvisionAction) Run(ctx context.Context) (*actions.ActionResult, error } } - return nil, p.errorWithSuggestion(ctx, fmt.Errorf("deployment failed: %w", err)) - + return nil, fmt.Errorf("deployment failed: %w", err) } if previewMode { @@ -400,7 +346,7 @@ func (p *ProvisionAction) Run(ctx context.Context) (*actions.ActionResult, error servicesStable, err := p.importManager.ServiceStable(ctx, p.projectConfig) if err != nil { - return nil, p.errorWithSuggestion(ctx, err) + return nil, err } for _, svc := range servicesStable { @@ -413,25 +359,25 @@ func (p *ProvisionAction) Run(ctx context.Context) (*actions.ActionResult, error } if err := svc.RaiseEvent(ctx, project.ServiceEventEnvUpdated, eventArgs); err != nil { - return nil, p.errorWithSuggestion(ctx, err) + return nil, err } } if p.formatter.Kind() == output.JsonFormat { stateResult, err := p.provisionManager.State(ctx, nil) if err != nil { - return nil, p.errorWithSuggestion(ctx, fmt.Errorf( + return nil, fmt.Errorf( "deployment succeeded but the deployment result is unavailable: %w", multierr.Combine(err, err), - )) + ) } if err := p.formatter.Format( provisioning.NewEnvRefreshResultFromState(stateResult.State), p.writer, nil); err != nil { - return nil, p.errorWithSuggestion(ctx, fmt.Errorf( + return nil, fmt.Errorf( "deployment succeeded but the deployment result could not be displayed: %w", multierr.Combine(err, err), - )) + ) } } diff --git a/cli/azd/internal/mcp/tools/azd_provision_common_error.go b/cli/azd/internal/mcp/tools/azd_provision_common_error.go new file mode 100644 index 00000000000..36f78f21fae --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_provision_common_error.go @@ -0,0 +1,35 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
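The new prompt-backed tools that follow are thin wrappers returning embedded markdown, which makes them straightforward to smoke-test. The test below is not part of this patch — just a minimal sketch of how such a check could look inside the tools package, assuming the zero-value mcp.CallToolRequest is acceptable to the handler.

```go
package tools

import (
	"context"
	"testing"

	"github.com/mark3labs/mcp-go/mcp"
)

// Smoke test: the handler should always return the embedded prompt text without error.
func TestCommonErrorToolReturnsPrompt(t *testing.T) {
	result, err := handleAzdCommonError(context.Background(), mcp.CallToolRequest{})
	if err != nil {
		t.Fatalf("handler returned error: %v", err)
	}
	if result == nil {
		t.Fatal("expected a non-nil tool result")
	}
}
```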
+ +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdCommonErrorTool creates a new azd common error tool +func NewAzdCommonErrorTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_common_error", + mcp.WithDescription( + `Returns instructions for diagnosing common error type and providing suggested actions for resolution. + +The LLM agent should execute these instructions using available tools. + +Use this tool when: +- Need to identify the type of error and get actionable suggestions +- Ready to troubleshoot errors`, + ), + ), + Handler: handleAzdCommonError, + } +} + +func handleAzdCommonError(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdCommonErrorPrompt), nil +} diff --git a/cli/azd/internal/mcp/tools/azd_provision_error_troubleshooting.go b/cli/azd/internal/mcp/tools/azd_provision_error_troubleshooting.go new file mode 100644 index 00000000000..80f8b803114 --- /dev/null +++ b/cli/azd/internal/mcp/tools/azd_provision_error_troubleshooting.go @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tools + +import ( + "context" + + "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +// NewAzdErrorTroubleShootingTool creates a new azd error troubleshooting tool +func NewAzdErrorTroubleShootingTool() server.ServerTool { + return server.ServerTool{ + Tool: mcp.NewTool( + "azd_error_troubleshooting", + mcp.WithDescription( + `Returns instructions for diagnosing any error from azd commands and providing suggested actions for resolution. + +The LLM agent should execute these instructions using available tools. + +Use this tool when: +- Error occurs when running azd commands +- Need to identify the type of error and get actionable suggestions +- Ready to troubleshoot errors`, + ), + ), + Handler: handleAzdErrorTroubleShooting, + } +} + +func handleAzdErrorTroubleShooting(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return mcp.NewToolResultText(prompts.AzdErrorTroubleShootingPrompt), nil +} diff --git a/cli/azd/internal/mcp/tools/prompts/azd_common_error.md b/cli/azd/internal/mcp/tools/prompts/azd_common_error.md new file mode 100644 index 00000000000..745fddd7146 --- /dev/null +++ b/cli/azd/internal/mcp/tools/prompts/azd_common_error.md @@ -0,0 +1,14 @@ +# AZD Provision Common Error Resolution Instructions + +✅ **Agent Task List** + + +📄 **Required Outputs** + +- +🧠 **Execution Guidelines** + + +📌 **Completion Checklist** + +- [ ] Error message clearly understood and root cause identified \ No newline at end of file diff --git a/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md b/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md new file mode 100644 index 00000000000..5ac3867e4c7 --- /dev/null +++ b/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md @@ -0,0 +1,268 @@ +# AZD Error Troubleshooting and Resolution Instructions + +✅ **Agent Task List** + +1. **Error Classification:** Identify the specific error type (Azure REST API, ARM Deployment, Authentication, Local Tool Installation or General) +2. **Error Analysis:** Explain what the error means and its root causes +3. 
**Troubleshooting Steps:** Provide manual, Azure Portal and Azure CLI-based solutions only if user installed Azure CLI +4. **Infrastructure Fixes:** Suggest specific Bicep or Terraform file corrections based on user's infra folder +5. **Verification:** Provide Azure Portal to validate fixes and Azure CLI-based solutions only if user installed Azure CLI +6. **Resolution Confirmation:** Ensure the issue is fully resolved. If issue still exists, retry the task list to fix the error. + +📄 **Required Outputs** + +- Clear error explanation and root cause analysis +- Step-by-step troubleshooting instructions +- Specific infrastructure code fixes for Bicep or Terraform files based on user usage +- Azure Portal navigation instructions for verification +- Azure CLI commands for validation and testing if user installed Azure CLI +- Actionable next steps for resolution + +🧠 **Execution Guidelines** + +## Azure REST API Response Errors + +**Error Pattern:** HTTP status codes (400, 401, 403, 404, 429, 500, etc.) with Azure error codes + +**Troubleshooting Approach:** + +1. **Error Analysis** + - Decode the HTTP status code meaning + - Interpret the Azure-specific error code + - Identify affected Azure resource or service + +2. **Manual Troubleshooting Steps** + - Provide manual Troubleshooting Steps for Azure Portal + - Check Azure Portal for resource status + - Verify resource quotas and limits + - Review subscription and resource group permissions + - Validate resource naming conventions and conflicts + +3. **If user installed Azure CLI, Azure CLI Troubleshooting Steps. Otherwise use azure portal instructions** + ```bash + # Check subscription and tenant + az account show + az account list + + # Verify resource group permissions + az role assignment list --resource-group + + # Check quota usage + az vm list-usage --location + az network list-usages --location + ``` + +4. **Infrastructure Code Fixes** + - **Bicep Files:** Correct resource names, SKUs, locations, dependencies + - **Terraform Files:** Fix provider configurations, resource arguments, data sources + - Update parameter files with valid values + +5. **Verification Commands if user installed Azure CLI. Otherwise skip this part** + ```bash + # Validate Bicep templates + az bicep build --file main.bicep + az deployment group validate --resource-group --template-file main.bicep + + # Validate Terraform configurations + terraform validate + terraform plan + ``` + +## Azure ARM Deployment Errors + +**Error Pattern:** Deployment validation failures, resource provisioning errors, template errors + +**Troubleshooting Approach:** + +1. **Error Analysis** + - Identify failing deployment operation + - Locate specific resource causing failure + - Review deployment validation messages + +2. **Manual Troubleshooting Steps** + - Navigate to Azure Portal → Resource Groups → Deployments + - Review failed deployment details and error messages + - Check resource dependencies and prerequisite resources + - Verify template parameter values + +3. **If user installed Azure CLI, Azure CLI Troubleshooting Steps. Otherwise use azure portal instructions** + ```bash + # List recent deployments + az deployment group list --resource-group + + # Get deployment details + az deployment group show --name --resource-group + + # Check deployment operations + az deployment operation group list --name --resource-group + ``` + +4. 
**Infrastructure Code Fixes** + - **Bicep Files:** + - Fix template syntax errors + - Correct resource property values + - Update API versions + - Fix parameter and variable references + - Resolve dependency chains + + - **Terraform Files:** + - Correct resource configurations + - Fix provider version constraints + - Update data source queries + - Resolve resource dependencies + +5. **Verification Commands if user installed Azure CLI. Otherwise skip this part** + ```bash + # Test deployment in validate-only mode + az deployment group validate --resource-group --template-file main.bicep --parameters @parameters.json + + # Deploy with what-if analysis + az deployment group what-if --resource-group --template-file main.bicep --parameters @parameters.json + ``` + +## Azure Authentication Errors + +**Error Pattern:** Authentication failures, token expiration, permission denied, tenant/subscription issues + +**Troubleshooting Approach:** + +1. **Error Analysis** + - Identify authentication method in use (device code, service principal, managed identity, interactive) + - Determine if issue is token expiration, insufficient permissions, or configuration + +2. **Manual Troubleshooting Steps** + - Check Azure Portal → Azure Active Directory → Users/Service Principals + - Verify subscription access and role assignments + - Review tenant and subscription IDs + +3. **AZD Authentication Commands** + ```bash + # Clear current authentication + azd auth logout + + # Re-authenticate with device code + azd auth login + + # Login with specific tenant + azd auth login --tenant-id + + # Check current authentication status + azd auth login --check-status + ``` + +4. **Environment Variable Verification** + - Check Azure-related environment variables in .azure folder + +## Local Tool Installation Errors + +**Error Pattern:** Missing or incorrectly installed local development tools (Docker, Node.js, Python, .NET, etc.) + +**Troubleshooting Approach:** + +1. **Error Analysis** + - Identify which local tool is missing or misconfigured + - Determine if it's a PATH issue, version incompatibility, or complete absence + - Check if tool is required for specific service in azure.yaml + +2. **Manual Troubleshooting Steps** + - Verify tool installation by checking system PATH + - Check installed version against azd requirements + - Review azure.yaml for specific tool version requirements + - Validate tool configuration and permissions + +3. 
**Tool-Specific Installation and Verification** + + **Docker:** + ```bash + # Check Docker installation + docker --version + docker info + + # Verify Docker daemon is running + docker ps + ``` + - Windows: Install Docker Desktop from docker.com + - macOS: Install Docker Desktop from docker.com + - Linux: Follow distribution-specific Docker installation guide + + **Node.js and npm:** + ```bash + # Check Node.js installation + node --version + npm --version + ``` + - Download from nodejs.org (LTS version recommended) + - Verify npm is included with Node.js installation + + **Python:** + ```bash + # Check Python installation + python --version + python3 --version + pip --version + ``` + - Download from python.org (3.8+ recommended) + - Ensure pip is installed and updated + + **.NET:** + ```bash + # Check .NET installation + dotnet --version + dotnet --list-sdks + ``` + - Download from dotnet.microsoft.com + - Install appropriate SDK version for your project + + **Git:** + ```bash + # Check Git installation + git --version + ``` + - Download from git-scm.com + - Configure user name and email after installation + +4. **PATH and Environment Configuration** + ```bash + # Check PATH environment variable + echo $PATH # Linux/macOS + echo %PATH% # Windows + ``` + +5. **Tool Version Compatibility Verification** + - Check azd documentation for minimum supported versions + - Update tools to compatible versions if needed + - Verify tool integration with azd project requirements + +6. **Post-Installation Verification** + ```bash + # Test azd provision with preview + azd provision --preview + ``` + +## General AZD Errors + +**Error Pattern:** Miscellaneous errors not falling into above categories + +**Troubleshooting Approach:** + +1. **Error Analysis** + - Review error message for specific component failure + - Identify if error is related to configuration, dependencies, or environment + - Provide solution based on error analysis + +2. **Common Resolution Patterns** + +- **Quota Exceeded:** Request quota increase in Azure Portal +- **Permission Denied:** Add required role assignments through Azure Portal +- **Resource Name Conflicts:** Update names in Bicep or Terraform files with unique suffixes +- **API Version Issues:** Update to latest stable API versions in templates +- **Location Constraints:** Verify service availability in target Azure region + +📌 **Completion Checklist** + +- [ ] Error message clearly understood and root cause identified +- [ ] Appropriate troubleshooting steps executed successfully +- [ ] Infrastructure code corrections implemented and validated +- [ ] Azure Portal verification completed for affected resources +- [ ] Azure CLI commands confirm successful resolution if user installed Azure CLI. Otherwise, skip this step. 
+- [ ] AZD command completes without errors diff --git a/cli/azd/internal/mcp/tools/prompts/prompts.go b/cli/azd/internal/mcp/tools/prompts/prompts.go index 7273140321a..d597aaf061e 100644 --- a/cli/azd/internal/mcp/tools/prompts/prompts.go +++ b/cli/azd/internal/mcp/tools/prompts/prompts.go @@ -33,3 +33,9 @@ var AzdDockerGenerationPrompt string //go:embed azd_project_validation.md var AzdProjectValidationPrompt string + +//go:embed azd_provision_error_troubleshooting.md +var AzdErrorTroubleShootingPrompt string + +//go:embed azd_common_error.md +var AzdCommonErrorPrompt string diff --git a/cli/azd/pkg/agentRunner/agentRunner.go b/cli/azd/pkg/agentRunner/agentRunner.go deleted file mode 100644 index 7c7487892e4..00000000000 --- a/cli/azd/pkg/agentRunner/agentRunner.go +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package agentRunner - -import ( - "context" - "errors" - "fmt" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/azure/azure-dev/cli/azd/pkg/auth" - "github.com/azure/azure-dev/cli/azd/pkg/azapi" - "github.com/azure/azure-dev/cli/azd/pkg/input" - "github.com/azure/azure-dev/cli/azd/pkg/llm" - langchaingo_mcp_adapter "github.com/i2y/langchaingo-mcp-adapter" - "github.com/mark3labs/mcp-go/client" - "github.com/mark3labs/mcp-go/client/transport" - "github.com/mark3labs/mcp-go/mcp" - "github.com/tmc/langchaingo/agents" - "github.com/tmc/langchaingo/chains" - "github.com/tmc/langchaingo/llms" - "github.com/tmc/langchaingo/schema" -) - -type samplingHandler struct { - llmClient llms.Model - console input.Console -} - -func (s *samplingHandler) CreateMessage( - ctx context.Context, request mcp.CreateMessageRequest) (*mcp.CreateMessageResult, error) { - // Enhanced logging for debugging - log.Printf("🔬 MCP Sampling Request received!\n") - log.Printf(" Request ID: %v\n", ctx.Value("requestId")) - log.Printf(" Max tokens: %d\n", request.MaxTokens) - log.Printf(" Temperature: %f\n", request.Temperature) - log.Printf(" Model preferences: %v\n", request.ModelPreferences) - log.Printf(" Number of messages: %d\n", len(request.Messages)) - - // Debug: Print message details - for i, msg := range request.Messages { - log.Printf(" Message %d: Role=%s, Content=%v\n", i, msg.Role, msg.Content) - } - - // Convert MCP messages to LLM format - var llmMessages []llms.MessageContent - for _, msg := range request.Messages { - var content []llms.ContentPart - - // Handle the Content field which can be different types - switch contentType := msg.Content.(type) { - case mcp.TextContent: - log.Printf(" Processing TextContent: %s\n", contentType.Text) - content = append(content, llms.TextPart(contentType.Text)) - case string: - log.Printf(" Processing string content: %s\n", contentType) - content = append(content, llms.TextPart(contentType)) - default: - // Try to convert to string as fallback - contentStr := fmt.Sprintf("%v", msg.Content) - log.Printf(" Processing unknown content type: %s\n", contentStr) - content = append(content, llms.TextPart(contentStr)) - } - - // Map MCP roles to LLM roles - var role llms.ChatMessageType - switch msg.Role { - case mcp.RoleUser: - role = llms.ChatMessageTypeHuman - case mcp.RoleAssistant: - role = llms.ChatMessageTypeAI - default: - role = llms.ChatMessageTypeSystem - } - - llmMessages = append(llmMessages, llms.MessageContent{ - Role: role, - Parts: content, - }) - } - - // Generate response using the LLM - log.Printf("🧠 Generating response with LLM (messages: 
%d)...\n", len(llmMessages)) - response, err := s.llmClient.GenerateContent(ctx, llmMessages) - if err != nil { - log.Printf("❌ LLM generation error: %v\n", err) - return nil, fmt.Errorf("failed to generate LLM response: %w", err) - } - - // Extract text from the response - var responseText string - if len(response.Choices) > 0 && len(response.Choices[0].Content) > 0 { - // Convert the response content to string - responseText = string(response.Choices[0].Content) - log.Printf("📝 Raw LLM response: %s\n", responseText) - } - - if responseText == "" { - responseText = "No response generated" - log.Printf("⚠️ Using fallback response\n") - } - - log.Printf("✅ LLM response generated (length: %d): %s\n", len(responseText), responseText[:min(100, len(responseText))]) - - // Return the MCP result using the same format as the MCP server - result := &mcp.CreateMessageResult{ - SamplingMessage: mcp.SamplingMessage{ - Role: mcp.RoleAssistant, - Content: responseText, - }, - Model: "llm-delegated", - } - - log.Printf("🎯 Returning sampling result with model: %s\n", result.Model) - return result, nil -} - -// Helper function for min -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func Run(ctx context.Context, console input.Console, llmManager llm.Manager, errNeedSuggestion error) (string, error) { - llmInfo, err := llmManager.Info(console.GetWriter()) - if err != nil { - return "", fmt.Errorf("failed to load LLM info: %w", err) - } - llClient, err := llm.LlmClient(llmInfo) - if err != nil { - return "", fmt.Errorf("failed to create LLM client: %w", err) - } - - // Create a callback handler to log agent steps - callbackHandler := &agentLogHandler{console: console} - - // defer mcpClient.Close() - t := transport.NewStdioWithOptions("C:\\Users\\hemarina\\Downloads\\vhvb1989\\azure-dev\\cli\\azd\\tools\\mcp\\mcp.exe", nil, nil) - // Create sampling handler with LLM client - samplingHandler := &samplingHandler{ - llmClient: llClient, - console: console, - } - - mcpClient := client.NewClient(t, client.WithSamplingHandler(samplingHandler)) - if err := mcpClient.Start(ctx); err != nil { - return "", fmt.Errorf("failed to start MCP client: %w", err) - } - defer mcpClient.Close() - - log.Println("🔌 MCP client created with sampling handler") - - // Create adapter - adapter, err := langchaingo_mcp_adapter.New(mcpClient) - - if err != nil { - log.Fatalf("Failed to create adapter: %v", err) - } - - // Load tools from MCP server - tools, err := adapter.Tools() - if err != nil { - log.Fatalf("Failed to get tools: %v", err) - } - - agent := agents.NewOneShotAgent(llClient, tools, agents.WithCallbacksHandler(callbackHandler)) - - executor := agents.NewExecutor(agent) - - log.Println("🤖 Starting AI agent execution...") - log.Printf(" Agent has %d tools available from MCP server\n", len(tools)) - log.Println(" Sampling handler is configured for MCP tool requests") - - // ask the agent to describe - // instructions to the error - input := promptingWithDifferentErrors(errNeedSuggestion) - - answer, err := chains.Run(ctx, executor, input, - chains.WithTemperature(0.0), - ) - if err != nil { - return "", fmt.Errorf("failed to exe: %w", err) - } - log.Println("✅ AI agent execution completed") - - return answer, nil -} - -// agentLogHandler implements callbacks.Handler to log agent execution steps -type agentLogHandler struct { - console input.Console - step int -} - -// HandleLLMGenerateContentStart implements callbacks.Handler. 
-func (h *agentLogHandler) HandleLLMGenerateContentStart(ctx context.Context, ms []llms.MessageContent) { -} - -// HandleRetrieverEnd implements callbacks.Handler. -func (h *agentLogHandler) HandleRetrieverEnd(ctx context.Context, query string, documents []schema.Document) { -} - -// HandleRetrieverStart implements callbacks.Handler. -func (h *agentLogHandler) HandleRetrieverStart(ctx context.Context, query string) { -} - -// HandleStreamingFunc implements callbacks.Handler. -func (h *agentLogHandler) HandleStreamingFunc(ctx context.Context, chunk []byte) { - // use console to stream output - if len(chunk) > 0 { - // Print the chunk to the console - log.Print(string(chunk)) - } -} - -func (h *agentLogHandler) HandleLLMStart(ctx context.Context, prompts []string) { - h.step++ - log.Printf("🧠 Step %d: LLM processing...\n", h.step) - if len(prompts) > 0 && len(prompts[0]) < 200 { - log.Printf(" Prompt: %s\n", prompts[0]) - } -} - -func (h *agentLogHandler) HandleLLMError(ctx context.Context, err error) { - log.Printf("❌ Step %d: LLM error: %v\n", h.step, err) -} - -func (h *agentLogHandler) HandleChainStart(ctx context.Context, inputs map[string]any) { - log.Println("🚀 Agent chain started") -} - -func (h *agentLogHandler) HandleChainEnd(ctx context.Context, outputs map[string]any) { - log.Println("🏁 Agent chain completed") -} - -func (h *agentLogHandler) HandleChainError(ctx context.Context, err error) { - log.Printf("💥 Agent chain error: %v\n", err) -} - -func (h *agentLogHandler) HandleToolStart(ctx context.Context, input string) { - log.Printf("🔧 Using tool with input: %s\n", input) - if input != "" && len(input) < 100 { - log.Printf(" Input: %s\n", input) - } -} - -func (h *agentLogHandler) HandleToolEnd(ctx context.Context, output string) { - if output != "" && len(output) < 150 { - log.Printf(" Output: %s\n", output) - } else { - log.Println(" Tool completed") - } -} - -func (h *agentLogHandler) HandleToolError(ctx context.Context, err error) { - log.Printf(" ❌ Tool error: %v\n", err) -} - -func (h *agentLogHandler) HandleText(ctx context.Context, text string) { - if text != "" && len(text) < 200 { - log.Printf("💭 Agent thinking: %s\n", text) - } -} - -func (h *agentLogHandler) HandleAgentAction(ctx context.Context, action schema.AgentAction) { - log.Printf("🎯 Agent action: %s\n", action.Tool) - if action.ToolInput != "" && len(action.ToolInput) < 100 { - log.Printf(" Tool input: %s\n", action.ToolInput) - } -} - -func (h *agentLogHandler) HandleAgentFinish(ctx context.Context, finish schema.AgentFinish) { - log.Println("🏆 Agent finished successfully") - if finish.ReturnValues != nil { - log.Printf(" Final output: %v\n", finish.ReturnValues) - } -} - -func (h *agentLogHandler) HandleLLMGenerateContentEnd(ctx context.Context, response *llms.ContentResponse) { - log.Println("✨ LLM content generation completed") -} - -func promptingWithDifferentErrors(err error) string { - var respErr *azcore.ResponseError - var armDeployErr *azapi.AzureDeploymentError - var authFailedErr *auth.AuthFailedError - if errors.As(err, &respErr) { - return fmt.Sprintf(`I'm using Azure Developer CLI (azd) and encountered an Azure HTTP response error: %s - -This appears to be an Azure REST API error with status code %d and error code '%s'. Please: - -1. Explain what this specific error means and why it occurred -2. Provide step-by-step troubleshooting instructions without az cli command and instructions with az cli command -3. 
Suggest specific fixes for Bicep files and Terraform files if this is infrastructure provisioning related -4. If this involves Azure resource permissions, quotas, or configuration issues, provide the exact azure portal instructions and az cli commands to verify the changes from bicep or terraform files works -5. Provide suggestions only if this requires changes to Azure subscription settings, resource group permissions, or service principal setup - -Focus on actionable solutions rather than general advice.`, - err.Error(), respErr.StatusCode, respErr.ErrorCode) - } else if errors.As(err, &armDeployErr) { - return fmt.Sprintf(`I'm using Azure Developer CLI (azd) and encountered an Azure deployment error: %s - -This is a deployment validation or provisioning failure. Please: - -1. Explain what this specific error means and why it occurred -2. Provide step-by-step troubleshooting instructions without az cli command and instructions with az cli command -3. Suggest specific fixes for Bicep files and Terraform files -4. Provide the exact azure portal instructions and az cli commands to verify the suggested changes from bicep or terraform files works - -Focus on actionable solutions rather than general advice.`, - err.Error()) - } else if errors.As(err, &authFailedErr) { - // We should move this part under azd auth command - return fmt.Sprintf(`I'm using Azure Developer CLI (azd) and encountered an authentication error: %s. Please: - -1. Explain what this specific Azure authentication error means and common causes. -2. Identify which auth method is failing (device code, service principal, managed identity, interactive) and what should I do to fix it. -3. Provide specific azd auth commands to re-authenticate: - - azd auth logout - - azd auth login -4. Ensure correct tenant and subscription are selected -5. Verify Azure-related environment variables are correct - -Focus on actionable solutions rather than general advice.`, err.Error()) - } - - return fmt.Sprintf("I'm using Azure Developer CLI (azd) and I encountered an error: %s. Explain the error and what should I do next to fix it. Focus on actionable solutions rather than general advice.", err.Error()) -} diff --git a/cli/azd/pkg/llm/manager.go b/cli/azd/pkg/llm/manager.go index 8ba4f88bcff..c03ac5b717b 100644 --- a/cli/azd/pkg/llm/manager.go +++ b/cli/azd/pkg/llm/manager.go @@ -18,9 +18,9 @@ func IsLlmFeatureEnabled(alphaManager *alpha.FeatureManager) error { if alphaManager == nil { panic("alphaManager cannot be nil") } - if !alphaManager.IsEnabled(featureLlm) { + if !alphaManager.IsEnabled(FeatureLlm) { return fmt.Errorf("the LLM feature is not enabled. 
Please enable it using the command: \"%s\"", - alpha.GetEnableCommand(featureLlm)) + alpha.GetEnableCommand(FeatureLlm)) } return nil } diff --git a/go.sum b/go.sum index 4981970120f..f7ef6a2a831 100644 --- a/go.sum +++ b/go.sum @@ -143,8 +143,6 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs= @@ -164,9 +162,6 @@ github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNE github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cli/browser v1.3.0 h1:LejqCrpWr+1pRqmEPDGnTZOjsMe7sehifLynZJuqJpo= github.com/cli/browser v1.3.0/go.mod h1:HH8s+fOAxjhQoBUAsKuPCbqUuxZDhQ2/aD+SzsEfBTk= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -264,6 +259,8 @@ github.com/goph/emperror v0.17.2 h1:yLapQcmEsO0ipe9p5TaN22djm3OFV/TfM/fcYP0/J18= github.com/goph/emperror v0.17.2/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic= github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= +github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8= +github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= From 8c0f63149a373626dde9a8565505b04b940d7806 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Tue, 19 Aug 2025 09:33:54 -0700 Subject: [PATCH 076/116] wip --- cli/azd/cmd/init.go | 105 ++++++++++++--- cli/azd/internal/agent/agent.go | 13 ++ cli/azd/internal/agent/agent_factory.go | 22 +++- cli/azd/internal/agent/consent/checker.go | 8 +- .../internal/agent/logging/chained_handler.go | 121 ++++++++++++++++++ .../internal/agent/logging/thought_logger.go | 119 +++++++++++++++++ cli/azd/pkg/ux/canvas.go | 32 +++++ cli/azd/pkg/ux/confirm.go | 3 + cli/azd/pkg/ux/multi_select.go | 3 
+ cli/azd/pkg/ux/prompt.go | 3 + cli/azd/pkg/ux/select.go | 3 + 11 files changed, 405 insertions(+), 27 deletions(-) create mode 100644 cli/azd/internal/agent/logging/chained_handler.go create mode 100644 cli/azd/internal/agent/logging/thought_logger.go diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go index c3ebec7116e..eaaad10d141 100644 --- a/cli/azd/cmd/init.go +++ b/cli/azd/cmd/init.go @@ -383,6 +383,8 @@ func (i *initAction) initAppWithAgent(ctx context.Context) error { return err } + agentThoughts := azdAgent.Thoughts() + defer cleanup() type initStep struct { @@ -399,27 +401,27 @@ Do not stop until all tasks are complete and fully resolved. initSteps := []initStep{ { - Name: "Running Discovery & Analysis", + Name: "Step 1: Running Discovery & Analysis", Description: "Run a deep discovery and analysis on the current working directory.", }, { - Name: "Generating Architecture Plan", + Name: "Step 2: Generating Architecture Plan", Description: "Create a high-level architecture plan for the application.", }, { - Name: "Generating Dockerfile(s)", + Name: "Step 3: Generating Dockerfile(s)", Description: "Generate a Dockerfile for the application components as needed.", }, { - Name: "Generating infrastructure", + Name: "Step 4: Generating infrastructure", Description: "Generate infrastructure as code (IaC) for the application.", }, { - Name: "Generating azure.yaml file", + Name: "Step 5: Generating azure.yaml file", Description: "Generate an azure.yaml file for the application.", }, { - Name: "Validating project", + Name: "Step 6: Validating project", Description: "Validate the project structure and configuration.", }, } @@ -427,25 +429,36 @@ Do not stop until all tasks are complete and fully resolved. for idx, step := range initSteps { // Collect and apply feedback for next steps if idx > 0 { + feedbackMsg := fmt.Sprintf("Any feedback before continuing to %s?", step.Name) if err := i.collectAndApplyFeedback( ctx, azdAgent, - "Any feedback before continuing to the next step?", + agentThoughts, + feedbackMsg, ); err != nil { return err } } // Run Step - i.console.ShowSpinner(ctx, step.Name, input.Step) + i.console.Message(ctx, color.MagentaString(step.Name)) fullTaskInput := fmt.Sprintf(taskInput, strings.Join([]string{ step.Description, "Provide a very brief summary in markdown format that includes any files generated during this step.", }, "\n")) + thoughtsCtx, cancelThoughts := context.WithCancel(ctx) + cleanup, err := renderThoughts(thoughtsCtx, agentThoughts) + if err != nil { + cancelThoughts() + return err + } + agentOutput, err := azdAgent.SendMessage(ctx, fullTaskInput) + cancelThoughts() + cleanup() + if err != nil { - i.console.StopSpinner(ctx, fmt.Sprintf("%s (With errors)", step.Name), input.StepWarning) if agentOutput != "" { i.console.Message(ctx, output.WithMarkdown(agentOutput)) } @@ -453,7 +466,6 @@ Do not stop until all tasks are complete and fully resolved. return err } - i.console.StopSpinner(ctx, step.Name, input.StepDone) i.console.Message(ctx, "") i.console.Message(ctx, fmt.Sprintf("%s:", output.AzdAgentLabel())) i.console.Message(ctx, output.WithMarkdown(agentOutput)) @@ -461,7 +473,7 @@ Do not stop until all tasks are complete and fully resolved. } // Post-completion feedback loop - if err := i.postCompletionFeedbackLoop(ctx, azdAgent); err != nil { + if err := i.postCompletionFeedbackLoop(ctx, azdAgent, agentThoughts); err != nil { return err } @@ -472,6 +484,7 @@ Do not stop until all tasks are complete and fully resolved. 
func (i *initAction) collectAndApplyFeedback( ctx context.Context, azdAgent agent.Agent, + agentThoughts <-chan string, promptMessage string, ) error { // Loop to allow multiple rounds of feedback @@ -493,7 +506,7 @@ func (i *initAction) collectAndApplyFeedback( } userInputPrompt := uxlib.NewPrompt(&uxlib.PromptOptions{ - Message: "💭 You", + Message: "You", PlaceHolder: "Provide feedback or changes to the project", Required: true, IgnoreHintKeys: true, @@ -507,17 +520,26 @@ func (i *initAction) collectAndApplyFeedback( i.console.Message(ctx, "") if userInput != "" { - i.console.ShowSpinner(ctx, "Submitting feedback", input.Step) + i.console.Message(ctx, color.MagentaString("Feedback")) + + thoughtsCtx, cancelThoughts := context.WithCancel(ctx) + cleanup, err := renderThoughts(thoughtsCtx, agentThoughts) + if err != nil { + cancelThoughts() + return err + } + feedbackOutput, err := azdAgent.SendMessage(ctx, userInput) + cancelThoughts() + cleanup() + if err != nil { - i.console.StopSpinner(ctx, "Submitting feedback (With errors)", input.StepWarning) if feedbackOutput != "" { i.console.Message(ctx, output.WithMarkdown(feedbackOutput)) } return err } - i.console.StopSpinner(ctx, "Submitting feedback", input.StepDone) i.console.Message(ctx, "") i.console.Message(ctx, fmt.Sprintf("%s:", output.AzdAgentLabel())) i.console.Message(ctx, output.WithMarkdown(feedbackOutput)) @@ -529,12 +551,16 @@ func (i *initAction) collectAndApplyFeedback( } // postCompletionFeedbackLoop provides a final opportunity for feedback after all steps complete -func (i *initAction) postCompletionFeedbackLoop(ctx context.Context, azdAgent agent.Agent) error { +func (i *initAction) postCompletionFeedbackLoop( + ctx context.Context, + azdAgent agent.Agent, + agentThoughts <-chan string, +) error { i.console.Message(ctx, "") i.console.Message(ctx, "🎉 All initialization steps completed!") i.console.Message(ctx, "") - return i.collectAndApplyFeedback(ctx, azdAgent, "Any additional feedback or changes you'd like to make?") + return i.collectAndApplyFeedback(ctx, azdAgent, agentThoughts, "Any final feedback or changes?") } type initType int @@ -547,6 +573,51 @@ const ( initWithAgent ) +func renderThoughts(ctx context.Context, agentThoughts <-chan string) (func(), error) { + var latestThought string + spinner := uxlib.NewSpinner(&uxlib.SpinnerOptions{ + Text: "Thinking...", + }) + + canvas := uxlib.NewCanvas( + spinner, + uxlib.NewVisualElement(func(printer uxlib.Printer) error { + printer.Fprintln() + printer.Fprintln() + + if latestThought != "" { + printer.Fprintln(color.HiBlackString(latestThought)) + printer.Fprintln() + printer.Fprintln() + } + + return nil + })) + + go func() { + defer canvas.Clear() + + for { + select { + case thought := <-agentThoughts: + latestThought = thought + case <-ctx.Done(): + latestThought = "" + return + case <-time.After(200 * time.Millisecond): + } + + canvas.Update() + } + }() + + cleanup := func() { + canvas.Clear() + } + + return cleanup, canvas.Run() +} + func promptInitType(console input.Console, ctx context.Context, featuresManager *alpha.FeatureManager) (initType, error) { options := []string{ "Scan current directory", // This now covers minimal project creation too diff --git a/cli/azd/internal/agent/agent.go b/cli/azd/internal/agent/agent.go index f11c11c6dc0..2339c7b511a 100644 --- a/cli/azd/internal/agent/agent.go +++ b/cli/azd/internal/agent/agent.go @@ -22,10 +22,17 @@ type agentBase struct { executor *agents.Executor tools []common.AnnotatedTool callbacksHandler 
callbacks.Handler + thoughtChan chan string } type Agent interface { SendMessage(ctx context.Context, args ...string) (string, error) + Thoughts() <-chan string +} + +// Thoughts returns a channel for receiving thoughts generated by the agent +func (a *agentBase) Thoughts() <-chan string { + return a.thoughtChan } // AgentOption is a functional option for configuring an Agent @@ -59,6 +66,12 @@ func WithCallbacksHandler(handler callbacks.Handler) AgentOption { } } +func WithThoughtChannel(thoughtChan chan string) AgentOption { + return func(agent *agentBase) { + agent.thoughtChan = thoughtChan + } +} + // toolNames returns a comma-separated string of all tool names in the provided slice func toolNames(tools []common.AnnotatedTool) string { var tn strings.Builder diff --git a/cli/azd/internal/agent/agent_factory.go b/cli/azd/internal/agent/agent_factory.go index 28de495a073..1c5248ac4fd 100644 --- a/cli/azd/internal/agent/agent_factory.go +++ b/cli/azd/internal/agent/agent_factory.go @@ -32,17 +32,26 @@ func NewAgentFactory( } func (f *AgentFactory) Create(opts ...AgentOption) (Agent, func() error, error) { - fileLogger, cleanup, err := logging.NewFileLoggerDefault() + fileLogger, loggerCleanup, err := logging.NewFileLoggerDefault() if err != nil { - return nil, cleanup, err + return nil, loggerCleanup, err } - defaultModelContainer, err := f.llmManager.GetDefaultModel(llm.WithLogger(fileLogger)) + thoughtChan := make(chan string) + thoughtHandler := logging.NewThoughtLogger(thoughtChan) + chainedHandler := logging.NewChainedHandler(fileLogger, thoughtHandler) + + cleanup := func() error { + close(thoughtChan) + return loggerCleanup() + } + + defaultModelContainer, err := f.llmManager.GetDefaultModel(llm.WithLogger(chainedHandler)) if err != nil { return nil, cleanup, err } - samplingModelContainer, err := f.llmManager.GetDefaultModel(llm.WithLogger(fileLogger)) + samplingModelContainer, err := f.llmManager.GetDefaultModel(llm.WithLogger(chainedHandler)) if err != nil { return nil, cleanup, err } @@ -86,7 +95,10 @@ func (f *AgentFactory) Create(opts ...AgentOption) (Agent, func() error, error) allOptions := []AgentOption{} allOptions = append(allOptions, opts...) - allOptions = append(allOptions, WithTools(protectedTools...)) + allOptions = append(allOptions, + WithThoughtChannel(thoughtChan), + WithTools(protectedTools...), + ) azdAgent, err := NewConversationalAzdAiAgent(defaultModelContainer.Model, allOptions...) 
if err != nil { diff --git a/cli/azd/internal/agent/consent/checker.go b/cli/azd/internal/agent/consent/checker.go index 2eba1befbb5..9318c5ed12f 100644 --- a/cli/azd/internal/agent/consent/checker.go +++ b/cli/azd/internal/agent/consent/checker.go @@ -156,7 +156,7 @@ func (cc *ConsentChecker) promptForToolConsent( annotations mcp.ToolAnnotation, ) (string, error) { message := fmt.Sprintf( - "The tool %s from %s wants to run.\n\nWhat would you like to do?", + "Allow tool %s from server %s to run?", output.WithHighLightFormat(toolName), output.WithHighLightFormat(cc.serverName), ) @@ -166,7 +166,7 @@ func (cc *ConsentChecker) promptForToolConsent( choices := []*ux.SelectChoice{ { Value: "deny", - Label: "No - Block this tool", + Label: "No, not right now", }, { Value: "once", @@ -387,9 +387,7 @@ func (cc *ConsentChecker) promptForSamplingConsent( toolName, toolDesc string, ) (string, error) { message := fmt.Sprintf( - "The tool %s from %s wants to send data to an AI service.\n\n"+ - "This helps improve responses but shares information externally.\n\n"+ - "What would you like to do?", + "Allow tool %s from server %s to communicate with the AI Model?", output.WithHighLightFormat(toolName), output.WithHighLightFormat(cc.serverName), ) diff --git a/cli/azd/internal/agent/logging/chained_handler.go b/cli/azd/internal/agent/logging/chained_handler.go new file mode 100644 index 00000000000..4412bf388a4 --- /dev/null +++ b/cli/azd/internal/agent/logging/chained_handler.go @@ -0,0 +1,121 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package logging + +import ( + "context" + + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/llms" + "github.com/tmc/langchaingo/schema" +) + +// ChainedHandler forwards calls to multiple callbacks.Handler in order. +type ChainedHandler struct { + handlers []callbacks.Handler +} + +// NewChainedHandler creates a new ChainedHandler with the provided handlers. 
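+// Handlers are invoked in the order they are supplied, so a persistent file
+// logger can be paired with an in-memory handler that feeds the interactive UX.
+// Illustrative usage, mirroring AgentFactory.Create:
+//
+//	chainedHandler := logging.NewChainedHandler(fileLogger, thoughtHandler)
+//	model, err := llmManager.GetDefaultModel(llm.WithLogger(chainedHandler))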
+func NewChainedHandler(handlers ...callbacks.Handler) *ChainedHandler { + return &ChainedHandler{handlers: handlers} +} + +// Compile-time check to ensure ChainedHandler implements callbacks.Handler +var _ callbacks.Handler = &ChainedHandler{} + +func (c *ChainedHandler) HandleText(ctx context.Context, text string) { + for _, h := range c.handlers { + h.HandleText(ctx, text) + } +} + +func (c *ChainedHandler) HandleLLMStart(ctx context.Context, prompts []string) { + for _, h := range c.handlers { + h.HandleLLMStart(ctx, prompts) + } +} + +func (c *ChainedHandler) HandleLLMGenerateContentStart(ctx context.Context, ms []llms.MessageContent) { + for _, h := range c.handlers { + h.HandleLLMGenerateContentStart(ctx, ms) + } +} + +func (c *ChainedHandler) HandleLLMGenerateContentEnd(ctx context.Context, res *llms.ContentResponse) { + for _, h := range c.handlers { + h.HandleLLMGenerateContentEnd(ctx, res) + } +} + +func (c *ChainedHandler) HandleLLMError(ctx context.Context, err error) { + for _, h := range c.handlers { + h.HandleLLMError(ctx, err) + } +} + +func (c *ChainedHandler) HandleChainStart(ctx context.Context, inputs map[string]any) { + for _, h := range c.handlers { + h.HandleChainStart(ctx, inputs) + } +} + +func (c *ChainedHandler) HandleChainEnd(ctx context.Context, outputs map[string]any) { + for _, h := range c.handlers { + h.HandleChainEnd(ctx, outputs) + } +} + +func (c *ChainedHandler) HandleChainError(ctx context.Context, err error) { + for _, h := range c.handlers { + h.HandleChainError(ctx, err) + } +} + +func (c *ChainedHandler) HandleToolStart(ctx context.Context, input string) { + for _, h := range c.handlers { + h.HandleToolStart(ctx, input) + } +} + +func (c *ChainedHandler) HandleToolEnd(ctx context.Context, output string) { + for _, h := range c.handlers { + h.HandleToolEnd(ctx, output) + } +} + +func (c *ChainedHandler) HandleToolError(ctx context.Context, err error) { + for _, h := range c.handlers { + h.HandleToolError(ctx, err) + } +} + +func (c *ChainedHandler) HandleAgentAction(ctx context.Context, action schema.AgentAction) { + for _, h := range c.handlers { + h.HandleAgentAction(ctx, action) + } +} + +func (c *ChainedHandler) HandleAgentFinish(ctx context.Context, finish schema.AgentFinish) { + for _, h := range c.handlers { + h.HandleAgentFinish(ctx, finish) + } +} + +func (c *ChainedHandler) HandleRetrieverStart(ctx context.Context, query string) { + for _, h := range c.handlers { + h.HandleRetrieverStart(ctx, query) + } +} + +func (c *ChainedHandler) HandleRetrieverEnd(ctx context.Context, query string, documents []schema.Document) { + for _, h := range c.handlers { + h.HandleRetrieverEnd(ctx, query, documents) + } +} + +func (c *ChainedHandler) HandleStreamingFunc(ctx context.Context, chunk []byte) { + for _, h := range c.handlers { + h.HandleStreamingFunc(ctx, chunk) + } +} diff --git a/cli/azd/internal/agent/logging/thought_logger.go b/cli/azd/internal/agent/logging/thought_logger.go new file mode 100644 index 00000000000..9453e156fd4 --- /dev/null +++ b/cli/azd/internal/agent/logging/thought_logger.go @@ -0,0 +1,119 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package logging + +import ( + "context" + "regexp" + "strings" + + "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/llms" + "github.com/tmc/langchaingo/schema" +) + +// Compile-time check to ensure ThoughtLogger implements callbacks.Handler +var _ callbacks.Handler = &ThoughtLogger{} + +// ThoughtLogger tracks and logs all agent actions +type ThoughtLogger struct { + ThoughtChan chan<- string +} + +// NewThoughtLogger creates a new action logger with a write-only channel for thoughts +func NewThoughtLogger(thoughtChan chan<- string) *ThoughtLogger { + return &ThoughtLogger{ + ThoughtChan: thoughtChan, + } +} + +// HandleText is called when text is processed +func (al *ThoughtLogger) HandleText(ctx context.Context, text string) { +} + +// HandleLLMGenerateContentStart is called when LLM content generation starts +func (al *ThoughtLogger) HandleLLMGenerateContentStart(ctx context.Context, ms []llms.MessageContent) { +} + +// HandleLLMGenerateContentEnd is called when LLM content generation ends +func (al *ThoughtLogger) HandleLLMGenerateContentEnd(ctx context.Context, res *llms.ContentResponse) { + // Parse and print thoughts as "THOUGHT: " from content + // IF thought contains: "Do I need to use a tool?", omit this thought. + + for _, choice := range res.Choices { + content := choice.Content + + // Find all "Thought:" patterns and extract the content that follows + // (?is) flags: i=case insensitive, s=dot matches newlines + // .*? is non-greedy to stop at the first occurrence of next pattern or end + thoughtRegex := regexp.MustCompile(`(?is)thought:\s*(.*?)(?:\n\s*(?:action|final answer|observation|ai|thought):|$)`) + matches := thoughtRegex.FindAllStringSubmatch(content, -1) + + for _, match := range matches { + if len(match) > 1 { + thought := strings.TrimSpace(match[1]) + if thought != "" { + // Skip thoughts that contain "Do I need to use a tool?" 
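+					// The "Do I need to use a tool?" text is boilerplate from the agent's prompt
+					// template, so it is dropped rather than surfaced as a user-facing thought.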
+ if !strings.Contains(strings.ToLower(thought), "do i need to use a tool?") { + if al.ThoughtChan != nil { + al.ThoughtChan <- thought + } + } + } + } + } + } +} + +// HandleRetrieverStart is called when retrieval starts +func (al *ThoughtLogger) HandleRetrieverStart(ctx context.Context, query string) { +} + +// HandleRetrieverEnd is called when retrieval ends +func (al *ThoughtLogger) HandleRetrieverEnd(ctx context.Context, query string, documents []schema.Document) { +} + +// HandleToolStart is called when a tool execution starts +func (al *ThoughtLogger) HandleToolStart(ctx context.Context, input string) { +} + +// HandleToolEnd is called when a tool execution ends +func (al *ThoughtLogger) HandleToolEnd(ctx context.Context, output string) { +} + +// HandleToolError is called when a tool execution fails +func (al *ThoughtLogger) HandleToolError(ctx context.Context, err error) { +} + +// HandleLLMStart is called when LLM call starts +func (al *ThoughtLogger) HandleLLMStart(ctx context.Context, prompts []string) { +} + +// HandleChainStart is called when chain execution starts +func (al *ThoughtLogger) HandleChainStart(ctx context.Context, inputs map[string]any) { +} + +// HandleChainEnd is called when chain execution ends +func (al *ThoughtLogger) HandleChainEnd(ctx context.Context, outputs map[string]any) { +} + +// HandleChainError is called when chain execution fails +func (al *ThoughtLogger) HandleChainError(ctx context.Context, err error) { +} + +// HandleAgentAction is called when an agent action is planned +func (al *ThoughtLogger) HandleAgentAction(ctx context.Context, action schema.AgentAction) { +} + +// HandleAgentFinish is called when the agent finishes +func (al *ThoughtLogger) HandleAgentFinish(ctx context.Context, finish schema.AgentFinish) { +} + +// HandleLLMError is called when LLM call fails +func (al *ThoughtLogger) HandleLLMError(ctx context.Context, err error) { +} + +// HandleStreamingFunc handles streaming responses +func (al *ThoughtLogger) HandleStreamingFunc(ctx context.Context, chunk []byte) { +} diff --git a/cli/azd/pkg/ux/canvas.go b/cli/azd/pkg/ux/canvas.go index ae27b703c37..aceadd522e1 100644 --- a/cli/azd/pkg/ux/canvas.go +++ b/cli/azd/pkg/ux/canvas.go @@ -8,6 +8,23 @@ import ( "sync" ) +var inputLock sync.Mutex +var inputCanvas Canvas + +func lockForInput(c Canvas) { + if inputCanvas != c { + inputLock.Lock() + inputCanvas = c + } +} + +func unlockForInput(c Canvas) { + if inputCanvas == c { + inputCanvas = nil + inputLock.Unlock() + } +} + // Canvas is a base component for UX components that require a canvas for rendering. type canvas struct { visuals []Visual @@ -20,6 +37,7 @@ type canvas struct { type Canvas interface { Run() error Update() error + Clear() WithWriter(writer io.Writer) Canvas } @@ -30,6 +48,10 @@ func NewCanvas(visuals ...Visual) Canvas { renderMap: make(map[Visual]*VisualContext), } + for _, visual := range visuals { + visual.WithCanvas(canvas) + } + return canvas } @@ -45,8 +67,18 @@ func (c *canvas) Run() error { return c.Update() } +// Clear clears the canvas. +func (c *canvas) Clear() { + c.printer.ClearCanvas() +} + // Update updates the canvas to force a re-render. 
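+// When another canvas holds the input lock (see lockForInput), this canvas is
+// cleared instead of re-rendered so interactive prompts are not overdrawn.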
func (c *canvas) Update() error { + if inputCanvas != nil && inputCanvas != c { + c.printer.ClearCanvas() + return nil + } + c.updateLock.Lock() defer c.updateLock.Unlock() diff --git a/cli/azd/pkg/ux/confirm.go b/cli/azd/pkg/ux/confirm.go index a685c5346d5..36fc57f6b5d 100644 --- a/cli/azd/pkg/ux/confirm.go +++ b/cli/azd/pkg/ux/confirm.go @@ -114,6 +114,9 @@ func (p *Confirm) Ask(ctx context.Context) (*bool, error) { p.canvas = NewCanvas(p).WithWriter(p.options.Writer) } + lockForInput(p.canvas) + defer unlockForInput(p.canvas) + inputConfig := &internal.InputConfig{ InitialValue: p.displayValue, } diff --git a/cli/azd/pkg/ux/multi_select.go b/cli/azd/pkg/ux/multi_select.go index 78eec6e7ac3..0846538c872 100644 --- a/cli/azd/pkg/ux/multi_select.go +++ b/cli/azd/pkg/ux/multi_select.go @@ -133,6 +133,9 @@ func (p *MultiSelect) Ask(ctx context.Context) ([]*MultiSelectChoice, error) { p.canvas = NewCanvas(p).WithWriter(p.options.Writer) } + lockForInput(p.canvas) + defer unlockForInput(p.canvas) + if !*p.options.EnableFiltering { p.cursor.HideCursor() } diff --git a/cli/azd/pkg/ux/prompt.go b/cli/azd/pkg/ux/prompt.go index 7439af82578..97882cd8930 100644 --- a/cli/azd/pkg/ux/prompt.go +++ b/cli/azd/pkg/ux/prompt.go @@ -127,6 +127,9 @@ func (p *Prompt) Ask(ctx context.Context) (string, error) { p.canvas = NewCanvas(p).WithWriter(p.options.Writer) } + lockForInput(p.canvas) + defer unlockForInput(p.canvas) + inputOptions := &internal.InputConfig{ InitialValue: p.options.DefaultValue, IgnoreHintKeys: p.options.IgnoreHintKeys, diff --git a/cli/azd/pkg/ux/select.go b/cli/azd/pkg/ux/select.go index aceef279097..1c2490dea2a 100644 --- a/cli/azd/pkg/ux/select.go +++ b/cli/azd/pkg/ux/select.go @@ -133,6 +133,9 @@ func (p *Select) Ask(ctx context.Context) (*int, error) { p.canvas = NewCanvas(p).WithWriter(p.options.Writer) } + lockForInput(p.canvas) + defer unlockForInput(p.canvas) + if !*p.options.EnableFiltering { p.cursor.HideCursor() } From 4574d49cc2d1c4cb2304df6894322edd0df9ff45 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Tue, 19 Aug 2025 14:54:08 -0700 Subject: [PATCH 077/116] visual render orchestration --- cli/azd/cmd/init.go | 1 + cli/azd/pkg/ux/canvas.go | 95 ++++++++++++++++++++++++++-------- cli/azd/pkg/ux/confirm.go | 7 ++- cli/azd/pkg/ux/multi_select.go | 7 ++- cli/azd/pkg/ux/prompt.go | 7 ++- cli/azd/pkg/ux/select.go | 7 ++- cli/azd/pkg/ux/visual.go | 7 --- 7 files changed, 95 insertions(+), 36 deletions(-) diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go index eaaad10d141..186486bb882 100644 --- a/cli/azd/cmd/init.go +++ b/cli/azd/cmd/init.go @@ -613,6 +613,7 @@ func renderThoughts(ctx context.Context, agentThoughts <-chan string) (func(), e cleanup := func() { canvas.Clear() + canvas.Close() } return cleanup, canvas.Run() diff --git a/cli/azd/pkg/ux/canvas.go b/cli/azd/pkg/ux/canvas.go index aceadd522e1..d8b45cd4556 100644 --- a/cli/azd/pkg/ux/canvas.go +++ b/cli/azd/pkg/ux/canvas.go @@ -8,29 +8,11 @@ import ( "sync" ) -var inputLock sync.Mutex -var inputCanvas Canvas - -func lockForInput(c Canvas) { - if inputCanvas != c { - inputLock.Lock() - inputCanvas = c - } -} - -func unlockForInput(c Canvas) { - if inputCanvas == c { - inputCanvas = nil - inputLock.Unlock() - } -} - // Canvas is a base component for UX components that require a canvas for rendering. 
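+// Each canvas registers itself with the package-level canvasManager, which
+// arbitrates which canvas may redraw while an interactive prompt holds focus.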
type canvas struct { visuals []Visual printer Printer writer io.Writer - renderMap map[Visual]*VisualContext updateLock sync.Mutex } @@ -38,20 +20,22 @@ type Canvas interface { Run() error Update() error Clear() + Close() WithWriter(writer io.Writer) Canvas } // NewCanvas creates a new Canvas instance. func NewCanvas(visuals ...Visual) Canvas { canvas := &canvas{ - visuals: visuals, - renderMap: make(map[Visual]*VisualContext), + visuals: visuals, } for _, visual := range visuals { visual.WithCanvas(canvas) } + cm.Add(canvas) + return canvas } @@ -72,9 +56,17 @@ func (c *canvas) Clear() { c.printer.ClearCanvas() } +// Close closes the canvas. +func (c *canvas) Close() { + cm.Remove(c) +} + // Update updates the canvas to force a re-render. func (c *canvas) Update() error { - if inputCanvas != nil && inputCanvas != c { + cm.Lock() + defer cm.Unlock() + + if !cm.CanUpdate(c) { c.printer.ClearCanvas() return nil } @@ -127,3 +119,64 @@ func newCanvasSize() *CanvasSize { Cols: 0, } } + +type canvasManager struct { + items sync.Map + focusedCanvas Canvas + focusLock sync.Mutex + updateLock sync.Mutex +} + +func newCanvasManager() *canvasManager { + return &canvasManager{ + items: sync.Map{}, + } +} + +func (cm *canvasManager) Add(canvas Canvas) { + cm.items.Store(canvas, struct{}{}) +} + +func (cm *canvasManager) Remove(canvas Canvas) { + cm.items.Delete(canvas) +} + +func (cm *canvasManager) Lock() { + cm.updateLock.Lock() +} + +func (cm *canvasManager) Unlock() { + cm.updateLock.Unlock() +} + +// Focus sets the focused canvas and clears non-focused canvases. +func (cm *canvasManager) Focus(canvas Canvas) func() { + cm.Lock() + defer cm.Unlock() + + cm.focusLock.Lock() + cm.focusedCanvas = canvas + + // Clear non-focused canvases + cm.items.Range(func(key, value any) bool { + if c, ok := key.(Canvas); ok && c != canvas { + c.Clear() + } + return true + }) + + return func() { + cm.focusedCanvas = nil + cm.focusLock.Unlock() + } +} + +func (cm *canvasManager) CanUpdate(canvas Canvas) bool { + if cm.focusedCanvas == nil || cm.focusedCanvas == canvas { + return true + } + + return false +} + +var cm = newCanvasManager() diff --git a/cli/azd/pkg/ux/confirm.go b/cli/azd/pkg/ux/confirm.go index 36fc57f6b5d..5b6ab4b7713 100644 --- a/cli/azd/pkg/ux/confirm.go +++ b/cli/azd/pkg/ux/confirm.go @@ -114,8 +114,11 @@ func (p *Confirm) Ask(ctx context.Context) (*bool, error) { p.canvas = NewCanvas(p).WithWriter(p.options.Writer) } - lockForInput(p.canvas) - defer unlockForInput(p.canvas) + release := cm.Focus(p.canvas) + defer func() { + release() + p.canvas.Close() + }() inputConfig := &internal.InputConfig{ InitialValue: p.displayValue, diff --git a/cli/azd/pkg/ux/multi_select.go b/cli/azd/pkg/ux/multi_select.go index 0846538c872..c238f4f2454 100644 --- a/cli/azd/pkg/ux/multi_select.go +++ b/cli/azd/pkg/ux/multi_select.go @@ -133,8 +133,11 @@ func (p *MultiSelect) Ask(ctx context.Context) ([]*MultiSelectChoice, error) { p.canvas = NewCanvas(p).WithWriter(p.options.Writer) } - lockForInput(p.canvas) - defer unlockForInput(p.canvas) + release := cm.Focus(p.canvas) + defer func() { + release() + p.canvas.Close() + }() if !*p.options.EnableFiltering { p.cursor.HideCursor() diff --git a/cli/azd/pkg/ux/prompt.go b/cli/azd/pkg/ux/prompt.go index 97882cd8930..7b49d1fce35 100644 --- a/cli/azd/pkg/ux/prompt.go +++ b/cli/azd/pkg/ux/prompt.go @@ -127,8 +127,11 @@ func (p *Prompt) Ask(ctx context.Context) (string, error) { p.canvas = NewCanvas(p).WithWriter(p.options.Writer) } - lockForInput(p.canvas) - defer 
unlockForInput(p.canvas) + release := cm.Focus(p.canvas) + defer func() { + release() + p.canvas.Close() + }() inputOptions := &internal.InputConfig{ InitialValue: p.options.DefaultValue, diff --git a/cli/azd/pkg/ux/select.go b/cli/azd/pkg/ux/select.go index 1c2490dea2a..344378c1258 100644 --- a/cli/azd/pkg/ux/select.go +++ b/cli/azd/pkg/ux/select.go @@ -133,8 +133,11 @@ func (p *Select) Ask(ctx context.Context) (*int, error) { p.canvas = NewCanvas(p).WithWriter(p.options.Writer) } - lockForInput(p.canvas) - defer unlockForInput(p.canvas) + release := cm.Focus(p.canvas) + defer func() { + release() + p.canvas.Close() + }() if !*p.options.EnableFiltering { p.cursor.HideCursor() diff --git a/cli/azd/pkg/ux/visual.go b/cli/azd/pkg/ux/visual.go index 7eb48c5adb6..b71da705837 100644 --- a/cli/azd/pkg/ux/visual.go +++ b/cli/azd/pkg/ux/visual.go @@ -3,13 +3,6 @@ package ux -type VisualContext struct { - // The size of the visual - Size CanvasSize - // The relative row position of the visual within the canvas - Top int -} - type Visual interface { Render(printer Printer) error WithCanvas(canvas Canvas) Visual From 74e31599a982b253df129def1b14eb62f81ba14a Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Tue, 19 Aug 2025 18:01:23 -0700 Subject: [PATCH 078/116] Updates agent output for init --- cli/azd/cmd/init.go | 35 +++++- cli/azd/internal/agent/agent.go | 9 +- cli/azd/internal/agent/agent_factory.go | 3 +- cli/azd/internal/agent/consent/checker.go | 26 ++--- cli/azd/internal/agent/consent/manager.go | 3 + cli/azd/internal/agent/consent/types.go | 52 ++++----- .../internal/agent/conversational_agent.go | 2 +- cli/azd/internal/agent/logging/file_logger.go | 1 - .../internal/agent/logging/thought_logger.go | 107 +++++++++++++++++- .../tools/io/file_io_integration_test.go | 46 ++++---- cli/azd/internal/agent/tools/io/read_file.go | 36 +++--- .../internal/agent/tools/io/read_file_test.go | 2 +- cli/azd/internal/agent/tools/io/write_file.go | 34 +++--- .../agent/tools/io/write_file_test.go | 2 +- cli/azd/pkg/ux/canvas.go | 64 ++++++++--- 15 files changed, 291 insertions(+), 131 deletions(-) diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go index 186486bb882..242df304517 100644 --- a/cli/azd/cmd/init.go +++ b/cli/azd/cmd/init.go @@ -16,6 +16,7 @@ import ( "github.com/azure/azure-dev/cli/azd/cmd/actions" "github.com/azure/azure-dev/cli/azd/internal" "github.com/azure/azure-dev/cli/azd/internal/agent" + "github.com/azure/azure-dev/cli/azd/internal/agent/logging" "github.com/azure/azure-dev/cli/azd/internal/repository" "github.com/azure/azure-dev/cli/azd/internal/tracing" "github.com/azure/azure-dev/cli/azd/internal/tracing/fields" @@ -484,7 +485,7 @@ Do not stop until all tasks are complete and fully resolved. 
func (i *initAction) collectAndApplyFeedback( ctx context.Context, azdAgent agent.Agent, - agentThoughts <-chan string, + agentThoughts <-chan logging.Thought, promptMessage string, ) error { // Loop to allow multiple rounds of feedback @@ -554,7 +555,7 @@ func (i *initAction) collectAndApplyFeedback( func (i *initAction) postCompletionFeedbackLoop( ctx context.Context, azdAgent agent.Agent, - agentThoughts <-chan string, + agentThoughts <-chan logging.Thought, ) error { i.console.Message(ctx, "") i.console.Message(ctx, "🎉 All initialization steps completed!") @@ -573,8 +574,9 @@ const ( initWithAgent ) -func renderThoughts(ctx context.Context, agentThoughts <-chan string) (func(), error) { +func renderThoughts(ctx context.Context, agentThoughts <-chan logging.Thought) (func(), error) { var latestThought string + spinner := uxlib.NewSpinner(&uxlib.SpinnerOptions{ Text: "Thinking...", }) @@ -597,16 +599,39 @@ func renderThoughts(ctx context.Context, agentThoughts <-chan string) (func(), e go func() { defer canvas.Clear() + var latestAction string + var latestActionInput string + var spinnerText string + for { + select { case thought := <-agentThoughts: - latestThought = thought + if thought.Action != "" { + latestAction = thought.Action + latestActionInput = thought.ActionInput + } + if thought.Thought != "" { + latestThought = thought.Thought + } case <-ctx.Done(): - latestThought = "" return case <-time.After(200 * time.Millisecond): } + // Update spinner text + if latestAction == "" { + spinnerText = "Thinking..." + } else { + spinnerText = fmt.Sprintf("Running %s tool", color.GreenString(latestAction)) + if latestActionInput != "" { + spinnerText += " with " + color.GreenString(latestActionInput) + } + + spinnerText += "..." + } + + spinner.UpdateText(spinnerText) canvas.Update() } }() diff --git a/cli/azd/internal/agent/agent.go b/cli/azd/internal/agent/agent.go index 2339c7b511a..035d6ea33d3 100644 --- a/cli/azd/internal/agent/agent.go +++ b/cli/azd/internal/agent/agent.go @@ -8,6 +8,7 @@ import ( "fmt" "strings" + "github.com/azure/azure-dev/cli/azd/internal/agent/logging" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" "github.com/tmc/langchaingo/agents" "github.com/tmc/langchaingo/callbacks" @@ -22,16 +23,16 @@ type agentBase struct { executor *agents.Executor tools []common.AnnotatedTool callbacksHandler callbacks.Handler - thoughtChan chan string + thoughtChan chan logging.Thought } type Agent interface { SendMessage(ctx context.Context, args ...string) (string, error) - Thoughts() <-chan string + Thoughts() <-chan logging.Thought } // Thoughts returns a channel for receiving thoughts generated by the agent -func (a *agentBase) Thoughts() <-chan string { +func (a *agentBase) Thoughts() <-chan logging.Thought { return a.thoughtChan } @@ -66,7 +67,7 @@ func WithCallbacksHandler(handler callbacks.Handler) AgentOption { } } -func WithThoughtChannel(thoughtChan chan string) AgentOption { +func WithThoughtChannel(thoughtChan chan logging.Thought) AgentOption { return func(agent *agentBase) { agent.thoughtChan = thoughtChan } diff --git a/cli/azd/internal/agent/agent_factory.go b/cli/azd/internal/agent/agent_factory.go index 1c5248ac4fd..e97042bf53e 100644 --- a/cli/azd/internal/agent/agent_factory.go +++ b/cli/azd/internal/agent/agent_factory.go @@ -37,7 +37,7 @@ func (f *AgentFactory) Create(opts ...AgentOption) (Agent, func() error, error) return nil, loggerCleanup, err } - thoughtChan := make(chan string) + thoughtChan := make(chan logging.Thought) thoughtHandler := 
logging.NewThoughtLogger(thoughtChan) chainedHandler := logging.NewChainedHandler(fileLogger, thoughtHandler) @@ -96,6 +96,7 @@ func (f *AgentFactory) Create(opts ...AgentOption) (Agent, func() error, error) allOptions := []AgentOption{} allOptions = append(allOptions, opts...) allOptions = append(allOptions, + WithCallbacksHandler(chainedHandler), WithThoughtChannel(thoughtChan), WithTools(protectedTools...), ) diff --git a/cli/azd/internal/agent/consent/checker.go b/cli/azd/internal/agent/consent/checker.go index 9318c5ed12f..994d3922ebe 100644 --- a/cli/azd/internal/agent/consent/checker.go +++ b/cli/azd/internal/agent/consent/checker.go @@ -36,11 +36,11 @@ func (cc *ConsentChecker) CheckToolConsent( toolName, toolDesc string, annotations mcp.ToolAnnotation, ) (*ConsentDecision, error) { - toolID := fmt.Sprintf("%s/%s", cc.serverName, toolName) + toolId := fmt.Sprintf("%s/%s", cc.serverName, toolName) // Create consent request consentRequest := ConsentRequest{ - ToolID: toolID, + ToolID: toolId, ServerName: cc.serverName, Operation: OperationTypeTool, // This is a tool execution request Annotations: annotations, @@ -54,11 +54,11 @@ func (cc *ConsentChecker) CheckSamplingConsent( ctx context.Context, toolName string, ) (*ConsentDecision, error) { - toolID := fmt.Sprintf("%s/%s", cc.serverName, toolName) + toolId := fmt.Sprintf("%s/%s", cc.serverName, toolName) // Create consent request for sampling consentRequest := ConsentRequest{ - ToolID: toolID, + ToolID: toolId, ServerName: cc.serverName, Operation: OperationTypeSampling, // This is a sampling request } @@ -134,7 +134,7 @@ func (cc *ConsentChecker) PromptAndGrantConsent( toolName, toolDesc string, annotations mcp.ToolAnnotation, ) error { - toolID := fmt.Sprintf("%s/%s", cc.serverName, toolName) + toolId := fmt.Sprintf("%s/%s", cc.serverName, toolName) choice, err := cc.promptForToolConsent(ctx, toolName, toolDesc, annotations) if err != nil { @@ -146,7 +146,7 @@ func (cc *ConsentChecker) PromptAndGrantConsent( } // Grant consent based on user choice - return cc.grantConsentFromChoice(ctx, toolID, choice, OperationTypeTool) + return cc.grantConsentFromChoice(ctx, toolId, choice, OperationTypeTool) } // promptForToolConsent shows an interactive consent prompt and returns the user's choice @@ -266,16 +266,16 @@ func (cc *ConsentChecker) isServerAlreadyTrusted(ctx context.Context, operation // grantConsentFromChoice processes the user's consent choice and saves the appropriate rule func (cc *ConsentChecker) grantConsentFromChoice( ctx context.Context, - toolID string, + toolId string, choice string, operation OperationType, ) error { var rule ConsentRule - // Parse server and tool from toolID - parts := strings.Split(toolID, "/") + // Parse server and tool from toolId + parts := strings.Split(toolId, "/") if len(parts) != 2 { - return fmt.Errorf("invalid toolID format: %s", toolID) + return fmt.Errorf("invalid toolId format: %s", toolId) } serverName := parts[0] toolName := parts[1] @@ -283,7 +283,7 @@ func (cc *ConsentChecker) grantConsentFromChoice( switch choice { case "once": rule = ConsentRule{ - Scope: ScopeSession, + Scope: ScopeOneTime, Target: NewToolTarget(serverName, toolName), Action: ActionAny, Operation: operation, @@ -366,7 +366,7 @@ func (cc *ConsentChecker) PromptAndGrantSamplingConsent( ctx context.Context, toolName, toolDesc string, ) error { - toolID := fmt.Sprintf("%s/%s", cc.serverName, toolName) + toolId := fmt.Sprintf("%s/%s", cc.serverName, toolName) choice, err := cc.promptForSamplingConsent(ctx, toolName, 
toolDesc) if err != nil { @@ -378,7 +378,7 @@ func (cc *ConsentChecker) PromptAndGrantSamplingConsent( } // Grant sampling consent based on user choice - return cc.grantConsentFromChoice(ctx, toolID, choice, OperationTypeSampling) + return cc.grantConsentFromChoice(ctx, toolId, choice, OperationTypeSampling) } // promptForSamplingConsent shows an interactive sampling consent prompt and returns the user's choice diff --git a/cli/azd/internal/agent/consent/manager.go b/cli/azd/internal/agent/consent/manager.go index bed1e6e3d5a..1f713bf5d10 100644 --- a/cli/azd/internal/agent/consent/manager.go +++ b/cli/azd/internal/agent/consent/manager.go @@ -116,6 +116,9 @@ func (cm *consentManager) GrantConsent(ctx context.Context, rule ConsentRule) er return cm.addProjectRule(ctx, rule) case ScopeGlobal: return cm.addGlobalRule(ctx, rule) + case ScopeOneTime: + // Do not persist one time consent + return nil default: return fmt.Errorf("unknown consent scope: %s", rule.Scope) } diff --git a/cli/azd/internal/agent/consent/types.go b/cli/azd/internal/agent/consent/types.go index eb6154cba21..e6de3dce5db 100644 --- a/cli/azd/internal/agent/consent/types.go +++ b/cli/azd/internal/agent/consent/types.go @@ -21,6 +21,7 @@ const ( ScopeSession Scope = "session" ScopeProject Scope = "project" ScopeGlobal Scope = "global" + ScopeOneTime Scope = "one_time" ) // ActionType defines the kind of action the rule controls @@ -90,15 +91,15 @@ func (t Target) Validate() error { } // AllowedOperationTypes contains the valid operation contexts for command validation -var AllowedOperationTypes = []string{ - string(OperationTypeTool), - string(OperationTypeSampling), +var AllowedOperationTypes = []OperationType{ + OperationTypeTool, + OperationTypeSampling, } // ParseOperationType converts a string to OperationType with validation func ParseOperationType(contextStr string) (OperationType, error) { for _, allowedContext := range AllowedOperationTypes { - if contextStr == allowedContext { + if contextStr == string(allowedContext) { return OperationType(contextStr), nil } } @@ -106,16 +107,17 @@ func ParseOperationType(contextStr string) (OperationType, error) { } // AllowedScopes contains the valid scopes for command validation -var AllowedScopes = []string{ - string(ScopeGlobal), - string(ScopeProject), - string(ScopeSession), +var AllowedScopes = []Scope{ + ScopeGlobal, + ScopeProject, + ScopeSession, + ScopeOneTime, } // ParseScope converts a string to Scope with validation func ParseScope(scopeStr string) (Scope, error) { for _, allowedScope := range AllowedScopes { - if scopeStr == allowedScope { + if scopeStr == string(allowedScope) { return Scope(scopeStr), nil } } @@ -123,9 +125,9 @@ func ParseScope(scopeStr string) (Scope, error) { } // AllowedActionTypes contains the valid action types for command validation -var AllowedActionTypes = []string{ - "readonly", - "all", +var AllowedActionTypes = []ActionType{ + ActionReadOnly, + ActionAny, } // ParseActionType converts a string to ActionType with validation @@ -141,16 +143,16 @@ func ParseActionType(actionStr string) (ActionType, error) { } // AllowedPermissions contains the valid permissions for command validation -var AllowedPermissions = []string{ - string(PermissionAllow), - string(PermissionDeny), - string(PermissionPrompt), +var AllowedPermissions = []Permission{ + PermissionAllow, + PermissionDeny, + PermissionPrompt, } // ParsePermission converts a string to Permission with validation func ParsePermission(permissionStr string) (Permission, error) { for _, 
allowedPermission := range AllowedPermissions { - if permissionStr == allowedPermission { + if permissionStr == string(allowedPermission) { return Permission(permissionStr), nil } } @@ -221,9 +223,8 @@ func (r ConsentRule) Validate() error { } // Validate enums have valid values - validScopes := []Scope{ScopeSession, ScopeProject, ScopeGlobal} validScope := false - for _, scope := range validScopes { + for _, scope := range AllowedScopes { if r.Scope == scope { validScope = true break @@ -233,9 +234,8 @@ func (r ConsentRule) Validate() error { return fmt.Errorf("invalid scope: %s", r.Scope) } - validActions := []ActionType{ActionReadOnly, ActionAny} validAction := false - for _, action := range validActions { + for _, action := range AllowedActionTypes { if r.Action == action { validAction = true break @@ -245,10 +245,9 @@ func (r ConsentRule) Validate() error { return fmt.Errorf("invalid action: %s", r.Action) } - validContexts := []OperationType{OperationTypeTool, OperationTypeSampling} validContext := false - for _, context := range validContexts { - if r.Operation == context { + for _, operation := range AllowedOperationTypes { + if r.Operation == operation { validContext = true break } @@ -257,10 +256,9 @@ func (r ConsentRule) Validate() error { return fmt.Errorf("invalid operation context: %s", r.Operation) } - validDecisions := []Permission{PermissionAllow, PermissionDeny, PermissionPrompt} validDecision := false - for _, decision := range validDecisions { - if r.Permission == decision { + for _, permission := range AllowedPermissions { + if r.Permission == permission { validDecision = true break } diff --git a/cli/azd/internal/agent/conversational_agent.go b/cli/azd/internal/agent/conversational_agent.go index 5dcb2a1ae19..2e09c9f04ad 100644 --- a/cli/azd/internal/agent/conversational_agent.go +++ b/cli/azd/internal/agent/conversational_agent.go @@ -72,7 +72,7 @@ func NewConversationalAzdAiAgent(llm llms.Model, opts ...AgentOption) (*Conversa // 5. 
Create executor without separate memory configuration since agent already has it executor := agents.NewExecutor(conversationAgent, - agents.WithMaxIterations(500), // Much higher limit for complex multi-step processes + agents.WithMaxIterations(100), agents.WithMemory(smartMemory), agents.WithCallbacksHandler(azdAgent.callbacksHandler), agents.WithReturnIntermediateSteps(), diff --git a/cli/azd/internal/agent/logging/file_logger.go b/cli/azd/internal/agent/logging/file_logger.go index 07222bbe781..21d5120b596 100644 --- a/cli/azd/internal/agent/logging/file_logger.go +++ b/cli/azd/internal/agent/logging/file_logger.go @@ -197,5 +197,4 @@ func (fl *FileLogger) HandleLLMError(ctx context.Context, err error) { // HandleStreamingFunc handles streaming responses func (fl *FileLogger) HandleStreamingFunc(ctx context.Context, chunk []byte) { - fl.writeAndFlush("STREAMING: %s", string(chunk)) } diff --git a/cli/azd/internal/agent/logging/thought_logger.go b/cli/azd/internal/agent/logging/thought_logger.go index 9453e156fd4..a7af79247b2 100644 --- a/cli/azd/internal/agent/logging/thought_logger.go +++ b/cli/azd/internal/agent/logging/thought_logger.go @@ -5,6 +5,8 @@ package logging import ( "context" + "encoding/json" + "fmt" "regexp" "strings" @@ -16,13 +18,19 @@ import ( // Compile-time check to ensure ThoughtLogger implements callbacks.Handler var _ callbacks.Handler = &ThoughtLogger{} +type Thought struct { + Thought string + Action string + ActionInput string +} + // ThoughtLogger tracks and logs all agent actions type ThoughtLogger struct { - ThoughtChan chan<- string + ThoughtChan chan<- Thought } // NewThoughtLogger creates a new action logger with a write-only channel for thoughts -func NewThoughtLogger(thoughtChan chan<- string) *ThoughtLogger { +func NewThoughtLogger(thoughtChan chan<- Thought) *ThoughtLogger { return &ThoughtLogger{ ThoughtChan: thoughtChan, } @@ -57,7 +65,9 @@ func (al *ThoughtLogger) HandleLLMGenerateContentEnd(ctx context.Context, res *l // Skip thoughts that contain "Do I need to use a tool?" if !strings.Contains(strings.ToLower(thought), "do i need to use a tool?") { if al.ThoughtChan != nil { - al.ThoughtChan <- thought + al.ThoughtChan <- Thought{ + Thought: thought, + } } } } @@ -104,6 +114,97 @@ func (al *ThoughtLogger) HandleChainError(ctx context.Context, err error) { // HandleAgentAction is called when an agent action is planned func (al *ThoughtLogger) HandleAgentAction(ctx context.Context, action schema.AgentAction) { + // Print "Calling " + // Inspect action.ToolInput. Attempt to parse input as JSON + // If is valid JSON and contains a param 'filename' then print filename. 
+ // example: "Calling read_file " + + prioritizedParams := map[string]struct{}{ + "path": {}, + "pattern": {}, + "filename": {}, + "command": {}, + } + + var toolInput map[string]interface{} + if err := json.Unmarshal([]byte(action.ToolInput), &toolInput); err == nil { + // Successfully parsed JSON, create comma-delimited key-value pairs + excludedKeys := map[string]bool{"content": true} + var params []string + + for key, value := range toolInput { + if excludedKeys[key] { + continue + } + + var valueStr string + switch v := value.(type) { + case []interface{}: + // Skip empty arrays + if len(v) == 0 { + continue + } + // Handle arrays by joining with spaces + var strSlice []string + for _, item := range v { + strSlice = append(strSlice, strings.TrimSpace(string(fmt.Sprintf("%v", item)))) + } + valueStr = strings.Join(strSlice, " ") + case map[string]interface{}: + // Skip empty maps + if len(v) == 0 { + continue + } + valueStr = strings.TrimSpace(fmt.Sprintf("%v", v)) + case string: + // Skip empty strings + trimmed := strings.TrimSpace(v) + if trimmed == "" { + continue + } + valueStr = trimmed + default: + valueStr = strings.TrimSpace(fmt.Sprintf("%v", v)) + } + + if valueStr != "" { + params = append(params, fmt.Sprintf("%s: %s", key, valueStr)) + } + } + + // Identify prioritized params + for _, param := range params { + for key := range prioritizedParams { + if strings.HasPrefix(param, key) { + paramStr := truncateString(param, 120) + al.ThoughtChan <- Thought{ + Action: action.Tool, + ActionInput: paramStr, + } + return + } + } + } + + al.ThoughtChan <- Thought{ + Action: action.Tool, + } + + } else { + // JSON parsing failed, show the input as text with truncation + toolInput := strings.TrimSpace(action.ToolInput) + if toolInput == "" || strings.HasPrefix(toolInput, "{") { + al.ThoughtChan <- Thought{ + Action: action.Tool, + } + } else { + toolInput = truncateString(toolInput, 120) + al.ThoughtChan <- Thought{ + Action: action.Tool, + ActionInput: toolInput, + } + } + } } // HandleAgentFinish is called when the agent finishes diff --git a/cli/azd/internal/agent/tools/io/file_io_integration_test.go b/cli/azd/internal/agent/tools/io/file_io_integration_test.go index b4d5aefc0eb..a6613036a25 100644 --- a/cli/azd/internal/agent/tools/io/file_io_integration_test.go +++ b/cli/azd/internal/agent/tools/io/file_io_integration_test.go @@ -44,7 +44,7 @@ func main() { // Step 1: LLM reads the entire file to understand structure readRequest1 := ReadFileRequest{ - FilePath: testFile, + Path: testFile, } result1, err := readTool.Call(context.Background(), mustMarshalJSON(readRequest1)) assert.NoError(t, err) @@ -58,7 +58,7 @@ func main() { // Step 2: LLM reads just the add function (lines 5-7) readRequest2 := ReadFileRequest{ - FilePath: testFile, + Path: testFile, StartLine: 5, EndLine: 7, } @@ -79,7 +79,7 @@ func main() { }` writeRequest := WriteFileRequest{ - Filename: testFile, + Path: testFile, Content: newFunction, StartLine: 5, EndLine: 7, @@ -90,7 +90,7 @@ func main() { // Step 4: LLM reads the updated function to verify change readRequest3 := ReadFileRequest{ - FilePath: testFile, + Path: testFile, StartLine: 5, EndLine: 8, } @@ -106,7 +106,7 @@ func main() { // Step 5: LLM reads main function (which may have shifted) readRequest4 := ReadFileRequest{ - FilePath: testFile, + Path: testFile, StartLine: 9, EndLine: 12, } @@ -156,7 +156,7 @@ logging: // Step 1: LLM scans file structure (first 10 lines) readRequest1 := ReadFileRequest{ - FilePath: configFile, + Path: configFile, StartLine: 
1, EndLine: 10, } @@ -172,7 +172,7 @@ logging: // Step 2: LLM focuses on database section readRequest2 := ReadFileRequest{ - FilePath: configFile, + Path: configFile, StartLine: 7, EndLine: 12, } @@ -195,7 +195,7 @@ logging: pool_size: 20` writeRequest1 := WriteFileRequest{ - Filename: configFile, + Path: configFile, Content: newDbConfig, StartLine: 7, EndLine: 11, @@ -206,7 +206,7 @@ logging: // Step 4: LLM reads redis section (which should have moved due to previous edit) readRequest3 := ReadFileRequest{ - FilePath: configFile, + Path: configFile, StartLine: 13, EndLine: 16, } @@ -221,7 +221,7 @@ logging: // Step 5: LLM reads logging section to update it readRequest4 := ReadFileRequest{ - FilePath: configFile, + Path: configFile, StartLine: 17, EndLine: 21, } @@ -243,7 +243,7 @@ logging: rotation: "daily"` writeRequest2 := WriteFileRequest{ - Filename: configFile, + Path: configFile, Content: newLoggingConfig, StartLine: 17, EndLine: 20, @@ -254,7 +254,7 @@ logging: // Step 7: LLM does final validation read of entire file readRequestFinal := ReadFileRequest{ - FilePath: configFile, + Path: configFile, } resultFinal, err := readTool.Call(context.Background(), mustMarshalJSON(readRequestFinal)) assert.NoError(t, err) @@ -327,7 +327,7 @@ class UserService: // Step 1: LLM reads class definition and constructor readRequest1 := ReadFileRequest{ - FilePath: classFile, + Path: classFile, StartLine: 7, EndLine: 12, } @@ -343,7 +343,7 @@ class UserService: // Step 2: LLM reads create_user method with some context readRequest2 := ReadFileRequest{ - FilePath: classFile, + Path: classFile, StartLine: 14, EndLine: 22, } @@ -378,7 +378,7 @@ class UserService: return False` writeRequest1 := WriteFileRequest{ - Filename: classFile, + Path: classFile, Content: improvedCreateUser, StartLine: 14, EndLine: 22, @@ -389,7 +389,7 @@ class UserService: // Step 4: LLM reads get_user method (line numbers shifted due to edit) readRequest3 := ReadFileRequest{ - FilePath: classFile, + Path: classFile, StartLine: 31, EndLine: 38, } @@ -404,7 +404,7 @@ class UserService: // Step 5: LLM reads context around delete_user to understand the pattern readRequest4 := ReadFileRequest{ - FilePath: classFile, + Path: classFile, StartLine: 40, EndLine: 47, } @@ -419,7 +419,7 @@ class UserService: // Step 6: LLM verifies the refactoring by reading the updated create_user method readRequest5 := ReadFileRequest{ - FilePath: classFile, + Path: classFile, StartLine: 14, EndLine: 30, } @@ -459,7 +459,7 @@ Line 10` // Step 1: Read lines 3-5 readRequest1 := ReadFileRequest{ - FilePath: testFile, + Path: testFile, StartLine: 3, EndLine: 5, } @@ -480,7 +480,7 @@ New Line C Line 4` writeRequest := WriteFileRequest{ - Filename: testFile, + Path: testFile, Content: insertContent, StartLine: 3, EndLine: 4, @@ -491,7 +491,7 @@ Line 4` // Step 3: Try to read what was originally line 5 (now line 8) readRequest2 := ReadFileRequest{ - FilePath: testFile, + Path: testFile, StartLine: 8, EndLine: 8, } @@ -506,7 +506,7 @@ Line 4` // Step 4: Read the new inserted content readRequest3 := ReadFileRequest{ - FilePath: testFile, + Path: testFile, StartLine: 4, EndLine: 6, } @@ -523,7 +523,7 @@ Line 4` // Step 5: Verify total line count changed correctly readRequestFull := ReadFileRequest{ - FilePath: testFile, + Path: testFile, } resultFull, err := readTool.Call(context.Background(), mustMarshalJSON(readRequestFull)) assert.NoError(t, err) diff --git a/cli/azd/internal/agent/tools/io/read_file.go b/cli/azd/internal/agent/tools/io/read_file.go index 
0eb707dbf75..6c34cc66633 100644 --- a/cli/azd/internal/agent/tools/io/read_file.go +++ b/cli/azd/internal/agent/tools/io/read_file.go @@ -23,7 +23,7 @@ type ReadFileTool struct { // ReadFileRequest represents the JSON payload for file read requests type ReadFileRequest struct { - FilePath string `json:"filePath"` + Path string `json:"path"` StartLine int `json:"startLine,omitempty"` // Optional: 1-based line number to start reading from EndLine int `json:"endLine,omitempty"` // Optional: 1-based line number to end reading at } @@ -31,7 +31,7 @@ type ReadFileRequest struct { // ReadFileResponse represents the JSON output for the read_file tool type ReadFileResponse struct { Success bool `json:"success"` - FilePath string `json:"filePath"` + Path string `json:"path"` Content string `json:"content"` IsTruncated bool `json:"isTruncated"` IsPartial bool `json:"isPartial"` @@ -75,26 +75,26 @@ Returns JSON response with file content and metadata. Input: JSON payload with the following structure: { - "filePath": "path/to/file.txt", + "path": "path/to/file.txt", "startLine": 10, // optional: 1-based line number to start reading from "endLine": 50 // optional: 1-based line number to end reading at } Examples: 1. Read entire file: - {"filePath": "README.md"} + {"path": "README.md"} 2. Read specific line range: - {"filePath": "src/main.go", "startLine": 1, "endLine": 100} + {"path": "src/main.go", "startLine": 1, "endLine": 100} 3. Read from line to end: - {"filePath": "config.go", "startLine": 25} + {"path": "config.go", "startLine": 25} 4. Read from start to line: - {"filePath": "app.py", "endLine": 30} + {"path": "app.py", "endLine": 30} 5. Read single line: - {"filePath": "package.json", "startLine": 42, "endLine": 42} + {"path": "package.json", "startLine": 42, "endLine": 42} Files larger than 100KB are automatically truncated. Files over 1MB show size info only unless specific line range is requested. @@ -144,26 +144,26 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { } // Validate required fields - if req.FilePath == "" { + if req.Path == "" { return t.createErrorResponse(fmt.Errorf("missing filePath"), "Missing required field: filePath cannot be empty") } // Get file info first to check size - fileInfo, err := os.Stat(req.FilePath) + fileInfo, err := os.Stat(req.Path) if err != nil { if os.IsNotExist(err) { return t.createErrorResponse( err, - fmt.Sprintf("File does not exist: %s. Please check file path spelling and location", req.FilePath), + fmt.Sprintf("File does not exist: %s. Please check file path spelling and location", req.Path), ) } - return t.createErrorResponse(err, fmt.Sprintf("Cannot access file %s: %s", req.FilePath, err.Error())) + return t.createErrorResponse(err, fmt.Sprintf("Cannot access file %s: %s", req.Path, err.Error())) } if fileInfo.IsDir() { return t.createErrorResponse( fmt.Errorf("path is a directory"), - fmt.Sprintf("%s is a directory, not a file. Use directory_list tool for directories", req.FilePath), + fmt.Sprintf("%s is a directory, not a file. Use directory_list tool for directories", req.Path), ) } @@ -174,16 +174,16 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { fmt.Errorf("file too large"), fmt.Sprintf( "File %s is too large (%d bytes). 
Please specify startLine and endLine to read specific sections", - req.FilePath, + req.Path, fileInfo.Size(), ), ) } // Read file content - file, err := os.Open(req.FilePath) + file, err := os.Open(req.Path) if err != nil { - return t.createErrorResponse(err, fmt.Sprintf("Failed to open file %s: %s", req.FilePath, err.Error())) + return t.createErrorResponse(err, fmt.Sprintf("Failed to open file %s: %s", req.Path, err.Error())) } defer file.Close() @@ -195,7 +195,7 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { } if err := scanner.Err(); err != nil { - return t.createErrorResponse(err, fmt.Sprintf("Error reading file %s: %s", req.FilePath, err.Error())) + return t.createErrorResponse(err, fmt.Sprintf("Error reading file %s: %s", req.Path, err.Error())) } totalLines := len(lines) @@ -264,7 +264,7 @@ func (t ReadFileTool) Call(ctx context.Context, input string) (string, error) { // Create success response response := ReadFileResponse{ Success: true, - FilePath: req.FilePath, + Path: req.Path, Content: content, IsTruncated: isTruncated, IsPartial: isPartial, diff --git a/cli/azd/internal/agent/tools/io/read_file_test.go b/cli/azd/internal/agent/tools/io/read_file_test.go index 94395555933..f9a591407b9 100644 --- a/cli/azd/internal/agent/tools/io/read_file_test.go +++ b/cli/azd/internal/agent/tools/io/read_file_test.go @@ -181,7 +181,7 @@ func TestReadFileTool_ReadEntireSmallFile(t *testing.T) { require.NoError(t, err) assert.True(t, response.Success) - assert.Equal(t, testFile, response.FilePath) + assert.Equal(t, testFile, response.Path) assert.Equal(t, testContent, response.Content) assert.False(t, response.IsTruncated) assert.False(t, response.IsPartial) diff --git a/cli/azd/internal/agent/tools/io/write_file.go b/cli/azd/internal/agent/tools/io/write_file.go index c9715b7c66a..8a858b9e44f 100644 --- a/cli/azd/internal/agent/tools/io/write_file.go +++ b/cli/azd/internal/agent/tools/io/write_file.go @@ -23,7 +23,7 @@ type WriteFileTool struct { // WriteFileRequest represents the JSON input for the write_file tool type WriteFileRequest struct { - Filename string `json:"filename"` + Path string `json:"path"` Content string `json:"content"` Mode string `json:"mode,omitempty"` // "write" (default), "append", "create" StartLine int `json:"startLine,omitempty"` // For partial write: 1-based line number (inclusive) @@ -34,7 +34,7 @@ type WriteFileRequest struct { type WriteFileResponse struct { Success bool `json:"success"` Operation string `json:"operation"` - FilePath string `json:"filePath"` + Path string `json:"path"` BytesWritten int `json:"bytesWritten"` IsPartial bool `json:"isPartial"` // True for partial write LineInfo *LineInfo `json:"lineInfo,omitempty"` // For partial write @@ -80,7 +80,7 @@ Line numbers shift when you insert/delete lines, causing corruption if you use s Input: JSON payload with the following structure: { - "filename": "path/to/file.txt", + "path": "path/to/file.txt", "content": "file content here", "mode": "write", "startLine": 5, @@ -107,22 +107,22 @@ Add startLine and endLine to any "write" operation to replace specific lines in EXAMPLES: Full file write (new or existing file): -{"filename": "./main.bicep", "content": "param location string = 'eastus'"} +{"path": "./main.bicep", "content": "param location string = 'eastus'"} Append to file: -{"filename": "./log.txt", "content": "\nNew log entry", "mode": "append"} +{"path": "./log.txt", "content": "\nNew log entry", "mode": "append"} Partial write (replace specific lines in 
EXISTING file): -{"filename": "./config.json", "content": " \"newSetting\": true,\n \"version\": \"2.0\"", "startLine": 3, "endLine": 4} +{"path": "./config.json", "content": " \"newSetting\": true,\n \"version\": \"2.0\"", "startLine": 3, "endLine": 4} Safe multi-step partial editing workflow: -1. {"filename": "file.py", "startLine": 1, "endLine": 50} // read_file to understand structure -2. {"filename": "file.py", "content": "new function", "startLine": 5, "endLine": 8} // first write -3. {"filename": "file.py", "startLine": 1, "endLine": 50} // RE-READ to get updated line numbers -4. {"filename": "file.py", "content": "updated content", "startLine": 12, "endLine": 15} // use fresh line numbers +1. {"path": "file.py", "startLine": 1, "endLine": 50} // read_file to understand structure +2. {"path": "file.py", "content": "new function", "startLine": 5, "endLine": 8} // first write +3. {"path": "file.py", "startLine": 1, "endLine": 50} // RE-READ to get updated line numbers +4. {"path": "file.py", "content": "updated content", "startLine": 12, "endLine": 15} // use fresh line numbers Create only if doesn't exist: -{"filename": "./new-file.txt", "content": "Initial content", "mode": "create"} +{"path": "./new-file.txt", "content": "Initial content", "mode": "create"} The input must be formatted as a single line valid JSON string.` } @@ -184,7 +184,7 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { } // Validate required fields - if req.Filename == "" { + if req.Path == "" { return t.createErrorResponse(fmt.Errorf("missing filename"), "Missing required field: filename cannot be empty.") } @@ -208,7 +208,7 @@ func (t WriteFileTool) Call(ctx context.Context, input string) (string, error) { } // Validate that file exists for partial write BEFORE attempting - filePath := strings.TrimSpace(req.Filename) + filePath := strings.TrimSpace(req.Path) if _, err := os.Stat(filePath); os.IsNotExist(err) { return t.createErrorResponse( err, @@ -251,7 +251,7 @@ func (t WriteFileTool) handlePartialWrite(ctx context.Context, req WriteFileRequ ) } - filePath := strings.TrimSpace(req.Filename) + filePath := strings.TrimSpace(req.Path) // Read existing file fileBytes, err := os.ReadFile(filePath) @@ -335,7 +335,7 @@ func (t WriteFileTool) handlePartialWrite(ctx context.Context, req WriteFileRequ response := WriteFileResponse{ Success: true, Operation: "Wrote (partial)", - FilePath: filePath, + Path: filePath, BytesWritten: len(newContent), IsPartial: true, LineInfo: &LineInfo{ @@ -362,7 +362,7 @@ func (t WriteFileTool) handlePartialWrite(ctx context.Context, req WriteFileRequ // handleRegularWrite handles normal file writing func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequest, mode string) (string, error) { - filePath := strings.TrimSpace(req.Filename) + filePath := strings.TrimSpace(req.Path) content := t.processContent(req.Content) // Provide feedback for large content @@ -429,7 +429,7 @@ func (t WriteFileTool) handleRegularWrite(ctx context.Context, req WriteFileRequ response := WriteFileResponse{ Success: true, Operation: operation, - FilePath: filePath, + Path: filePath, BytesWritten: len(content), IsPartial: false, FileInfo: FileInfoDetails{ diff --git a/cli/azd/internal/agent/tools/io/write_file_test.go b/cli/azd/internal/agent/tools/io/write_file_test.go index 05fb0cb4937..bc45ebe4888 100644 --- a/cli/azd/internal/agent/tools/io/write_file_test.go +++ b/cli/azd/internal/agent/tools/io/write_file_test.go @@ -86,7 +86,7 @@ func 
TestWriteFileTool_FullFileWrite(t *testing.T) { assert.True(t, response.Success) assert.Equal(t, "Wrote", response.Operation) - assert.Equal(t, testFile, response.FilePath) + assert.Equal(t, testFile, response.Path) assert.Equal(t, 13, response.BytesWritten) // "Hello, World!" length assert.False(t, response.IsPartial) assert.Nil(t, response.LineInfo) diff --git a/cli/azd/pkg/ux/canvas.go b/cli/azd/pkg/ux/canvas.go index d8b45cd4556..02a7977ad10 100644 --- a/cli/azd/pkg/ux/canvas.go +++ b/cli/azd/pkg/ux/canvas.go @@ -4,7 +4,9 @@ package ux import ( + "bytes" "io" + "os" "sync" ) @@ -14,29 +16,29 @@ type canvas struct { printer Printer writer io.Writer updateLock sync.Mutex + buffer *bytes.Buffer // Single buffer reused for all rendering } type Canvas interface { Run() error Update() error - Clear() + Clear() error Close() WithWriter(writer io.Writer) Canvas } // NewCanvas creates a new Canvas instance. func NewCanvas(visuals ...Visual) Canvas { - canvas := &canvas{ + c := &canvas{ visuals: visuals, + buffer: new(bytes.Buffer), + writer: os.Stdout, } - for _, visual := range visuals { - visual.WithCanvas(canvas) + visual.WithCanvas(c) } - - cm.Add(canvas) - - return canvas + cm.Add(c) + return c } // WithWriter sets the writer for the canvas. @@ -47,13 +49,19 @@ func (c *canvas) WithWriter(writer io.Writer) Canvas { // Run starts the canvas. func (c *canvas) Run() error { - c.printer = NewPrinter(c.writer) + if c.printer == nil { + c.printer = NewPrinter(c.buffer) + } return c.Update() } // Clear clears the canvas. -func (c *canvas) Clear() { +func (c *canvas) Clear() error { + c.updateLock.Lock() + defer c.updateLock.Unlock() + c.printer.ClearCanvas() + return c.writeBufferChunked() } // Close closes the canvas. @@ -67,7 +75,6 @@ func (c *canvas) Update() error { defer cm.Unlock() if !cm.CanUpdate(c) { - c.printer.ClearCanvas() return nil } @@ -75,11 +82,38 @@ func (c *canvas) Update() error { defer c.updateLock.Unlock() if c.printer == nil { - return nil + c.printer = NewPrinter(c.buffer) } c.printer.ClearCanvas() - return c.render() + + if err := c.render(); err != nil { + return err + } + + return c.writeBufferChunked() +} + +func (c *canvas) writeBufferChunked() error { + out := c.buffer.Bytes() + if len(out) > 4096 { + for i := 0; i < len(out); i += 4096 { + end := i + 4096 + if end > len(out) { + end = len(out) + } + if _, err := c.writer.Write(out[i:end]); err != nil { + return err + } + } + } else { + if _, err := c.writer.Write(out); err != nil { + return err + } + } + c.buffer.Reset() + + return nil } func (c *canvas) render() error { @@ -93,11 +127,9 @@ func (c *canvas) render() error { } func (c *canvas) renderVisual(visual Visual) error { - err := visual.Render(c.printer) - if err != nil { + if err := visual.Render(c.printer); err != nil { return err } - return nil } From eecef9fda4ec5913221720becd1c5395d1f2ec02 Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Tue, 19 Aug 2025 18:06:52 -0700 Subject: [PATCH 079/116] Updates read/write file tests --- .../internal/agent/tools/io/read_file_test.go | 48 +++++++++---------- .../agent/tools/io/write_file_test.go | 40 ++++++++-------- 2 files changed, 44 insertions(+), 44 deletions(-) diff --git a/cli/azd/internal/agent/tools/io/read_file_test.go b/cli/azd/internal/agent/tools/io/read_file_test.go index f9a591407b9..f15ceb60d61 100644 --- a/cli/azd/internal/agent/tools/io/read_file_test.go +++ b/cli/azd/internal/agent/tools/io/read_file_test.go @@ -86,7 +86,7 @@ func TestReadFileTool_Call_InvalidJSON(t *testing.T) { func 
TestReadFileTool_Call_MalformedJSON(t *testing.T) { tool := ReadFileTool{} - result, err := tool.Call(context.Background(), `{"filePath": "test.txt", "unclosed": "value}`) + result, err := tool.Call(context.Background(), `{"path": "test.txt", "unclosed": "value}`) assert.NoError(t, err) @@ -115,7 +115,7 @@ func TestReadFileTool_Call_MissingFilePath(t *testing.T) { func TestReadFileTool_Call_EmptyFilePath(t *testing.T) { tool := ReadFileTool{} - input := `{"filePath": "", "startLine": 1}` + input := `{"path": "", "startLine": 1}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -130,7 +130,7 @@ func TestReadFileTool_Call_EmptyFilePath(t *testing.T) { func TestReadFileTool_Call_FileNotFound(t *testing.T) { tool := ReadFileTool{} - input := `{"filePath": "/nonexistent/file.txt"}` + input := `{"path": "/nonexistent/file.txt"}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -148,7 +148,7 @@ func TestReadFileTool_Call_DirectoryInsteadOfFile(t *testing.T) { tempDir := t.TempDir() tool := ReadFileTool{} - input := fmt.Sprintf(`{"filePath": "%s"}`, strings.ReplaceAll(tempDir, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s"}`, strings.ReplaceAll(tempDir, "\\", "\\\\")) result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -171,7 +171,7 @@ func TestReadFileTool_ReadEntireSmallFile(t *testing.T) { require.NoError(t, err) tool := ReadFileTool{} - input := fmt.Sprintf(`{"filePath": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -201,7 +201,7 @@ func TestReadFileTool_ReadSingleLine(t *testing.T) { require.NoError(t, err) tool := ReadFileTool{} - input := fmt.Sprintf(`{"filePath": "%s", "startLine": 3, "endLine": 3}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s", "startLine": 3, "endLine": 3}`, strings.ReplaceAll(testFile, "\\", "\\\\")) result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -231,7 +231,7 @@ func TestReadFileTool_ReadMultipleLines(t *testing.T) { require.NoError(t, err) tool := ReadFileTool{} - input := fmt.Sprintf(`{"filePath": "%s", "startLine": 2, "endLine": 4}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s", "startLine": 2, "endLine": 4}`, strings.ReplaceAll(testFile, "\\", "\\\\")) result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -261,7 +261,7 @@ func TestReadFileTool_ReadFromStartToLine(t *testing.T) { require.NoError(t, err) tool := ReadFileTool{} - input := fmt.Sprintf(`{"filePath": "%s", "endLine": 3}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s", "endLine": 3}`, strings.ReplaceAll(testFile, "\\", "\\\\")) result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -288,7 +288,7 @@ func TestReadFileTool_ReadFromLineToEnd(t *testing.T) { require.NoError(t, err) tool := ReadFileTool{} - input := fmt.Sprintf(`{"filePath": "%s", "startLine": 3}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s", "startLine": 3}`, strings.ReplaceAll(testFile, "\\", "\\\\")) result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -315,7 +315,7 @@ func TestReadFileTool_StartLineOutOfRange(t *testing.T) { require.NoError(t, err) tool := ReadFileTool{} - input := fmt.Sprintf(`{"filePath": "%s", 
"startLine": 10, "endLine": 15}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s", "startLine": 10, "endLine": 15}`, strings.ReplaceAll(testFile, "\\", "\\\\")) result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -337,7 +337,7 @@ func TestReadFileTool_InvalidLineRange_StartGreaterThanEnd(t *testing.T) { require.NoError(t, err) tool := ReadFileTool{} - input := fmt.Sprintf(`{"filePath": "%s", "startLine": 4, "endLine": 2}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s", "startLine": 4, "endLine": 2}`, strings.ReplaceAll(testFile, "\\", "\\\\")) result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -359,7 +359,7 @@ func TestReadFileTool_EndLineExceedsTotalLines(t *testing.T) { require.NoError(t, err) tool := ReadFileTool{} - input := fmt.Sprintf(`{"filePath": "%s", "startLine": 2, "endLine": 10}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s", "startLine": 2, "endLine": 10}`, strings.ReplaceAll(testFile, "\\", "\\\\")) result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -385,7 +385,7 @@ func TestReadFileTool_EmptyFile(t *testing.T) { require.NoError(t, err) tool := ReadFileTool{} - input := fmt.Sprintf(`{"filePath": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -410,7 +410,7 @@ func TestReadFileTool_SingleLineFile(t *testing.T) { require.NoError(t, err) tool := ReadFileTool{} - input := fmt.Sprintf(`{"filePath": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -435,7 +435,7 @@ func TestReadFileTool_FileWithOnlyNewlines(t *testing.T) { require.NoError(t, err) tool := ReadFileTool{} - input := fmt.Sprintf(`{"filePath": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -466,7 +466,7 @@ func TestReadFileTool_LargeFileWithoutLineRange(t *testing.T) { require.Greater(t, fileInfo.Size(), int64(1024*1024)) // Greater than 1MB tool := ReadFileTool{} - input := fmt.Sprintf(`{"filePath": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -496,7 +496,7 @@ func TestReadFileTool_LargeFileWithLineRange(t *testing.T) { require.Greater(t, fileInfo.Size(), int64(1024*1024)) // Greater than 1MB tool := ReadFileTool{} - input := fmt.Sprintf(`{"filePath": "%s", "startLine": 100, "endLine": 102}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s", "startLine": 100, "endLine": 102}`, strings.ReplaceAll(testFile, "\\", "\\\\")) result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -530,7 +530,7 @@ func TestReadFileTool_ContentTruncation(t *testing.T) { require.NoError(t, err) tool := ReadFileTool{} - input := fmt.Sprintf(`{"filePath": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) result, err := tool.Call(context.Background(), 
input) assert.NoError(t, err) @@ -557,7 +557,7 @@ func TestReadFileTool_SpecialCharacters(t *testing.T) { require.NoError(t, err) tool := ReadFileTool{} - input := fmt.Sprintf(`{"filePath": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -583,7 +583,7 @@ func TestReadFileTool_WindowsLineEndings(t *testing.T) { require.NoError(t, err) tool := ReadFileTool{} - input := fmt.Sprintf(`{"filePath": "%s", "startLine": 2, "endLine": 2}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s", "startLine": 2, "endLine": 2}`, strings.ReplaceAll(testFile, "\\", "\\\\")) result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -611,7 +611,7 @@ func TestReadFileTool_FileInfoMetadata(t *testing.T) { require.NoError(t, err) tool := ReadFileTool{} - input := fmt.Sprintf(`{"filePath": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s"}`, strings.ReplaceAll(testFile, "\\", "\\\\")) result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -639,7 +639,7 @@ func TestReadFileTool_JSONResponseStructure(t *testing.T) { require.NoError(t, err) tool := ReadFileTool{} - input := fmt.Sprintf(`{"filePath": "%s", "startLine": 1, "endLine": 1}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s", "startLine": 1, "endLine": 1}`, strings.ReplaceAll(testFile, "\\", "\\\\")) result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -651,7 +651,7 @@ func TestReadFileTool_JSONResponseStructure(t *testing.T) { // Check required fields exist assert.Contains(t, jsonResult, "success") - assert.Contains(t, jsonResult, "filePath") + assert.Contains(t, jsonResult, "path") assert.Contains(t, jsonResult, "content") assert.Contains(t, jsonResult, "isTruncated") assert.Contains(t, jsonResult, "isPartial") @@ -675,7 +675,7 @@ func TestReadFileTool_ZeroBasedToOneBasedConversion(t *testing.T) { tool := ReadFileTool{} // Test reading line 1 (should be "Line 1", not "Line 2") - input := fmt.Sprintf(`{"filePath": "%s", "startLine": 1, "endLine": 1}`, strings.ReplaceAll(testFile, "\\", "\\\\")) + input := fmt.Sprintf(`{"path": "%s", "startLine": 1, "endLine": 1}`, strings.ReplaceAll(testFile, "\\", "\\\\")) result, err := tool.Call(context.Background(), input) assert.NoError(t, err) diff --git a/cli/azd/internal/agent/tools/io/write_file_test.go b/cli/azd/internal/agent/tools/io/write_file_test.go index bc45ebe4888..07dc846dffb 100644 --- a/cli/azd/internal/agent/tools/io/write_file_test.go +++ b/cli/azd/internal/agent/tools/io/write_file_test.go @@ -50,7 +50,7 @@ func TestWriteFileTool_Call_InvalidJSON(t *testing.T) { func TestWriteFileTool_Call_MalformedJSON(t *testing.T) { tool := WriteFileTool{} // Test with JSON that has parse errors - result, err := tool.Call(context.Background(), `{"filename": "test.txt", "content": "unclosed string}`) + result, err := tool.Call(context.Background(), `{"path": "test.txt", "content": "unclosed string}`) assert.NoError(t, err) assert.Contains(t, result, "error") @@ -74,7 +74,7 @@ func TestWriteFileTool_FullFileWrite(t *testing.T) { testFile := filepath.Join(tempDir, "test.txt") tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "Hello, World!"}` + input := `{"path": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + 
`", "content": "Hello, World!"}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -108,7 +108,7 @@ func TestWriteFileTool_AppendMode(t *testing.T) { require.NoError(t, err) tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll( + input := `{"path": "` + strings.ReplaceAll( testFile, "\\", "\\\\", @@ -138,7 +138,7 @@ func TestWriteFileTool_CreateMode_Success(t *testing.T) { testFile := filepath.Join(tempDir, "new-file.txt") tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll( + input := `{"path": "` + strings.ReplaceAll( testFile, "\\", "\\\\", @@ -170,7 +170,7 @@ func TestWriteFileTool_CreateMode_FileExists(t *testing.T) { require.NoError(t, err) tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New content", "mode": "create"}` + input := `{"path": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "New content", "mode": "create"}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) @@ -196,7 +196,7 @@ func TestWriteFileTool_PartialWrite_Basic(t *testing.T) { require.NoError(t, err) tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll( + input := `{"path": "` + strings.ReplaceAll( testFile, "\\", "\\\\", @@ -236,7 +236,7 @@ func TestWriteFileTool_PartialWrite_SingleLine(t *testing.T) { require.NoError(t, err) tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll( + input := `{"path": "` + strings.ReplaceAll( testFile, "\\", "\\\\", @@ -275,7 +275,7 @@ func TestWriteFileTool_PartialWrite_SingleLineToMultipleLines(t *testing.T) { tool := WriteFileTool{} // Replace single line 2 with multiple lines - input := `{"filename": "` + strings.ReplaceAll( + input := `{"path": "` + strings.ReplaceAll( testFile, "\\", "\\\\", @@ -310,7 +310,7 @@ func TestWriteFileTool_PartialWrite_FileNotExists(t *testing.T) { testFile := filepath.Join(tempDir, "nonexistent.txt") tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll( + input := `{"path": "` + strings.ReplaceAll( testFile, "\\", "\\\\", @@ -337,21 +337,21 @@ func TestWriteFileTool_PartialWrite_InvalidLineNumbers(t *testing.T) { tool := WriteFileTool{} // Test startLine provided but not endLine - input := `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "startLine": 1}` + input := `{"path": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "startLine": 1}` result, err := tool.Call(context.Background(), input) assert.NoError(t, err) assert.Contains(t, result, "error") assert.Contains(t, result, "Both startLine and endLine must be provided") // Test endLine provided but not startLine - input = `{"filename": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "endLine": 1}` + input = `{"path": "` + strings.ReplaceAll(testFile, "\\", "\\\\") + `", "content": "content", "endLine": 1}` result, err = tool.Call(context.Background(), input) assert.NoError(t, err) assert.Contains(t, result, "error") assert.Contains(t, result, "Both startLine and endLine must be provided") // Test startLine < 1 (this will trigger the partial write validation) - input = `{"filename": "` + strings.ReplaceAll( + input = `{"path": "` + strings.ReplaceAll( testFile, "\\", "\\\\", @@ -362,7 +362,7 @@ func TestWriteFileTool_PartialWrite_InvalidLineNumbers(t *testing.T) { assert.Contains(t, result, "Both startLine and endLine must be provided") // 0 is treated as "not 
provided" // Test valid line numbers but startLine > endLine - input = `{"filename": "` + strings.ReplaceAll( + input = `{"path": "` + strings.ReplaceAll( testFile, "\\", "\\\\", @@ -385,7 +385,7 @@ func TestWriteFileTool_PartialWrite_BeyondFileLength(t *testing.T) { tool := WriteFileTool{} // Try to replace lines 2-5 (beyond file length) - input := `{"filename": "` + strings.ReplaceAll( + input := `{"path": "` + strings.ReplaceAll( testFile, "\\", "\\\\", @@ -420,7 +420,7 @@ func TestWriteFileTool_PartialWrite_PreserveLineEndings(t *testing.T) { require.NoError(t, err) tool := WriteFileTool{} - input := `{"filename": "` + strings.ReplaceAll( + input := `{"path": "` + strings.ReplaceAll( testFile, "\\", "\\\\", @@ -483,7 +483,7 @@ func TestWriteFileTool_Integration_ComplexScenario(t *testing.T) { tool := WriteFileTool{} // Step 1: Create initial file - input := `{"filename": "` + strings.ReplaceAll( + input := `{"path": "` + strings.ReplaceAll( testFile, "\\", "\\\\", @@ -493,7 +493,7 @@ func TestWriteFileTool_Integration_ComplexScenario(t *testing.T) { assert.Contains(t, result, `"success": true`) // Step 2: Append new section - input = `{"filename": "` + strings.ReplaceAll( + input = `{"path": "` + strings.ReplaceAll( testFile, "\\", "\\\\", @@ -503,7 +503,7 @@ func TestWriteFileTool_Integration_ComplexScenario(t *testing.T) { assert.Contains(t, result, `"success": true`) // Step 3: Update specific lines (change port and debug) - input = `{"filename": "` + strings.ReplaceAll( + input = `{"path": "` + strings.ReplaceAll( testFile, "\\", "\\\\", @@ -536,7 +536,7 @@ func TestWriteFileTool_PartialWrite_InvalidLineRanges(t *testing.T) { tool := WriteFileTool{} // Test negative startLine (will be handled by partial write validation) - input := `{"filename": "` + strings.ReplaceAll( + input := `{"path": "` + strings.ReplaceAll( testFile, "\\", "\\\\", @@ -547,7 +547,7 @@ func TestWriteFileTool_PartialWrite_InvalidLineRanges(t *testing.T) { assert.Contains(t, result, "startLine must be") // Test negative endLine - input = `{"filename": "` + strings.ReplaceAll( + input = `{"path": "` + strings.ReplaceAll( testFile, "\\", "\\\\", From e0871ef2077366c62954812aa3186e9d716179c5 Mon Sep 17 00:00:00 2001 From: hemarina Date: Thu, 21 Aug 2025 12:26:43 -0700 Subject: [PATCH 080/116] debug --- cli/azd/cmd/mcp.go | 1 + cli/azd/cmd/middleware/error.go | 91 +++++++++++++++++++ cli/azd/cmd/root.go | 1 + ...on_common_error.go => azd_common_error.go} | 0 ...ooting.go => azd_error_troubleshooting.go} | 4 + .../prompts/azd_error_troubleshooting.md | 2 +- cli/azd/internal/mcp/tools/prompts/prompts.go | 6 ++ 7 files changed, 104 insertions(+), 1 deletion(-) create mode 100644 cli/azd/cmd/middleware/error.go rename cli/azd/internal/mcp/tools/{azd_provision_common_error.go => azd_common_error.go} (100%) rename cli/azd/internal/mcp/tools/{azd_provision_error_troubleshooting.go => azd_error_troubleshooting.go} (86%) diff --git a/cli/azd/cmd/mcp.go b/cli/azd/cmd/mcp.go index 332448e1d94..451b0e53f1a 100644 --- a/cli/azd/cmd/mcp.go +++ b/cli/azd/cmd/mcp.go @@ -163,6 +163,7 @@ func (a *mcpStartAction) Run(ctx context.Context) (*actions.ActionResult, error) tools.NewAzdProjectValidationTool(), tools.NewAzdYamlSchemaTool(), tools.NewSamplingTool(), + tools.NewAzdErrorTroubleShootingTool(), } s.AddTools(allTools...) 
diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go new file mode 100644 index 00000000000..3384b9106ac --- /dev/null +++ b/cli/azd/cmd/middleware/error.go @@ -0,0 +1,91 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package middleware + +import ( + "context" + "fmt" + + "github.com/azure/azure-dev/cli/azd/cmd/actions" + "github.com/azure/azure-dev/cli/azd/internal" + "github.com/azure/azure-dev/cli/azd/internal/agent" + "github.com/azure/azure-dev/cli/azd/pkg/input" + "github.com/azure/azure-dev/cli/azd/pkg/output" + "github.com/azure/azure-dev/cli/azd/pkg/output/ux" +) + +type ErrorMiddleware struct { + options *Options + console input.Console + agentFactory *agent.AgentFactory + global *internal.GlobalCommandOptions +} + +func NewErrorMiddleware(options *Options, console input.Console, agentFactory *agent.AgentFactory, global *internal.GlobalCommandOptions) Middleware { + return &ErrorMiddleware{ + options: options, + console: console, + agentFactory: agentFactory, + global: global, + } +} + +func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.ActionResult, error) { + // if m.options.IsChildAction(ctx) { + // return next(ctx) + // } + var actionResult *actions.ActionResult + var err error + + for { + actionResult, err = next(ctx) + originalError := err + + if err == nil { + break + } + + e.console.StopSpinner(ctx, "", input.Step) + e.console.Message(ctx, output.WithErrorFormat("\nERROR: %s", err.Error())) + + // Explicitly call the troubleshooting tool + azdAgent, cleanup, err := e.agentFactory.Create(agent.WithDebug(e.global.EnableDebugLogging)) + if err != nil { + return nil, err + } + + defer cleanup() + + // TODO: Check the prompt with copilot + agentOutput, err := azdAgent.SendMessage(ctx, fmt.Sprintf( + `Steps to follow: + 1. Use available tool to explain and diagnose this error when running azd command. + 2. Resolve the error by iterating and attempting to solve all error until they're working. + This is the error messages: %s`, originalError.Error())) + if err != nil { + if agentOutput != "" { + e.console.Message(ctx, output.WithMarkdown(agentOutput)) + } + + return nil, err + } + + e.console.Message(ctx, "Test") + e.console.Message(ctx, fmt.Sprintf("%s:", output.AzdAgentLabel())) + e.console.Message(ctx, output.WithMarkdown(agentOutput)) + e.console.Message(ctx, "") + } + + if actionResult != nil && actionResult.Message != nil { + displayResult := &ux.ActionResult{ + SuccessMessage: actionResult.Message.Header, + FollowUp: actionResult.Message.FollowUp, + } + + e.console.Message(ctx, "test") + e.console.MessageUxItem(ctx, displayResult) + } + + return actionResult, err +} diff --git a/cli/azd/cmd/root.go b/cli/azd/cmd/root.go index 5e2fdc45b69..588a7668c4f 100644 --- a/cli/azd/cmd/root.go +++ b/cli/azd/cmd/root.go @@ -363,6 +363,7 @@ func NewRootCmd( root. UseMiddleware("debug", middleware.NewDebugMiddleware). UseMiddleware("ux", middleware.NewUxMiddleware). + UseMiddleware("error", middleware.NewErrorMiddleware). UseMiddlewareWhen("telemetry", middleware.NewTelemetryMiddleware, func(descriptor *actions.ActionDescriptor) bool { return !descriptor.Options.DisableTelemetry }). 
diff --git a/cli/azd/internal/mcp/tools/azd_provision_common_error.go b/cli/azd/internal/mcp/tools/azd_common_error.go similarity index 100% rename from cli/azd/internal/mcp/tools/azd_provision_common_error.go rename to cli/azd/internal/mcp/tools/azd_common_error.go diff --git a/cli/azd/internal/mcp/tools/azd_provision_error_troubleshooting.go b/cli/azd/internal/mcp/tools/azd_error_troubleshooting.go similarity index 86% rename from cli/azd/internal/mcp/tools/azd_provision_error_troubleshooting.go rename to cli/azd/internal/mcp/tools/azd_error_troubleshooting.go index 80f8b803114..1c8c0dfa6cb 100644 --- a/cli/azd/internal/mcp/tools/azd_provision_error_troubleshooting.go +++ b/cli/azd/internal/mcp/tools/azd_error_troubleshooting.go @@ -16,6 +16,10 @@ func NewAzdErrorTroubleShootingTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_error_troubleshooting", + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(false), mcp.WithDescription( `Returns instructions for diagnosing any error from azd commands and providing suggested actions for resolution. diff --git a/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md b/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md index 5ac3867e4c7..a147cd1b06c 100644 --- a/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md +++ b/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md @@ -3,7 +3,7 @@ ✅ **Agent Task List** 1. **Error Classification:** Identify the specific error type (Azure REST API, ARM Deployment, Authentication, Local Tool Installation or General) -2. **Error Analysis:** Explain what the error means and its root causes +2. **Error Analysis:** Explain what the error means and its root causes. Note that this error occurs when running Azure Developer CLI. 3. **Troubleshooting Steps:** Provide manual, Azure Portal and Azure CLI-based solutions only if user installed Azure CLI 4. **Infrastructure Fixes:** Suggest specific Bicep or Terraform file corrections based on user's infra folder 5. 
**Verification:** Provide Azure Portal to validate fixes and Azure CLI-based solutions only if user installed Azure CLI diff --git a/cli/azd/internal/mcp/tools/prompts/prompts.go b/cli/azd/internal/mcp/tools/prompts/prompts.go index b5166fcef63..8762ecedcba 100644 --- a/cli/azd/internal/mcp/tools/prompts/prompts.go +++ b/cli/azd/internal/mcp/tools/prompts/prompts.go @@ -30,3 +30,9 @@ var AzdDockerGenerationPrompt string //go:embed azd_project_validation.md var AzdProjectValidationPrompt string + +//go:embed azd_common_error.md +var AzdCommonErrorPrompt string + +//go:embed azd_error_troubleshooting.md +var AzdErrorTroubleShootingPrompt string From 762bffdeabce6453d61477196574e516680a353b Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Thu, 21 Aug 2025 17:54:26 -0700 Subject: [PATCH 081/116] Fixes issue where agent hangs on blocking thought channel --- cli/azd/cmd/init.go | 100 +----------------- cli/azd/internal/agent/agent.go | 6 -- .../internal/agent/conversational_agent.go | 85 +++++++++++++++ 3 files changed, 87 insertions(+), 104 deletions(-) diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go index 242df304517..e898db0afcc 100644 --- a/cli/azd/cmd/init.go +++ b/cli/azd/cmd/init.go @@ -16,7 +16,6 @@ import ( "github.com/azure/azure-dev/cli/azd/cmd/actions" "github.com/azure/azure-dev/cli/azd/internal" "github.com/azure/azure-dev/cli/azd/internal/agent" - "github.com/azure/azure-dev/cli/azd/internal/agent/logging" "github.com/azure/azure-dev/cli/azd/internal/repository" "github.com/azure/azure-dev/cli/azd/internal/tracing" "github.com/azure/azure-dev/cli/azd/internal/tracing/fields" @@ -384,8 +383,6 @@ func (i *initAction) initAppWithAgent(ctx context.Context) error { return err } - agentThoughts := azdAgent.Thoughts() - defer cleanup() type initStep struct { @@ -434,7 +431,6 @@ Do not stop until all tasks are complete and fully resolved. if err := i.collectAndApplyFeedback( ctx, azdAgent, - agentThoughts, feedbackMsg, ); err != nil { return err @@ -448,17 +444,7 @@ Do not stop until all tasks are complete and fully resolved. "Provide a very brief summary in markdown format that includes any files generated during this step.", }, "\n")) - thoughtsCtx, cancelThoughts := context.WithCancel(ctx) - cleanup, err := renderThoughts(thoughtsCtx, agentThoughts) - if err != nil { - cancelThoughts() - return err - } - agentOutput, err := azdAgent.SendMessage(ctx, fullTaskInput) - cancelThoughts() - cleanup() - if err != nil { if agentOutput != "" { i.console.Message(ctx, output.WithMarkdown(agentOutput)) @@ -474,7 +460,7 @@ Do not stop until all tasks are complete and fully resolved. } // Post-completion feedback loop - if err := i.postCompletionFeedbackLoop(ctx, azdAgent, agentThoughts); err != nil { + if err := i.postCompletionFeedbackLoop(ctx, azdAgent); err != nil { return err } @@ -485,7 +471,6 @@ Do not stop until all tasks are complete and fully resolved. 
func (i *initAction) collectAndApplyFeedback( ctx context.Context, azdAgent agent.Agent, - agentThoughts <-chan logging.Thought, promptMessage string, ) error { // Loop to allow multiple rounds of feedback @@ -523,17 +508,7 @@ func (i *initAction) collectAndApplyFeedback( if userInput != "" { i.console.Message(ctx, color.MagentaString("Feedback")) - thoughtsCtx, cancelThoughts := context.WithCancel(ctx) - cleanup, err := renderThoughts(thoughtsCtx, agentThoughts) - if err != nil { - cancelThoughts() - return err - } - feedbackOutput, err := azdAgent.SendMessage(ctx, userInput) - cancelThoughts() - cleanup() - if err != nil { if feedbackOutput != "" { i.console.Message(ctx, output.WithMarkdown(feedbackOutput)) @@ -555,13 +530,12 @@ func (i *initAction) collectAndApplyFeedback( func (i *initAction) postCompletionFeedbackLoop( ctx context.Context, azdAgent agent.Agent, - agentThoughts <-chan logging.Thought, ) error { i.console.Message(ctx, "") i.console.Message(ctx, "🎉 All initialization steps completed!") i.console.Message(ctx, "") - return i.collectAndApplyFeedback(ctx, azdAgent, agentThoughts, "Any final feedback or changes?") + return i.collectAndApplyFeedback(ctx, azdAgent, "Any final feedback or changes?") } type initType int @@ -574,76 +548,6 @@ const ( initWithAgent ) -func renderThoughts(ctx context.Context, agentThoughts <-chan logging.Thought) (func(), error) { - var latestThought string - - spinner := uxlib.NewSpinner(&uxlib.SpinnerOptions{ - Text: "Thinking...", - }) - - canvas := uxlib.NewCanvas( - spinner, - uxlib.NewVisualElement(func(printer uxlib.Printer) error { - printer.Fprintln() - printer.Fprintln() - - if latestThought != "" { - printer.Fprintln(color.HiBlackString(latestThought)) - printer.Fprintln() - printer.Fprintln() - } - - return nil - })) - - go func() { - defer canvas.Clear() - - var latestAction string - var latestActionInput string - var spinnerText string - - for { - - select { - case thought := <-agentThoughts: - if thought.Action != "" { - latestAction = thought.Action - latestActionInput = thought.ActionInput - } - if thought.Thought != "" { - latestThought = thought.Thought - } - case <-ctx.Done(): - return - case <-time.After(200 * time.Millisecond): - } - - // Update spinner text - if latestAction == "" { - spinnerText = "Thinking..." - } else { - spinnerText = fmt.Sprintf("Running %s tool", color.GreenString(latestAction)) - if latestActionInput != "" { - spinnerText += " with " + color.GreenString(latestActionInput) - } - - spinnerText += "..." 
- } - - spinner.UpdateText(spinnerText) - canvas.Update() - } - }() - - cleanup := func() { - canvas.Clear() - canvas.Close() - } - - return cleanup, canvas.Run() -} - func promptInitType(console input.Console, ctx context.Context, featuresManager *alpha.FeatureManager) (initType, error) { options := []string{ "Scan current directory", // This now covers minimal project creation too diff --git a/cli/azd/internal/agent/agent.go b/cli/azd/internal/agent/agent.go index 035d6ea33d3..dcdaaa8c394 100644 --- a/cli/azd/internal/agent/agent.go +++ b/cli/azd/internal/agent/agent.go @@ -28,12 +28,6 @@ type agentBase struct { type Agent interface { SendMessage(ctx context.Context, args ...string) (string, error) - Thoughts() <-chan logging.Thought -} - -// Thoughts returns a channel for receiving thoughts generated by the agent -func (a *agentBase) Thoughts() <-chan logging.Thought { - return a.thoughtChan } // AgentOption is a functional option for configuring an Agent diff --git a/cli/azd/internal/agent/conversational_agent.go b/cli/azd/internal/agent/conversational_agent.go index 2e09c9f04ad..2de21dd8cd3 100644 --- a/cli/azd/internal/agent/conversational_agent.go +++ b/cli/azd/internal/agent/conversational_agent.go @@ -10,8 +10,10 @@ import ( "fmt" "os" "strings" + "time" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + uxlib "github.com/azure/azure-dev/cli/azd/pkg/ux" "github.com/fatih/color" "github.com/tmc/langchaingo/agents" "github.com/tmc/langchaingo/chains" @@ -140,10 +142,93 @@ func (aai *ConversationalAzdAiAgent) StartConversation(ctx context.Context, args // runChain executes a user query through the agent's chain with memory and returns the response func (aai *ConversationalAzdAiAgent) runChain(ctx context.Context, userInput string) (string, error) { + thoughtsCtx, cancelCtx := context.WithCancel(ctx) + cleanup, err := aai.renderThoughts(thoughtsCtx) + if err != nil { + cancelCtx() + return "", err + } + + defer func() { + cleanup() + cancelCtx() + }() + // Execute with enhanced input - agent should automatically handle memory output, err := chains.Run(ctx, aai.executor, userInput) if err != nil { return "", err } + return output, nil } + +func (aai *ConversationalAzdAiAgent) renderThoughts(ctx context.Context) (func(), error) { + var latestThought string + + spinner := uxlib.NewSpinner(&uxlib.SpinnerOptions{ + Text: "Thinking...", + }) + + canvas := uxlib.NewCanvas( + spinner, + uxlib.NewVisualElement(func(printer uxlib.Printer) error { + printer.Fprintln() + printer.Fprintln() + + if latestThought != "" { + printer.Fprintln(color.HiBlackString(latestThought)) + printer.Fprintln() + printer.Fprintln() + } + + return nil + })) + + go func() { + defer canvas.Clear() + + var latestAction string + var latestActionInput string + var spinnerText string + + for { + + select { + case thought := <-aai.thoughtChan: + if thought.Action != "" { + latestAction = thought.Action + latestActionInput = thought.ActionInput + } + if thought.Thought != "" { + latestThought = thought.Thought + } + case <-ctx.Done(): + return + case <-time.After(200 * time.Millisecond): + } + + // Update spinner text + if latestAction == "" { + spinnerText = "Thinking..." + } else { + spinnerText = fmt.Sprintf("Running %s tool", color.GreenString(latestAction)) + if latestActionInput != "" { + spinnerText += " with " + color.GreenString(latestActionInput) + } + + spinnerText += "..." 
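+					// spinnerText now reads "Running <tool> tool with <input>..." for the tool call currently in flight; the 200ms timeout case above keeps this refresh loop from blocking on the thought channel.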
+ } + + spinner.UpdateText(spinnerText) + canvas.Update() + } + }() + + cleanup := func() { + canvas.Clear() + canvas.Close() + } + + return cleanup, canvas.Run() +} From 644117f876403ecf00eeea74e76bf54a5d31d693 Mon Sep 17 00:00:00 2001 From: hemarina Date: Wed, 27 Aug 2025 22:41:07 -0700 Subject: [PATCH 082/116] fix error middleware workflow bug --- cli/azd/cmd/middleware/error.go | 185 +++++++++++++----- .../mcp/tools/azd_error_troubleshooting.go | 8 +- .../prompts/azd_error_troubleshooting.md | 60 +++--- 3 files changed, 166 insertions(+), 87 deletions(-) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index 3384b9106ac..c4dced4e88f 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -5,87 +5,170 @@ package middleware import ( "context" + "errors" "fmt" + "strings" "github.com/azure/azure-dev/cli/azd/cmd/actions" "github.com/azure/azure-dev/cli/azd/internal" "github.com/azure/azure-dev/cli/azd/internal/agent" + "github.com/azure/azure-dev/cli/azd/pkg/alpha" "github.com/azure/azure-dev/cli/azd/pkg/input" + "github.com/azure/azure-dev/cli/azd/pkg/llm" "github.com/azure/azure-dev/cli/azd/pkg/output" - "github.com/azure/azure-dev/cli/azd/pkg/output/ux" + "github.com/azure/azure-dev/cli/azd/pkg/tools" ) type ErrorMiddleware struct { - options *Options - console input.Console - agentFactory *agent.AgentFactory - global *internal.GlobalCommandOptions + options *Options + console input.Console + agentFactory *agent.AgentFactory + global *internal.GlobalCommandOptions + featuresManager *alpha.FeatureManager } -func NewErrorMiddleware(options *Options, console input.Console, agentFactory *agent.AgentFactory, global *internal.GlobalCommandOptions) Middleware { +func NewErrorMiddleware(options *Options, console input.Console, agentFactory *agent.AgentFactory, global *internal.GlobalCommandOptions, featuresManager *alpha.FeatureManager) Middleware { return &ErrorMiddleware{ - options: options, - console: console, - agentFactory: agentFactory, - global: global, + options: options, + console: console, + agentFactory: agentFactory, + global: global, + featuresManager: featuresManager, } } func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.ActionResult, error) { - // if m.options.IsChildAction(ctx) { - // return next(ctx) - // } - var actionResult *actions.ActionResult - var err error - - for { - actionResult, err = next(ctx) + if e.featuresManager.IsEnabled(llm.FeatureLlm) { + if e.options.IsChildAction(ctx) { + return next(ctx) + } + + actionResult, err := next(ctx) + attempt := 0 + var previousError error originalError := err - if err == nil { - break + // skipAnalyzingErrors := []error{ + // context.Canceled, + // } + skipAnalyzingErrors := []string{ + "environment already initialized", + "interrupt", } - e.console.StopSpinner(ctx, "", input.Step) - e.console.Message(ctx, output.WithErrorFormat("\nERROR: %s", err.Error())) + for { + if err == nil { + break + } - // Explicitly call the troubleshooting tool - azdAgent, cleanup, err := e.agentFactory.Create(agent.WithDebug(e.global.EnableDebugLogging)) - if err != nil { - return nil, err - } + // for _, e := range skipAnalyzingErrors { + // if errors.As(err, &e) || errors.Is(err, e) { + // return actionResult, err + // } + // } - defer cleanup() - - // TODO: Check the prompt with copilot - agentOutput, err := azdAgent.SendMessage(ctx, fmt.Sprintf( - `Steps to follow: - 1. Use available tool to explain and diagnose this error when running azd command. - 2. 
Resolve the error by iterating and attempting to solve all error until they're working. - This is the error messages: %s`, originalError.Error())) - if err != nil { - if agentOutput != "" { - e.console.Message(ctx, output.WithMarkdown(agentOutput)) + for _, s := range skipAnalyzingErrors { + if strings.Contains(err.Error(), s) { + return actionResult, err + } } - return nil, err - } + if previousError != nil && errors.Is(originalError, previousError) { + attempt++ + if attempt > 3 { + e.console.Message(ctx, "AI was unable to resolve the error after multiple attempts. Please review the error and fix it manually.") + return actionResult, err + } + } - e.console.Message(ctx, "Test") - e.console.Message(ctx, fmt.Sprintf("%s:", output.AzdAgentLabel())) - e.console.Message(ctx, output.WithMarkdown(agentOutput)) - e.console.Message(ctx, "") - } + // e.console.Confirm(ctx, input.ConsoleOptions{ + // Message: "Debugger Ready?", + // DefaultValue: true, + // }) + e.console.StopSpinner(ctx, "", input.Step) + e.console.Message(ctx, output.WithErrorFormat("\nERROR: %s", originalError.Error())) + + // Warn user that this is an alpha feature + e.console.WarnForFeature(ctx, llm.FeatureLlm) + + azdAgent, cleanup, err := e.agentFactory.Create(agent.WithDebug(e.global.EnableDebugLogging)) + if err != nil { + return nil, err + } - if actionResult != nil && actionResult.Message != nil { - displayResult := &ux.ActionResult{ - SuccessMessage: actionResult.Message.Header, - FollowUp: actionResult.Message.FollowUp, + defer cleanup() + + agentOutput, err := azdAgent.SendMessage(ctx, fmt.Sprintf( + `Steps to follow: + 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. + 2. Provide actionable troubleshooting steps. + Error details: %s`, originalError.Error())) + if err != nil { + if agentOutput != "" { + e.console.Message(ctx, output.WithMarkdown(agentOutput)) + } + + return nil, err + } + + // Ask user if they want to let AI fix the error + selection, err := e.console.Select(ctx, input.ConsoleOptions{ + Message: "Do you want to continue to fix the error using AI?", + Options: []string{ + "Yes", + "No", + }, + }) + + if err != nil { + return nil, fmt.Errorf("prompting failed to confirm selection: %w", err) + } + + switch selection { + case 0: // fix the error + previousError = originalError + agentOutput, err = azdAgent.SendMessage(ctx, fmt.Sprintf( + `Steps to follow: + 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. + 2. Resolve the error by iterating and attempting to solve all errors until the azd command succeeds. 
+ Error details: %s`, originalError.Error())) + + if err != nil { + if agentOutput != "" { + e.console.Message(ctx, output.WithMarkdown(agentOutput)) + } + + return nil, err + } + case 1: + confirm, err := e.console.Confirm(ctx, input.ConsoleOptions{ + Message: "Provide AI generated troubleshooting steps?", + DefaultValue: true, + }) + if err != nil { + return nil, fmt.Errorf("prompting to provide troubleshooting steps: %w", err) + } + + if confirm { + // Provide manual steps for troubleshooting + e.console.Message(ctx, "") + e.console.Message(ctx, fmt.Sprintf("%s:", output.AzdAgentLabel())) + e.console.Message(ctx, output.WithMarkdown(agentOutput)) + e.console.Message(ctx, "") + } + + return actionResult, err + } + + ctx = tools.WithInstalledCheckCache(ctx) + actionResult, err = next(ctx) + originalError = err } - e.console.Message(ctx, "test") - e.console.MessageUxItem(ctx, displayResult) + return actionResult, err } + actionResult, err := next(ctx) + return actionResult, err } diff --git a/cli/azd/internal/mcp/tools/azd_error_troubleshooting.go b/cli/azd/internal/mcp/tools/azd_error_troubleshooting.go index 1c8c0dfa6cb..23a203fbf64 100644 --- a/cli/azd/internal/mcp/tools/azd_error_troubleshooting.go +++ b/cli/azd/internal/mcp/tools/azd_error_troubleshooting.go @@ -21,14 +21,14 @@ func NewAzdErrorTroubleShootingTool() server.ServerTool { mcp.WithDestructiveHintAnnotation(false), mcp.WithOpenWorldHintAnnotation(false), mcp.WithDescription( - `Returns instructions for diagnosing any error from azd commands and providing suggested actions for resolution. + `Returns instructions for diagnosing errors from Azure Developer CLI (azd) commands and provides step-by-step troubleshooting instructions. The LLM agent should execute these instructions using available tools. Use this tool when: -- Error occurs when running azd commands -- Need to identify the type of error and get actionable suggestions -- Ready to troubleshoot errors`, +- Any error occurs during Azure Developer CLI (azd) command execution +- Need to classify, analyze, and resolve errors automatically or with guided steps +- Provide troubleshooting steps for errors`, ), ), Handler: handleAzdErrorTroubleShooting, diff --git a/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md b/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md index a147cd1b06c..9d11bc6213f 100644 --- a/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md +++ b/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md @@ -3,19 +3,17 @@ ✅ **Agent Task List** 1. **Error Classification:** Identify the specific error type (Azure REST API, ARM Deployment, Authentication, Local Tool Installation or General) -2. **Error Analysis:** Explain what the error means and its root causes. Note that this error occurs when running Azure Developer CLI. -3. **Troubleshooting Steps:** Provide manual, Azure Portal and Azure CLI-based solutions only if user installed Azure CLI -4. **Infrastructure Fixes:** Suggest specific Bicep or Terraform file corrections based on user's infra folder -5. **Verification:** Provide Azure Portal to validate fixes and Azure CLI-based solutions only if user installed Azure CLI -6. **Resolution Confirmation:** Ensure the issue is fully resolved. If issue still exists, retry the task list to fix the error. +2. **Error Analysis:** Explain what the error means and its root causes. Note that this error occurs when running Azure Developer CLI +3. 
**Troubleshooting Steps:** Based on error type (Azure REST API Response Errors, Azure ARM Deployment Errors, Azure Authentication Errors, Local Tool Installation Errors, and General AZD Errors), find the Troubleshooting Approach below and provide troubleshooting approach +4. **Resolution Confirmation:** Ensure the issue is fully resolved. If issue still exists, retry the task list to fix the error 📄 **Required Outputs** - Clear error explanation and root cause analysis - Step-by-step troubleshooting instructions -- Specific infrastructure code fixes for Bicep or Terraform files based on user usage +- Specific infrastructure code fixes for Bicep or Terraform files based on user usage if needed - Azure Portal navigation instructions for verification -- Azure CLI commands for validation and testing if user installed Azure CLI +- Azure CLI commands for validation and testing if needed when user installed Azure CLI - Actionable next steps for resolution 🧠 **Execution Guidelines** @@ -35,10 +33,12 @@ - Provide manual Troubleshooting Steps for Azure Portal - Check Azure Portal for resource status - Verify resource quotas and limits - - Review subscription and resource group permissions - - Validate resource naming conventions and conflicts + - Review subscription and resource group permissions if error related + - Validate resource naming conventions and conflicts if error related 3. **If user installed Azure CLI, Azure CLI Troubleshooting Steps. Otherwise use azure portal instructions** + - Generate Azure CLI related commands if needed + - Consider using following commands if fits: ```bash # Check subscription and tenant az account show @@ -53,11 +53,12 @@ ``` 4. **Infrastructure Code Fixes** - - **Bicep Files:** Correct resource names, SKUs, locations, dependencies - - **Terraform Files:** Fix provider configurations, resource arguments, data sources + - **Bicep Files:** Correct bicep files based on error root cause + - **Terraform Files:** Correct terraform files based on error root cause - Update parameter files with valid values 5. **Verification Commands if user installed Azure CLI. Otherwise skip this part** + - Consider using following commands if fits: ```bash # Validate Bicep templates az bicep build --file main.bicep @@ -70,7 +71,7 @@ ## Azure ARM Deployment Errors -**Error Pattern:** Deployment validation failures, resource provisioning errors, template errors +**Error Pattern:** Deployment validation failures, resource provisioning errors, template errors, etc **Troubleshooting Approach:** @@ -86,6 +87,7 @@ - Verify template parameter values 3. **If user installed Azure CLI, Azure CLI Troubleshooting Steps. Otherwise use azure portal instructions** + - Consider using following commands if fits: ```bash # List recent deployments az deployment group list --resource-group @@ -98,20 +100,12 @@ ``` 4. **Infrastructure Code Fixes** - - **Bicep Files:** - - Fix template syntax errors - - Correct resource property values - - Update API versions - - Fix parameter and variable references - - Resolve dependency chains - - - **Terraform Files:** - - Correct resource configurations - - Fix provider version constraints - - Update data source queries - - Resolve resource dependencies + - **Bicep Files:** Correct bicep files based on error root cause + - **Terraform Files:** Correct terraform files based on error root cause + - Update parameter files with valid values 5. **Verification Commands if user installed Azure CLI. 
Otherwise skip this part** + - Consider using following commands if fits: ```bash # Test deployment in validate-only mode az deployment group validate --resource-group --template-file main.bicep --parameters @parameters.json @@ -122,7 +116,7 @@ ## Azure Authentication Errors -**Error Pattern:** Authentication failures, token expiration, permission denied, tenant/subscription issues +**Error Pattern:** Authentication failures, token expiration, permission denied, tenant/subscription issues, etc **Troubleshooting Approach:** @@ -136,6 +130,7 @@ - Review tenant and subscription IDs 3. **AZD Authentication Commands** + - Consider using following commands if fits: ```bash # Clear current authentication azd auth logout @@ -234,11 +229,11 @@ - Verify tool integration with azd project requirements 6. **Post-Installation Verification** + - If the error occurs after running command `azd provision`: ```bash # Test azd provision with preview azd provision --preview ``` - ## General AZD Errors **Error Pattern:** Miscellaneous errors not falling into above categories @@ -247,22 +242,23 @@ 1. **Error Analysis** - Review error message for specific component failure - - Identify if error is related to configuration, dependencies, or environment + - Identify and diagnose the error - Provide solution based on error analysis 2. **Common Resolution Patterns** - **Quota Exceeded:** Request quota increase in Azure Portal -- **Permission Denied:** Add required role assignments through Azure Portal +- **Permission Denied:** Add required role assignments through Azure Portal or through Azure CLI if needed when user installed Azure CLI - **Resource Name Conflicts:** Update names in Bicep or Terraform files with unique suffixes - **API Version Issues:** Update to latest stable API versions in templates - **Location Constraints:** Verify service availability in target Azure region +- **Other errors:** Call related tool to fix the error 📌 **Completion Checklist** - [ ] Error message clearly understood and root cause identified - [ ] Appropriate troubleshooting steps executed successfully -- [ ] Infrastructure code corrections implemented and validated -- [ ] Azure Portal verification completed for affected resources -- [ ] Azure CLI commands confirm successful resolution if user installed Azure CLI. Otherwise, skip this step. -- [ ] AZD command completes without errors +- [ ] Infrastructure code corrections implemented and validated if needed +- [ ] For Azure REST API Response Errors or Azure ARM Deployment Errors, Azure Portal verification completed for affected resources if needed +- [ ] For Azure REST API Response Errors or Azure ARM Deployment Errors, Azure CLI commands confirm successful resolution if needed when user installed Azure CLI. Otherwise, skip this step +- [ ] Ensure the issue is fully resolved. 
If issue still exists, retry the task list to fix the error \ No newline at end of file From a69bb37b34b349153b8af657433617dc8b9e0c77 Mon Sep 17 00:00:00 2001 From: hemarina Date: Wed, 27 Aug 2025 22:51:15 -0700 Subject: [PATCH 083/116] clean up merge --- .vscode/mcp.json | 18 + cli/azd/cmd/root.go | 1 - .../internal/agent/logging/action_logger.go | 247 --- cli/azd/internal/agent/one_shot_agent.go | 84 - cli/azd/internal/agent/prompts/one_shot.txt | 78 - .../internal/agent/tools/http/http_fetcher.go | 67 - cli/azd/internal/agent/tools/http/loader.go | 26 - .../mcp/tools/prompts/azure.yaml.json | 1819 ----------------- cli/azd/tools/mcp/mcp.go | 115 -- go.mod | 1 - 10 files changed, 18 insertions(+), 2438 deletions(-) create mode 100644 .vscode/mcp.json delete mode 100644 cli/azd/internal/agent/logging/action_logger.go delete mode 100644 cli/azd/internal/agent/one_shot_agent.go delete mode 100644 cli/azd/internal/agent/prompts/one_shot.txt delete mode 100644 cli/azd/internal/agent/tools/http/http_fetcher.go delete mode 100644 cli/azd/internal/agent/tools/http/loader.go delete mode 100644 cli/azd/internal/mcp/tools/prompts/azure.yaml.json delete mode 100644 cli/azd/tools/mcp/mcp.go diff --git a/.vscode/mcp.json b/.vscode/mcp.json new file mode 100644 index 00000000000..33525f17df3 --- /dev/null +++ b/.vscode/mcp.json @@ -0,0 +1,18 @@ +{ + "servers": { + "my-mcp-server-azd-provision-sampling-tool": { + "type": "stdio", + "command": "C:\\Users\\hemarina\\Downloads\\vhvb1989\\azure-dev\\cli\\azd\\tools\\mcp\\mcp.exe", + "args": [], + }, + "azd": { + "type": "stdio", + "command": "azd", + "args": [ + "mcp", + "start" + ] + } + }, + "inputs": [] +} \ No newline at end of file diff --git a/cli/azd/cmd/root.go b/cli/azd/cmd/root.go index 45f26ce01f5..559d3fc7b36 100644 --- a/cli/azd/cmd/root.go +++ b/cli/azd/cmd/root.go @@ -129,7 +129,6 @@ func NewRootCmd( templatesActions(root) authActions(root) hooksActions(root) - mcpActions(root) root.Add("version", &actions.ActionDescriptorOptions{ Command: &cobra.Command{ diff --git a/cli/azd/internal/agent/logging/action_logger.go b/cli/azd/internal/agent/logging/action_logger.go deleted file mode 100644 index 786b14d51e0..00000000000 --- a/cli/azd/internal/agent/logging/action_logger.go +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package logging - -import ( - "context" - "encoding/json" - "fmt" - "regexp" - "strings" - - "github.com/azure/azure-dev/cli/azd/pkg/output" - "github.com/fatih/color" - "github.com/tmc/langchaingo/callbacks" - "github.com/tmc/langchaingo/llms" - "github.com/tmc/langchaingo/schema" -) - -// Compile-time check to ensure ActionLogger implements callbacks.Handler -var _ callbacks.Handler = &ActionLogger{} - -// ActionLogger tracks and logs all agent actions -type ActionLogger struct { - debugEnabled bool -} - -// ActionLoggerOption represents an option for configuring ActionLogger -type ActionLoggerOption func(*ActionLogger) - -// WithDebug enables debug mode for verbose logging -func WithDebug(enabled bool) ActionLoggerOption { - return func(al *ActionLogger) { - al.debugEnabled = enabled - } -} - -// NewActionLogger creates a new action logger -func NewActionLogger(opts ...ActionLoggerOption) *ActionLogger { - al := &ActionLogger{} - - for _, opt := range opts { - opt(al) - } - - return al -} - -// HandleText is called when text is processed -func (al *ActionLogger) HandleText(ctx context.Context, text string) { -} - -// HandleLLMGenerateContentStart is called when LLM content generation starts -func (al *ActionLogger) HandleLLMGenerateContentStart(ctx context.Context, ms []llms.MessageContent) { -} - -// HandleLLMGenerateContentEnd is called when LLM content generation ends -func (al *ActionLogger) HandleLLMGenerateContentEnd(ctx context.Context, res *llms.ContentResponse) { - // Parse and print thoughts as "THOUGHT: " from content - // IF thought contains: "Do I need to use a tool?", omit this thought. - - for _, choice := range res.Choices { - content := choice.Content - - if al.debugEnabled { - color.HiBlack("\nHandleLLMGenerateContentEnd\n%s\n", content) - } - - // Find all "Thought:" patterns and extract the content that follows - // (?is) flags: i=case insensitive, s=dot matches newlines - // .*? is non-greedy to stop at the first occurrence of next pattern or end - thoughtRegex := regexp.MustCompile(`(?is)thought:\s*(.*?)(?:\n\s*(?:action|final answer|observation|ai|thought):|$)`) - matches := thoughtRegex.FindAllStringSubmatch(content, -1) - - for _, match := range matches { - if len(match) > 1 { - thought := strings.TrimSpace(match[1]) - if thought != "" { - // Skip thoughts that contain "Do I need to use a tool?" 
- if !strings.Contains(strings.ToLower(thought), "do i need to use a tool?") { - color.White("\n%s: %s\n", output.AzdAgentLabel(), thought) - } - } - } - } - } -} - -// HandleRetrieverStart is called when retrieval starts -func (al *ActionLogger) HandleRetrieverStart(ctx context.Context, query string) { -} - -// HandleRetrieverEnd is called when retrieval ends -func (al *ActionLogger) HandleRetrieverEnd(ctx context.Context, query string, documents []schema.Document) { -} - -// HandleToolStart is called when a tool execution starts -func (al *ActionLogger) HandleToolStart(ctx context.Context, input string) { - if al.debugEnabled { - color.HiBlack("\nHandleToolStart\n%s\n", input) - } -} - -// HandleToolEnd is called when a tool execution ends -func (al *ActionLogger) HandleToolEnd(ctx context.Context, output string) { - if al.debugEnabled { - color.HiBlack("\nHandleToolEnd\n%s\n", output) - } -} - -// HandleToolError is called when a tool execution fails -func (al *ActionLogger) HandleToolError(ctx context.Context, err error) { - color.Red("\nTool Error: %s\n", err.Error()) -} - -// HandleLLMStart is called when LLM call starts -func (al *ActionLogger) HandleLLMStart(ctx context.Context, prompts []string) { -} - -// HandleChainStart is called when chain execution starts -func (al *ActionLogger) HandleChainStart(ctx context.Context, inputs map[string]any) { -} - -// HandleChainEnd is called when chain execution ends -func (al *ActionLogger) HandleChainEnd(ctx context.Context, outputs map[string]any) { -} - -// HandleChainError is called when chain execution fails -func (al *ActionLogger) HandleChainError(ctx context.Context, err error) { - color.Red("\n%s\n", err.Error()) -} - -// truncateString truncates a string to maxLen characters and adds "..." if truncated -func truncateString(s string, maxLen int) string { - if len(s) > maxLen { - return s[:maxLen-3] + "..." - } - return s -} - -// HandleAgentAction is called when an agent action is planned -func (al *ActionLogger) HandleAgentAction(ctx context.Context, action schema.AgentAction) { - // Print "Calling " - // Inspect action.ToolInput. Attempt to parse input as JSON - // If is valid JSON and contains a param 'filename' then print filename. 
- // example: "Calling read_file " - if al.debugEnabled { - color.HiBlack("\nHandleAgentAction\n%s\n", action.Log) - } - - var toolInput map[string]interface{} - if err := json.Unmarshal([]byte(action.ToolInput), &toolInput); err == nil { - // Successfully parsed JSON, create comma-delimited key-value pairs - excludedKeys := map[string]bool{"content": true} - var params []string - - for key, value := range toolInput { - if excludedKeys[key] { - continue - } - - var valueStr string - switch v := value.(type) { - case []interface{}: - // Skip empty arrays - if len(v) == 0 { - continue - } - // Handle arrays by joining with spaces - var strSlice []string - for _, item := range v { - strSlice = append(strSlice, strings.TrimSpace(string(fmt.Sprintf("%v", item)))) - } - valueStr = strings.Join(strSlice, " ") - case map[string]interface{}: - // Skip empty maps - if len(v) == 0 { - continue - } - valueStr = strings.TrimSpace(fmt.Sprintf("%v", v)) - case string: - // Skip empty strings - trimmed := strings.TrimSpace(v) - if trimmed == "" { - continue - } - valueStr = trimmed - default: - valueStr = strings.TrimSpace(fmt.Sprintf("%v", v)) - } - - if valueStr != "" { - params = append(params, fmt.Sprintf("%s: %s", key, valueStr)) - } - } - - var paramStr string - if len(params) > 0 { - paramStr = strings.Join(params, ", ") - paramStr = truncateString(paramStr, 100) - output := fmt.Sprintf("\n%s: Calling %s tool with %s\n", output.AzdAgentLabel(), action.Tool, paramStr) - color.Green(output) - } else { - output := fmt.Sprintf("\n%s: Calling %s tool\n", output.AzdAgentLabel(), action.Tool) - color.Green(output) - } - } else { - // JSON parsing failed, show the input as text with truncation - toolInput := strings.TrimSpace(action.ToolInput) - if toolInput == "" { - output := fmt.Sprintf("\n%s: Calling %s tool\n", output.AzdAgentLabel(), action.Tool) - color.Green(output) - } else { - toolInput = truncateString(toolInput, 100) - color.Green("\n%s: Calling %s tool with %s\n", output.AzdAgentLabel(), action.Tool, toolInput) - } - } -} - -// HandleAgentFinish is called when the agent finishes -func (al *ActionLogger) HandleAgentFinish(ctx context.Context, finish schema.AgentFinish) { - // Find summary from format "AI: " - // Print: - if al.debugEnabled { - color.HiBlack("\nHandleAgentFinish\n%s\n", finish.Log) - } - - // Use regex to find AI summary, capturing everything after "AI:" (including multi-line) - // The (?s) flag makes . match newlines, (.+) captures everything after "AI:" - aiRegex := regexp.MustCompile(`(?is)AI:\s*(.+)`) - matches := aiRegex.FindStringSubmatch(finish.Log) - - if len(matches) > 1 { - summary := strings.TrimSpace(matches[1]) - color.White("\n%s: %s\n", output.AzdAgentLabel(), summary) - } - // If "AI:" not found, don't print anything -} - -// HandleLLMError is called when LLM call fails -func (al *ActionLogger) HandleLLMError(ctx context.Context, err error) { - color.Red("\nLLM Error: %s\n", err.Error()) -} - -// HandleStreamingFunc handles streaming responses -func (al *ActionLogger) HandleStreamingFunc(ctx context.Context, chunk []byte) { -} diff --git a/cli/azd/internal/agent/one_shot_agent.go b/cli/azd/internal/agent/one_shot_agent.go deleted file mode 100644 index af37bea9798..00000000000 --- a/cli/azd/internal/agent/one_shot_agent.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package agent - -import ( - "context" - _ "embed" - "strings" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" - "github.com/tmc/langchaingo/agents" - "github.com/tmc/langchaingo/chains" - "github.com/tmc/langchaingo/llms" - "github.com/tmc/langchaingo/prompts" -) - -// OneShotAzdAiAgent represents an `azd` agent designed for single-request processing -// without conversation memory, optimized for one-time queries and responses -type OneShotAzdAiAgent struct { - *agentBase -} - -//go:embed prompts/one_shot.txt -var one_shot_prompt_template string - -// NewOneShotAzdAiAgent creates a new one-shot agent optimized for single queries. -// It loads tools from multiple sources, filters excluded tools, and configures -// the agent for stateless operation without conversation memory. -func NewOneShotAzdAiAgent(llm llms.Model, opts ...AgentOption) (*OneShotAzdAiAgent, error) { - azdAgent := &OneShotAzdAiAgent{ - agentBase: &agentBase{ - defaultModel: llm, - tools: []common.AnnotatedTool{}, - }, - } - - for _, opt := range opts { - opt(azdAgent.agentBase) - } - - promptTemplate := prompts.PromptTemplate{ - Template: one_shot_prompt_template, - InputVariables: []string{"input", "agent_scratchpad"}, - TemplateFormat: prompts.TemplateFormatGoTemplate, - PartialVariables: map[string]any{ - "tool_names": toolNames(azdAgent.tools), - "tool_descriptions": toolDescriptions(azdAgent.tools), - }, - } - - // 4. Create agent with memory directly integrated - oneShotAgent := agents.NewOneShotAgent(llm, common.ToLangChainTools(azdAgent.tools), - agents.WithPrompt(promptTemplate), - agents.WithCallbacksHandler(azdAgent.callbacksHandler), - agents.WithReturnIntermediateSteps(), - ) - - // 5. Create executor without separate memory configuration since agent already has it - executor := agents.NewExecutor(oneShotAgent, - agents.WithMaxIterations(500), // Much higher limit for complex multi-step processes - agents.WithCallbacksHandler(azdAgent.callbacksHandler), - agents.WithReturnIntermediateSteps(), - ) - - azdAgent.executor = executor - return azdAgent, nil -} - -// SendMessage processes a single message through the one-shot agent and returns the response -func (aai *OneShotAzdAiAgent) SendMessage(ctx context.Context, args ...string) (string, error) { - return aai.runChain(ctx, strings.Join(args, "\n")) -} - -// runChain executes a user query through the one-shot agent without memory persistence -func (aai *OneShotAzdAiAgent) runChain(ctx context.Context, userInput string) (string, error) { - // Execute with enhanced input - agent should automatically handle memory - output, err := chains.Run(ctx, aai.executor, userInput) - if err != nil { - return "", err - } - - return output, nil -} diff --git a/cli/azd/internal/agent/prompts/one_shot.txt b/cli/azd/internal/agent/prompts/one_shot.txt deleted file mode 100644 index 890569e2bc5..00000000000 --- a/cli/azd/internal/agent/prompts/one_shot.txt +++ /dev/null @@ -1,78 +0,0 @@ -You are an Azure Developer CLI (AZD) agent. -You are an expert in generating, building, provisioning, and deploying Azure applications. -Always follow Azure best patterns and practices. -Always automate as many tasks as possible. - -Before starting your initial task, review the available tools. -If any tools exist for best practices, invoke those tools to gather information. -Incorporate any learned best practices into your work. - -When generating code or configuration, ALWAYS save the output to a file. 
-If a filename is not explicitly provided, generate a meaningful and appropriate name automatically. - ---- - -**Efficiency and Token Usage Guidelines:** - -- Always minimize token usage when interacting with file-related tools. -- Do **not** request large directory globs like `**/*` or attempt to read full directories unless absolutely required. -- Instead, start with: - - High-level file listings (e.g., 1–2 levels deep) - - Only common project root files or config files - - Specific files by name or extension (.csproj, package.json, README.md) -- When reading files, limit the number of files and prefer smaller ones. -- Never request entire folders to be read in a single call. -- If you need to scan deeper, do so **incrementally** and **only if earlier reads indicate it's necessary.** -- When in doubt, prioritize **breadth first, then depth**. - -Failing to follow these heuristics may result in tool failures, token overuse, or excessive latency. - ---- - -You have access to the following tools: - -{{.tool_descriptions}} - -When responding, always use the following format: - -Question: [the input question you must answer] -Thought: [you should always think about what to do] -Action: [the action to take, must be one of [ {{.tool_names}} ]] -Action Input: [the input to the action] -Observation: [the result of the action] -... (this Thought → Action → Action Input → Observation sequence can repeat N times) -Thought: [I now know the final answer] -Final Answer: [the final answer to the original input question] - ---- - -**Important Behavioral Guidelines:** - -- After every Observation, reflect on whether it reveals additional work that must be done. New tasks may emerge from tool outputs — you must identify and complete them before finishing. -- Do **not** assume a task is complete after a single tool call unless you have verified that **all necessary work is complete**. -- Never skip steps or return a Final Answer prematurely. -- Always continue until all identified and implied tasks have been completed using the tools available. -- If the Observation hints at other subtasks, pursue them fully before concluding. - -**Strict Output Format Rules (Do Not Violate):** - -You MUST follow this exact output structure for each tool invocation: - -Thought: [your thought] -Action: [tool name] -Action Input: [input to the tool] -Observation: [result from the tool] - -**Every** Action MUST be followed by an Observation — even if the result is empty, obvious, or a no-op. -Do NOT omit, reorder, or skip any part of this pattern. -Do NOT substitute summaries or explanations for an Observation. - -Only after completing all actions and observations may you finish with: - -Thought: I now know the final answer -Final Answer: [your full, final answer] - -Begin! - -Question: {{.input}} -{{.agent_scratchpad}} diff --git a/cli/azd/internal/agent/tools/http/http_fetcher.go b/cli/azd/internal/agent/tools/http/http_fetcher.go deleted file mode 100644 index 882a7f401ea..00000000000 --- a/cli/azd/internal/agent/tools/http/http_fetcher.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package http - -import ( - "context" - "fmt" - "io" - "net/http" - - "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" - "github.com/mark3labs/mcp-go/mcp" -) - -// HTTPFetcherTool implements the Tool interface for making HTTP requests -type HTTPFetcherTool struct { - common.BuiltInTool -} - -func (t HTTPFetcherTool) Name() string { - return "http_fetcher" -} - -func (t HTTPFetcherTool) Annotations() mcp.ToolAnnotation { - return mcp.ToolAnnotation{ - Title: "Fetch HTTP Endpoint", - ReadOnlyHint: common.ToPtr(true), - DestructiveHint: common.ToPtr(false), - IdempotentHint: common.ToPtr(true), - OpenWorldHint: common.ToPtr(true), - } -} - -func (t HTTPFetcherTool) Description() string { - return "Make HTTP GET requests to fetch content from URLs. Input should be a valid URL." -} - -func (t HTTPFetcherTool) Call(ctx context.Context, input string) (string, error) { - // #nosec G107 - HTTP requests with variable URLs are the intended functionality of this tool - resp, err := http.Get(input) - if err != nil { - return "", fmt.Errorf("failed to fetch URL %s: %w", input, err) - } - - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("HTTP request failed with status: %s", resp.Status) - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - return "", fmt.Errorf("failed to read response body: %w", err) - } - - var output string - // Limit response size to avoid overwhelming the context - if len(body) > 5000 { - output = fmt.Sprintf("Content (first 5000 chars): %s...\n[Content truncated]", string(body[:5000])) - } else { - output = string(body) - output += "\n" - } - - return output, nil -} diff --git a/cli/azd/internal/agent/tools/http/loader.go b/cli/azd/internal/agent/tools/http/loader.go deleted file mode 100644 index b2b5f3f4482..00000000000 --- a/cli/azd/internal/agent/tools/http/loader.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package http - -import ( - "github.com/tmc/langchaingo/callbacks" - "github.com/tmc/langchaingo/tools" -) - -// HttpToolsLoader loads HTTP-related tools -type HttpToolsLoader struct { - callbackHandler callbacks.Handler -} - -func NewHttpToolsLoader(callbackHandler callbacks.Handler) *HttpToolsLoader { - return &HttpToolsLoader{ - callbackHandler: callbackHandler, - } -} - -func (l *HttpToolsLoader) LoadTools() ([]tools.Tool, error) { - return []tools.Tool{ - &HTTPFetcherTool{}, - }, nil -} diff --git a/cli/azd/internal/mcp/tools/prompts/azure.yaml.json b/cli/azd/internal/mcp/tools/prompts/azure.yaml.json deleted file mode 100644 index 747fd7fa649..00000000000 --- a/cli/azd/internal/mcp/tools/prompts/azure.yaml.json +++ /dev/null @@ -1,1819 +0,0 @@ -{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://raw.githubusercontent.com/Azure/azure-dev/main/schemas/v1.0/azure.yaml.json", - "type": "object", - "required": [ - "name" - ], - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "minLength": 2, - "title": "Name of the application", - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$", - "description": "The application name. Only lowercase letters, numbers, and hyphens (-) are allowed. The name must start and end with a letter or number." 
- }, - "resourceGroup": { - "type": "string", - "minLength": 3, - "maxLength": 64, - "title": "Name of the Azure resource group", - "description": "When specified will override the resource group name used for infrastructure provisioning. Supports environment variable substitution." - }, - "metadata": { - "type": "object", - "properties": { - "template": { - "type": "string", - "title": "Identifier of the template from which the application was created. Optional.", - "examples": [ - "todo-nodejs-mongo@0.0.1-beta" - ] - } - } - }, - "infra": { - "type": "object", - "title": "The infrastructure configuration used for the application", - "description": "Optional. Provides additional configuration for Azure infrastructure provisioning.", - "additionalProperties": true, - "properties": { - "provider": { - "type": "string", - "title": "Type of infrastructure provisioning provider", - "description": "Optional. The infrastructure provisioning provider used to provision the Azure resources for the application. (Default: bicep)", - "enum": [ - "bicep", - "terraform" - ] - }, - "path": { - "type": "string", - "title": "Path to the location that contains Azure provisioning templates", - "description": "Optional. The relative folder path to the Azure provisioning templates for the specified provider. (Default: infra)" - }, - "module": { - "type": "string", - "title": "Name of the default module within the Azure provisioning templates", - "description": "Optional. The name of the Azure provisioning module used when provisioning resources. (Default: main)" - } - } - }, - "services": { - "type": "object", - "title": "Definition of services that comprise the application", - "minProperties": 1, - "additionalProperties": { - "type": "object", - "additionalProperties": false, - "required": [ - "host" - ], - "properties": { - "apiVersion": { - "type": "string", - "title": "Resource provider API version for deployments", - "description": "Optional. The resource provider API version to use for the service. If not specified, the default SDK API version is used. Only valid when host is 'containerapp'." - }, - "resourceGroup": { - "type": "string", - "title": "Name of the Azure resource group that contains the resource", - "description": "By default, the CLI will discover the Azure resource within the default resource group. When specified, the CLI will instead find the Azure resource within the specified resource group. Supports environment variable substitution." - }, - "resourceName": { - "type": "string", - "title": "Name of the Azure resource that implements the service", - "description": "By default, the CLI will discover the Azure resource with tag 'azd-service-name' set to the current service's name. When specified, the CLI will instead find the Azure resource with the matching resource name. Supports environment variable substitution." - }, - "project": { - "type": "string", - "title": "Path to the service source code directory" - }, - "image": { - "type": "string", - "title": "Optional. The source image to be used for the container image instead of building from source. Supports environment variable substitution.", - "description": "If omitted, container image will be built from source specified in the 'project' property. Setting both 'project' and 'image' is invalid." - }, - "host": { - "type": "string", - "title": "Required. 
The type of Azure resource used for service implementation", - "description": "The Azure service that will be used as the target for deployment operations for the service.", - "enum": [ - "appservice", - "containerapp", - "function", - "springapp", - "staticwebapp", - "aks", - "ai.endpoint" - ] - }, - "language": { - "type": "string", - "title": "Service implementation language", - "enum": [ - "dotnet", - "csharp", - "fsharp", - "py", - "python", - "js", - "ts", - "java", - "docker" - ] - }, - "module": { - "type": "string", - "title": "(DEPRECATED) Path of the infrastructure module used to deploy the service relative to the root infra folder", - "description": "If omitted, the CLI will assume the module name is the same as the service name. This property will be deprecated in a future release." - }, - "dist": { - "type": "string", - "title": "Relative path to service deployment artifacts" - }, - "docker": { - "$ref": "#/definitions/docker" - }, - "k8s": { - "$ref": "#/definitions/aksOptions" - }, - "config": { - "type": "object", - "additionalProperties": true - }, - "hooks": { - "type": "object", - "title": "Service level hooks", - "description": "Hooks should match `service` event names prefixed with `pre` or `post` depending on when the script should execute. When specifying paths they should be relative to the service path.", - "additionalProperties": false, - "properties": { - "predeploy": { - "title": "pre deploy hook", - "description": "Runs before the service is deployed to Azure", - "$ref": "#/definitions/hooks" - }, - "postdeploy": { - "title": "post deploy hook", - "description": "Runs after the service is deployed to Azure", - "$ref": "#/definitions/hooks" - }, - "prerestore": { - "title": "pre restore hook", - "description": "Runs before the service dependencies are restored", - "$ref": "#/definitions/hooks" - }, - "postrestore": { - "title": "post restore hook", - "description": "Runs after the service dependencies are restored", - "$ref": "#/definitions/hooks" - }, - "prebuild": { - "title": "pre build hook", - "description": "Runs before the service is built", - "$ref": "#/definitions/hooks" - }, - "postbuild": { - "title": "post build hook", - "description": "Runs after the service is built", - "$ref": "#/definitions/hooks" - }, - "prepackage": { - "title": "pre package hook", - "description": "Runs before the service is deployment package is created", - "$ref": "#/definitions/hooks" - }, - "postpackage": { - "title": "post package hook", - "description": "Runs after the service is deployment package is created", - "$ref": "#/definitions/hooks" - } - } - } - }, - "allOf": [ - { - "if": { - "properties": { - "host": { - "const": "containerapp" - } - } - }, - "then": { - "anyOf": [ - { - "required": [ - "image" - ], - "properties": { - "language": false - }, - "not": { - "required": [ - "project" - ] - } - }, - { - "required": [ - "project" - ], - "not": { - "required": [ - "image" - ] - } - } - ] - } - }, - { - "if": { - "not": { - "properties": { - "host": { - "const": "containerapp" - } - } - } - }, - "then": { - "properties": { - "image": false - } - } - }, - { - "if": { - "not": { - "properties": { - "host": { - "enum": [ - "containerapp", - "aks", - "ai.endpoint" - ] - } - } - } - }, - "then": { - "required": [ - "project", - "language" - ], - "properties": { - "docker": false - } - } - }, - { - "if": { - "properties": { - "host": { - "const": "ai.endpoint" - } - } - }, - "then": { - "required": [ - "config" - ], - "properties": { - "config": { - "$ref": 
"#/definitions/aiEndpointConfig", - "title": "The Azure AI endpoint configuration.", - "description": "Required. Provides additional configuration for Azure AI online endpoint deployment." - } - } - } - }, - { - "if": { - "not": { - "properties": { - "host": { - "enum": [ - "aks" - ] - } - } - } - }, - "then": { - "properties": { - "k8s": false - } - } - }, - { - "if": { - "properties": { - "language": { - "const": "java" - } - } - }, - "then": { - "properties": { - "dist": { - "type": "string", - "description": "Optional. The path to the directory containing a single Java archive file (.jar/.ear/.war), or the path to the specific Java archive file to be included in the deployment artifact. If omitted, the CLI will detect the output directory based on the build system in-use. For maven, the default output directory 'target' is assumed." - } - } - } - }, - { - "if": { - "not": { - "properties": { - "host": { - "const": "containerapp" - } - } - } - }, - "then": { - "properties": { - "apiVersion": false - } - } - }, - { - "properties": { - "dist": { - "type": "string", - "description": "Optional. The CLI will use files under this path to create the deployment artifact (ZIP file). If omitted, all files under service project directory will be included." - } - } - } - ] - } - }, - "resources": { - "type": "object", - "additionalProperties": { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "title": "Type of resource", - "description": "The type of resource to be created. (Example: db.postgres)", - "enum": [ - "db.postgres", - "db.mysql", - "db.redis", - "db.mongo", - "db.cosmos", - "ai.openai.model", - "ai.project", - "ai.search", - "host.containerapp", - "host.appservice", - "messaging.eventhubs", - "messaging.servicebus", - "storage", - "keyvault" - ] - }, - "uses": { - "type": "array", - "title": "Other resources that this resource uses", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "existing": { - "type": "boolean", - "title": "An existing resource for referencing purposes", - "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. 
(Default: false)", - "default": false - } - }, - "allOf": [ - { - "if": { - "properties": { - "type": { - "const": "host.appservice" - } - } - }, - "then": { - "$ref": "#/definitions/appServiceResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "host.containerapp" - } - } - }, - "then": { - "$ref": "#/definitions/containerAppResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "ai.openai.model" - } - } - }, - "then": { - "$ref": "#/definitions/aiModelResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "ai.project" - } - } - }, - "then": { - "$ref": "#/definitions/aiProjectResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "ai.search" - } - } - }, - "then": { - "$ref": "#/definitions/aiSearchResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "db.postgres" - } - } - }, - "then": { - "$ref": "#/definitions/genericDbResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "db.mysql" - } - } - }, - "then": { - "$ref": "#/definitions/genericDbResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "db.redis" - } - } - }, - "then": { - "$ref": "#/definitions/genericDbResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "db.mongo" - } - } - }, - "then": { - "$ref": "#/definitions/genericDbResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "db.cosmos" - } - } - }, - "then": { - "$ref": "#/definitions/cosmosDbResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "messaging.eventhubs" - } - } - }, - "then": { - "$ref": "#/definitions/eventHubsResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "messaging.servicebus" - } - } - }, - "then": { - "$ref": "#/definitions/serviceBusResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "storage" - } - } - }, - "then": { - "$ref": "#/definitions/storageAccountResource" - } - }, - { - "if": { - "properties": { - "type": { - "const": "keyvault" - } - } - }, - "then": { - "$ref": "#/definitions/keyVaultResource" - } - } - ] - } - }, - "pipeline": { - "type": "object", - "title": "Definition of continuous integration pipeline", - "properties": { - "provider": { - "type": "string", - "title": "Type of pipeline provider", - "description": "Optional. The pipeline provider to be used for continuous integration. (Default: github)", - "enum": [ - "github", - "azdo" - ] - }, - "variables": { - "type": "array", - "title": "Optional. List of azd environment variables to be used in the pipeline as variables.", - "description": "If variable is found on azd environment, it is set as a variable for the pipeline.", - "items": { - "type": "string" - } - }, - "secrets": { - "type": "array", - "title": "Optional. List of azd environment variables to be used in the pipeline as secrets.", - "description": "If variable is found on azd environment, it is set as a secret for the pipeline.", - "items": { - "type": "string" - } - } - } - }, - "hooks": { - "type": "object", - "title": "Command level hooks", - "description": "Hooks should match `azd` command names prefixed with `pre` or `post` depending on when the script should execute. 
When specifying paths they should be relative to the project path.", - "additionalProperties": false, - "properties": { - "preprovision": { - "title": "pre provision hook", - "description": "Runs before the `provision` command", - "$ref": "#/definitions/hooks" - }, - "postprovision": { - "title": "post provision hook", - "description": "Runs after the `provision` command", - "$ref": "#/definitions/hooks" - }, - "preinfracreate": { - "title": "pre infra create hook", - "description": "Runs before the `infra create` or `provision` commands", - "$ref": "#/definitions/hooks" - }, - "postinfracreate": { - "title": "post infra create hook", - "description": "Runs after the `infra create` or `provision` commands", - "$ref": "#/definitions/hooks" - }, - "preinfradelete": { - "title": "pre infra delete hook", - "description": "Runs before the `infra delete` or `down` commands", - "$ref": "#/definitions/hooks" - }, - "postinfradelete": { - "title": "post infra delete hook", - "description": "Runs after the `infra delete` or `down` commands", - "$ref": "#/definitions/hooks" - }, - "predown": { - "title": "pre down hook", - "description": "Runs before the `infra delete` or `down` commands", - "$ref": "#/definitions/hooks" - }, - "postdown": { - "title": "post down hook", - "description": "Runs after the `infra delete` or `down` commands", - "$ref": "#/definitions/hooks" - }, - "preup": { - "title": "pre up hook", - "description": "Runs before the `up` command", - "$ref": "#/definitions/hooks" - }, - "postup": { - "title": "post up hook", - "description": "Runs after the `up` command", - "$ref": "#/definitions/hooks" - }, - "prepackage": { - "title": "pre package hook", - "description": "Runs before the `package` command", - "$ref": "#/definitions/hooks" - }, - "postpackage": { - "title": "post package hook", - "description": "Runs after the `package` command", - "$ref": "#/definitions/hooks" - }, - "predeploy": { - "title": "pre deploy hook", - "description": "Runs before the `deploy` command", - "$ref": "#/definitions/hooks" - }, - "postdeploy": { - "title": "post deploy hook", - "description": "Runs after the `deploy` command", - "$ref": "#/definitions/hooks" - }, - "prerestore": { - "title": "pre restore hook", - "description": "Runs before the `restore` command", - "$ref": "#/definitions/hooks" - }, - "postrestore": { - "title": "post restore hook", - "description": "Runs after the `restore` command", - "$ref": "#/definitions/hooks" - } - } - }, - "requiredVersions": { - "type": "object", - "additionalProperties": false, - "properties": { - "azd": { - "type": "string", - "title": "A range of supported versions of `azd` for this project", - "description": "A range of supported versions of `azd` for this project. If the version of `azd` is outside this range, the project will fail to load. Optional (allows all versions if absent).", - "examples": [ - ">= 0.6.0-beta.3" - ] - } - } - }, - "state": { - "type": "object", - "title": "The state configuration used for the project.", - "description": "Optional. Provides additional configuration for state management.", - "additionalProperties": false, - "properties": { - "remote": { - "type": "object", - "additionalProperties": false, - "title": "The remote state configuration.", - "description": "Optional. Provides additional configuration for remote state management such as Azure Blob Storage.", - "required": [ - "backend" - ], - "properties": { - "backend": { - "type": "string", - "title": "The remote state backend type.", - "description": "Optional. 
The remote state backend type. (Default: AzureBlobStorage)", - "default": "AzureBlobStorage", - "enum": [ - "AzureBlobStorage" - ] - }, - "config": { - "type": "object", - "additionalProperties": true - } - }, - "allOf": [ - { - "if": { - "properties": { - "backend": { - "const": "AzureBlobStorage" - } - } - }, - "then": { - "required": [ - "config" - ], - "properties": { - "config": { - "$ref": "#/definitions/azureBlobStorageConfig" - } - } - } - } - ] - } - } - }, - "platform": { - "type": "object", - "title": "The platform configuration used for the project.", - "description": "Optional. Provides additional configuration for platform specific features such as Azure Dev Center.", - "additionalProperties": false, - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "title": "The platform type.", - "description": "Required. The platform type. (Example: devcenter)", - "enum": [ - "devcenter" - ] - }, - "config": { - "type": "object", - "additionalProperties": true - } - }, - "allOf": [ - { - "if": { - "properties": { - "type": { - "const": "devcenter" - } - } - }, - "then": { - "properties": { - "config": { - "$ref": "#/definitions/azureDevCenterConfig" - } - } - } - } - ] - }, - "workflows": { - "type": "object", - "title": "The workflows configuration used for the project.", - "description": "Optional. Provides additional configuration for workflows such as override azd up behavior.", - "additionalProperties": false, - "properties": { - "up": { - "title": "The up workflow configuration", - "description": "When specified will override the default behavior for the azd up workflow. Common use cases include changing the order of the provision, package and deploy commands.", - "$ref": "#/definitions/workflow" - } - } - }, - "cloud": { - "type": "object", - "title": "The cloud configuration used for the project.", - "description": "Optional. Provides additional configuration for deploying to sovereign clouds such as Azure Government. The default cloud is AzureCloud.", - "additionalProperties": false, - "properties": { - "name": { - "enum": [ - "AzureCloud", - "AzureChinaCloud", - "AzureUSGovernment" - ] - } - } - } - }, - "definitions": { - "hooks": { - "anyOf": [ - { - "$ref": "#/definitions/hook" - }, - { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/hook" - } - } - ] - }, - "hook": { - "type": "object", - "additionalProperties": false, - "properties": { - "shell": { - "type": "string", - "title": "Type of shell to execute scripts", - "description": "Optional. The type of shell to use for the hook. (Default: sh)", - "enum": [ - "sh", - "pwsh" - ], - "default": "sh" - }, - "run": { - "type": "string", - "title": "Required. The inline script or relative path of your scripts from the project or service path", - "description": "When specifying an inline script you also must specify the `shell` to use. This is automatically inferred when using paths." - }, - "continueOnError": { - "type": "boolean", - "default": false, - "title": "Whether or not a script error will halt the azd command", - "description": "Optional. When set to true will continue to run the command even after a script error has occurred. (Default: false)" - }, - "interactive": { - "type": "boolean", - "default": false, - "title": "Whether the script will run in interactive mode", - "description": "Optional. When set to true will bind the script to stdin, stdout & stderr of the running console. 
(Default: false)" - }, - "windows": { - "title": "The hook configuration used for Windows environments", - "description": "When specified overrides the hook configuration when executed in Windows environments", - "default": null, - "$ref": "#/definitions/hook" - }, - "posix": { - "title": "The hook configuration used for POSIX (Linux & MacOS) environments", - "description": "When specified overrides the hook configuration when executed in POSIX environments", - "default": null, - "$ref": "#/definitions/hook" - }, - "secrets": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "title": "Optional. Map of azd environment variables to hook secrets.", - "description": "If variable was set as a secret in the environment, the secret value will be passed to the hook.", - "examples": [ - { - "WITH_SECRET_VALUE": "ENV_VAR_WITH_SECRET" - } - ] - } - }, - "allOf": [ - { - "if": { - "allOf": [ - { - "required": [ - "windows" - ] - }, - { - "required": [ - "posix" - ] - } - ] - }, - "then": { - "properties": { - "run": false, - "shell": false, - "interactive": false, - "continueOnError": false, - "secrets": false - } - } - }, - { - "if": { - "anyOf": [ - { - "required": [ - "interactive" - ] - }, - { - "required": [ - "continueOnError" - ] - }, - { - "required": [ - "secrets" - ] - }, - { - "required": [ - "shell" - ] - } - ] - }, - "then": { - "required": [ - "run" - ] - } - } - ] - }, - "docker": { - "type": "object", - "description": "This is only applicable when `host` is `containerapp` or `aks`", - "additionalProperties": false, - "properties": { - "path": { - "type": "string", - "title": "The path to the Dockerfile", - "description": "Path to the Dockerfile is relative to your service", - "default": "./Dockerfile" - }, - "context": { - "type": "string", - "title": "The docker build context", - "description": "When specified overrides the default context", - "default": "." - }, - "platform": { - "type": "string", - "title": "The platform target", - "default": "amd64" - }, - "registry": { - "type": "string", - "title": "Optional. The container registry to push the image to.", - "description": "If omitted, will default to value of AZURE_CONTAINER_REGISTRY_ENDPOINT environment variable. Supports environment variable substitution." - }, - "image": { - "type": "string", - "title": "Optional. The name that will be applied to the built container image.", - "description": "If omitted, will default to the '{appName}/{serviceName}-{environmentName}'. Supports environment variable substitution." - }, - "tag": { - "type": "string", - "title": "The tag that will be applied to the built container image.", - "description": "If omitted, will default to 'azd-deploy-{unix time (seconds)}'. Supports environment variable substitution. For example, to generate unique tags for a given release: myapp/myimage:${DOCKER_IMAGE_TAG}" - }, - "buildArgs": { - "type": "array", - "title": "Optional. Build arguments to pass to the docker build command", - "description": "Build arguments to pass to the docker build command.", - "items": { - "type": "string" - } - }, - "remoteBuild": { - "type": "boolean", - "title": "Optional. Whether to build the image remotely", - "description": "If set to true, the image will be built remotely using the Azure Container Registry remote build feature. If set to false, the image will be built locally using Docker." - } - } - }, - "aksOptions": { - "type": "object", - "title": "Optional. 
The Azure Kubernetes Service (AKS) configuration options", - "additionalProperties": false, - "properties": { - "deploymentPath": { - "type": "string", - "title": "Optional. The relative path from the service path to the k8s deployment manifests. (Default: manifests)", - "description": "When set it will override the default deployment path location for k8s deployment manifests.", - "default": "manifests" - }, - "namespace": { - "type": "string", - "title": "Optional. The k8s namespace of the deployed resources. (Default: Project name)", - "description": "When specified a new k8s namespace will be created if it does not already exist" - }, - "deployment": { - "type": "object", - "title": "Optional. The k8s deployment configuration", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "title": "Optional. The name of the k8s deployment resource to use during deployment. (Default: Service name)", - "description": "Used during deployment to ensure if the k8s deployment rollout has been completed. If not set will search for a deployment resource in the same namespace that contains the service name." - } - } - }, - "service": { - "type": "object", - "title": "Optional. The k8s service configuration", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "title": "Optional. The name of the k8s service resource to use as the default service endpoint. (Default: Service name)", - "description": "Used when determining endpoints for the default service resource. If not set will search for a deployment resource in the same namespace that contains the service name." - } - } - }, - "ingress": { - "type": "object", - "title": "Optional. The k8s ingress configuration", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "title": "Optional. The name of the k8s ingress resource to use as the default service endpoint. (Default: Service name)", - "description": "Used when determining endpoints for the default ingress resource. If not set will search for a deployment resource in the same namespace that contains the service name." - }, - "relativePath": { - "type": "string", - "title": "Optional. The relative path to the service from the root of your ingress controller.", - "description": "When set will be appended to the root of your ingress resource path." - } - } - }, - "helm": { - "type": "object", - "title": "Optional. The helm configuration", - "additionalProperties": false, - "properties": { - "repositories": { - "type": "array", - "title": "Optional. The helm repositories to add", - "description": "When set will add the helm repositories to the helm client.", - "minItems": 1, - "items": { - "type": "object", - "additionalProperties": false, - "required": [ - "name", - "url" - ], - "properties": { - "name": { - "type": "string", - "title": "The name of the helm repository", - "description": "The name of the helm repository to add." - }, - "url": { - "type": "string", - "title": "The url of the helm repository", - "description": "The url of the helm repository to add." - } - } - } - }, - "releases": { - "type": "array", - "title": "Optional. 
The helm releases to install", - "description": "When set will install the helm releases to the k8s cluster.", - "minItems": 1, - "items": { - "type": "object", - "additionalProperties": false, - "required": [ - "name", - "chart" - ], - "properties": { - "name": { - "type": "string", - "title": "The name of the helm release", - "description": "The name of the helm release to install." - }, - "chart": { - "type": "string", - "title": "The name of the helm chart", - "description": "The name of the helm chart to install." - }, - "version": { - "type": "string", - "title": "The version of the helm chart", - "description": "The version of the helm chart to install." - }, - "namespace": { - "type": "string", - "title": "Optional. The k8s namespace to install the helm chart", - "description": "When set will install the helm chart to the specified namespace. Defaults to the service namespace." - }, - "values": { - "type": "string", - "title": "Optional. Relative path from service to a values.yaml to pass to the helm chart", - "description": "When set will pass the values to the helm chart." - } - } - } - } - } - }, - "kustomize": { - "type": "object", - "title": "Optional. The kustomize configuration", - "additionalProperties": false, - "properties": { - "dir": { - "type": "string", - "title": "Optional. The relative path to the kustomize directory.", - "description": "When set will use the kustomize directory to deploy to the k8s cluster. Supports environment variable substitution." - }, - "edits": { - "type": "array", - "title": "Optional. The kustomize edits to apply before deployment.", - "description": "When set will apply the edits to the kustomize directory before deployment. Supports environment variable substitution.", - "items": { - "type": "string" - } - }, - "env": { - "type": "object", - "title": "Optional. The environment key/value pairs used to generate a .env file.", - "description": "When set will generate a .env file in the kustomize directory. Values support environment variable substitution.", - "additionalProperties": { - "type": [ - "string", - "boolean", - "number" - ] - } - } - } - } - } - }, - "azureBlobStorageConfig": { - "type": "object", - "title": "The Azure Blob Storage remote state backend configuration.", - "description": "Optional. Provides additional configuration for remote state management such as Azure Blob Storage.", - "additionalProperties": false, - "required": [ - "accountName" - ], - "properties": { - "accountName": { - "type": "string", - "title": "The Azure Storage account name.", - "description": "Required. The Azure Storage account name." - }, - "containerName": { - "type": "string", - "title": "The Azure Storage container name.", - "description": "Optional. The Azure Storage container name. Defaults to project name if not specified." - }, - "endpoint": { - "type": "string", - "title": "The Azure Storage endpoint.", - "description": "Optional. The Azure Storage endpoint. (Default: blob.core.windows.net)" - } - } - }, - "azureDevCenterConfig": { - "type": "object", - "title": "The dev center configuration used for the project.", - "description": "Optional. Provides additional project configuration for Azure Dev Center integration.", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "title": "The name of the Azure Dev Center", - "description": "Optional. Used as the default dev center for this project." 
- }, - "project": { - "type": "string", - "title": "The name of the Azure Dev Center project.", - "description": "Optional. Used as the default dev center project for this project." - }, - "catalog": { - "type": "string", - "title": "The name of the Azure Dev Center catalog.", - "description": "Optional. Used as the default dev center catalog for this project." - }, - "environmentDefinition": { - "type": "string", - "title": "The name of the Dev Center catalog environment definition.", - "description": "Optional. Used as the default dev center environment definition for this project." - }, - "environmentType": { - "type": "string", - "title": "The Dev Center project environment type used for the deployment environment.", - "description": "Optional. Used as the default environment type for this project." - } - } - }, - "workflow": { - "anyOf": [ - { - "type": "object", - "additionalProperties": false, - "required": [ - "steps" - ], - "properties": { - "steps": { - "type": "array", - "title": "The steps to execute in the workflow", - "description": "The steps to execute in the workflow. (Example: provision, package, deploy)", - "minItems": 1, - "items": { - "type": "object", - "$ref": "#/definitions/workflowStep" - } - } - } - }, - { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/workflowStep" - } - } - ] - }, - "workflowStep": { - "properties": { - "azd": { - "title": "The azd command command configuration", - "description": "The azd command configuration to execute. (Example: up)", - "$ref": "#/definitions/azdCommand" - } - } - }, - "azdCommand": { - "anyOf": [ - { - "type": "string", - "title": "The azd command to execute", - "description": "The name and args of the azd command to execute. (Example: deploy --all)" - }, - { - "type": "object", - "additionalProperties": false, - "required": [ - "args" - ], - "properties": { - "args": { - "type": "array", - "title": "The arguments or flags to pass to the azd command", - "description": "The arguments to pass to the azd command. (Example: --all)", - "minItems": 1 - } - } - } - ] - }, - "aiComponentConfig": { - "type": "object", - "properties": { - "name": { - "type": "string", - "title": "Name of the AI component.", - "description": "Optional. When omitted AZD will generate a name based on the component type and the service name. Supports environment variable substitution." - }, - "path": { - "type": "string", - "title": "Path to the AI component configuration file or path.", - "description": "Required. The path to the AI component configuration file or path to the AI component source code." - }, - "overrides": { - "type": "object", - "title": "A map of key value pairs used to override the AI component configuration.", - "description": "Optional. Supports environment variable substitution.", - "additionalProperties": { - "type": "string" - } - } - }, - "required": [ - "path" - ] - }, - "aiDeploymentConfig": { - "allOf": [ - { - "$ref": "#/definitions/aiComponentConfig" - }, - { - "type": "object", - "properties": { - "environment": { - "type": "object", - "title": "A map of key/value pairs to set as environment variables for the deployment.", - "description": "Optional. 
Values support OS & AZD environment variable substitution.", - "additionalProperties": { - "type": "string" - } - } - } - } - ] - }, - "aiEndpointConfig": { - "type": "object", - "additionalProperties": false, - "properties": { - "workspace": { - "type": "string", - "title": "The name of the AI Studio project workspace.", - "description": "Optional. When omitted AZD will use the value specified in the 'AZUREAI_PROJECT_NAME' environment variable. Supports environment variable substitution." - }, - "flow": { - "$ref": "#/definitions/aiComponentConfig", - "title": "The Azure AI Studio Prompt Flow configuration.", - "description": "Optional. When omitted a prompt flow will be not created." - }, - "environment": { - "$ref": "#/definitions/aiComponentConfig", - "title": "The Azure AI Studio custom environment configuration.", - "description": "Optional. When omitted a custom environment will not be created." - }, - "model": { - "$ref": "#/definitions/aiComponentConfig", - "title": "The Azure AI Studio model configuration.", - "description": "Optional. When omitted a model will not be created." - }, - "deployment": { - "$ref": "#/definitions/aiDeploymentConfig", - "title": "The Azure AI Studio online endpoint deployment configuration.", - "description": "Required. A new online endpoint deployment will be created and traffic will automatically to shifted to the new deployment upon successful completion." - } - }, - "required": [ - "deployment" - ] - }, - "appServiceResource": { - "type": "object", - "description": "An Azure App Service web app.", - "additionalProperties": false, - "required": [ - "port", - "runtime" - ], - "properties": { - "type": { - "type": "string", - "const": "host.appservice" - }, - "uses": { - "type": "array", - "title": "Other resources that this resource uses", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "port": { - "type": "integer", - "title": "Port that the web app listens on", - "description": "Optional. The port that the web app listens on. (Default: 80)" - }, - "env": { - "type": "array", - "title": "Environment variables to set for the web app", - "items": { - "type": "object", - "required": [ - "name" - ], - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "title": "Name of the environment variable" - }, - "value": { - "type": "string", - "title": "Value of the environment variable. Supports environment variable substitution." - }, - "secret": { - "type": "string", - "title": "Secret value of the environment variable. Supports environment variable substitution." - } - } - } - }, - "runtime": { - "type": "object", - "title": "Runtime stack configuration", - "description": "Required. The language runtime configuration for the App Service web app.", - "required": [ - "stack", - "version" - ], - "properties": { - "stack": { - "type": "string", - "title": "Language runtime stack", - "description": "Required. The language runtime stack.", - "enum": [ - "node", - "python" - ] - }, - "version": { - "type": "string", - "title": "Runtime stack version", - "description": "Required. The language runtime version. Format varies by stack. (Example: '22-lts' for Node, '3.13' for Python)" - } - } - }, - "startupCommand": { - "type": "string", - "title": "Startup command", - "description": "Optional. Startup command that will be run as part of web app startup." 
- } - } - }, - "containerAppResource": { - "type": "object", - "description": "A Docker-based container app.", - "additionalProperties": false, - "required": [ - "port" - ], - "properties": { - "type": { - "type": "string", - "const": "host.containerapp" - }, - "uses": { - "type": "array", - "title": "Other resources that this resource uses", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "port": { - "type": "integer", - "title": "Port that the container app listens on", - "description": "Optional. The port that the container app listens on. (Default: 80)" - }, - "env": { - "type": "array", - "title": "Environment variables to set for the container app", - "items": { - "type": "object", - "required": [ - "name" - ], - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "title": "Name of the environment variable" - }, - "value": { - "type": "string", - "title": "Value of the environment variable. Supports environment variable substitution." - }, - "secret": { - "type": "string", - "title": "Secret value of the environment variable. Supports environment variable substitution." - } - } - } - } - } - }, - "aiModelResource": { - "type": "object", - "description": "A deployed, ready-to-use AI model.", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "const": "ai.openai.model" - }, - "existing": { - "type": "boolean", - "title": "An existing resource for referencing purposes", - "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", - "default": false - }, - "model": { - "type": "object", - "description": "The underlying AI model.", - "additionalProperties": false, - "required": [ - "name", - "version" - ], - "properties": { - "name": { - "type": "string", - "title": "The name of the AI model.", - "description": "Required. The name of the AI model." - }, - "version": { - "type": "string", - "title": "The version of the AI model.", - "description": "Required. The version of the AI model." - } - } - } - }, - "allOf": [ - { - "if": { - "properties": { - "existing": { - "const": false - } - } - }, - "then": { - "required": [ - "model" - ] - } - } - ] - }, - "aiProjectResource": { - "type": "object", - "description": "An Azure AI Foundry project with models.", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "const": "ai.project" - }, - "existing": { - "type": "boolean", - "title": "An existing resource for referencing purposes", - "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", - "default": false - }, - "models": { - "type": "array", - "title": "AI models to deploy", - "description": "Optional. The AI models to be deployed as part of the AI project.", - "minItems": 1, - "items": { - "type": "object", - "additionalProperties": false, - "required": [ - "name", - "version", - "format", - "sku" - ], - "properties": { - "name": { - "type": "string", - "title": "The name of the AI model.", - "description": "Required. The name of the AI model." - }, - "version": { - "type": "string", - "title": "The version of the AI model.", - "description": "Required. The version of the AI model." - }, - "format": { - "type": "string", - "title": "The format of the AI model.", - "description": "Required. The format of the AI model. 
(Example: Microsoft, OpenAI)" - }, - "sku": { - "type": "object", - "title": "The SKU configuration for the AI model.", - "description": "Required. The SKU details for the AI model.", - "additionalProperties": false, - "required": [ - "name", - "usageName", - "capacity" - ], - "properties": { - "name": { - "type": "string", - "title": "The name of the SKU.", - "description": "Required. The name of the SKU. (Example: GlobalStandard)" - }, - "usageName": { - "type": "string", - "title": "The usage name of the SKU.", - "description": "Required. The usage name of the SKU for billing purposes. (Example: AIServices.GlobalStandard.MaaS, OpenAI.GlobalStandard.gpt-4o-mini)" - }, - "capacity": { - "type": "integer", - "title": "The capacity of the SKU.", - "description": "Required. The capacity of the SKU." - } - } - } - } - } - } - } - }, - "aiSearchResource": { - "type": "object", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "const": "ai.search" - }, - "existing": { - "type": "boolean", - "title": "An existing resource for referencing purposes", - "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", - "default": false - } - } - }, - "genericDbResource": { - "type": "object", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "title": "Type of resource", - "description": "The type of resource to be created. (Example: db.postgres)", - "enum": [ - "db.postgres", - "db.redis", - "db.mysql", - "db.mongo" - ] - } - } - }, - "cosmosDbResource": { - "type": "object", - "description": "A deployed, ready-to-use Azure Cosmos DB for NoSQL database.", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "const": "db.cosmos" - }, - "containers": { - "type": "array", - "title": "Containers", - "description": "Containers to be created to store data. Each container stores a collection of items.", - "items": { - "type": "object", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "title": "Container name.", - "description": "Required. The name of the container." - }, - "partitionKeys": { - "type": "array", - "title": "Partition keys.", - "description": "Required. The partition key(s) used to distribute data across partitions. The ordering of keys matters. By default, a single partition key '/id' is naturally a great choice for most applications.", - "minLength": 1, - "maxLength": 3, - "items": { - "type": "string" - } - } - } - } - } - } - }, - "eventHubsResource": { - "type": "object", - "description": "An Azure Event Hubs namespace.", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "const": "messaging.eventhubs" - }, - "existing": { - "type": "boolean", - "title": "An existing resource for referencing purposes", - "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. 
(Default: false)", - "default": false - }, - "hubs": { - "type": "array", - "title": "Hubs to create in the Event Hubs namespace", - "additionalProperties": false, - "items": { - "type": "string" - }, - "uniqueItems": true - } - } - }, - "serviceBusResource": { - "type": "object", - "description": "An Azure Service Bus namespace.", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "const": "messaging.servicebus" - }, - "existing": { - "type": "boolean", - "title": "An existing resource for referencing purposes", - "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", - "default": false - }, - "queues": { - "type": "array", - "title": "Queues to create in the Service Bus namespace", - "additionalProperties": false, - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "topics": { - "type": "array", - "title": "Topics to create in the Service Bus namespace", - "additionalProperties": false, - "items": { - "type": "string" - }, - "uniqueItems": true - } - } - }, - "storageAccountResource": { - "type": "object", - "description": "A deployed, ready-to-use Azure Storage Account.", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "const": "storage" - }, - "existing": { - "type": "boolean", - "title": "An existing resource for referencing purposes", - "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. (Default: false)", - "default": false - }, - "containers": { - "type": "array", - "title": "Azure Storage Account container names.", - "description": "The container names of Azure Storage Account.", - "items": { - "type": "string", - "title": "Azure Storage Account container name", - "description": "The container name of Azure Storage Account." - } - } - } - }, - "keyVaultResource": { - "type": "object", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "const": "keyvault" - }, - "existing": { - "type": "boolean", - "title": "An existing resource for referencing purposes", - "description": "Optional. When set to true, this resource will not be created and instead be used for referencing purposes. 
(Default: false)", - "default": false - } - } - } - } -} \ No newline at end of file diff --git a/cli/azd/tools/mcp/mcp.go b/cli/azd/tools/mcp/mcp.go deleted file mode 100644 index 0c374f96e62..00000000000 --- a/cli/azd/tools/mcp/mcp.go +++ /dev/null @@ -1,115 +0,0 @@ -package main - -import ( - "context" - "fmt" - "log" - - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" -) - -func main() { - // Create a new MCP server - s := server.NewMCPServer( - "Hello Server 🚀", - "1.0.0", - server.WithToolCapabilities(false), - ) - s.EnableSampling() - - // Define the tool - tool := mcp.NewTool( - "Error_Handler", - mcp.WithDescription("Explain the error and provide a suggestion to fix it for azd provision related errors"), - mcp.WithString("errorPromptProvision", - mcp.Required(), - mcp.Description("Complete error message from 'azd provision' command failure and complete prompt request to the tool"), - ), - ) - - // Register the tool handler - s.AddTool(tool, errorProvisionHandler) - - // Start the server using stdio transport - if err := server.ServeStdio(s); err != nil { - fmt.Printf("Server error: %v\n", err) - } -} - -func errorProvisionHandler(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - log.Println("🤖 Starting errorProvisionHandler session...") - promptRequest, err := request.RequireString("errorPromptProvision") - if err != nil { - return mcp.NewToolResultError(err.Error()), nil - } - - // Get the client session from context - session := server.ClientSessionFromContext(ctx) - if session == nil { - // If no session, return to basic error - return mcp.NewToolResultText(fmt.Sprintf("Failed to connect MCP tool, fallback to original prompt request: %s", promptRequest)), nil - } - - // For VSCode only agent tool, this change will included in a separate PR for MCP tool - // samplingText := fmt.Sprintf(` - // I'm using Azure Developer CLI (azd) and running command 'azd provision'. I encountered the following error: %s - - // Determine if this error is a azure related error or http response error or authentication error or other errors. Please: - - // 1. Explain what this specific error means and why it occurred - // 2. Provide step-by-step troubleshooting instructions - // 3. If it is a azure related error or http response error, check infra folder and suggest specific fixes for Bicep files or Terraform files based on files in infra folder. After that, if user has azure cli installed, provide the exact Azure CLI commands and azure portal instructions to verify the changes works - // 4. Include any relevant azure.yaml configuration changes that might be needed - - // Focus on actionable solutions rather than general advice. - // `, promptRequest) - // Check if the session supports sampling - if samplingSession, ok := session.(server.SessionWithSampling); ok { - // Create a sampling request to get a creative greeting - samplingRequest := mcp.CreateMessageRequest{ - CreateMessageParams: mcp.CreateMessageParams{ - Messages: []mcp.SamplingMessage{ - { - Role: mcp.RoleUser, - Content: mcp.TextContent{ - Type: "text", - Text: fmt.Sprintf("I'm running azd command 'azd provision'. 
%s", promptRequest), - }, - }, - }, - MaxTokens: 100, - Temperature: 0.8, - }, - } - - log.Printf("🤖 Sampling Request: %+v\n", samplingRequest) - - // Send the sampling request to get a response from the host's LLM - samplingResponse, err := samplingSession.RequestSampling(ctx, samplingRequest) - log.Printf("🤖 Sampling Response: %+v\n", samplingResponse) - if err != nil { - // If sampling fails, fall back to a simple greeting - return mcp.NewToolResultText(fmt.Sprintf("Failed to send sampling request, fallback to original prompt request: %s", promptRequest)), nil - } - - // Extract the generated greeting from the sampling response - var errorSuggestion string - if samplingResponse != nil { - // The response Content field contains the message content - if textContent, ok := samplingResponse.Content.(mcp.TextContent); ok { - errorSuggestion = textContent.Text - } else if contentStr, ok := samplingResponse.Content.(string); ok { - errorSuggestion = contentStr - } - } - - // If we got a response, use it - if errorSuggestion != "" { - return mcp.NewToolResultText(fmt.Sprintf("🤖 AI-Generated Error Suggestion: %s", errorSuggestion)), nil - } - } - - // Fallback to raw error message - return mcp.NewToolResultText(fmt.Sprintf("Failed to generate error suggestions, fallback to original prompt request: %s", promptRequest)), nil -} diff --git a/go.mod b/go.mod index c87f5106f2f..2613968718c 100644 --- a/go.mod +++ b/go.mod @@ -51,7 +51,6 @@ require ( github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.3 github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df - github.com/i2y/langchaingo-mcp-adapter v0.0.0-20250623114610-a01671e1c8df github.com/joho/godotenv v1.5.1 github.com/magefile/mage v1.15.0 github.com/mark3labs/mcp-go v0.36.0 From b026808c4fb9cf889d4bdbede31327139dc08556 Mon Sep 17 00:00:00 2001 From: hemarina Date: Wed, 27 Aug 2025 22:52:58 -0700 Subject: [PATCH 084/116] clean up --- .vscode/mcp.json | 18 ------------------ 1 file changed, 18 deletions(-) delete mode 100644 .vscode/mcp.json diff --git a/.vscode/mcp.json b/.vscode/mcp.json deleted file mode 100644 index 33525f17df3..00000000000 --- a/.vscode/mcp.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "servers": { - "my-mcp-server-azd-provision-sampling-tool": { - "type": "stdio", - "command": "C:\\Users\\hemarina\\Downloads\\vhvb1989\\azure-dev\\cli\\azd\\tools\\mcp\\mcp.exe", - "args": [], - }, - "azd": { - "type": "stdio", - "command": "azd", - "args": [ - "mcp", - "start" - ] - } - }, - "inputs": [] -} \ No newline at end of file From adb754b7e8b5f109096ab7a94e7989c8404c8e0b Mon Sep 17 00:00:00 2001 From: hemarina Date: Wed, 27 Aug 2025 22:54:57 -0700 Subject: [PATCH 085/116] minor fix --- cli/azd/cmd/middleware/error.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index c4dced4e88f..4c1af65d046 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -91,12 +91,14 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action // Warn user that this is an alpha feature e.console.WarnForFeature(ctx, llm.FeatureLlm) - azdAgent, cleanup, err := e.agentFactory.Create(agent.WithDebug(e.global.EnableDebugLogging)) + azdAgent, err := e.agentFactory.Create( + agent.WithDebug(e.global.EnableDebugLogging), + ) if err != nil { return nil, err } - defer cleanup() + defer azdAgent.Stop() agentOutput, err := azdAgent.SendMessage(ctx, fmt.Sprintf( `Steps to follow: From 
8e0df2daa9f8127aef3ad518c925a6295898f7a9 Mon Sep 17 00:00:00 2001 From: hemarina Date: Fri, 29 Aug 2025 17:15:28 -0700 Subject: [PATCH 086/116] add user confirmation prompt --- cli/azd/cmd/middleware/error.go | 120 +++++++++++++----- .../agent/tools/dev/command_executor.go | 10 ++ .../internal/mcp/tools/azd_common_error.go | 4 + .../mcp/tools/azd_error_troubleshooting.go | 7 +- .../prompts/azd_error_troubleshooting.md | 4 +- .../provisioning/bicep/bicep_provider.go | 1 + 6 files changed, 111 insertions(+), 35 deletions(-) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index 4c1af65d046..0077a2ae901 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -17,6 +17,8 @@ import ( "github.com/azure/azure-dev/cli/azd/pkg/llm" "github.com/azure/azure-dev/cli/azd/pkg/output" "github.com/azure/azure-dev/cli/azd/pkg/tools" + uxlib "github.com/azure/azure-dev/cli/azd/pkg/ux" + "github.com/fatih/color" ) type ErrorMiddleware struct { @@ -48,9 +50,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action var previousError error originalError := err - // skipAnalyzingErrors := []error{ - // context.Canceled, - // } + // TODO: think about Error exclusive or inclusive skipAnalyzingErrors := []string{ "environment already initialized", "interrupt", @@ -61,12 +61,6 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action break } - // for _, e := range skipAnalyzingErrors { - // if errors.As(err, &e) || errors.Is(err, e) { - // return actionResult, err - // } - // } - for _, s := range skipAnalyzingErrors { if strings.Contains(err.Error(), s) { return actionResult, err @@ -102,9 +96,10 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action agentOutput, err := azdAgent.SendMessage(ctx, fmt.Sprintf( `Steps to follow: - 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. + 1. Identify, explain and diagnose this error when running azd command and its root cause. 2. Provide actionable troubleshooting steps. Error details: %s`, originalError.Error())) + if err != nil { if agentOutput != "" { e.console.Message(ctx, output.WithMarkdown(agentOutput)) @@ -113,6 +108,23 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action return nil, err } + // Ask if user wants to provide AI generated troubleshooting steps + confirm, err := e.console.Confirm(ctx, input.ConsoleOptions{ + Message: "Provide AI generated troubleshooting steps?", + DefaultValue: true, + }) + if err != nil { + return nil, fmt.Errorf("prompting to provide troubleshooting steps: %w", err) + } + + if confirm { + // Provide manual steps for troubleshooting + e.console.Message(ctx, "") + e.console.Message(ctx, fmt.Sprintf("%s:", output.AzdAgentLabel())) + e.console.Message(ctx, output.WithMarkdown(agentOutput)) + e.console.Message(ctx, "") + } + // Ask user if they want to let AI fix the error selection, err := e.console.Select(ctx, input.ConsoleOptions{ Message: "Do you want to continue to fix the error using AI?", @@ -127,12 +139,13 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action } switch selection { - case 0: // fix the error + // fix the error with AI + case 0: previousError = originalError agentOutput, err = azdAgent.SendMessage(ctx, fmt.Sprintf( `Steps to follow: - 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. - 2. 
Resolve the error by iterating and attempting to solve all errors until the azd command succeeds. + 1. Identify, explain and diagnose this error when running azd command and its root cause. + 2. Resolve the error with the smallest possible change to the code or configuration. Only fix what is necessary. Error details: %s`, originalError.Error())) if err != nil { @@ -142,27 +155,20 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action return nil, err } - case 1: - confirm, err := e.console.Confirm(ctx, input.ConsoleOptions{ - Message: "Provide AI generated troubleshooting steps?", - DefaultValue: true, - }) - if err != nil { - return nil, fmt.Errorf("prompting to provide troubleshooting steps: %w", err) - } - - if confirm { - // Provide manual steps for troubleshooting - e.console.Message(ctx, "") - e.console.Message(ctx, fmt.Sprintf("%s:", output.AzdAgentLabel())) - e.console.Message(ctx, output.WithMarkdown(agentOutput)) - e.console.Message(ctx, "") - } + // Not fix the error with AI + case 1: return actionResult, err } + // Ask the user to add feedback + if err := e.collectAndApplyFeedback(ctx, azdAgent, "Any feedback or changes?"); err != nil { + return nil, err + } + + // Clear check cache to prevent skip of tool related error ctx = tools.WithInstalledCheckCache(ctx) + actionResult, err = next(ctx) originalError = err } @@ -174,3 +180,59 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action return actionResult, err } + +// collectAndApplyFeedback prompts for user feedback and applies it using the agent +func (e *ErrorMiddleware) collectAndApplyFeedback( + ctx context.Context, + azdAgent agent.Agent, + promptMessage string, +) error { + confirmFeedback := uxlib.NewConfirm(&uxlib.ConfirmOptions{ + Message: promptMessage, + DefaultValue: uxlib.Ptr(false), + HelpMessage: "You will be able to provide and feedback or changes after AI fix.", + }) + + hasFeedback, err := confirmFeedback.Ask(ctx) + if err != nil { + return err + } + + if !*hasFeedback { + e.console.Message(ctx, "") + return nil + } + + userInputPrompt := uxlib.NewPrompt(&uxlib.PromptOptions{ + Message: "You", + PlaceHolder: "Provide feedback or changes to the project", + Required: true, + IgnoreHintKeys: true, + }) + + userInput, err := userInputPrompt.Ask(ctx) + if err != nil { + return fmt.Errorf("failed to collect feedback after AI fix: %w", err) + } + + e.console.Message(ctx, "") + + if userInput != "" { + e.console.Message(ctx, color.MagentaString("Feedback")) + + feedbackOutput, err := azdAgent.SendMessage(ctx, userInput) + if err != nil { + if feedbackOutput != "" { + e.console.Message(ctx, output.WithMarkdown(feedbackOutput)) + } + return err + } + + e.console.Message(ctx, "") + e.console.Message(ctx, fmt.Sprintf("%s:", output.AzdAgentLabel())) + e.console.Message(ctx, output.WithMarkdown(feedbackOutput)) + e.console.Message(ctx, "") + } + + return nil +} diff --git a/cli/azd/internal/agent/tools/dev/command_executor.go b/cli/azd/internal/agent/tools/dev/command_executor.go index 4c71fea4a43..c043724e762 100644 --- a/cli/azd/internal/agent/tools/dev/command_executor.go +++ b/cli/azd/internal/agent/tools/dev/command_executor.go @@ -120,6 +120,16 @@ func (t CommandExecutorTool) Call(ctx context.Context, input string) (string, er return string(jsonData), nil } + if req.Command == "azd" { + errorResponse := common.ErrorResponse{ + Error: true, + Message: "azd command is not supported", + } + + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return 
string(jsonData), nil + } + // Set defaults if req.Args == nil { req.Args = []string{} diff --git a/cli/azd/internal/mcp/tools/azd_common_error.go b/cli/azd/internal/mcp/tools/azd_common_error.go index 36f78f21fae..dee3ce42671 100644 --- a/cli/azd/internal/mcp/tools/azd_common_error.go +++ b/cli/azd/internal/mcp/tools/azd_common_error.go @@ -16,6 +16,10 @@ func NewAzdCommonErrorTool() server.ServerTool { return server.ServerTool{ Tool: mcp.NewTool( "azd_common_error", + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(false), mcp.WithDescription( `Returns instructions for diagnosing common error type and providing suggested actions for resolution. diff --git a/cli/azd/internal/mcp/tools/azd_error_troubleshooting.go b/cli/azd/internal/mcp/tools/azd_error_troubleshooting.go index 23a203fbf64..0af4ef4c6e4 100644 --- a/cli/azd/internal/mcp/tools/azd_error_troubleshooting.go +++ b/cli/azd/internal/mcp/tools/azd_error_troubleshooting.go @@ -21,14 +21,13 @@ func NewAzdErrorTroubleShootingTool() server.ServerTool { mcp.WithDestructiveHintAnnotation(false), mcp.WithOpenWorldHintAnnotation(false), mcp.WithDescription( - `Returns instructions for diagnosing errors from Azure Developer CLI (azd) commands and provides step-by-step troubleshooting instructions. + `Returns instructions to identify, explain and diagnose the errors from Azure Developer CLI (azd) commands and provides step-by-step troubleshooting instructions. The LLM agent should execute these instructions using available tools. Use this tool when: -- Any error occurs during Azure Developer CLI (azd) command execution -- Need to classify, analyze, and resolve errors automatically or with guided steps -- Provide troubleshooting steps for errors`, +- Request to identify, explain and diagnose the error when running azd command and its root cause +- Provide actionable troubleshooting steps`, ), ), Handler: handleAzdErrorTroubleShooting, diff --git a/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md b/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md index 9d11bc6213f..bb8ae1273f4 100644 --- a/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md +++ b/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md @@ -3,7 +3,7 @@ ✅ **Agent Task List** 1. **Error Classification:** Identify the specific error type (Azure REST API, ARM Deployment, Authentication, Local Tool Installation or General) -2. **Error Analysis:** Explain what the error means and its root causes. Note that this error occurs when running Azure Developer CLI +2. **Error Analysis:** Explain and diagnose what the error means and its root causes. Note that this error occurs when running Azure Developer CLI 3. **Troubleshooting Steps:** Based on error type (Azure REST API Response Errors, Azure ARM Deployment Errors, Azure Authentication Errors, Local Tool Installation Errors, and General AZD Errors), find the Troubleshooting Approach below and provide troubleshooting approach 4. **Resolution Confirmation:** Ensure the issue is fully resolved. 
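The two MCP tool registrations above attach behavior hints (read-only, idempotent, not destructive, not open-world) so a host knows these tools only return instructions and never mutate state. A minimal, self-contained sketch of that registration pattern, reusing the same mark3labs/mcp-go calls that appear in this patch series; the tool name, description, and handler body below are placeholders, not part of azd:

package tools

import (
    "context"

    "github.com/mark3labs/mcp-go/mcp"
    "github.com/mark3labs/mcp-go/server"
)

// NewExampleInstructionTool registers a purely informational tool. The
// annotation hints advertise that calling it has no side effects and is safe to retry.
func NewExampleInstructionTool() server.ServerTool {
    return server.ServerTool{
        Tool: mcp.NewTool(
            "example_instructions",
            mcp.WithReadOnlyHintAnnotation(true),
            mcp.WithIdempotentHintAnnotation(true),
            mcp.WithDestructiveHintAnnotation(false),
            mcp.WithOpenWorldHintAnnotation(false),
            mcp.WithDescription("Returns static troubleshooting instructions for the agent to follow."),
        ),
        Handler: func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
            // The real azd tools load their instructions from embedded prompt markdown files.
            return mcp.NewToolResultText("instructions go here"), nil
        },
    }
}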
If issue still exists, retry the task list to fix the error @@ -261,4 +261,4 @@ - [ ] Infrastructure code corrections implemented and validated if needed - [ ] For Azure REST API Response Errors or Azure ARM Deployment Errors, Azure Portal verification completed for affected resources if needed - [ ] For Azure REST API Response Errors or Azure ARM Deployment Errors, Azure CLI commands confirm successful resolution if needed when user installed Azure CLI. Otherwise, skip this step -- [ ] Ensure the issue is fully resolved. If issue still exists, retry the task list to fix the error \ No newline at end of file +- [ ] Ensure the issue is fully resolved \ No newline at end of file diff --git a/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go b/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go index 4fd87682b13..e8423cf047d 100644 --- a/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go +++ b/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go @@ -444,6 +444,7 @@ func (p *BicepProvider) deploymentState( currentParamsHash string, ) (*azapi.ResourceDeployment, error) { p.console.ShowSpinner(ctx, "Comparing deployment state", input.Step) + defer p.console.StopSpinner(ctx, "", input.Step) prevDeploymentResult, err := p.latestDeploymentResult(ctx, scope) if err != nil { return nil, fmt.Errorf("deployment state error: %w", err) From 590015b84cbd8641eaf17dd00719e1afad5cf617 Mon Sep 17 00:00:00 2001 From: hemarina Date: Fri, 29 Aug 2025 17:33:24 -0700 Subject: [PATCH 087/116] lll --- cli/azd/cmd/middleware/error.go | 14 ++++++++++---- .../mcp/tools/azd_error_troubleshooting.go | 8 +++++--- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index 0077a2ae901..69d90e47b36 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -29,7 +29,12 @@ type ErrorMiddleware struct { featuresManager *alpha.FeatureManager } -func NewErrorMiddleware(options *Options, console input.Console, agentFactory *agent.AgentFactory, global *internal.GlobalCommandOptions, featuresManager *alpha.FeatureManager) Middleware { +func NewErrorMiddleware( + options *Options, console input.Console, + agentFactory *agent.AgentFactory, + global *internal.GlobalCommandOptions, + featuresManager *alpha.FeatureManager, +) Middleware { return &ErrorMiddleware{ options: options, console: console, @@ -70,7 +75,8 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action if previousError != nil && errors.Is(originalError, previousError) { attempt++ if attempt > 3 { - e.console.Message(ctx, "AI was unable to resolve the error after multiple attempts. Please review the error and fix it manually.") + e.console.Message(ctx, "AI was unable to resolve the error after multiple attempts. "+ + "Please review the error and fix it manually.") return actionResult, err } } @@ -96,7 +102,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action agentOutput, err := azdAgent.SendMessage(ctx, fmt.Sprintf( `Steps to follow: - 1. Identify, explain and diagnose this error when running azd command and its root cause. + 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. 2. Provide actionable troubleshooting steps. 
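Taken together, these middleware commits converge on one control loop: bail out on errors that analysis cannot help with, cap repeated attempts when the same error keeps coming back, and re-run the wrapped command after each fix attempt. A simplified, self-contained sketch of that loop, using plain functions instead of azd's middleware, console, and agent types; the skip strings mirror the ones in the diffs, while the function names and the max-attempt value here are illustrative:

package main

import (
    "errors"
    "fmt"
    "strings"
)

// analyzeAndFix stands in for handing the error text to the AI agent.
func analyzeAndFix(err error) {
    fmt.Println("analyzing:", err)
}

// runWithRetries re-runs action after each analysis pass. It gives up when the
// error matches the skip list, or when the same error value is seen more than
// maxAttempts times (like the middleware, it compares failures with errors.Is).
func runWithRetries(action func() error, maxAttempts int) error {
    skipAnalyzing := []string{"environment already initialized", "interrupt"}

    err := action()
    var previous error
    attempt := 0

    for err != nil {
        for _, s := range skipAnalyzing {
            if strings.Contains(err.Error(), s) {
                return err // not something agent analysis can help with
            }
        }

        if previous != nil && errors.Is(err, previous) {
            attempt++
            if attempt > maxAttempts {
                return fmt.Errorf("unresolved after %d attempts: %w", maxAttempts, err)
            }
        }

        analyzeAndFix(err)
        previous = err
        err = action()
    }

    return nil
}

func main() {
    remainingFailures := 2
    err := runWithRetries(func() error {
        if remainingFailures > 0 {
            remainingFailures--
            return errors.New("transient provisioning failure")
        }
        return nil
    }, 3)
    fmt.Println("result:", err)
}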
Error details: %s`, originalError.Error())) @@ -144,7 +150,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action previousError = originalError agentOutput, err = azdAgent.SendMessage(ctx, fmt.Sprintf( `Steps to follow: - 1. Identify, explain and diagnose this error when running azd command and its root cause. + 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. 2. Resolve the error with the smallest possible change to the code or configuration. Only fix what is necessary. Error details: %s`, originalError.Error())) diff --git a/cli/azd/internal/mcp/tools/azd_error_troubleshooting.go b/cli/azd/internal/mcp/tools/azd_error_troubleshooting.go index 0af4ef4c6e4..bcc08e668e1 100644 --- a/cli/azd/internal/mcp/tools/azd_error_troubleshooting.go +++ b/cli/azd/internal/mcp/tools/azd_error_troubleshooting.go @@ -21,13 +21,15 @@ func NewAzdErrorTroubleShootingTool() server.ServerTool { mcp.WithDestructiveHintAnnotation(false), mcp.WithOpenWorldHintAnnotation(false), mcp.WithDescription( - `Returns instructions to identify, explain and diagnose the errors from Azure Developer CLI (azd) commands and provides step-by-step troubleshooting instructions. + `Returns instructions for diagnosing errors from Azure Developer CLI (azd) commands and provides +step-by-step troubleshooting instructions. The LLM agent should execute these instructions using available tools. Use this tool when: -- Request to identify, explain and diagnose the error when running azd command and its root cause -- Provide actionable troubleshooting steps`, +- Any error occurs during Azure Developer CLI (azd) command execution +- Need to classify, analyze, and resolve errors automatically or with guided steps +- Provide troubleshooting steps for errors`, ), ), Handler: handleAzdErrorTroubleShooting, From 5f485328ba83f3879af8cc650e7b6274b36096f7 Mon Sep 17 00:00:00 2001 From: hemarina Date: Fri, 29 Aug 2025 21:25:51 -0700 Subject: [PATCH 088/116] fix the bug --- cli/azd/cmd/middleware/error.go | 41 ++++++++++++++++++++++----------- cli/azd/cmd/middleware/ux.go | 16 ++++++++----- 2 files changed, 38 insertions(+), 19 deletions(-) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index 69d90e47b36..ffe88801e5e 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -45,15 +45,19 @@ func NewErrorMiddleware( } func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.ActionResult, error) { + actionResult, err := next(ctx) + if e.featuresManager.IsEnabled(llm.FeatureLlm) { if e.options.IsChildAction(ctx) { return next(ctx) } - actionResult, err := next(ctx) attempt := 0 - var previousError error originalError := err + suggestion := "" + var previousError error + var suggestionErr *internal.ErrorWithSuggestion + var errorWithTraceId *internal.ErrorWithTraceId // TODO: think about Error exclusive or inclusive skipAnalyzingErrors := []string{ @@ -62,13 +66,13 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action } for { - if err == nil { + if originalError == nil { break } for _, s := range skipAnalyzingErrors { - if strings.Contains(err.Error(), s) { - return actionResult, err + if strings.Contains(originalError.Error(), s) { + return actionResult, originalError } } @@ -77,10 +81,11 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action if attempt > 3 { e.console.Message(ctx, "AI was unable to resolve the error after 
multiple attempts. "+ "Please review the error and fix it manually.") - return actionResult, err + return actionResult, originalError } } + // For debug, will be cleaned // e.console.Confirm(ctx, input.ConsoleOptions{ // Message: "Debugger Ready?", // DefaultValue: true, @@ -88,6 +93,15 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action e.console.StopSpinner(ctx, "", input.Step) e.console.Message(ctx, output.WithErrorFormat("\nERROR: %s", originalError.Error())) + if errors.As(originalError, &errorWithTraceId) { + e.console.Message(ctx, output.WithErrorFormat("TraceID: %s", errorWithTraceId.TraceId)) + } + + if errors.As(originalError, &suggestionErr) { + suggestion = suggestionErr.Suggestion + e.console.Message(ctx, suggestion) + } + // Warn user that this is an alpha feature e.console.WarnForFeature(ctx, llm.FeatureLlm) @@ -100,11 +114,16 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action defer azdAgent.Stop() + errorInput := originalError.Error() + if suggestion != "" { + errorInput += "\n" + "Suggestion: " + suggestion + } + agentOutput, err := azdAgent.SendMessage(ctx, fmt.Sprintf( `Steps to follow: 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. 2. Provide actionable troubleshooting steps. - Error details: %s`, originalError.Error())) + Error details: %s`, errorInput)) if err != nil { if agentOutput != "" { @@ -151,8 +170,8 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action agentOutput, err = azdAgent.SendMessage(ctx, fmt.Sprintf( `Steps to follow: 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. - 2. Resolve the error with the smallest possible change to the code or configuration. Only fix what is necessary. - Error details: %s`, originalError.Error())) + 2. Resolve the error by making the minimal, targeted change required to the code or configuration. Avoid unnecessary modifications and focus only on what is essential to restore correct functionality. 
+ Error details: %s`, errorInput)) if err != nil { if agentOutput != "" { @@ -178,12 +197,8 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action actionResult, err = next(ctx) originalError = err } - - return actionResult, err } - actionResult, err := next(ctx) - return actionResult, err } diff --git a/cli/azd/cmd/middleware/ux.go b/cli/azd/cmd/middleware/ux.go index c60d4a99a99..46582309721 100644 --- a/cli/azd/cmd/middleware/ux.go +++ b/cli/azd/cmd/middleware/ux.go @@ -9,20 +9,24 @@ import ( "github.com/azure/azure-dev/cli/azd/cmd/actions" "github.com/azure/azure-dev/cli/azd/internal" + "github.com/azure/azure-dev/cli/azd/pkg/alpha" "github.com/azure/azure-dev/cli/azd/pkg/input" + "github.com/azure/azure-dev/cli/azd/pkg/llm" "github.com/azure/azure-dev/cli/azd/pkg/output" "github.com/azure/azure-dev/cli/azd/pkg/output/ux" ) type UxMiddleware struct { - options *Options - console input.Console + options *Options + console input.Console + featuresManager *alpha.FeatureManager } -func NewUxMiddleware(options *Options, console input.Console) Middleware { +func NewUxMiddleware(options *Options, console input.Console, featuresManager *alpha.FeatureManager) Middleware { return &UxMiddleware{ - options: options, - console: console, + options: options, + console: console, + featuresManager: featuresManager, } } @@ -37,7 +41,7 @@ func (m *UxMiddleware) Run(ctx context.Context, next NextFn) (*actions.ActionRes // Stop the spinner always to un-hide cursor m.console.StopSpinner(ctx, "", input.Step) - if err != nil { + if err != nil && !m.featuresManager.IsEnabled(llm.FeatureLlm) { var suggestionErr *internal.ErrorWithSuggestion var errorWithTraceId *internal.ErrorWithTraceId m.console.Message(ctx, output.WithErrorFormat("\nERROR: %s", err.Error())) From 5937df3e32409e57eb71f4bdc551e6a91b2e2cf7 Mon Sep 17 00:00:00 2001 From: hemarina Date: Fri, 29 Aug 2025 21:31:01 -0700 Subject: [PATCH 089/116] revert this ux fix, cause other bug --- cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go b/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go index e8423cf047d..8b70bf3b434 100644 --- a/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go +++ b/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go @@ -444,7 +444,7 @@ func (p *BicepProvider) deploymentState( currentParamsHash string, ) (*azapi.ResourceDeployment, error) { p.console.ShowSpinner(ctx, "Comparing deployment state", input.Step) - defer p.console.StopSpinner(ctx, "", input.Step) + // defer p.console.StopSpinner(ctx, "", input.Step) prevDeploymentResult, err := p.latestDeploymentResult(ctx, scope) if err != nil { return nil, fmt.Errorf("deployment state error: %w", err) From 80effbc600cd106e068df2d0ff8af1ed8684ac0b Mon Sep 17 00:00:00 2001 From: hemarina Date: Sat, 30 Aug 2025 21:57:51 -0700 Subject: [PATCH 090/116] lll --- cli/azd/cmd/middleware/error.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index ffe88801e5e..c7220bbbb3b 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -170,7 +170,8 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action agentOutput, err = azdAgent.SendMessage(ctx, fmt.Sprintf( `Steps to follow: 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. - 2. 
Resolve the error by making the minimal, targeted change required to the code or configuration. Avoid unnecessary modifications and focus only on what is essential to restore correct functionality. + 2. Resolve the error by making the minimal, targeted change required to the code or configuration. + Avoid unnecessary modifications and focus only on what is essential to restore correct functionality. Error details: %s`, errorInput)) if err != nil { From 0e3905ba0631e3346b9a63bf1daf626b3a0eb67a Mon Sep 17 00:00:00 2001 From: hemarina Date: Tue, 2 Sep 2025 19:33:07 -0700 Subject: [PATCH 091/116] minor fix on comment and skipAnalyzingErrors --- cli/azd/cmd/middleware/error.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index c7220bbbb3b..ea37315d952 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -63,6 +63,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action skipAnalyzingErrors := []string{ "environment already initialized", "interrupt", + "no project exists", } for { @@ -85,11 +86,6 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action } } - // For debug, will be cleaned - // e.console.Confirm(ctx, input.ConsoleOptions{ - // Message: "Debugger Ready?", - // DefaultValue: true, - // }) e.console.StopSpinner(ctx, "", input.Step) e.console.Message(ctx, output.WithErrorFormat("\nERROR: %s", originalError.Error())) @@ -150,6 +146,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action e.console.Message(ctx, "") } + // TODO: update to "GitHub Copilot for Azure" // Ask user if they want to let AI fix the error selection, err := e.console.Select(ctx, input.ConsoleOptions{ Message: "Do you want to continue to fix the error using AI?", @@ -187,6 +184,8 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action return actionResult, err } + // TODO print out the changes made by AI + // Ask the user to add feedback if err := e.collectAndApplyFeedback(ctx, azdAgent, "Any feedback or changes?"); err != nil { return nil, err From c778dcd74b376b2f1ec4ea2ff76839924e7a4c88 Mon Sep 17 00:00:00 2001 From: hemarina Date: Tue, 2 Sep 2025 20:11:10 -0700 Subject: [PATCH 092/116] remove comment --- cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go b/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go index 8b70bf3b434..4fd87682b13 100644 --- a/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go +++ b/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go @@ -444,7 +444,6 @@ func (p *BicepProvider) deploymentState( currentParamsHash string, ) (*azapi.ResourceDeployment, error) { p.console.ShowSpinner(ctx, "Comparing deployment state", input.Step) - // defer p.console.StopSpinner(ctx, "", input.Step) prevDeploymentResult, err := p.latestDeploymentResult(ctx, scope) if err != nil { return nil, fmt.Errorf("deployment state error: %w", err) From e5fe1b498b7ff76b1ef8d5de2f14e1d89f84c74a Mon Sep 17 00:00:00 2001 From: hemarina Date: Wed, 3 Sep 2025 17:42:45 -0700 Subject: [PATCH 093/116] fix provision print twice ux in up bug --- cli/azd/cmd/middleware/error.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index ea37315d952..a8fb1425263 100644 --- 
a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -45,21 +45,22 @@ func NewErrorMiddleware( } func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.ActionResult, error) { - actionResult, err := next(ctx) + var actionResult *actions.ActionResult + var err error if e.featuresManager.IsEnabled(llm.FeatureLlm) { if e.options.IsChildAction(ctx) { return next(ctx) } + actionResult, err = next(ctx) + attempt := 0 originalError := err suggestion := "" var previousError error var suggestionErr *internal.ErrorWithSuggestion var errorWithTraceId *internal.ErrorWithTraceId - - // TODO: think about Error exclusive or inclusive skipAnalyzingErrors := []string{ "environment already initialized", "interrupt", @@ -96,6 +97,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action if errors.As(originalError, &suggestionErr) { suggestion = suggestionErr.Suggestion e.console.Message(ctx, suggestion) + return actionResult, originalError } // Warn user that this is an alpha feature @@ -167,7 +169,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action agentOutput, err = azdAgent.SendMessage(ctx, fmt.Sprintf( `Steps to follow: 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. - 2. Resolve the error by making the minimal, targeted change required to the code or configuration. + 2. Resolve the error by making the minimal, targeted change required to the code or configuration. Avoid unnecessary modifications and focus only on what is essential to restore correct functionality. Error details: %s`, errorInput)) @@ -199,6 +201,10 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action } } + if actionResult == nil { + actionResult, err = next(ctx) + } + return actionResult, err } From ef17fd0a608fce1cb5c5c4fa9c072c47b03cc7d6 Mon Sep 17 00:00:00 2001 From: hemarina Date: Wed, 3 Sep 2025 18:40:56 -0700 Subject: [PATCH 094/116] make sure validation file changes is removed --- cli/azd/cmd/middleware/error.go | 2 +- cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index a8fb1425263..2feb7704b8d 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -87,7 +87,6 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action } } - e.console.StopSpinner(ctx, "", input.Step) e.console.Message(ctx, output.WithErrorFormat("\nERROR: %s", originalError.Error())) if errors.As(originalError, &errorWithTraceId) { @@ -171,6 +170,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. 2. Resolve the error by making the minimal, targeted change required to the code or configuration. Avoid unnecessary modifications and focus only on what is essential to restore correct functionality. + 3. Remove any changes that were created solely for validation and are not part of the actual error fix. 
 Error details: %s`, errorInput))
 	if err != nil {
diff --git a/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go b/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go
index 4fd87682b13..e8423cf047d 100644
--- a/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go
+++ b/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go
@@ -444,6 +444,7 @@ func (p *BicepProvider) deploymentState(
 	currentParamsHash string,
 ) (*azapi.ResourceDeployment, error) {
 	p.console.ShowSpinner(ctx, "Comparing deployment state", input.Step)
+	defer p.console.StopSpinner(ctx, "", input.Step)
 	prevDeploymentResult, err := p.latestDeploymentResult(ctx, scope)
 	if err != nil {
 		return nil, fmt.Errorf("deployment state error: %w", err)

From c9fe9080ebfa13667a304ad066b9cab918e916fb Mon Sep 17 00:00:00 2001
From: hemarina
Date: Wed, 3 Sep 2025 20:32:47 -0700
Subject: [PATCH 095/116] print out file changes

---
 cli/azd/cmd/middleware/error.go | 103 +++++++++++++++++++++++++++++---
 1 file changed, 96 insertions(+), 7 deletions(-)

diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go
index 2feb7704b8d..d814282744f 100644
--- a/cli/azd/cmd/middleware/error.go
+++ b/cli/azd/cmd/middleware/error.go
@@ -7,7 +7,10 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"os"
+	"path/filepath"
 	"strings"
+	"sync"
 
 	"github.com/azure/azure-dev/cli/azd/cmd/actions"
 	"github.com/azure/azure-dev/cli/azd/internal"
@@ -19,6 +22,7 @@ import (
 	"github.com/azure/azure-dev/cli/azd/pkg/tools"
 	uxlib "github.com/azure/azure-dev/cli/azd/pkg/ux"
 	"github.com/fatih/color"
+	"github.com/fsnotify/fsnotify"
 )
 
 type ErrorMiddleware struct {
@@ -66,6 +70,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action
 		"interrupt",
 		"no project exists",
 	}
+	agentName := "AI"
 
 	for {
 		if originalError == nil {
@@ -81,8 +86,8 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action
 		if previousError != nil && errors.Is(originalError, previousError) {
 			attempt++
 			if attempt > 3 {
-				e.console.Message(ctx, "AI was unable to resolve the error after multiple attempts. "+
-					"Please review the error and fix it manually.")
+				e.console.Message(ctx, fmt.Sprintf("%s was unable to resolve the error after multiple attempts. "+
+					"Please review the error and fix it manually.", agentName))
 				return actionResult, originalError
 			}
 		}
@@ -119,7 +124,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action
 	agentOutput, err := azdAgent.SendMessage(ctx, fmt.Sprintf(
 		`Steps to follow: 
 	1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. 
-	2. Provide actionable troubleshooting steps. 
+	2. Provide actionable troubleshooting steps. Do not perform any file changes. 
 Error details: %s`, errorInput))
 	if err != nil {
@@ -132,7 +137,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action
 	// Ask if user wants to provide AI generated troubleshooting steps
 	confirm, err := e.console.Confirm(ctx, input.ConsoleOptions{
-		Message:      "Provide AI generated troubleshooting steps?",
+		Message:      fmt.Sprintf("Provide %s generated troubleshooting steps?", agentName),
 		DefaultValue: true,
 	})
@@ -150,7 +155,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action
 	// TODO: update to "GitHub Copilot for Azure"
 	// Ask user if they want to let AI fix the error
 	selection, err := e.console.Select(ctx, input.ConsoleOptions{
-		Message: "Do you want to continue to fix the error using AI?",
+		Message: fmt.Sprintf("Do you want to continue to fix the error using %s?", agentName),
 		Options: []string{
 			"Yes",
 			"No",
 		},
@@ -165,6 +170,14 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action
 	// fix the error with AI
 	case 0:
 		previousError = originalError
+		changedFiles := make(map[string]bool)
+		var mu sync.Mutex
+
+		watcher, done, err := startWatcher(ctx, changedFiles, &mu)
+		if err != nil {
+			return nil, fmt.Errorf("failed to start watcher during error fix: %w", err)
+		}
+
 		agentOutput, err = azdAgent.SendMessage(ctx, fmt.Sprintf(
 			`Steps to follow: 
 	1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause.
@@ -181,13 +194,16 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action
 			return nil, err
 		}
+		// Print out changed files
+		close(done)
+		printChangedFiles(changedFiles, &mu)
+		watcher.Close()
+
 	// Not fix the error with AI
 	case 1:
 		return actionResult, err
 	}
-	// TODO print out the changes made by AI
-
 	// Ask the user to add feedback
 	if err := e.collectAndApplyFeedback(ctx, azdAgent, "Any feedback or changes?"); err != nil {
 		return nil, err
@@ -247,6 +263,14 @@ func (e *ErrorMiddleware) collectAndApplyFeedback(
 	if userInput != "" {
 		e.console.Message(ctx, color.MagentaString("Feedback"))
+		changedFiles := make(map[string]bool)
+		var mu sync.Mutex
+
+		watcher, done, err := startWatcher(ctx, changedFiles, &mu)
+		if err != nil {
+			return fmt.Errorf("failed to start watcher during error fix: %w", err)
+		}
+
 		feedbackOutput, err := azdAgent.SendMessage(ctx, userInput)
 		if err != nil {
 			if feedbackOutput != "" {
@@ -255,6 +279,11 @@ func (e *ErrorMiddleware) collectAndApplyFeedback(
 			return err
 		}
+		// Print out changed files
+		close(done)
+		printChangedFiles(changedFiles, &mu)
+		watcher.Close()
+
 		e.console.Message(ctx, "")
 		e.console.Message(ctx, fmt.Sprintf("%s:", output.AzdAgentLabel()))
 		e.console.Message(ctx, output.WithMarkdown(feedbackOutput))
@@ -263,3 +292,63 @@ func (e *ErrorMiddleware) collectAndApplyFeedback(
 	return nil
 }
+
+func printChangedFiles(changedFiles map[string]bool, mu *sync.Mutex) {
+	mu.Lock()
+	defer mu.Unlock()
+	fmt.Println("\nFiles changed:")
+	for file := range changedFiles {
+		fmt.Println("-", file)
+	}
+}
+
+func startWatcher(ctx context.Context, changedFiles map[string]bool, mu *sync.Mutex) (*fsnotify.Watcher, chan bool, error) {
+	watcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to create watcher: %w", err)
+	}
+
+	done := make(chan bool)
+
+	go func() {
+		for {
+			select {
+			case event := <-watcher.Events:
+				mu.Lock()
+				changedFiles[event.Name] = true
+				mu.Unlock()
+			case err := <-watcher.Errors:
+				fmt.Errorf("watcher error: %w", err)
+			case
<-done: + return + } + } + }() + + cwd, err := os.Getwd() + if err != nil { + return nil, nil, fmt.Errorf("failed to get current working directory: %w", err) + } + + if err := watchRecursive(cwd, watcher); err != nil { + return nil, nil, fmt.Errorf("failed to watch for changes: %w", err) + } + + return watcher, done, nil +} + +func watchRecursive(root string, watcher *fsnotify.Watcher) error { + return filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + err = watcher.Add(path) + if err != nil { + return fmt.Errorf("failed to watch directory %s: %w", path, err) + } + } + + return nil + }) +} From 9ad6fd61624d5902772ffb2081e855b331757071 Mon Sep 17 00:00:00 2001 From: hemarina Date: Mon, 8 Sep 2025 23:48:20 -0700 Subject: [PATCH 096/116] add prompt consent for error handling, update tool color, add ai disclaimer, add file change --- cli/azd/cmd/middleware/error.go | 274 +++++++----------- cli/azd/internal/agent/consent/checker.go | 93 ++++++ cli/azd/internal/agent/consent/types.go | 5 +- .../internal/agent/conversational_agent.go | 128 +++++++- .../provisioning/bicep/bicep_provider.go | 1 + 5 files changed, 322 insertions(+), 179 deletions(-) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index d814282744f..0850dda2a3a 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -7,14 +7,12 @@ import ( "context" "errors" "fmt" - "os" - "path/filepath" "strings" - "sync" "github.com/azure/azure-dev/cli/azd/cmd/actions" "github.com/azure/azure-dev/cli/azd/internal" "github.com/azure/azure-dev/cli/azd/internal/agent" + "github.com/azure/azure-dev/cli/azd/internal/agent/consent" "github.com/azure/azure-dev/cli/azd/pkg/alpha" "github.com/azure/azure-dev/cli/azd/pkg/input" "github.com/azure/azure-dev/cli/azd/pkg/llm" @@ -22,7 +20,6 @@ import ( "github.com/azure/azure-dev/cli/azd/pkg/tools" uxlib "github.com/azure/azure-dev/cli/azd/pkg/ux" "github.com/fatih/color" - "github.com/fsnotify/fsnotify" ) type ErrorMiddleware struct { @@ -31,6 +28,7 @@ type ErrorMiddleware struct { agentFactory *agent.AgentFactory global *internal.GlobalCommandOptions featuresManager *alpha.FeatureManager + consentManager consent.ConsentManager } func NewErrorMiddleware( @@ -38,6 +36,7 @@ func NewErrorMiddleware( agentFactory *agent.AgentFactory, global *internal.GlobalCommandOptions, featuresManager *alpha.FeatureManager, + consentManager consent.ConsentManager, ) Middleware { return &ErrorMiddleware{ options: options, @@ -45,6 +44,7 @@ func NewErrorMiddleware( agentFactory: agentFactory, global: global, featuresManager: featuresManager, + consentManager: consentManager, } } @@ -70,7 +70,17 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action "interrupt", "no project exists", } - agentName := "AI" + AIDisclaimer := output.WithHintFormat("The following content is AI-generated. AI responses may be incorrect.") + agentName := "agent mode" + + azdAgent, err := e.agentFactory.Create( + agent.WithDebug(e.global.EnableDebugLogging), + ) + if err != nil { + return nil, err + } + + defer azdAgent.Stop() for { if originalError == nil { @@ -86,8 +96,8 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action if previousError != nil && errors.Is(originalError, previousError) { attempt++ if attempt > 3 { - e.console.Message(ctx, fmt.Sprintf("%s was unable to resolve the error after multiple attempts. 
"+ - "Please review the error and fix it manually.", agentName)) + e.console.Message(ctx, fmt.Sprintf("Please review the error and fix it manually, "+ + "%s was unable to resolve the error after multiple attempts.", agentName)) return actionResult, originalError } } @@ -107,105 +117,70 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action // Warn user that this is an alpha feature e.console.WarnForFeature(ctx, llm.FeatureLlm) - azdAgent, err := e.agentFactory.Create( - agent.WithDebug(e.global.EnableDebugLogging), - ) - if err != nil { - return nil, err - } - - defer azdAgent.Stop() - errorInput := originalError.Error() - if suggestion != "" { - errorInput += "\n" + "Suggestion: " + suggestion + + confirm, err := e.checkErrorHandlingConsent(ctx, "troubleshooting_steps", "azd", fmt.Sprintf("Generate troubleshooting steps using %s?", agentName), true) + if err != nil { + return nil, fmt.Errorf("prompting to provide troubleshooting steps: %w", err) } - agentOutput, err := azdAgent.SendMessage(ctx, fmt.Sprintf( - `Steps to follow: + if confirm { + // Provide manual steps for troubleshooting + agentOutput, err := azdAgent.SendMessage(ctx, fmt.Sprintf( + `Steps to follow: 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. 2. Provide actionable troubleshooting steps. Do not perform any file changes. Error details: %s`, errorInput)) - if err != nil { - if agentOutput != "" { - e.console.Message(ctx, output.WithMarkdown(agentOutput)) - } - - return nil, err - } + if err != nil { + if agentOutput != "" { + e.console.Message(ctx, AIDisclaimer) + e.console.Message(ctx, output.WithMarkdown(agentOutput)) + } - // Ask if user wants to provide AI generated troubleshooting steps - confirm, err := e.console.Confirm(ctx, input.ConsoleOptions{ - Message: fmt.Sprintf("Provide %s generated troubleshooting steps?", agentName), - DefaultValue: true, - }) - if err != nil { - return nil, fmt.Errorf("prompting to provide troubleshooting steps: %w", err) - } + return nil, err + } - if confirm { - // Provide manual steps for troubleshooting + e.console.Message(ctx, AIDisclaimer) e.console.Message(ctx, "") e.console.Message(ctx, fmt.Sprintf("%s:", output.AzdAgentLabel())) e.console.Message(ctx, output.WithMarkdown(agentOutput)) e.console.Message(ctx, "") } - // TODO: update to "GitHub Copilot for Azure" - // Ask user if they want to let AI fix the error - selection, err := e.console.Select(ctx, input.ConsoleOptions{ - Message: fmt.Sprintf("Do you want to continue to fix the error using %s?", agentName), - Options: []string{ - "Yes", - "No", - }, + // Ask user if they want to let AI fix the + e.console.Confirm(ctx, input.ConsoleOptions{ + Message: "Debugger Ready?", + DefaultValue: true, }) - + confirm, err = e.checkErrorHandlingConsent(ctx, "error_fix", "azd", fmt.Sprintf("Fix this error using %s?", agentName), true) if err != nil { - return nil, fmt.Errorf("prompting failed to confirm selection: %w", err) + return nil, fmt.Errorf("prompting to fix error using %s: %w", agentName, err) + } + if !confirm { + return actionResult, err } - switch selection { - // fix the error with AI - case 0: - previousError = originalError - changedFiles := make(map[string]bool) - var mu sync.Mutex - - watcher, done, err := startWatcher(ctx, changedFiles, &mu) - if err != nil { - return nil, fmt.Errorf("failed to start watcher during error fix: %w", err) - } - - agentOutput, err = azdAgent.SendMessage(ctx, fmt.Sprintf( - `Steps to follow: + 
previousError = originalError + agentOutput, err := azdAgent.SendMessage(ctx, fmt.Sprintf( + `Steps to follow: 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. 2. Resolve the error by making the minimal, targeted change required to the code or configuration. Avoid unnecessary modifications and focus only on what is essential to restore correct functionality. 3. Remove any changes that were created solely for validation and are not part of the actual error fix. Error details: %s`, errorInput)) - if err != nil { - if agentOutput != "" { - e.console.Message(ctx, output.WithMarkdown(agentOutput)) - } - - return nil, err + if err != nil { + if agentOutput != "" { + e.console.Message(ctx, AIDisclaimer) + e.console.Message(ctx, output.WithMarkdown(agentOutput)) } - // Print out changed files - close(done) - printChangedFiles(changedFiles, &mu) - watcher.Close() - - // Not fix the error with AI - case 1: - return actionResult, err + return nil, err } // Ask the user to add feedback - if err := e.collectAndApplyFeedback(ctx, azdAgent, "Any feedback or changes?"); err != nil { + if err := e.collectAndApplyFeedback(ctx, azdAgent, AIDisclaimer); err != nil { return nil, err } @@ -228,127 +203,80 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action func (e *ErrorMiddleware) collectAndApplyFeedback( ctx context.Context, azdAgent agent.Agent, - promptMessage string, + AIDisclaimer string, ) error { - confirmFeedback := uxlib.NewConfirm(&uxlib.ConfirmOptions{ - Message: promptMessage, - DefaultValue: uxlib.Ptr(false), - HelpMessage: "You will be able to provide and feedback or changes after AI fix.", + userInputPrompt := uxlib.NewPrompt(&uxlib.PromptOptions{ + Message: "Any changes you'd like to make?", + Hint: "Describe your changes or press enter to skip.", + Required: false, }) - hasFeedback, err := confirmFeedback.Ask(ctx) + userInput, err := userInputPrompt.Ask(ctx) if err != nil { - return err + return fmt.Errorf("failed to collect feedback for user input: %w", err) } - if !*hasFeedback { + if userInput == "" { e.console.Message(ctx, "") return nil } - userInputPrompt := uxlib.NewPrompt(&uxlib.PromptOptions{ - Message: "You", - PlaceHolder: "Provide feedback or changes to the project", - Required: true, - IgnoreHintKeys: true, - }) + e.console.Message(ctx, "") + e.console.Message(ctx, color.MagentaString("Feedback")) - userInput, err := userInputPrompt.Ask(ctx) + feedbackOutput, err := azdAgent.SendMessage(ctx, userInput) if err != nil { - return fmt.Errorf("failed to collect feedback after AI fix: %w", err) + if feedbackOutput != "" { + e.console.Message(ctx, AIDisclaimer) + e.console.Message(ctx, output.WithMarkdown(feedbackOutput)) + } + return err } + e.console.Message(ctx, AIDisclaimer) + e.console.Message(ctx, "") + e.console.Message(ctx, fmt.Sprintf("%s:", output.AzdAgentLabel())) + e.console.Message(ctx, output.WithMarkdown(feedbackOutput)) e.console.Message(ctx, "") - - if userInput != "" { - e.console.Message(ctx, color.MagentaString("Feedback")) - - changedFiles := make(map[string]bool) - var mu sync.Mutex - - watcher, done, err := startWatcher(ctx, changedFiles, &mu) - if err != nil { - return fmt.Errorf("failed to start watcher during error fix: %w", err) - } - - feedbackOutput, err := azdAgent.SendMessage(ctx, userInput) - if err != nil { - if feedbackOutput != "" { - e.console.Message(ctx, output.WithMarkdown(feedbackOutput)) - } - return err - } - - // Print out changed files - close(done) - 
printChangedFiles(changedFiles, &mu) - watcher.Close() - - e.console.Message(ctx, "") - e.console.Message(ctx, fmt.Sprintf("%s:", output.AzdAgentLabel())) - e.console.Message(ctx, output.WithMarkdown(feedbackOutput)) - e.console.Message(ctx, "") - } return nil } -func printChangedFiles(changedFiles map[string]bool, mu *sync.Mutex) { - mu.Lock() - defer mu.Unlock() - fmt.Println("\nFiles changed:") - for file := range changedFiles { - fmt.Println("-", file) - } -} - -func startWatcher(ctx context.Context, changedFiles map[string]bool, mu *sync.Mutex) (*fsnotify.Watcher, chan bool, error) { - watcher, err := fsnotify.NewWatcher() +func (e *ErrorMiddleware) checkErrorHandlingConsent( + ctx context.Context, + toolName string, + toolServer string, + message string, + skip bool, +) (bool, error) { + // Create a consent checker for this specific server + consentChecker := consent.NewConsentChecker(e.consentManager, toolServer) + + // Check error handling consent using the consent checker + decision, err := consentChecker.CheckErrorHandlingConsent(ctx, toolName) if err != nil { - return nil, nil, fmt.Errorf("failed to create watcher: %w", err) + return false, fmt.Errorf("consent check failed: %w", err) } - done := make(chan bool) - - go func() { - for { - select { - case event := <-watcher.Events: - mu.Lock() - changedFiles[event.Name] = true - mu.Unlock() - case err := <-watcher.Errors: - fmt.Errorf("watcher error: %w", err) - case <-done: - return + if !decision.Allowed { + if decision.RequiresPrompt { + // Use console.DoInteraction to show consent prompt + if err := e.console.DoInteraction(func() error { + return consentChecker.PromptAndGrantErrorHandlingConsent( + ctx, + toolName, + message, + skip, + ) + }); err != nil { + return false, err } - } - }() - - cwd, err := os.Getwd() - if err != nil { - return nil, nil, fmt.Errorf("failed to get current working directory: %w", err) - } - - if err := watchRecursive(cwd, watcher); err != nil { - return nil, nil, fmt.Errorf("failed to watch for changes: %w", err) - } - - return watcher, done, nil -} - -func watchRecursive(root string, watcher *fsnotify.Watcher) error { - return filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if info.IsDir() { - err = watcher.Add(path) - if err != nil { - return fmt.Errorf("failed to watch directory %s: %w", path, err) + } else { + if !skip { + return false, fmt.Errorf("error handling prompt denied: %s", decision.Reason) } } + } - return nil - }) + return true, nil } diff --git a/cli/azd/internal/agent/consent/checker.go b/cli/azd/internal/agent/consent/checker.go index 90380e665aa..31dbd97513a 100644 --- a/cli/azd/internal/agent/consent/checker.go +++ b/cli/azd/internal/agent/consent/checker.go @@ -15,6 +15,7 @@ import ( var ErrToolExecutionDenied = fmt.Errorf("tool execution denied by user") var ErrSamplingDenied = fmt.Errorf("sampling denied by user") +var ErrErrorHandlingDenied = fmt.Errorf("error handling workflow denied by user") // ConsentChecker provides shared consent checking logic for different tool types type ConsentChecker struct { @@ -69,6 +70,23 @@ func (cc *ConsentChecker) CheckSamplingConsent( return cc.consentMgr.CheckConsent(ctx, consentRequest) } +// CheckErrorHandlingConsent checks error handling consent for prompt +func (cc *ConsentChecker) CheckErrorHandlingConsent( + ctx context.Context, + toolName string, +) (*ConsentDecision, error) { + toolId := fmt.Sprintf("%s/%s", cc.serverName, toolName) + + // Create consent request for 
error handling + consentRequest := ConsentRequest{ + ToolID: toolId, + ServerName: cc.serverName, + Operation: OperationTypeErrorHandling, // This is an error handling request + } + + return cc.consentMgr.CheckConsent(ctx, consentRequest) +} + // PromptAndGrantConsent shows consent prompt and grants permission based on user choice func (cc *ConsentChecker) PromptAndGrantConsent( ctx context.Context, @@ -110,6 +128,32 @@ func (cc *ConsentChecker) PromptAndGrantSamplingConsent( return cc.grantConsentFromChoice(ctx, toolId, choice, OperationTypeSampling) } +// PromptAndGrantErrorHandlingConsent shows error handling consent prompt and grants permission based on user choice +func (cc *ConsentChecker) PromptAndGrantErrorHandlingConsent( + ctx context.Context, + toolName string, + message string, + skip bool, +) error { + toolId := fmt.Sprintf("%s/%s", cc.serverName, toolName) + + choice, err := cc.promptForErrorHandlingConsent(ctx, message, skip) + if err != nil { + return fmt.Errorf("error handling consent prompt failed: %w", err) + } + + if choice == "deny" { + return ErrErrorHandlingDenied + } + + if choice == "skip" { + return nil + } + + // Grant error handling consent based on user choice + return cc.grantConsentFromChoice(ctx, toolId, choice, OperationTypeErrorHandling) +} + // Private Struct Methods // formatToolDescriptionWithAnnotations creates a formatted description with tool annotations as bullet points @@ -386,6 +430,55 @@ func (cc *ConsentChecker) grantConsentFromChoice( return cc.consentMgr.GrantConsent(ctx, rule) } +func (cc *ConsentChecker) promptForErrorHandlingConsent( + ctx context.Context, + message string, + skip bool, +) (string, error) { + choices := []*ux.SelectChoice{ + { + Value: "once", + Label: "Yes, allow once", + }, + { + Value: "always", + Label: "Yes, allow always", + }, + } + + if skip { + choices = append(choices, &ux.SelectChoice{ + Value: "skip", + Label: "No, skip to next step", + }) + } else { + choices = append(choices, &ux.SelectChoice{ + Value: "deny", + Label: "No, cancel this interaction (esc)", + }) + } + + selector := ux.NewSelect(&ux.SelectOptions{ + Message: message, + HelpMessage: fmt.Sprintf("This action will run AI tools to generate troubleshooting steps. 
Edit permissions for AI tools anytime by running %s.", + output.WithHighLightFormat("azd mcp")), + Choices: choices, + EnableFiltering: ux.Ptr(false), + DisplayCount: 5, + }) + + choiceIndex, err := selector.Ask(ctx) + if err != nil { + return "", err + } + + if choiceIndex == nil || *choiceIndex < 0 || *choiceIndex >= len(choices) { + return "", fmt.Errorf("invalid choice selected") + } + + return choices[*choiceIndex].Value, nil +} + // promptForSamplingConsent shows an interactive sampling consent prompt and returns the user's choice func (cc *ConsentChecker) promptForSamplingConsent( ctx context.Context, diff --git a/cli/azd/internal/agent/consent/types.go b/cli/azd/internal/agent/consent/types.go index e6de3dce5db..98e8cd33eb9 100644 --- a/cli/azd/internal/agent/consent/types.go +++ b/cli/azd/internal/agent/consent/types.go @@ -36,8 +36,9 @@ const ( type OperationType string const ( - OperationTypeTool OperationType = "tool" // running tools - OperationTypeSampling OperationType = "sampling" // sampling requests + OperationTypeTool OperationType = "tool" // running tools + OperationTypeSampling OperationType = "sampling" // sampling requests + OperationTypeErrorHandling OperationType = "error" // running agents for error handling ) // Permission is the consent outcome for a rule diff --git a/cli/azd/internal/agent/conversational_agent.go b/cli/azd/internal/agent/conversational_agent.go index 3be7e5bd43d..4a0e3277dfd 100644 --- a/cli/azd/internal/agent/conversational_agent.go +++ b/cli/azd/internal/agent/conversational_agent.go @@ -7,12 +7,18 @@ import ( "context" _ "embed" "fmt" + "log" + "os" + "path/filepath" "strings" + "sync" "time" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + "github.com/azure/azure-dev/cli/azd/pkg/output" uxlib "github.com/azure/azure-dev/cli/azd/pkg/ux" "github.com/fatih/color" + "github.com/fsnotify/fsnotify" "github.com/tmc/langchaingo/agents" "github.com/tmc/langchaingo/chains" "github.com/tmc/langchaingo/llms" @@ -87,9 +93,27 @@ func NewConversationalAzdAiAgent(llm llms.Model, opts ...AgentCreateOption) (Age return azdAgent, nil } +type FileChanges struct { + Created map[string]bool + Modified map[string]bool + Deleted map[string]bool +} + // SendMessage processes a single message through the agent and returns the response func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, args ...string) (string, error) { thoughtsCtx, cancelCtx := context.WithCancel(ctx) + fileChanges := &FileChanges{ + Created: make(map[string]bool), + Modified: make(map[string]bool), + Deleted: make(map[string]bool), + } + var mu sync.Mutex + + watcher, done, err := startWatcher(ctx, fileChanges, &mu) + if err != nil { + return "", fmt.Errorf("failed to start watcher: %w", err) + } + cleanup, err := aai.renderThoughts(thoughtsCtx) if err != nil { cancelCtx() @@ -98,6 +122,8 @@ func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, args ...st defer func() { cleanup() + close(done) + watcher.Close() cancelCtx() }() @@ -106,6 +132,8 @@ func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, args ...st return "", err } + printChangedFiles(fileChanges, &mu) + return output, nil } @@ -113,7 +141,7 @@ func (aai *ConversationalAzdAiAgent) renderThoughts(ctx context.Context) (func() var latestThought string spinner := uxlib.NewSpinner(&uxlib.SpinnerOptions{ - Text: "Thinking...", + Text: "Processing...", }) canvas := uxlib.NewCanvas( @@ -156,11 +184,11 @@ func (aai *ConversationalAzdAiAgent) renderThoughts(ctx context.Context) 
(func() // Update spinner text if latestAction == "" { - spinnerText = "Thinking..." + spinnerText = "Processing..." } else { - spinnerText = fmt.Sprintf("Running %s tool", color.GreenString(latestAction)) + spinnerText = fmt.Sprintf("Running %s tool", color.BlueString(latestAction)) if latestActionInput != "" { - spinnerText += " with " + color.GreenString(latestActionInput) + spinnerText += " with " + color.BlueString(latestActionInput) } spinnerText += "..." @@ -178,3 +206,95 @@ func (aai *ConversationalAzdAiAgent) renderThoughts(ctx context.Context) (func() return cleanup, canvas.Run() } + +func startWatcher(ctx context.Context, fileChanges *FileChanges, mu *sync.Mutex) (*fsnotify.Watcher, chan bool, error) { + watcher, err := fsnotify.NewWatcher() + if err != nil { + return nil, nil, fmt.Errorf("failed to create watcher: %w", err) + } + + done := make(chan bool) + + go func() { + for { + select { + case event := <-watcher.Events: + mu.Lock() + name := event.Name + switch { + case event.Has(fsnotify.Create): + fileChanges.Created[name] = true + case event.Has(fsnotify.Write) || event.Has(fsnotify.Rename): + if !fileChanges.Created[name] && !fileChanges.Deleted[name] { + fileChanges.Modified[name] = true + } + case event.Has(fsnotify.Remove): + if fileChanges.Created[name] { + delete(fileChanges.Created, name) + } else { + fileChanges.Deleted[name] = true + delete(fileChanges.Modified, name) + } + } + mu.Unlock() + case err := <-watcher.Errors: + log.Printf("watcher error: %v", err) + case <-done: + return + case <-ctx.Done(): + return + } + } + }() + + cwd, err := os.Getwd() + if err != nil { + return nil, nil, fmt.Errorf("failed to get current working directory: %w", err) + } + + if err := watchRecursive(cwd, watcher); err != nil { + return nil, nil, fmt.Errorf("watcher failed: %w", err) + } + + return watcher, done, nil +} + +func watchRecursive(root string, watcher *fsnotify.Watcher) error { + return filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + err = watcher.Add(path) + if err != nil { + return fmt.Errorf("failed to watch directory %s: %w", path, err) + } + } + + return nil + }) +} + +func printChangedFiles(fileChanges *FileChanges, mu *sync.Mutex) { + mu.Lock() + defer mu.Unlock() + fmt.Println(output.WithHintFormat("| Files changed:")) + + if len(fileChanges.Created) > 0 { + for file := range fileChanges.Created { + fmt.Println(output.WithHintFormat("| "), color.GreenString("+ Created "), file) + } + } + + if len(fileChanges.Modified) > 0 { + for file := range fileChanges.Modified { + fmt.Println(output.WithHintFormat("| "), color.YellowString("+/- Modified "), file) + } + } + + if len(fileChanges.Deleted) > 0 { + for file := range fileChanges.Deleted { + fmt.Println(output.WithHintFormat("| "), color.RedString("- Deleted "), file) + } + } +} diff --git a/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go b/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go index e8423cf047d..29159e27928 100644 --- a/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go +++ b/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go @@ -444,6 +444,7 @@ func (p *BicepProvider) deploymentState( currentParamsHash string, ) (*azapi.ResourceDeployment, error) { p.console.ShowSpinner(ctx, "Comparing deployment state", input.Step) + // Find the call stack and the right last message defer p.console.StopSpinner(ctx, "", input.Step) prevDeploymentResult, err := p.latestDeploymentResult(ctx, scope) if err != nil { 
From 4de0c1ed0948446a1c2cb9d6b7f0be49da734d52 Mon Sep 17 00:00:00 2001
From: hemarina
Date: Tue, 9 Sep 2025 14:41:09 -0700
Subject: [PATCH 097/116] separate error handling consent from consent system

---
 cli/azd/cmd/middleware/error.go           | 123 +++++++++++++++-------
 cli/azd/internal/agent/consent/checker.go |  93 ----------------
 cli/azd/internal/agent/consent/types.go   |   5 +-
 3 files changed, 85 insertions(+), 136 deletions(-)

diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go
index 0850dda2a3a..c004d3f83a2 100644
--- a/cli/azd/cmd/middleware/error.go
+++ b/cli/azd/cmd/middleware/error.go
@@ -12,23 +12,24 @@ import (
 	"github.com/azure/azure-dev/cli/azd/cmd/actions"
 	"github.com/azure/azure-dev/cli/azd/internal"
 	"github.com/azure/azure-dev/cli/azd/internal/agent"
-	"github.com/azure/azure-dev/cli/azd/internal/agent/consent"
 	"github.com/azure/azure-dev/cli/azd/pkg/alpha"
+	"github.com/azure/azure-dev/cli/azd/pkg/config"
 	"github.com/azure/azure-dev/cli/azd/pkg/input"
 	"github.com/azure/azure-dev/cli/azd/pkg/llm"
 	"github.com/azure/azure-dev/cli/azd/pkg/output"
 	"github.com/azure/azure-dev/cli/azd/pkg/tools"
+	"github.com/azure/azure-dev/cli/azd/pkg/ux"
 	uxlib "github.com/azure/azure-dev/cli/azd/pkg/ux"
 	"github.com/fatih/color"
 )
 
 type ErrorMiddleware struct {
-	options         *Options
-	console         input.Console
-	agentFactory    *agent.AgentFactory
-	global          *internal.GlobalCommandOptions
-	featuresManager *alpha.FeatureManager
-	consentManager  consent.ConsentManager
+	options           *Options
+	console           input.Console
+	agentFactory      *agent.AgentFactory
+	global            *internal.GlobalCommandOptions
+	featuresManager   *alpha.FeatureManager
+	userConfigManager config.UserConfigManager
 }
 
 func NewErrorMiddleware(
@@ -36,15 +37,15 @@ func NewErrorMiddleware(
 	agentFactory *agent.AgentFactory,
 	global *internal.GlobalCommandOptions,
 	featuresManager *alpha.FeatureManager,
-	consentManager consent.ConsentManager,
+	userConfigManager config.UserConfigManager,
 ) Middleware {
 	return &ErrorMiddleware{
-		options:         options,
-		console:         console,
-		agentFactory:    agentFactory,
-		global:          global,
-		featuresManager: featuresManager,
-		consentManager:  consentManager,
+		options:           options,
+		console:           console,
+		agentFactory:      agentFactory,
+		global:            global,
+		featuresManager:   featuresManager,
+		userConfigManager: userConfigManager,
 	}
 }
 
@@ -119,7 +120,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action
 	errorInput := originalError.Error()
 
-	confirm, err := e.checkErrorHandlingConsent(ctx, "troubleshooting_steps", "azd", fmt.Sprintf("Generate troubleshooting steps using %s?", agentName), true)
+	confirm, err := e.checkErrorHandlingConsent(ctx, "troubleshooting_steps", fmt.Sprintf("Generate troubleshooting steps using %s?", agentName), true)
 	if err != nil {
 		return nil, fmt.Errorf("prompting to provide troubleshooting steps: %w", err)
 	}
@@ -153,10 +154,11 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action
 		Message:      "Debugger Ready?",
 		DefaultValue: true,
 	})
-	confirm, err = e.checkErrorHandlingConsent(ctx, "error_fix", "azd", fmt.Sprintf("Fix this error using %s?", agentName), true)
+	confirm, err = e.checkErrorHandlingConsent(ctx, "error_fix", fmt.Sprintf("Fix this error using %s?", agentName), false)
 	if err != nil {
 		return nil, fmt.Errorf("prompting to fix error using %s: %w", agentName, err)
 	}
+
 	if !confirm {
 		return actionResult, err
 	}
@@ -244,39 +246,80 @@ func (e *ErrorMiddleware) collectAndApplyFeedback(
 
 func (e *ErrorMiddleware) checkErrorHandlingConsent(
 	ctx
context.Context, - toolName string, - toolServer string, + promptName string, message string, skip bool, ) (bool, error) { - // Create a consent checker for this specific server - consentChecker := consent.NewConsentChecker(e.consentManager, toolServer) - - // Check error handling consent using the consent checker - decision, err := consentChecker.CheckErrorHandlingConsent(ctx, toolName) + userConfig, err := e.userConfigManager.Load() if err != nil { - return false, fmt.Errorf("consent check failed: %w", err) + return false, fmt.Errorf("failed to load user config: %w", err) } - if !decision.Allowed { - if decision.RequiresPrompt { - // Use console.DoInteraction to show consent prompt - if err := e.console.DoInteraction(func() error { - return consentChecker.PromptAndGrantErrorHandlingConsent( - ctx, - toolName, - message, - skip, - ) - }); err != nil { - return false, err - } - } else { - if !skip { - return false, fmt.Errorf("error handling prompt denied: %s", decision.Reason) + if exists, ok := userConfig.GetString(promptName); !ok && exists == "" { + choice, err := promptForErrorHandlingConsent(ctx, message, skip) + if err != nil { + return false, fmt.Errorf("prompting for error handling consent: %w", err) + } + + if choice == "skip" || choice == "deny" { + return false, nil + } + + if choice == "always" { + if err := userConfig.Set(promptName, "allow"); err != nil { + return false, fmt.Errorf("failed to set consent config: %w", err) } } } return true, nil } + +func promptForErrorHandlingConsent( + ctx context.Context, + message string, + skip bool, +) (string, error) { + choices := []*ux.SelectChoice{ + { + Value: "once", + Label: "Yes, allow once", + }, + { + Value: "always", + Label: "Yes, allow always", + }, + } + + if skip { + choices = append(choices, &ux.SelectChoice{ + Value: "skip", + Label: "No, skip to next step", + }) + } else { + choices = append(choices, &ux.SelectChoice{ + Value: "deny", + Label: "No, cancel this interaction (esc)", + }) + } + + selector := ux.NewSelect(&ux.SelectOptions{ + Message: message, + HelpMessage: fmt.Sprintf("This action will run AI tools to generate troubleshooting steps. 
Edit permissions for AI tools anytime by running %s.", + output.WithHighLightFormat("azd mcp")), + Choices: choices, + EnableFiltering: ux.Ptr(false), + DisplayCount: 5, + }) + + choiceIndex, err := selector.Ask(ctx) + if err != nil { + return "", err + } + + if choiceIndex == nil || *choiceIndex < 0 || *choiceIndex >= len(choices) { + return "", fmt.Errorf("invalid choice selected") + } + + return choices[*choiceIndex].Value, nil +} diff --git a/cli/azd/internal/agent/consent/checker.go b/cli/azd/internal/agent/consent/checker.go index 31dbd97513a..90380e665aa 100644 --- a/cli/azd/internal/agent/consent/checker.go +++ b/cli/azd/internal/agent/consent/checker.go @@ -15,7 +15,6 @@ import ( var ErrToolExecutionDenied = fmt.Errorf("tool execution denied by user") var ErrSamplingDenied = fmt.Errorf("sampling denied by user") -var ErrErrorHandlingDenied = fmt.Errorf("error handling workflow denied by user") // ConsentChecker provides shared consent checking logic for different tool types type ConsentChecker struct { @@ -70,23 +69,6 @@ func (cc *ConsentChecker) CheckSamplingConsent( return cc.consentMgr.CheckConsent(ctx, consentRequest) } -// CheckErrorHandlingConsent checks error handling consent for prompt -func (cc *ConsentChecker) CheckErrorHandlingConsent( - ctx context.Context, - toolName string, -) (*ConsentDecision, error) { - toolId := fmt.Sprintf("%s/%s", cc.serverName, toolName) - - // Create consent request for error handling - consentRequest := ConsentRequest{ - ToolID: toolId, - ServerName: cc.serverName, - Operation: OperationTypeErrorHandling, // This is an error handling request - } - - return cc.consentMgr.CheckConsent(ctx, consentRequest) -} - // PromptAndGrantConsent shows consent prompt and grants permission based on user choice func (cc *ConsentChecker) PromptAndGrantConsent( ctx context.Context, @@ -128,32 +110,6 @@ func (cc *ConsentChecker) PromptAndGrantSamplingConsent( return cc.grantConsentFromChoice(ctx, toolId, choice, OperationTypeSampling) } -// PromptAndGrantErrorHandlingConsent shows error handling consent prompt and grants permission based on user choice -func (cc *ConsentChecker) PromptAndGrantErrorHandlingConsent( - ctx context.Context, - toolName string, - message string, - skip bool, -) error { - toolId := fmt.Sprintf("%s/%s", cc.serverName, toolName) - - choice, err := cc.promptForErrorHandlingConsent(ctx, message, skip) - if err != nil { - return fmt.Errorf("error handling consent prompt failed: %w", err) - } - - if choice == "deny" { - return ErrErrorHandlingDenied - } - - if choice == "skip" { - return nil - } - - // Grant error handling consent based on user choice - return cc.grantConsentFromChoice(ctx, toolId, choice, OperationTypeErrorHandling) -} - // Private Struct Methods // formatToolDescriptionWithAnnotations creates a formatted description with tool annotations as bullet points @@ -430,55 +386,6 @@ func (cc *ConsentChecker) grantConsentFromChoice( return cc.consentMgr.GrantConsent(ctx, rule) } -func (cc *ConsentChecker) promptForErrorHandlingConsent( - ctx context.Context, - message string, - skip bool, -) (string, error) { - choices := []*ux.SelectChoice{ - { - Value: "once", - Label: "Yes, allow once", - }, - { - Value: "always", - Label: "Yes, allow always", - }, - } - - if skip { - choices = append(choices, &ux.SelectChoice{ - Value: "skip", - Label: "No, skip to next step", - }) - } else { - choices = append(choices, &ux.SelectChoice{ - Value: "deny", - Label: "No, cancel this interaction (esc)", - }) - } - - selector := 
ux.NewSelect(&ux.SelectOptions{ - Message: message, - HelpMessage: fmt.Sprintf("This action will run AI tools to generate troubleshooting steps. Edit permissions for AI tools anytime by running %s.", - output.WithHighLightFormat("azd mcp")), - Choices: choices, - EnableFiltering: ux.Ptr(false), - DisplayCount: 5, - }) - - choiceIndex, err := selector.Ask(ctx) - if err != nil { - return "", err - } - - if choiceIndex == nil || *choiceIndex < 0 || *choiceIndex >= len(choices) { - return "", fmt.Errorf("invalid choice selected") - } - - return choices[*choiceIndex].Value, nil -} - // promptForSamplingConsent shows an interactive sampling consent prompt and returns the user's choice func (cc *ConsentChecker) promptForSamplingConsent( ctx context.Context, diff --git a/cli/azd/internal/agent/consent/types.go b/cli/azd/internal/agent/consent/types.go index 98e8cd33eb9..e6de3dce5db 100644 --- a/cli/azd/internal/agent/consent/types.go +++ b/cli/azd/internal/agent/consent/types.go @@ -36,9 +36,8 @@ const ( type OperationType string const ( - OperationTypeTool OperationType = "tool" // running tools - OperationTypeSampling OperationType = "sampling" // sampling requests - OperationTypeErrorHandling OperationType = "error" // running agents for error handling + OperationTypeTool OperationType = "tool" // running tools + OperationTypeSampling OperationType = "sampling" // sampling requests ) // Permission is the consent outcome for a rule From 3f3a8afc57d3682f3da61b5cb342c779c170e2b7 Mon Sep 17 00:00:00 2001 From: hemarina Date: Tue, 9 Sep 2025 15:13:30 -0700 Subject: [PATCH 098/116] clean up and lll --- cli/azd/cmd/middleware/error.go | 28 ++++++++----- .../internal/mcp/tools/azd_common_error.go | 39 ------------------- .../mcp/tools/prompts/azd_common_error.md | 14 ------- cli/azd/internal/mcp/tools/prompts/prompts.go | 3 -- 4 files changed, 19 insertions(+), 65 deletions(-) delete mode 100644 cli/azd/internal/mcp/tools/azd_common_error.go delete mode 100644 cli/azd/internal/mcp/tools/prompts/azd_common_error.md diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index c004d3f83a2..414e0413d43 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -18,7 +18,6 @@ import ( "github.com/azure/azure-dev/cli/azd/pkg/llm" "github.com/azure/azure-dev/cli/azd/pkg/output" "github.com/azure/azure-dev/cli/azd/pkg/tools" - "github.com/azure/azure-dev/cli/azd/pkg/ux" uxlib "github.com/azure/azure-dev/cli/azd/pkg/ux" "github.com/fatih/color" ) @@ -120,7 +119,12 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action errorInput := originalError.Error() - confirm, err := e.checkErrorHandlingConsent(ctx, "troubleshooting_steps", fmt.Sprintf("Generate troubleshooting steps using %s?", agentName), true) + confirm, err := e.checkErrorHandlingConsent( + ctx, + "troubleshooting_steps", + fmt.Sprintf("Generate troubleshooting steps using %s?", agentName), + true, + ) if err != nil { return nil, fmt.Errorf("prompting to provide troubleshooting steps: %w", err) } @@ -154,7 +158,12 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action Message: "Debugger Ready?", DefaultValue: true, }) - confirm, err = e.checkErrorHandlingConsent(ctx, "error_fix", fmt.Sprintf("Fix this error using %s?", agentName), false) + confirm, err = e.checkErrorHandlingConsent( + ctx, + "error_fix", + fmt.Sprintf("Fix this error using %s?", agentName), + false, + ) if err != nil { return nil, fmt.Errorf("prompting to fix error using %s: %w", 
agentName, err) } @@ -280,7 +289,7 @@ func promptForErrorHandlingConsent( message string, skip bool, ) (string, error) { - choices := []*ux.SelectChoice{ + choices := []*uxlib.SelectChoice{ { Value: "once", Label: "Yes, allow once", @@ -292,23 +301,24 @@ func promptForErrorHandlingConsent( } if skip { - choices = append(choices, &ux.SelectChoice{ + choices = append(choices, &uxlib.SelectChoice{ Value: "skip", Label: "No, skip to next step", }) } else { - choices = append(choices, &ux.SelectChoice{ + choices = append(choices, &uxlib.SelectChoice{ Value: "deny", Label: "No, cancel this interaction (esc)", }) } - selector := ux.NewSelect(&ux.SelectOptions{ + selector := uxlib.NewSelect(&uxlib.SelectOptions{ Message: message, - HelpMessage: fmt.Sprintf("This action will run AI tools to generate troubleshooting steps. Edit permissions for AI tools anytime by running %s.", + HelpMessage: fmt.Sprintf("This action will run AI tools to generate troubleshooting steps."+ + " Edit permissions for AI tools anytime by running %s.", output.WithHighLightFormat("azd mcp")), Choices: choices, - EnableFiltering: ux.Ptr(false), + EnableFiltering: uxlib.Ptr(false), DisplayCount: 5, }) diff --git a/cli/azd/internal/mcp/tools/azd_common_error.go b/cli/azd/internal/mcp/tools/azd_common_error.go deleted file mode 100644 index dee3ce42671..00000000000 --- a/cli/azd/internal/mcp/tools/azd_common_error.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package tools - -import ( - "context" - - "github.com/azure/azure-dev/cli/azd/internal/mcp/tools/prompts" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" -) - -// NewAzdCommonErrorTool creates a new azd common error tool -func NewAzdCommonErrorTool() server.ServerTool { - return server.ServerTool{ - Tool: mcp.NewTool( - "azd_common_error", - mcp.WithReadOnlyHintAnnotation(true), - mcp.WithIdempotentHintAnnotation(true), - mcp.WithDestructiveHintAnnotation(false), - mcp.WithOpenWorldHintAnnotation(false), - mcp.WithDescription( - `Returns instructions for diagnosing common error type and providing suggested actions for resolution. - -The LLM agent should execute these instructions using available tools. 
- -Use this tool when: -- Need to identify the type of error and get actionable suggestions -- Ready to troubleshoot errors`, - ), - ), - Handler: handleAzdCommonError, - } -} - -func handleAzdCommonError(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return mcp.NewToolResultText(prompts.AzdCommonErrorPrompt), nil -} diff --git a/cli/azd/internal/mcp/tools/prompts/azd_common_error.md b/cli/azd/internal/mcp/tools/prompts/azd_common_error.md deleted file mode 100644 index 745fddd7146..00000000000 --- a/cli/azd/internal/mcp/tools/prompts/azd_common_error.md +++ /dev/null @@ -1,14 +0,0 @@ -# AZD Provision Common Error Resolution Instructions - -✅ **Agent Task List** - - -📄 **Required Outputs** - -- -🧠 **Execution Guidelines** - - -📌 **Completion Checklist** - -- [ ] Error message clearly understood and root cause identified \ No newline at end of file diff --git a/cli/azd/internal/mcp/tools/prompts/prompts.go b/cli/azd/internal/mcp/tools/prompts/prompts.go index 8762ecedcba..d7eadbe7bde 100644 --- a/cli/azd/internal/mcp/tools/prompts/prompts.go +++ b/cli/azd/internal/mcp/tools/prompts/prompts.go @@ -31,8 +31,5 @@ var AzdDockerGenerationPrompt string //go:embed azd_project_validation.md var AzdProjectValidationPrompt string -//go:embed azd_common_error.md -var AzdCommonErrorPrompt string - //go:embed azd_error_troubleshooting.md var AzdErrorTroubleShootingPrompt string From df6ff527c9ab2830e70331db23644059d5fcdd20 Mon Sep 17 00:00:00 2001 From: hemarina Date: Tue, 9 Sep 2025 15:59:49 -0700 Subject: [PATCH 099/116] fix bug for prompt consent --- cli/azd/cmd/middleware/error.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index 414e0413d43..4d4aaddb2f2 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -121,7 +121,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action confirm, err := e.checkErrorHandlingConsent( ctx, - "troubleshooting_steps", + "mcp.error.troubleshooting", fmt.Sprintf("Generate troubleshooting steps using %s?", agentName), true, ) @@ -160,7 +160,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action }) confirm, err = e.checkErrorHandlingConsent( ctx, - "error_fix", + "mcp.error.fix", fmt.Sprintf("Fix this error using %s?", agentName), false, ) @@ -278,6 +278,10 @@ func (e *ErrorMiddleware) checkErrorHandlingConsent( if err := userConfig.Set(promptName, "allow"); err != nil { return false, fmt.Errorf("failed to set consent config: %w", err) } + + if err := e.userConfigManager.Save(userConfig); err != nil { + return false, err + } } } From aea821587fdbb24de5cd5573933bd6b8c65e9081 Mon Sep 17 00:00:00 2001 From: hemarina Date: Tue, 9 Sep 2025 16:01:03 -0700 Subject: [PATCH 100/116] minor description update --- cli/azd/cmd/middleware/error.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index 4d4aaddb2f2..2d729cb9f1d 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -121,7 +121,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action confirm, err := e.checkErrorHandlingConsent( ctx, - "mcp.error.troubleshooting", + "mcp.errorHandling.troubleshooting", fmt.Sprintf("Generate troubleshooting steps using %s?", agentName), true, ) @@ -160,7 +160,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next 
NextFn) (*actions.Action }) confirm, err = e.checkErrorHandlingConsent( ctx, - "mcp.error.fix", + "mcp.errorHandling.fix", fmt.Sprintf("Fix this error using %s?", agentName), false, ) From 545bf7cb73b4268ce9828905367d5f0da435c289 Mon Sep 17 00:00:00 2001 From: hemarina Date: Tue, 9 Sep 2025 17:35:27 -0700 Subject: [PATCH 101/116] fileschange printed fix --- cli/azd/cmd/init.go | 4 +- cli/azd/internal/agent/agent.go | 3 +- .../internal/agent/conversational_agent.go | 50 +++++++++++++------ 3 files changed, 38 insertions(+), 19 deletions(-) diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go index 95045b8843d..f2b714e6e7f 100644 --- a/cli/azd/cmd/init.go +++ b/cli/azd/cmd/init.go @@ -444,7 +444,7 @@ Do not stop until all tasks are complete and fully resolved. "Provide a very brief summary in markdown format that includes any files generated during this step.", }, "\n")) - agentOutput, err := azdAgent.SendMessage(ctx, fullTaskInput) + agentOutput, err := azdAgent.SendMessage(ctx, nil, fullTaskInput) if err != nil { if agentOutput != "" { i.console.Message(ctx, output.WithMarkdown(agentOutput)) @@ -508,7 +508,7 @@ func (i *initAction) collectAndApplyFeedback( if userInput != "" { i.console.Message(ctx, color.MagentaString("Feedback")) - feedbackOutput, err := azdAgent.SendMessage(ctx, userInput) + feedbackOutput, err := azdAgent.SendMessage(ctx, nil, userInput) if err != nil { if feedbackOutput != "" { i.console.Message(ctx, output.WithMarkdown(feedbackOutput)) diff --git a/cli/azd/internal/agent/agent.go b/cli/azd/internal/agent/agent.go index ab98e41ad4e..68a01452e46 100644 --- a/cli/azd/internal/agent/agent.go +++ b/cli/azd/internal/agent/agent.go @@ -10,6 +10,7 @@ import ( "github.com/azure/azure-dev/cli/azd/internal/agent/logging" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + "github.com/azure/azure-dev/cli/azd/pkg/input" "github.com/tmc/langchaingo/agents" "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/llms" @@ -34,7 +35,7 @@ type AgentCleanup func() error // Agent represents an AI agent that can execute tools and interact with language models. 
type Agent interface { // SendMessage sends a message to the agent and returns the response - SendMessage(ctx context.Context, args ...string) (string, error) + SendMessage(ctx context.Context, console input.Console, args ...string) (string, error) // Stop terminates the agent and performs any necessary cleanup Stop() error diff --git a/cli/azd/internal/agent/conversational_agent.go b/cli/azd/internal/agent/conversational_agent.go index 4a0e3277dfd..7eac6cd5ce6 100644 --- a/cli/azd/internal/agent/conversational_agent.go +++ b/cli/azd/internal/agent/conversational_agent.go @@ -15,6 +15,7 @@ import ( "time" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" + "github.com/azure/azure-dev/cli/azd/pkg/input" "github.com/azure/azure-dev/cli/azd/pkg/output" uxlib "github.com/azure/azure-dev/cli/azd/pkg/ux" "github.com/fatih/color" @@ -100,18 +101,23 @@ type FileChanges struct { } // SendMessage processes a single message through the agent and returns the response -func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, args ...string) (string, error) { +func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, console input.Console, args ...string) (string, error) { thoughtsCtx, cancelCtx := context.WithCancel(ctx) + var watcher *fsnotify.Watcher + var done chan bool + var mu sync.Mutex + var err error fileChanges := &FileChanges{ Created: make(map[string]bool), Modified: make(map[string]bool), Deleted: make(map[string]bool), } - var mu sync.Mutex - watcher, done, err := startWatcher(ctx, fileChanges, &mu) - if err != nil { - return "", fmt.Errorf("failed to start watcher: %w", err) + if console != nil { + watcher, done, err = startWatcher(ctx, fileChanges, &mu) + if err != nil { + return "", fmt.Errorf("failed to start watcher: %w", err) + } } cleanup, err := aai.renderThoughts(thoughtsCtx) @@ -121,9 +127,11 @@ func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, args ...st } defer func() { + if console != nil { + close(done) + watcher.Close() + } cleanup() - close(done) - watcher.Close() cancelCtx() }() @@ -132,7 +140,9 @@ func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, args ...st return "", err } - printChangedFiles(fileChanges, &mu) + if console != nil { + printChangedFiles(ctx, console, fileChanges, &mu) + } return output, nil } @@ -275,26 +285,34 @@ func watchRecursive(root string, watcher *fsnotify.Watcher) error { }) } -func printChangedFiles(fileChanges *FileChanges, mu *sync.Mutex) { +func printChangedFiles(ctx context.Context, console input.Console, fileChanges *FileChanges, mu *sync.Mutex) { mu.Lock() defer mu.Unlock() - fmt.Println(output.WithHintFormat("| Files changed:")) + createdFileLength := len(fileChanges.Created) + modifiedFileLength := len(fileChanges.Modified) + deletedFileLength := len(fileChanges.Deleted) + + if createdFileLength == 0 && modifiedFileLength == 0 && deletedFileLength == 0 { + return + } + + console.Message(ctx, output.WithHintFormat("| Files changed:")) - if len(fileChanges.Created) > 0 { + if createdFileLength > 0 { for file := range fileChanges.Created { - fmt.Println(output.WithHintFormat("| "), color.GreenString("+ Created "), file) + console.Message(ctx, fmt.Sprintf("%s %s %s", output.WithHintFormat("|"), color.GreenString("+ Created"), file)) } } - if len(fileChanges.Modified) > 0 { + if modifiedFileLength > 0 { for file := range fileChanges.Modified { - fmt.Println(output.WithHintFormat("| "), color.YellowString("+/- Modified "), file) + console.Message(ctx, 
fmt.Sprintf("%s %s %s", output.WithHintFormat("|"), color.YellowString("+/- Modified"), file)) } } - if len(fileChanges.Deleted) > 0 { + if deletedFileLength > 0 { for file := range fileChanges.Deleted { - fmt.Println(output.WithHintFormat("| "), color.RedString("- Deleted "), file) + console.Message(ctx, fmt.Sprintf("%s %s %s", output.WithHintFormat("|"), color.RedString("- Deleted"), file)) } } } From 2c01342b98f68676e9bba01ca7889759ff179a9b Mon Sep 17 00:00:00 2001 From: hemarina Date: Tue, 9 Sep 2025 17:47:13 -0700 Subject: [PATCH 102/116] fix azd.exe multiple calling at background bug --- cli/azd/cmd/middleware/error.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index 2d729cb9f1d..d9e58307421 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -58,6 +58,9 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action } actionResult, err = next(ctx) + if err == nil { + return actionResult, err + } attempt := 0 originalError := err @@ -73,6 +76,9 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action AIDisclaimer := output.WithHintFormat("The following content is AI-generated. AI responses may be incorrect.") agentName := "agent mode" + // Warn user that this is an alpha feature + e.console.WarnForFeature(ctx, llm.FeatureLlm) + azdAgent, err := e.agentFactory.Create( agent.WithDebug(e.global.EnableDebugLogging), ) @@ -114,9 +120,6 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action return actionResult, originalError } - // Warn user that this is an alpha feature - e.console.WarnForFeature(ctx, llm.FeatureLlm) - errorInput := originalError.Error() confirm, err := e.checkErrorHandlingConsent( @@ -131,7 +134,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action if confirm { // Provide manual steps for troubleshooting - agentOutput, err := azdAgent.SendMessage(ctx, fmt.Sprintf( + agentOutput, err := azdAgent.SendMessage(ctx, e.console, fmt.Sprintf( `Steps to follow: 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. 2. Provide actionable troubleshooting steps. Do not perform any file changes. @@ -173,7 +176,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action } previousError = originalError - agentOutput, err := azdAgent.SendMessage(ctx, fmt.Sprintf( + agentOutput, err := azdAgent.SendMessage(ctx, e.console, fmt.Sprintf( `Steps to follow: 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. 2. Resolve the error by making the minimal, targeted change required to the code or configuration. 
@@ -235,7 +238,7 @@ func (e *ErrorMiddleware) collectAndApplyFeedback( e.console.Message(ctx, "") e.console.Message(ctx, color.MagentaString("Feedback")) - feedbackOutput, err := azdAgent.SendMessage(ctx, userInput) + feedbackOutput, err := azdAgent.SendMessage(ctx, e.console, userInput) if err != nil { if feedbackOutput != "" { e.console.Message(ctx, AIDisclaimer) From 3f29682b06a9716b00b68424c6db4b41f0bdbe2f Mon Sep 17 00:00:00 2001 From: hemarina Date: Tue, 9 Sep 2025 17:49:29 -0700 Subject: [PATCH 103/116] nit --- cli/azd/cmd/middleware/error.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index d9e58307421..4eb02fb0d35 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -101,7 +101,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action if previousError != nil && errors.Is(originalError, previousError) { attempt++ - if attempt > 3 { + if attempt >= 3 { e.console.Message(ctx, fmt.Sprintf("Please review the error and fix it manually, "+ "%s was unable to resolve the error after multiple attempts.", agentName)) return actionResult, originalError From 5f940597f62b9df8f543cac444964a8d182919ad Mon Sep 17 00:00:00 2001 From: hemarina Date: Tue, 9 Sep 2025 18:03:21 -0700 Subject: [PATCH 104/116] remove debugger --- cli/azd/cmd/middleware/error.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index 4eb02fb0d35..02276bb86db 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -157,10 +157,6 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action } // Ask user if they want to let AI fix the - e.console.Confirm(ctx, input.ConsoleOptions{ - Message: "Debugger Ready?", - DefaultValue: true, - }) confirm, err = e.checkErrorHandlingConsent( ctx, "mcp.errorHandling.fix", From f0b927e33507a47e5a62cc8c60c00bebd2a51c07 Mon Sep 17 00:00:00 2001 From: hemarina Date: Tue, 9 Sep 2025 22:38:26 -0700 Subject: [PATCH 105/116] fix files changed, address comment --- cli/azd/cmd/init.go | 4 +- cli/azd/cmd/middleware/error.go | 9 +- cli/azd/internal/agent/agent.go | 3 +- .../internal/agent/conversational_agent.go | 139 ++---------------- cli/azd/pkg/watch/watch.go | 132 +++++++++++++++++ 5 files changed, 151 insertions(+), 136 deletions(-) create mode 100644 cli/azd/pkg/watch/watch.go diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go index f2b714e6e7f..7cf46ff21ff 100644 --- a/cli/azd/cmd/init.go +++ b/cli/azd/cmd/init.go @@ -444,7 +444,7 @@ Do not stop until all tasks are complete and fully resolved. 
"Provide a very brief summary in markdown format that includes any files generated during this step.", }, "\n")) - agentOutput, err := azdAgent.SendMessage(ctx, nil, fullTaskInput) + agentOutput, err := azdAgent.SendMessage(ctx, false, fullTaskInput) if err != nil { if agentOutput != "" { i.console.Message(ctx, output.WithMarkdown(agentOutput)) @@ -508,7 +508,7 @@ func (i *initAction) collectAndApplyFeedback( if userInput != "" { i.console.Message(ctx, color.MagentaString("Feedback")) - feedbackOutput, err := azdAgent.SendMessage(ctx, nil, userInput) + feedbackOutput, err := azdAgent.SendMessage(ctx, false, userInput) if err != nil { if feedbackOutput != "" { i.console.Message(ctx, output.WithMarkdown(feedbackOutput)) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index 02276bb86db..d1ed4617970 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -73,7 +73,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action "interrupt", "no project exists", } - AIDisclaimer := output.WithHintFormat("The following content is AI-generated. AI responses may be incorrect.") + AIDisclaimer := output.WithGrayFormat("The following content is AI-generated. AI responses may be incorrect.") agentName := "agent mode" // Warn user that this is an alpha feature @@ -113,7 +113,6 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action if errors.As(originalError, &errorWithTraceId) { e.console.Message(ctx, output.WithErrorFormat("TraceID: %s", errorWithTraceId.TraceId)) } - if errors.As(originalError, &suggestionErr) { suggestion = suggestionErr.Suggestion e.console.Message(ctx, suggestion) @@ -134,7 +133,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action if confirm { // Provide manual steps for troubleshooting - agentOutput, err := azdAgent.SendMessage(ctx, e.console, fmt.Sprintf( + agentOutput, err := azdAgent.SendMessage(ctx, true, fmt.Sprintf( `Steps to follow: 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. 2. Provide actionable troubleshooting steps. Do not perform any file changes. @@ -172,7 +171,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action } previousError = originalError - agentOutput, err := azdAgent.SendMessage(ctx, e.console, fmt.Sprintf( + agentOutput, err := azdAgent.SendMessage(ctx, true, fmt.Sprintf( `Steps to follow: 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. 2. Resolve the error by making the minimal, targeted change required to the code or configuration. 
@@ -234,7 +233,7 @@ func (e *ErrorMiddleware) collectAndApplyFeedback( e.console.Message(ctx, "") e.console.Message(ctx, color.MagentaString("Feedback")) - feedbackOutput, err := azdAgent.SendMessage(ctx, e.console, userInput) + feedbackOutput, err := azdAgent.SendMessage(ctx, true, userInput) if err != nil { if feedbackOutput != "" { e.console.Message(ctx, AIDisclaimer) diff --git a/cli/azd/internal/agent/agent.go b/cli/azd/internal/agent/agent.go index 68a01452e46..214d3e24132 100644 --- a/cli/azd/internal/agent/agent.go +++ b/cli/azd/internal/agent/agent.go @@ -10,7 +10,6 @@ import ( "github.com/azure/azure-dev/cli/azd/internal/agent/logging" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" - "github.com/azure/azure-dev/cli/azd/pkg/input" "github.com/tmc/langchaingo/agents" "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/llms" @@ -35,7 +34,7 @@ type AgentCleanup func() error // Agent represents an AI agent that can execute tools and interact with language models. type Agent interface { // SendMessage sends a message to the agent and returns the response - SendMessage(ctx context.Context, console input.Console, args ...string) (string, error) + SendMessage(ctx context.Context, useWatch bool, args ...string) (string, error) // Stop terminates the agent and performs any necessary cleanup Stop() error diff --git a/cli/azd/internal/agent/conversational_agent.go b/cli/azd/internal/agent/conversational_agent.go index 7eac6cd5ce6..9aa25038b39 100644 --- a/cli/azd/internal/agent/conversational_agent.go +++ b/cli/azd/internal/agent/conversational_agent.go @@ -7,17 +7,13 @@ import ( "context" _ "embed" "fmt" - "log" - "os" - "path/filepath" "strings" "sync" "time" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" - "github.com/azure/azure-dev/cli/azd/pkg/input" - "github.com/azure/azure-dev/cli/azd/pkg/output" uxlib "github.com/azure/azure-dev/cli/azd/pkg/ux" + "github.com/azure/azure-dev/cli/azd/pkg/watch" "github.com/fatih/color" "github.com/fsnotify/fsnotify" "github.com/tmc/langchaingo/agents" @@ -94,27 +90,18 @@ func NewConversationalAzdAiAgent(llm llms.Model, opts ...AgentCreateOption) (Age return azdAgent, nil } -type FileChanges struct { - Created map[string]bool - Modified map[string]bool - Deleted map[string]bool -} - // SendMessage processes a single message through the agent and returns the response -func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, console input.Console, args ...string) (string, error) { +func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, useWatch bool, args ...string) (string, error) { thoughtsCtx, cancelCtx := context.WithCancel(ctx) + var watcher *fsnotify.Watcher var done chan bool var mu sync.Mutex - var err error - fileChanges := &FileChanges{ - Created: make(map[string]bool), - Modified: make(map[string]bool), - Deleted: make(map[string]bool), - } + var fileChanges *watch.FileChanges - if console != nil { - watcher, done, err = startWatcher(ctx, fileChanges, &mu) + if useWatch { + var err error + watcher, done, fileChanges, err = watch.StartWatcher(ctx, &mu) if err != nil { return "", fmt.Errorf("failed to start watcher: %w", err) } @@ -127,12 +114,14 @@ func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, console in } defer func() { - if console != nil { + cleanup() + cancelCtx() + + if useWatch { + watch.PrintChangedFiles(ctx, fileChanges, &mu) close(done) watcher.Close() } - cleanup() - cancelCtx() }() output, err := chains.Run(ctx, aai.executor, 
strings.Join(args, "\n")) @@ -140,10 +129,6 @@ func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, console in return "", err } - if console != nil { - printChangedFiles(ctx, console, fileChanges, &mu) - } - return output, nil } @@ -216,103 +201,3 @@ func (aai *ConversationalAzdAiAgent) renderThoughts(ctx context.Context) (func() return cleanup, canvas.Run() } - -func startWatcher(ctx context.Context, fileChanges *FileChanges, mu *sync.Mutex) (*fsnotify.Watcher, chan bool, error) { - watcher, err := fsnotify.NewWatcher() - if err != nil { - return nil, nil, fmt.Errorf("failed to create watcher: %w", err) - } - - done := make(chan bool) - - go func() { - for { - select { - case event := <-watcher.Events: - mu.Lock() - name := event.Name - switch { - case event.Has(fsnotify.Create): - fileChanges.Created[name] = true - case event.Has(fsnotify.Write) || event.Has(fsnotify.Rename): - if !fileChanges.Created[name] && !fileChanges.Deleted[name] { - fileChanges.Modified[name] = true - } - case event.Has(fsnotify.Remove): - if fileChanges.Created[name] { - delete(fileChanges.Created, name) - } else { - fileChanges.Deleted[name] = true - delete(fileChanges.Modified, name) - } - } - mu.Unlock() - case err := <-watcher.Errors: - log.Printf("watcher error: %v", err) - case <-done: - return - case <-ctx.Done(): - return - } - } - }() - - cwd, err := os.Getwd() - if err != nil { - return nil, nil, fmt.Errorf("failed to get current working directory: %w", err) - } - - if err := watchRecursive(cwd, watcher); err != nil { - return nil, nil, fmt.Errorf("watcher failed: %w", err) - } - - return watcher, done, nil -} - -func watchRecursive(root string, watcher *fsnotify.Watcher) error { - return filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if info.IsDir() { - err = watcher.Add(path) - if err != nil { - return fmt.Errorf("failed to watch directory %s: %w", path, err) - } - } - - return nil - }) -} - -func printChangedFiles(ctx context.Context, console input.Console, fileChanges *FileChanges, mu *sync.Mutex) { - mu.Lock() - defer mu.Unlock() - createdFileLength := len(fileChanges.Created) - modifiedFileLength := len(fileChanges.Modified) - deletedFileLength := len(fileChanges.Deleted) - - if createdFileLength == 0 && modifiedFileLength == 0 && deletedFileLength == 0 { - return - } - - console.Message(ctx, output.WithHintFormat("| Files changed:")) - - if createdFileLength > 0 { - for file := range fileChanges.Created { - console.Message(ctx, fmt.Sprintf("%s %s %s", output.WithHintFormat("|"), color.GreenString("+ Created"), file)) - } - } - - if modifiedFileLength > 0 { - for file := range fileChanges.Modified { - console.Message(ctx, fmt.Sprintf("%s %s %s", output.WithHintFormat("|"), color.YellowString("+/- Modified"), file)) - } - } - - if deletedFileLength > 0 { - for file := range fileChanges.Deleted { - console.Message(ctx, fmt.Sprintf("%s %s %s", output.WithHintFormat("|"), color.RedString("- Deleted"), file)) - } - } -} diff --git a/cli/azd/pkg/watch/watch.go b/cli/azd/pkg/watch/watch.go new file mode 100644 index 00000000000..e1973bacfc6 --- /dev/null +++ b/cli/azd/pkg/watch/watch.go @@ -0,0 +1,132 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package watch + +import ( + "context" + _ "embed" + "fmt" + "log" + "os" + "path/filepath" + "sync" + + "github.com/azure/azure-dev/cli/azd/pkg/output" + "github.com/fatih/color" + "github.com/fsnotify/fsnotify" +) + +type FileChanges struct { + Created map[string]bool + Modified map[string]bool + Deleted map[string]bool +} + +func StartWatcher(ctx context.Context, mu *sync.Mutex) (*fsnotify.Watcher, chan bool, *FileChanges, error) { + fileChanges := &FileChanges{ + Created: make(map[string]bool), + Modified: make(map[string]bool), + Deleted: make(map[string]bool), + } + + watcher, err := fsnotify.NewWatcher() + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to create watcher: %w", err) + } + + done := make(chan bool) + + go func() { + for { + select { + case event := <-watcher.Events: + mu.Lock() + name := event.Name + switch { + case event.Has(fsnotify.Create): + fileChanges.Created[name] = true + case event.Has(fsnotify.Write) || event.Has(fsnotify.Rename): + if !fileChanges.Created[name] && !fileChanges.Deleted[name] { + fileChanges.Modified[name] = true + } + case event.Has(fsnotify.Remove): + if fileChanges.Created[name] { + delete(fileChanges.Created, name) + } else { + fileChanges.Deleted[name] = true + delete(fileChanges.Modified, name) + } + } + mu.Unlock() + case err := <-watcher.Errors: + log.Printf("watcher error: %v", err) + case <-done: + return + case <-ctx.Done(): + return + } + } + }() + + cwd, err := os.Getwd() + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to get current working directory: %w", err) + } + + if err := watchRecursive(cwd, watcher); err != nil { + return nil, nil, nil, fmt.Errorf("watcher failed: %w", err) + } + + return watcher, done, fileChanges, nil +} + +func watchRecursive(root string, watcher *fsnotify.Watcher) error { + return filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + err = watcher.Add(path) + if err != nil { + return fmt.Errorf("failed to watch directory %s: %w", path, err) + } + } + + return nil + }) +} + +func PrintChangedFiles(ctx context.Context, fileChanges *FileChanges, mu *sync.Mutex) { + mu.Lock() + defer mu.Unlock() + createdFileLength := len(fileChanges.Created) + modifiedFileLength := len(fileChanges.Modified) + deletedFileLength := len(fileChanges.Deleted) + + if createdFileLength == 0 && modifiedFileLength == 0 && deletedFileLength == 0 { + return + } + + fmt.Println(output.WithGrayFormat("\n| Files changed:")) + + if createdFileLength > 0 { + for file := range fileChanges.Created { + fmt.Println(output.WithGrayFormat("| "), color.GreenString("+ Created "), file) + } + } + + if modifiedFileLength > 0 { + for file := range fileChanges.Modified { + fmt.Println(output.WithGrayFormat("| "), color.YellowString("+/- Modified "), file) + } + } + + if deletedFileLength > 0 { + for file := range fileChanges.Deleted { + fmt.Println(output.WithGrayFormat("| "), color.RedString("- Deleted "), file) + } + } + + fmt.Println("") +} From e23a0cdc5913d3f70c864e67ce44d4e6fae71731 Mon Sep 17 00:00:00 2001 From: hemarina Date: Tue, 9 Sep 2025 22:46:46 -0700 Subject: [PATCH 106/116] fix golangci-lint warning --- cli/azd/internal/agent/conversational_agent.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cli/azd/internal/agent/conversational_agent.go b/cli/azd/internal/agent/conversational_agent.go index 9aa25038b39..be718ae253c 100644 --- a/cli/azd/internal/agent/conversational_agent.go +++ 
b/cli/azd/internal/agent/conversational_agent.go @@ -93,6 +93,7 @@ func NewConversationalAzdAiAgent(llm llms.Model, opts ...AgentCreateOption) (Age // SendMessage processes a single message through the agent and returns the response func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, useWatch bool, args ...string) (string, error) { thoughtsCtx, cancelCtx := context.WithCancel(ctx) + defer cancelCtx() var watcher *fsnotify.Watcher var done chan bool @@ -109,13 +110,11 @@ func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, useWatch b cleanup, err := aai.renderThoughts(thoughtsCtx) if err != nil { - cancelCtx() return "", err } defer func() { cleanup() - cancelCtx() if useWatch { watch.PrintChangedFiles(ctx, fileChanges, &mu) From 0efbe50b311b936c27961c2468f8d24329a842af Mon Sep 17 00:00:00 2001 From: hemarina Date: Tue, 9 Sep 2025 23:13:54 -0700 Subject: [PATCH 107/116] minor UX fix --- cli/azd/pkg/watch/watch.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cli/azd/pkg/watch/watch.go b/cli/azd/pkg/watch/watch.go index e1973bacfc6..a44de61183d 100644 --- a/cli/azd/pkg/watch/watch.go +++ b/cli/azd/pkg/watch/watch.go @@ -118,7 +118,7 @@ func PrintChangedFiles(ctx context.Context, fileChanges *FileChanges, mu *sync.M if modifiedFileLength > 0 { for file := range fileChanges.Modified { - fmt.Println(output.WithGrayFormat("| "), color.YellowString("+/- Modified "), file) + fmt.Println(output.WithGrayFormat("| "), color.YellowString(output.WithUnderline("+")), color.YellowString("Modified "), file) } } From 5221d60448a6e4df5b3dd9c00cfe357f9ff4e067 Mon Sep 17 00:00:00 2001 From: hemarina Date: Wed, 10 Sep 2025 09:34:32 -0700 Subject: [PATCH 108/116] lll --- cli/azd/pkg/watch/watch.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cli/azd/pkg/watch/watch.go b/cli/azd/pkg/watch/watch.go index a44de61183d..8017647f3fa 100644 --- a/cli/azd/pkg/watch/watch.go +++ b/cli/azd/pkg/watch/watch.go @@ -118,7 +118,8 @@ func PrintChangedFiles(ctx context.Context, fileChanges *FileChanges, mu *sync.M if modifiedFileLength > 0 { for file := range fileChanges.Modified { - fmt.Println(output.WithGrayFormat("| "), color.YellowString(output.WithUnderline("+")), color.YellowString("Modified "), file) + fmt.Println(output.WithGrayFormat("| "), color.YellowString(output.WithUnderline("+")), + color.YellowString("Modified "), file) } } From d8e9f6161899a72835e03c9caace182ea23c78cc Mon Sep 17 00:00:00 2001 From: hemarina Date: Wed, 10 Sep 2025 11:37:50 -0700 Subject: [PATCH 109/116] fix spinner UX bug for "Comparing deployment state" running forever --- cli/azd/cmd/middleware/error.go | 9 ++++++++- cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go | 2 -- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index d1ed4617970..c4af190df96 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -54,7 +54,11 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action if e.featuresManager.IsEnabled(llm.FeatureLlm) { if e.options.IsChildAction(ctx) { - return next(ctx) + actionResult, err = next(ctx) + if err != nil { + e.console.StopSpinner(ctx, "", input.Step) + } + return actionResult, err } actionResult, err = next(ctx) @@ -62,6 +66,9 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action return actionResult, err } + // Stop the spinner always to un-hide cursor + e.console.StopSpinner(ctx, 
"", input.Step) + attempt := 0 originalError := err suggestion := "" diff --git a/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go b/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go index 29159e27928..4fd87682b13 100644 --- a/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go +++ b/cli/azd/pkg/infra/provisioning/bicep/bicep_provider.go @@ -444,8 +444,6 @@ func (p *BicepProvider) deploymentState( currentParamsHash string, ) (*azapi.ResourceDeployment, error) { p.console.ShowSpinner(ctx, "Comparing deployment state", input.Step) - // Find the call stack and the right last message - defer p.console.StopSpinner(ctx, "", input.Step) prevDeploymentResult, err := p.latestDeploymentResult(ctx, scope) if err != nil { return nil, fmt.Errorf("deployment state error: %w", err) From a431fd401fde5b17fa93888f5afc2df1dc6f0b72 Mon Sep 17 00:00:00 2001 From: hemarina Date: Wed, 10 Sep 2025 12:21:08 -0700 Subject: [PATCH 110/116] UX update on troubleshoot steps --- .../tools/prompts/azd_error_troubleshooting.md | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md b/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md index bb8ae1273f4..98cd7a962a5 100644 --- a/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md +++ b/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md @@ -3,18 +3,20 @@ ✅ **Agent Task List** 1. **Error Classification:** Identify the specific error type (Azure REST API, ARM Deployment, Authentication, Local Tool Installation or General) -2. **Error Analysis:** Explain and diagnose what the error means and its root causes. Note that this error occurs when running Azure Developer CLI -3. **Troubleshooting Steps:** Based on error type (Azure REST API Response Errors, Azure ARM Deployment Errors, Azure Authentication Errors, Local Tool Installation Errors, and General AZD Errors), find the Troubleshooting Approach below and provide troubleshooting approach +2. **Error Analysis:** Explain and diagnose what the error means and its root causes under the sections of "What's happening" and "Why it's happening". Note that this error occurs when running Azure Developer CLI +3. **Troubleshooting Steps:** Based on error type (Azure REST API Response Errors, Azure ARM Deployment Errors, Azure Authentication Errors, Local Tool Installation Errors, and General AZD Errors), find the Troubleshooting Approach below and provide troubleshooting approach under the section of "How to fix it" 4. **Resolution Confirmation:** Ensure the issue is fully resolved. 
If issue still exists, retry the task list to fix the error 📄 **Required Outputs** -- Clear error explanation and root cause analysis -- Step-by-step troubleshooting instructions -- Specific infrastructure code fixes for Bicep or Terraform files based on user usage if needed -- Azure Portal navigation instructions for verification -- Azure CLI commands for validation and testing if needed when user installed Azure CLI -- Actionable next steps for resolution +- Clear error explanation under the sections of "What's happening" +- Clear root cause analysis under the sections of "Why it's happening" +- Under the section of "How to fix it": + - Step-by-step troubleshooting instructions + - Specific infrastructure code fixes for Bicep or Terraform files based on user usage if needed + - Azure Portal navigation instructions for verification + - Azure CLI commands for validation and testing if needed when user installed Azure CLI + - Actionable next steps for resolution 🧠 **Execution Guidelines** From a5f21f129a096bfc73dd3df8502cfcea345097e6 Mon Sep 17 00:00:00 2001 From: hemarina Date: Wed, 10 Sep 2025 12:25:38 -0700 Subject: [PATCH 111/116] minor ux update --- cli/azd/pkg/watch/watch.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cli/azd/pkg/watch/watch.go b/cli/azd/pkg/watch/watch.go index 8017647f3fa..7fe29f29c6d 100644 --- a/cli/azd/pkg/watch/watch.go +++ b/cli/azd/pkg/watch/watch.go @@ -112,7 +112,7 @@ func PrintChangedFiles(ctx context.Context, fileChanges *FileChanges, mu *sync.M if createdFileLength > 0 { for file := range fileChanges.Created { - fmt.Println(output.WithGrayFormat("| "), color.GreenString("+ Created "), file) + fmt.Println(output.WithGrayFormat("| "), color.GreenString("+ Created "), file) } } @@ -125,7 +125,7 @@ func PrintChangedFiles(ctx context.Context, fileChanges *FileChanges, mu *sync.M if deletedFileLength > 0 { for file := range fileChanges.Deleted { - fmt.Println(output.WithGrayFormat("| "), color.RedString("- Deleted "), file) + fmt.Println(output.WithGrayFormat("| "), color.RedString("- Deleted "), file) } } From d6be32e6375a7057f302d1d9a6e0d00e475b197f Mon Sep 17 00:00:00 2001 From: hemarina Date: Wed, 10 Sep 2025 19:39:48 -0700 Subject: [PATCH 112/116] minor ux updates --- cli/azd/cmd/middleware/error.go | 20 +++++++++++++------ .../prompts/azd_error_troubleshooting.md | 9 ++++----- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index c4af190df96..5ee6a25d340 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -128,10 +128,14 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action errorInput := originalError.Error() + e.console.Message(ctx, "") confirm, err := e.checkErrorHandlingConsent( ctx, "mcp.errorHandling.troubleshooting", fmt.Sprintf("Generate troubleshooting steps using %s?", agentName), + fmt.Sprintf("This action will run AI tools to generate troubleshooting steps."+ + " Edit permissions for AI tools anytime by running %s.", + output.WithHighLightFormat("azd mcp consent")), true, ) if err != nil { @@ -142,7 +146,8 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action // Provide manual steps for troubleshooting agentOutput, err := azdAgent.SendMessage(ctx, true, fmt.Sprintf( `Steps to follow: - 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. + 1. 
Use available tool including azd_error_troubleshooting tool to identify and explain the error. + Diagnose its root cause when running azd command. 2. Provide actionable troubleshooting steps. Do not perform any file changes. Error details: %s`, errorInput)) @@ -167,6 +172,9 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action ctx, "mcp.errorHandling.fix", fmt.Sprintf("Fix this error using %s?", agentName), + fmt.Sprintf("This action will run AI tools to help fix the error."+ + " Edit permissions for AI tools anytime by running %s.", + output.WithHighLightFormat("azd mcp consent")), false, ) if err != nil { @@ -262,6 +270,7 @@ func (e *ErrorMiddleware) checkErrorHandlingConsent( ctx context.Context, promptName string, message string, + helpMessage string, skip bool, ) (bool, error) { userConfig, err := e.userConfigManager.Load() @@ -270,7 +279,7 @@ func (e *ErrorMiddleware) checkErrorHandlingConsent( } if exists, ok := userConfig.GetString(promptName); !ok && exists == "" { - choice, err := promptForErrorHandlingConsent(ctx, message, skip) + choice, err := promptForErrorHandlingConsent(ctx, message, helpMessage, skip) if err != nil { return false, fmt.Errorf("prompting for error handling consent: %w", err) } @@ -296,6 +305,7 @@ func (e *ErrorMiddleware) checkErrorHandlingConsent( func promptForErrorHandlingConsent( ctx context.Context, message string, + helpMessage string, skip bool, ) (string, error) { choices := []*uxlib.SelectChoice{ @@ -322,10 +332,8 @@ func promptForErrorHandlingConsent( } selector := uxlib.NewSelect(&uxlib.SelectOptions{ - Message: message, - HelpMessage: fmt.Sprintf("This action will run AI tools to generate troubleshooting steps."+ - " Edit permissions for AI tools anytime by running %s.", - output.WithHighLightFormat("azd mcp")), + Message: message, + HelpMessage: helpMessage, Choices: choices, EnableFiltering: uxlib.Ptr(false), DisplayCount: 5, diff --git a/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md b/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md index 98cd7a962a5..eba3273ed71 100644 --- a/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md +++ b/cli/azd/internal/mcp/tools/prompts/azd_error_troubleshooting.md @@ -3,15 +3,14 @@ ✅ **Agent Task List** 1. **Error Classification:** Identify the specific error type (Azure REST API, ARM Deployment, Authentication, Local Tool Installation or General) -2. **Error Analysis:** Explain and diagnose what the error means and its root causes under the sections of "What's happening" and "Why it's happening". Note that this error occurs when running Azure Developer CLI -3. **Troubleshooting Steps:** Based on error type (Azure REST API Response Errors, Azure ARM Deployment Errors, Azure Authentication Errors, Local Tool Installation Errors, and General AZD Errors), find the Troubleshooting Approach below and provide troubleshooting approach under the section of "How to fix it" +2. **Error Analysis:** Explain and diagnose what the error means and its root causes. Note that this error occurs when running Azure Developer CLI +3. **Troubleshooting Steps:** Based on error type (Azure REST API Response Errors, Azure ARM Deployment Errors, Azure Authentication Errors, Local Tool Installation Errors, and General AZD Errors), find the Troubleshooting Approach below and provide troubleshooting approach 4. **Resolution Confirmation:** Ensure the issue is fully resolved. 
If issue still exists, retry the task list to fix the error 📄 **Required Outputs** -- Clear error explanation under the sections of "What's happening" -- Clear root cause analysis under the sections of "Why it's happening" -- Under the section of "How to fix it": +- Clear error explanation and root cause analysis where error explanation will be under a section of "What's happening" and root cause analysis under a section of "Why it's happening" +- Provide troubleshooting steps under a section of "How to fix it": - Step-by-step troubleshooting instructions - Specific infrastructure code fixes for Bicep or Terraform files based on user usage if needed - Azure Portal navigation instructions for verification From bbc17698b4e1df87b06c9a2865f534adda5f619e Mon Sep 17 00:00:00 2001 From: hemarina Date: Thu, 11 Sep 2025 13:54:22 -0700 Subject: [PATCH 113/116] address feedback 1 --- cli/azd/cmd/init.go | 4 ++-- cli/azd/cmd/middleware/error.go | 7 ++++--- cli/azd/internal/agent/agent.go | 26 +++++++++++++++++--------- 3 files changed, 23 insertions(+), 14 deletions(-) diff --git a/cli/azd/cmd/init.go b/cli/azd/cmd/init.go index 7cf46ff21ff..95045b8843d 100644 --- a/cli/azd/cmd/init.go +++ b/cli/azd/cmd/init.go @@ -444,7 +444,7 @@ Do not stop until all tasks are complete and fully resolved. "Provide a very brief summary in markdown format that includes any files generated during this step.", }, "\n")) - agentOutput, err := azdAgent.SendMessage(ctx, false, fullTaskInput) + agentOutput, err := azdAgent.SendMessage(ctx, fullTaskInput) if err != nil { if agentOutput != "" { i.console.Message(ctx, output.WithMarkdown(agentOutput)) @@ -508,7 +508,7 @@ func (i *initAction) collectAndApplyFeedback( if userInput != "" { i.console.Message(ctx, color.MagentaString("Feedback")) - feedbackOutput, err := azdAgent.SendMessage(ctx, false, userInput) + feedbackOutput, err := azdAgent.SendMessage(ctx, userInput) if err != nil { if feedbackOutput != "" { i.console.Message(ctx, output.WithMarkdown(feedbackOutput)) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index 5ee6a25d340..1ddf5d36918 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -88,6 +88,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action azdAgent, err := e.agentFactory.Create( agent.WithDebug(e.global.EnableDebugLogging), + agent.WithFileWatching(true), ) if err != nil { return nil, err @@ -144,7 +145,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action if confirm { // Provide manual steps for troubleshooting - agentOutput, err := azdAgent.SendMessage(ctx, true, fmt.Sprintf( + agentOutput, err := azdAgent.SendMessage(ctx, fmt.Sprintf( `Steps to follow: 1. Use available tool including azd_error_troubleshooting tool to identify and explain the error. Diagnose its root cause when running azd command. @@ -186,7 +187,7 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action } previousError = originalError - agentOutput, err := azdAgent.SendMessage(ctx, true, fmt.Sprintf( + agentOutput, err := azdAgent.SendMessage(ctx, fmt.Sprintf( `Steps to follow: 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. 2. Resolve the error by making the minimal, targeted change required to the code or configuration. 
@@ -248,7 +249,7 @@ func (e *ErrorMiddleware) collectAndApplyFeedback( e.console.Message(ctx, "") e.console.Message(ctx, color.MagentaString("Feedback")) - feedbackOutput, err := azdAgent.SendMessage(ctx, true, userInput) + feedbackOutput, err := azdAgent.SendMessage(ctx, userInput) if err != nil { if feedbackOutput != "" { e.console.Message(ctx, AIDisclaimer) diff --git a/cli/azd/internal/agent/agent.go b/cli/azd/internal/agent/agent.go index 214d3e24132..00be4c54032 100644 --- a/cli/azd/internal/agent/agent.go +++ b/cli/azd/internal/agent/agent.go @@ -18,14 +18,15 @@ import ( // agentBase represents an AI agent that can execute tools and interact with language models. // It manages multiple models for different purposes and maintains an executor for tool execution. type agentBase struct { - debug bool - defaultModel llms.Model - executor *agents.Executor - tools []common.AnnotatedTool - callbacksHandler callbacks.Handler - thoughtChan chan logging.Thought - cleanupFunc AgentCleanup - maxIterations int + debug bool + fileWatchingEnabled bool + defaultModel llms.Model + executor *agents.Executor + tools []common.AnnotatedTool + callbacksHandler callbacks.Handler + thoughtChan chan logging.Thought + cleanupFunc AgentCleanup + maxIterations int } // AgentCleanup is a function that performs cleanup tasks for an agent. @@ -34,7 +35,7 @@ type AgentCleanup func() error // Agent represents an AI agent that can execute tools and interact with language models. type Agent interface { // SendMessage sends a message to the agent and returns the response - SendMessage(ctx context.Context, useWatch bool, args ...string) (string, error) + SendMessage(ctx context.Context, args ...string) (string, error) // Stop terminates the agent and performs any necessary cleanup Stop() error @@ -59,6 +60,13 @@ func WithDebug(debug bool) AgentCreateOption { } } +// WithFileWatching returns an option that enables or disables file watching for the agent +func WithFileWatching(fileWatchingEnabled bool) AgentCreateOption { + return func(agent *agentBase) { + agent.fileWatchingEnabled = fileWatchingEnabled + } +} + // WithMaxIterations returns an option that sets the maximum number of iterations for the agent func WithMaxIterations(maxIterations int) AgentCreateOption { return func(agent *agentBase) { From c55c7cdc165af5b616e392e94e8acfad5926e86e Mon Sep 17 00:00:00 2001 From: hemarina Date: Thu, 11 Sep 2025 14:31:37 -0700 Subject: [PATCH 114/116] address feedback 2 --- .../internal/agent/conversational_agent.go | 19 ++----- cli/azd/pkg/watch/watch.go | 55 +++++++++++-------- 2 files changed, 38 insertions(+), 36 deletions(-) diff --git a/cli/azd/internal/agent/conversational_agent.go b/cli/azd/internal/agent/conversational_agent.go index be718ae253c..a2b7eba455c 100644 --- a/cli/azd/internal/agent/conversational_agent.go +++ b/cli/azd/internal/agent/conversational_agent.go @@ -8,14 +8,12 @@ import ( _ "embed" "fmt" "strings" - "sync" "time" "github.com/azure/azure-dev/cli/azd/internal/agent/tools/common" uxlib "github.com/azure/azure-dev/cli/azd/pkg/ux" "github.com/azure/azure-dev/cli/azd/pkg/watch" "github.com/fatih/color" - "github.com/fsnotify/fsnotify" "github.com/tmc/langchaingo/agents" "github.com/tmc/langchaingo/chains" "github.com/tmc/langchaingo/llms" @@ -91,18 +89,15 @@ func NewConversationalAzdAiAgent(llm llms.Model, opts ...AgentCreateOption) (Age } // SendMessage processes a single message through the agent and returns the response -func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, useWatch 
bool, args ...string) (string, error) { +func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, args ...string) (string, error) { thoughtsCtx, cancelCtx := context.WithCancel(ctx) defer cancelCtx() - var watcher *fsnotify.Watcher - var done chan bool - var mu sync.Mutex - var fileChanges *watch.FileChanges + var watcher watch.Watcher - if useWatch { + if aai.fileWatchingEnabled { var err error - watcher, done, fileChanges, err = watch.StartWatcher(ctx, &mu) + watcher, err = watch.StartWatcher(ctx) if err != nil { return "", fmt.Errorf("failed to start watcher: %w", err) } @@ -116,10 +111,8 @@ func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, useWatch b defer func() { cleanup() - if useWatch { - watch.PrintChangedFiles(ctx, fileChanges, &mu) - close(done) - watcher.Close() + if aai.fileWatchingEnabled { + watcher.PrintChangedFiles(ctx) } }() diff --git a/cli/azd/pkg/watch/watch.go b/cli/azd/pkg/watch/watch.go index 7fe29f29c6d..5b0246672c5 100644 --- a/cli/azd/pkg/watch/watch.go +++ b/cli/azd/pkg/watch/watch.go @@ -5,7 +5,6 @@ package watch import ( "context" - _ "embed" "fmt" "log" "os" @@ -17,14 +16,23 @@ import ( "github.com/fsnotify/fsnotify" ) -type FileChanges struct { +type Watcher interface { + PrintChangedFiles(ctx context.Context) +} + +type fileWatcher struct { + fileChanges *fileChanges + mu sync.Mutex +} + +type fileChanges struct { Created map[string]bool Modified map[string]bool Deleted map[string]bool } -func StartWatcher(ctx context.Context, mu *sync.Mutex) (*fsnotify.Watcher, chan bool, *FileChanges, error) { - fileChanges := &FileChanges{ +func StartWatcher(ctx context.Context) (Watcher, error) { + fileChanges := &fileChanges{ Created: make(map[string]bool), Modified: make(map[string]bool), Deleted: make(map[string]bool), @@ -32,16 +40,20 @@ func StartWatcher(ctx context.Context, mu *sync.Mutex) (*fsnotify.Watcher, chan watcher, err := fsnotify.NewWatcher() if err != nil { - return nil, nil, nil, fmt.Errorf("failed to create watcher: %w", err) + return nil, fmt.Errorf("failed to create watcher: %w", err) } - done := make(chan bool) + fw := &fileWatcher{ + fileChanges: fileChanges, + } go func() { + defer watcher.Close() + for { select { case event := <-watcher.Events: - mu.Lock() + fw.mu.Lock() name := event.Name switch { case event.Has(fsnotify.Create): @@ -58,11 +70,9 @@ func StartWatcher(ctx context.Context, mu *sync.Mutex) (*fsnotify.Watcher, chan delete(fileChanges.Modified, name) } } - mu.Unlock() + fw.mu.Unlock() case err := <-watcher.Errors: log.Printf("watcher error: %v", err) - case <-done: - return case <-ctx.Done(): return } @@ -71,14 +81,14 @@ func StartWatcher(ctx context.Context, mu *sync.Mutex) (*fsnotify.Watcher, chan cwd, err := os.Getwd() if err != nil { - return nil, nil, nil, fmt.Errorf("failed to get current working directory: %w", err) + return nil, fmt.Errorf("failed to get current working directory: %w", err) } if err := watchRecursive(cwd, watcher); err != nil { - return nil, nil, nil, fmt.Errorf("watcher failed: %w", err) + return nil, fmt.Errorf("watcher failed: %w", err) } - return watcher, done, fileChanges, nil + return fw, nil } func watchRecursive(root string, watcher *fsnotify.Watcher) error { @@ -92,17 +102,16 @@ func watchRecursive(root string, watcher *fsnotify.Watcher) error { return fmt.Errorf("failed to watch directory %s: %w", path, err) } } - return nil }) } -func PrintChangedFiles(ctx context.Context, fileChanges *FileChanges, mu *sync.Mutex) { - mu.Lock() - defer mu.Unlock() - createdFileLength 
:= len(fileChanges.Created) - modifiedFileLength := len(fileChanges.Modified) - deletedFileLength := len(fileChanges.Deleted) +func (fw *fileWatcher) PrintChangedFiles(ctx context.Context) { + fw.mu.Lock() + defer fw.mu.Unlock() + createdFileLength := len(fw.fileChanges.Created) + modifiedFileLength := len(fw.fileChanges.Modified) + deletedFileLength := len(fw.fileChanges.Deleted) if createdFileLength == 0 && modifiedFileLength == 0 && deletedFileLength == 0 { return @@ -111,20 +120,20 @@ func PrintChangedFiles(ctx context.Context, fileChanges *FileChanges, mu *sync.M fmt.Println(output.WithGrayFormat("\n| Files changed:")) if createdFileLength > 0 { - for file := range fileChanges.Created { + for file := range fw.fileChanges.Created { fmt.Println(output.WithGrayFormat("| "), color.GreenString("+ Created "), file) } } if modifiedFileLength > 0 { - for file := range fileChanges.Modified { + for file := range fw.fileChanges.Modified { fmt.Println(output.WithGrayFormat("| "), color.YellowString(output.WithUnderline("+")), color.YellowString("Modified "), file) } } if deletedFileLength > 0 { - for file := range fileChanges.Deleted { + for file := range fw.fileChanges.Deleted { fmt.Println(output.WithGrayFormat("| "), color.RedString("- Deleted "), file) } } From ae9ecf7ecfd7738bad70d5baf976e768c4050c9d Mon Sep 17 00:00:00 2001 From: hemarina Date: Thu, 11 Sep 2025 16:40:01 -0700 Subject: [PATCH 115/116] address feedback --- cli/azd/cmd/middleware/error.go | 249 +++++++++--------- .../internal/agent/conversational_agent.go | 2 +- .../agent/tools/dev/command_executor.go | 18 +- cli/azd/pkg/watch/watch.go | 2 +- 4 files changed, 132 insertions(+), 139 deletions(-) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index 1ddf5d36918..ee020ff3cb5 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -49,176 +49,163 @@ func NewErrorMiddleware( } func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.ActionResult, error) { - var actionResult *actions.ActionResult - var err error + actionResult, err := next(ctx) + if !e.featuresManager.IsEnabled(llm.FeatureLlm) { + return actionResult, err + } - if e.featuresManager.IsEnabled(llm.FeatureLlm) { - if e.options.IsChildAction(ctx) { - actionResult, err = next(ctx) - if err != nil { - e.console.StopSpinner(ctx, "", input.Step) - } - return actionResult, err - } + // Stop the spinner always to un-hide cursor + e.console.StopSpinner(ctx, "", input.Step) + if err == nil || e.options.IsChildAction(ctx) { + return actionResult, err + } - actionResult, err = next(ctx) - if err == nil { - return actionResult, err - } + // Error already has a suggestion, no need for AI + var suggestionErr *internal.ErrorWithSuggestion + if errors.As(err, &suggestionErr) { + e.console.Message(ctx, suggestionErr.Suggestion) + return actionResult, err + } - // Stop the spinner always to un-hide cursor - e.console.StopSpinner(ctx, "", input.Step) - - attempt := 0 - originalError := err - suggestion := "" - var previousError error - var suggestionErr *internal.ErrorWithSuggestion - var errorWithTraceId *internal.ErrorWithTraceId - skipAnalyzingErrors := []string{ - "environment already initialized", - "interrupt", - "no project exists", + // Skip certain errors, no need for AI + skipAnalyzingErrors := []string{ + "environment already initialized", + "interrupt", + "no project exists", + } + for _, s := range skipAnalyzingErrors { + if strings.Contains(err.Error(), s) { + return actionResult, err } - 
AIDisclaimer := output.WithGrayFormat("The following content is AI-generated. AI responses may be incorrect.") - agentName := "agent mode" + } - // Warn user that this is an alpha feature - e.console.WarnForFeature(ctx, llm.FeatureLlm) + // Warn user that this is an alpha feature + e.console.WarnForFeature(ctx, llm.FeatureLlm) - azdAgent, err := e.agentFactory.Create( - agent.WithDebug(e.global.EnableDebugLogging), - agent.WithFileWatching(true), - ) - if err != nil { - return nil, err - } + originalError := err + azdAgent, err := e.agentFactory.Create( + agent.WithDebug(e.global.EnableDebugLogging), + agent.WithFileWatching(true), + ) + if err != nil { + return nil, err + } - defer azdAgent.Stop() + defer azdAgent.Stop() - for { - if originalError == nil { - break - } + attempt := 0 + var previousError error + var errorWithTraceId *internal.ErrorWithTraceId + AIDisclaimer := output.WithGrayFormat("The following content is AI-generated. AI responses may be incorrect.") + agentName := "agent mode" - for _, s := range skipAnalyzingErrors { - if strings.Contains(originalError.Error(), s) { - return actionResult, originalError - } - } + for { + if originalError == nil { + break + } - if previousError != nil && errors.Is(originalError, previousError) { - attempt++ - if attempt >= 3 { - e.console.Message(ctx, fmt.Sprintf("Please review the error and fix it manually, "+ - "%s was unable to resolve the error after multiple attempts.", agentName)) - return actionResult, originalError - } + if previousError != nil && errors.Is(originalError, previousError) { + attempt++ + if attempt >= 3 { + e.console.Message(ctx, fmt.Sprintf("Please review the error and fix it manually, "+ + "%s was unable to resolve the error after multiple attempts.", agentName)) + return actionResult, originalError } + } - e.console.Message(ctx, output.WithErrorFormat("\nERROR: %s", originalError.Error())) + e.console.Message(ctx, output.WithErrorFormat("\nERROR: %s", originalError.Error())) - if errors.As(originalError, &errorWithTraceId) { - e.console.Message(ctx, output.WithErrorFormat("TraceID: %s", errorWithTraceId.TraceId)) - } - if errors.As(originalError, &suggestionErr) { - suggestion = suggestionErr.Suggestion - e.console.Message(ctx, suggestion) - return actionResult, originalError - } + if errors.As(originalError, &errorWithTraceId) { + e.console.Message(ctx, output.WithErrorFormat("TraceID: %s", errorWithTraceId.TraceId)) + } - errorInput := originalError.Error() + errorInput := originalError.Error() - e.console.Message(ctx, "") - confirm, err := e.checkErrorHandlingConsent( - ctx, - "mcp.errorHandling.troubleshooting", - fmt.Sprintf("Generate troubleshooting steps using %s?", agentName), - fmt.Sprintf("This action will run AI tools to generate troubleshooting steps."+ - " Edit permissions for AI tools anytime by running %s.", - output.WithHighLightFormat("azd mcp consent")), - true, - ) - if err != nil { - return nil, fmt.Errorf("prompting to provide troubleshooting steps: %w", err) - } + e.console.Message(ctx, "") + confirm, err := e.checkErrorHandlingConsent( + ctx, + "mcp.errorHandling.troubleshooting", + fmt.Sprintf("Generate troubleshooting steps using %s?", agentName), + fmt.Sprintf("This action will run AI tools to generate troubleshooting steps."+ + " Edit permissions for AI tools anytime by running %s.", + output.WithHighLightFormat("azd mcp consent")), + true, + ) + if err != nil { + return nil, fmt.Errorf("prompting to provide troubleshooting steps: %w", err) + } - if confirm { - // Provide manual steps 
for troubleshooting - agentOutput, err := azdAgent.SendMessage(ctx, fmt.Sprintf( - `Steps to follow: + if confirm { + // Provide manual steps for troubleshooting + agentOutput, err := azdAgent.SendMessage(ctx, fmt.Sprintf( + `Steps to follow: 1. Use available tool including azd_error_troubleshooting tool to identify and explain the error. Diagnose its root cause when running azd command. 2. Provide actionable troubleshooting steps. Do not perform any file changes. Error details: %s`, errorInput)) - if err != nil { - if agentOutput != "" { - e.console.Message(ctx, AIDisclaimer) - e.console.Message(ctx, output.WithMarkdown(agentOutput)) - } - - return nil, err + if err != nil { + if agentOutput != "" { + e.console.Message(ctx, AIDisclaimer) + e.console.Message(ctx, output.WithMarkdown(agentOutput)) } - e.console.Message(ctx, AIDisclaimer) - e.console.Message(ctx, "") - e.console.Message(ctx, fmt.Sprintf("%s:", output.AzdAgentLabel())) - e.console.Message(ctx, output.WithMarkdown(agentOutput)) - e.console.Message(ctx, "") + return nil, err } - // Ask user if they want to let AI fix the - confirm, err = e.checkErrorHandlingConsent( - ctx, - "mcp.errorHandling.fix", - fmt.Sprintf("Fix this error using %s?", agentName), - fmt.Sprintf("This action will run AI tools to help fix the error."+ - " Edit permissions for AI tools anytime by running %s.", - output.WithHighLightFormat("azd mcp consent")), - false, - ) - if err != nil { - return nil, fmt.Errorf("prompting to fix error using %s: %w", agentName, err) - } + e.console.Message(ctx, AIDisclaimer) + e.console.Message(ctx, "") + e.console.Message(ctx, fmt.Sprintf("%s:", output.AzdAgentLabel())) + e.console.Message(ctx, output.WithMarkdown(agentOutput)) + e.console.Message(ctx, "") + } - if !confirm { - return actionResult, err - } + // Ask user if they want to let AI fix the + confirm, err = e.checkErrorHandlingConsent( + ctx, + "mcp.errorHandling.fix", + fmt.Sprintf("Fix this error using %s?", agentName), + fmt.Sprintf("This action will run AI tools to help fix the error."+ + " Edit permissions for AI tools anytime by running %s.", + output.WithHighLightFormat("azd mcp consent")), + false, + ) + if err != nil { + return nil, fmt.Errorf("prompting to fix error using %s: %w", agentName, err) + } - previousError = originalError - agentOutput, err := azdAgent.SendMessage(ctx, fmt.Sprintf( - `Steps to follow: + if !confirm { + return actionResult, err + } + + previousError = originalError + agentOutput, err := azdAgent.SendMessage(ctx, fmt.Sprintf( + `Steps to follow: 1. Use available tool to identify, explain and diagnose this error when running azd command and its root cause. 2. Resolve the error by making the minimal, targeted change required to the code or configuration. Avoid unnecessary modifications and focus only on what is essential to restore correct functionality. 3. Remove any changes that were created solely for validation and are not part of the actual error fix. 
Error details: %s`, errorInput)) - if err != nil { - if agentOutput != "" { - e.console.Message(ctx, AIDisclaimer) - e.console.Message(ctx, output.WithMarkdown(agentOutput)) - } - - return nil, err - } - - // Ask the user to add feedback - if err := e.collectAndApplyFeedback(ctx, azdAgent, AIDisclaimer); err != nil { - return nil, err + if err != nil { + if agentOutput != "" { + e.console.Message(ctx, AIDisclaimer) + e.console.Message(ctx, output.WithMarkdown(agentOutput)) } - // Clear check cache to prevent skip of tool related error - ctx = tools.WithInstalledCheckCache(ctx) + return nil, err + } - actionResult, err = next(ctx) - originalError = err + // Ask the user to add feedback + if err := e.collectAndApplyFeedback(ctx, azdAgent, AIDisclaimer); err != nil { + return nil, err } - } - if actionResult == nil { + // Clear check cache to prevent skip of tool related error + ctx = tools.WithInstalledCheckCache(ctx) + actionResult, err = next(ctx) + originalError = err } return actionResult, err diff --git a/cli/azd/internal/agent/conversational_agent.go b/cli/azd/internal/agent/conversational_agent.go index a2b7eba455c..a787f41e889 100644 --- a/cli/azd/internal/agent/conversational_agent.go +++ b/cli/azd/internal/agent/conversational_agent.go @@ -97,7 +97,7 @@ func (aai *ConversationalAzdAiAgent) SendMessage(ctx context.Context, args ...st if aai.fileWatchingEnabled { var err error - watcher, err = watch.StartWatcher(ctx) + watcher, err = watch.NewWatcher(ctx) if err != nil { return "", fmt.Errorf("failed to start watcher: %w", err) } diff --git a/cli/azd/internal/agent/tools/dev/command_executor.go b/cli/azd/internal/agent/tools/dev/command_executor.go index c043724e762..18876508928 100644 --- a/cli/azd/internal/agent/tools/dev/command_executor.go +++ b/cli/azd/internal/agent/tools/dev/command_executor.go @@ -121,13 +121,19 @@ func (t CommandExecutorTool) Call(ctx context.Context, input string) (string, er } if req.Command == "azd" { - errorResponse := common.ErrorResponse{ - Error: true, - Message: "azd command is not supported", + blockedCommands := []string{"up", "provision", "deploy", "down"} + + for _, blocked := range blockedCommands { + if req.Args[0] == blocked && req.Args[1] != "--preview" { + errorResponse := common.ErrorResponse{ + Error: true, + Message: "azd command is not supported", + } + + jsonData, _ := json.MarshalIndent(errorResponse, "", " ") + return string(jsonData), nil + } } - - jsonData, _ := json.MarshalIndent(errorResponse, "", " ") - return string(jsonData), nil } // Set defaults diff --git a/cli/azd/pkg/watch/watch.go b/cli/azd/pkg/watch/watch.go index 5b0246672c5..c5f55668022 100644 --- a/cli/azd/pkg/watch/watch.go +++ b/cli/azd/pkg/watch/watch.go @@ -31,7 +31,7 @@ type fileChanges struct { Deleted map[string]bool } -func StartWatcher(ctx context.Context) (Watcher, error) { +func NewWatcher(ctx context.Context) (Watcher, error) { fileChanges := &fileChanges{ Created: make(map[string]bool), Modified: make(map[string]bool), From 758d12f555dd4d00c27272232d338f2bc1a43d12 Mon Sep 17 00:00:00 2001 From: hemarina Date: Thu, 11 Sep 2025 17:05:00 -0700 Subject: [PATCH 116/116] address feedback --- cli/azd/cmd/middleware/error.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cli/azd/cmd/middleware/error.go b/cli/azd/cmd/middleware/error.go index ee020ff3cb5..efd68185580 100644 --- a/cli/azd/cmd/middleware/error.go +++ b/cli/azd/cmd/middleware/error.go @@ -104,6 +104,8 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) 
(*actions.Action break } + e.console.Message(ctx, output.WithErrorFormat("\nERROR: %s", originalError.Error())) + if previousError != nil && errors.Is(originalError, previousError) { attempt++ if attempt >= 3 { @@ -113,8 +115,6 @@ func (e *ErrorMiddleware) Run(ctx context.Context, next NextFn) (*actions.Action } } - e.console.Message(ctx, output.WithErrorFormat("\nERROR: %s", originalError.Error())) - if errors.As(originalError, &errorWithTraceId) { e.console.Message(ctx, output.WithErrorFormat("TraceID: %s", errorWithTraceId.TraceId)) }
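For reference, the file-watching flow this series introduces (the `watch` package added in PATCH 105 and reworked into an interface in PATCH 114) is consumed by wrapping a unit of agent work between `NewWatcher` and `PrintChangedFiles`. Below is a minimal usage sketch, assuming the package lands with the `NewWatcher(ctx) (Watcher, error)` constructor and `PrintChangedFiles(ctx)` method shown in the diffs above; `doWork` is a hypothetical stand-in for the agent invocation and is not part of the patches.

```go
package main

import (
	"context"
	"fmt"
	"os"

	// Assumes the watch package introduced in this series (cli/azd/pkg/watch).
	"github.com/azure/azure-dev/cli/azd/pkg/watch"
)

// doWork is a hypothetical stand-in for the agent call that may create,
// modify, or delete files under the current working directory.
func doWork() error {
	return os.WriteFile("example.txt", []byte("hello"), 0o600)
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Start watching the current directory tree before the work begins,
	// mirroring how SendMessage starts the watcher when file watching is enabled.
	watcher, err := watch.NewWatcher(ctx)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to start watcher: %v\n", err)
		os.Exit(1)
	}

	if err := doWork(); err != nil {
		fmt.Fprintf(os.Stderr, "work failed: %v\n", err)
	}

	// Print the created/modified/deleted summary once the work is done,
	// as the agent does in its deferred cleanup.
	watcher.PrintChangedFiles(ctx)
}
```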