diff --git a/syntax/encoding/alloyyaml/alloyyaml_test.go b/syntax/encoding/alloyyaml/alloyyaml_test.go
new file mode 100644
index 0000000000..83c6e6ac9a
--- /dev/null
+++ b/syntax/encoding/alloyyaml/alloyyaml_test.go
@@ -0,0 +1,207 @@
+package alloyyaml
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"path/filepath"
+	"reflect"
+	"sort"
+	"strings"
+	"testing"
+
+	"github.com/grafana/alloy/syntax/ast"
+	"github.com/grafana/alloy/syntax/parser"
+	"github.com/grafana/alloy/syntax/token"
+	"github.com/stretchr/testify/require"
+)
+
+var updateGolden = flag.Bool("update", false, "update golden YAML files")
+
+// TestYAMLToAlloy tests YAML → Alloy conversion.
+// YAML files are hand-written inputs; Alloy files are the expected outputs.
+func TestYAMLToAlloy(t *testing.T) {
+	runGoldenTests(t, func(t *testing.T, alloyPath, yamlPath string) {
+		expectedAlloy, err := os.ReadFile(alloyPath)
+		require.NoError(t, err)
+
+		yamlContent, err := os.ReadFile(yamlPath)
+		require.NoError(t, err)
+
+		actualAlloy, err := ToAlloy(yamlContent)
+		require.NoError(t, err, "YAML → Alloy conversion failed")
+
+		assertAlloyEqual(t, expectedAlloy, actualAlloy, "YAML → Alloy")
+	})
+}
+
+// TestAlloyToYAML tests Alloy → YAML conversion via round-trip:
+// Alloy → YAML → Alloy, comparing the original with the result.
+// Run with "go test -args -update" to regenerate the YAML golden files.
+func TestAlloyToYAML(t *testing.T) {
+	runGoldenTests(t, func(t *testing.T, alloyPath, yamlPath string) {
+		alloyContent, err := os.ReadFile(alloyPath)
+		require.NoError(t, err)
+
+		actualYAML, err := ToYAML(alloyContent)
+		require.NoError(t, err, "Alloy → YAML conversion failed")
+
+		if *updateGolden {
+			err := os.WriteFile(yamlPath, actualYAML, 0o644)
+			require.NoError(t, err)
+			t.Logf("✓ Updated golden file: %s", filepath.Base(yamlPath))
+			return
+		}
+
+		// Round-trip: convert YAML back to Alloy and compare
+		roundTripAlloy, err := ToAlloy(actualYAML)
+		require.NoError(t, err, "YAML → Alloy conversion failed")
+
+		assertAlloyEqualWithYAML(t, alloyContent, roundTripAlloy, actualYAML)
+	})
+}
+
+// runGoldenTests discovers and runs golden file tests in testdata/.
+func runGoldenTests(t *testing.T, testFn func(t *testing.T, alloyPath, yamlPath string)) {
+	alloyFiles, err := filepath.Glob(filepath.Join("testdata", "*.alloy"))
+	require.NoError(t, err)
+	if len(alloyFiles) == 0 {
+		t.Skip("no testdata files found")
+	}
+
+	for _, alloyPath := range alloyFiles {
+		baseName := strings.TrimSuffix(filepath.Base(alloyPath), ".alloy")
+		yamlPath := filepath.Join("testdata", baseName+".yaml")
+		t.Run(baseName, func(t *testing.T) {
+			testFn(t, alloyPath, yamlPath)
+		})
+	}
+}
+
+// assertAlloyEqual compares two Alloy configurations using AST comparison.
+func assertAlloyEqual(t *testing.T, expected, actual []byte, label string) {
+	if err := compareAlloyAST(expected, actual); err != nil {
+		t.Errorf("%s: AST comparison failed: %v", label, err)
+		t.Logf("Expected:\n%s", string(expected))
+		t.Logf("Actual:\n%s", string(actual))
+		t.FailNow()
+	}
+	t.Logf("✓ %s test passed", label)
+}
+
+// assertAlloyEqualWithYAML is like assertAlloyEqual but also logs the intermediate YAML.
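+// On failure it logs the original Alloy, the intermediate YAML, and the
+// round-tripped Alloy together, so conversion bugs are easier to spot.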
+func assertAlloyEqualWithYAML(t *testing.T, expected, actual, yaml []byte) {
+	if err := compareAlloyAST(expected, actual); err != nil {
+		t.Errorf("Round-trip AST comparison failed: %v", err)
+		t.Logf("Original Alloy:\n%s", string(expected))
+		t.Logf("YAML (intermediate):\n%s", string(yaml))
+		t.Logf("Round-trip Alloy:\n%s", string(actual))
+		t.FailNow()
+	}
+	t.Logf("✓ Alloy → YAML → Alloy round-trip test passed")
+}
+
+// compareAlloyAST parses and compares two Alloy configurations semantically.
+// Ignores comments, whitespace, formatting, and object field ordering.
+func compareAlloyAST(expected, actual []byte) error {
+	expectedAST, err := parser.ParseFile("expected.alloy", expected)
+	if err != nil {
+		return fmt.Errorf("parse expected: %w", err)
+	}
+
+	actualAST, err := parser.ParseFile("actual.alloy", actual)
+	if err != nil {
+		return fmt.Errorf("parse actual: %w", err)
+	}
+
+	normalizeAST(expectedAST)
+	normalizeAST(actualAST)
+
+	if !reflect.DeepEqual(expectedAST.Body, actualAST.Body) {
+		return fmt.Errorf("AST structures differ")
+	}
+	return nil
+}
+
+// normalizeAST removes non-semantic information (positions, comments).
+func normalizeAST(file *ast.File) {
+	file.Comments = nil
+	normalizeBody(file.Body)
+}
+
+// normalizeBody sanitizes statements recursively.
+func normalizeBody(body ast.Body) {
+	for _, stmt := range body {
+		normalizeStmt(stmt)
+	}
+}
+
+func normalizeStmt(stmt ast.Stmt) {
+	switch s := stmt.(type) {
+	case *ast.AttributeStmt:
+		clearPos(s.Name)
+		normalizeExpr(s.Value)
+	case *ast.BlockStmt:
+		s.NamePos, s.LabelPos, s.LCurlyPos, s.RCurlyPos = token.Pos{}, token.Pos{}, token.Pos{}, token.Pos{}
+		normalizeBody(s.Body)
+	}
+}
+
+func normalizeExpr(expr ast.Expr) {
+	if expr == nil {
+		return
+	}
+
+	switch e := expr.(type) {
+	case *ast.LiteralExpr:
+		e.ValuePos = token.Pos{}
+	case *ast.IdentifierExpr:
+		clearPos(e.Ident)
+	case *ast.ArrayExpr:
+		e.LBrackPos, e.RBrackPos = token.Pos{}, token.Pos{}
+		for _, elem := range e.Elements {
+			normalizeExpr(elem)
+		}
+	case *ast.ObjectExpr:
+		e.LCurlyPos, e.RCurlyPos = token.Pos{}, token.Pos{}
+		// Sort fields by name for consistent comparison
+		sort.Slice(e.Fields, func(i, j int) bool {
+			return e.Fields[i].Name.Name < e.Fields[j].Name.Name
+		})
+		for _, field := range e.Fields {
+			clearPos(field.Name)
+			normalizeExpr(field.Value)
+		}
+	case *ast.AccessExpr:
+		normalizeExpr(e.Value)
+		clearPos(e.Name)
+	case *ast.IndexExpr:
+		e.LBrackPos, e.RBrackPos = token.Pos{}, token.Pos{}
+		normalizeExpr(e.Value)
+		normalizeExpr(e.Index)
+	case *ast.CallExpr:
+		e.LParenPos, e.RParenPos = token.Pos{}, token.Pos{}
+		normalizeExpr(e.Value)
+		for _, arg := range e.Args {
+			normalizeExpr(arg)
+		}
+	case *ast.UnaryExpr:
+		e.KindPos = token.Pos{}
+		normalizeExpr(e.Value)
+	case *ast.BinaryExpr:
+		e.KindPos = token.Pos{}
+		normalizeExpr(e.Left)
+		normalizeExpr(e.Right)
+	case *ast.ParenExpr:
+		e.LParenPos, e.RParenPos = token.Pos{}, token.Pos{}
+		normalizeExpr(e.Inner)
+	}
+}
+
+func clearPos(ident *ast.Ident) {
+	if ident != nil {
+		ident.NamePos = token.Pos{}
+	}
+}
diff --git a/syntax/encoding/alloyyaml/testdata/00_all_in_one.alloy b/syntax/encoding/alloyyaml/testdata/00_all_in_one.alloy
new file mode 100644
index 0000000000..b47e3f69f1
--- /dev/null
+++ b/syntax/encoding/alloyyaml/testdata/00_all_in_one.alloy
@@ -0,0 +1,39 @@
+// Example Alloy input file.
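+// It covers the main encoding cases in one place: an array of objects, an
+// object literal, an expression, repeated anonymous blocks, labeled blocks,
+// and an empty block. In the YAML golden file, the array of objects is
+// wrapped in a $array key so it is not read back as a block body.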
+some_block "label_name" { + this_is_array = [ + {foo = "bar"}, + {baz = "qux"}, + ] + this_is_object = { + foo = "bar", + baz = "boop", + } + this_uses_expr = env("MY_VAR") + this_is_anon_block { + foo = "bar" + pos = "first" + } + this_is_anon_block { + foo = "fab" + pos = "second" + } + this_is_anon_block { + foo = "bop" + pos = "third" + } + these_are_named_blocks "block_1" { + foo = "bar" + } + these_are_named_blocks "block_2" { + foo = "fab" + } + these_are_named_blocks "block_3" { + foo = "bop" + } +} + +another_block {} diff --git a/syntax/encoding/alloyyaml/testdata/00_all_in_one.yaml b/syntax/encoding/alloyyaml/testdata/00_all_in_one.yaml new file mode 100644 index 0000000000..c69683c014 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/00_all_in_one.yaml @@ -0,0 +1,25 @@ +- some_block/label_name: + - this_is_array: + $array: + - foo: bar + - baz: qux + - this_is_object: + baz: boop + foo: bar + - this_uses_expr: expr(env("MY_VAR")) + - this_is_anon_block: + - foo: bar + - pos: first + - this_is_anon_block: + - foo: fab + - pos: second + - this_is_anon_block: + - foo: bop + - pos: third + - these_are_named_blocks/block_1: + - foo: bar + - these_are_named_blocks/block_2: + - foo: fab + - these_are_named_blocks/block_3: + - foo: bop +- another_block: [] diff --git a/syntax/encoding/alloyyaml/testdata/01_simple.alloy b/syntax/encoding/alloyyaml/testdata/01_simple.alloy new file mode 100644 index 0000000000..b60e7bc92f --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/01_simple.alloy @@ -0,0 +1,4 @@ +level = "debug" +format = "json" +port = 8080 + diff --git a/syntax/encoding/alloyyaml/testdata/01_simple.yaml b/syntax/encoding/alloyyaml/testdata/01_simple.yaml new file mode 100644 index 0000000000..65c57b4f53 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/01_simple.yaml @@ -0,0 +1,3 @@ +- level: debug +- format: json +- port: 8080 diff --git a/syntax/encoding/alloyyaml/testdata/02.2_block_vs_object.alloy b/syntax/encoding/alloyyaml/testdata/02.2_block_vs_object.alloy new file mode 100644 index 0000000000..82245232b9 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/02.2_block_vs_object.alloy @@ -0,0 +1,14 @@ +this_is_a_block "label_name" { + key1 = "value1" + key2 = "value2" +} + +this_is_anonymous_block { + key1 = "value1" + key2 = "value2" +} + +this_is_an_object = { + key1 = "value1", + key2 = "value2", +} diff --git a/syntax/encoding/alloyyaml/testdata/02.2_block_vs_object.yaml b/syntax/encoding/alloyyaml/testdata/02.2_block_vs_object.yaml new file mode 100644 index 0000000000..d8df2b9386 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/02.2_block_vs_object.yaml @@ -0,0 +1,9 @@ +- this_is_a_block/label_name: + - key1: value1 + - key2: value2 +- this_is_anonymous_block: + - key1: value1 + - key2: value2 +- this_is_an_object: + key1: value1 + key2: value2 diff --git a/syntax/encoding/alloyyaml/testdata/02_block.alloy b/syntax/encoding/alloyyaml/testdata/02_block.alloy new file mode 100644 index 0000000000..63d985f487 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/02_block.alloy @@ -0,0 +1,5 @@ +logging { + format = "json" + level = "debug" +} + diff --git a/syntax/encoding/alloyyaml/testdata/02_block.yaml b/syntax/encoding/alloyyaml/testdata/02_block.yaml new file mode 100644 index 0000000000..65b725a47f --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/02_block.yaml @@ -0,0 +1,3 @@ +- logging: + - format: json + - level: debug diff --git a/syntax/encoding/alloyyaml/testdata/03.2_nested_multiple.alloy 
b/syntax/encoding/alloyyaml/testdata/03.2_nested_multiple.alloy new file mode 100644 index 0000000000..10ad98be9d --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/03.2_nested_multiple.alloy @@ -0,0 +1,31 @@ +some_block "label_name" { + this_is_array = [ + { foo = "bar" }, + { baz = "qux" }, + ] + this_is_object = { + foo = "bar", + baz = "boop", + } + this_is_anon_block { + foo = "bar" + baz = "bap" + } + this_is_anon_block { + foo = "fab" + baz = "boom" + } + this_is_anon_block { + foo = "bop" + baz = "bob" + } + these_are_named_blocks "block_1" { + foo = "bar" + } + these_are_named_blocks "block_2" { + foo = "fab" + } + these_are_named_blocks "block_3" { + foo = "bop" + } +} \ No newline at end of file diff --git a/syntax/encoding/alloyyaml/testdata/03.2_nested_multiple.yaml b/syntax/encoding/alloyyaml/testdata/03.2_nested_multiple.yaml new file mode 100644 index 0000000000..25002e82cc --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/03.2_nested_multiple.yaml @@ -0,0 +1,23 @@ +- some_block/label_name: + - this_is_array: + $array: + - foo: bar + - baz: qux + - this_is_object: + baz: boop + foo: bar + - this_is_anon_block: + - foo: bar + - baz: bap + - this_is_anon_block: + - foo: fab + - baz: boom + - this_is_anon_block: + - foo: bop + - baz: bob + - these_are_named_blocks/block_1: + - foo: bar + - these_are_named_blocks/block_2: + - foo: fab + - these_are_named_blocks/block_3: + - foo: bop diff --git a/syntax/encoding/alloyyaml/testdata/03_nested.alloy b/syntax/encoding/alloyyaml/testdata/03_nested.alloy new file mode 100644 index 0000000000..f97aeb8b6d --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/03_nested.alloy @@ -0,0 +1,10 @@ +server { + address = "localhost" + port = 8080 + + tls { + ca_file = "/etc/ca.pem" + cert_file = "/etc/cert.pem" + } +} + diff --git a/syntax/encoding/alloyyaml/testdata/03_nested.yaml b/syntax/encoding/alloyyaml/testdata/03_nested.yaml new file mode 100644 index 0000000000..1999f54c41 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/03_nested.yaml @@ -0,0 +1,6 @@ +- server: + - address: localhost + - port: 8080 + - tls: + - ca_file: /etc/ca.pem + - cert_file: /etc/cert.pem diff --git a/syntax/encoding/alloyyaml/testdata/04_arrays.alloy b/syntax/encoding/alloyyaml/testdata/04_arrays.alloy new file mode 100644 index 0000000000..84d1d43408 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/04_arrays.alloy @@ -0,0 +1,12 @@ +labels = ["app", "prod", "web"] +ports = [8080, 8081, 8082] + +labels2 = ["app", "prod", "web"] +ports2 = [8080, 8081, 8082] + +object { + foo = "bar" +} +object { + baz = "qux" +} diff --git a/syntax/encoding/alloyyaml/testdata/04_arrays.yaml b/syntax/encoding/alloyyaml/testdata/04_arrays.yaml new file mode 100644 index 0000000000..c911f76273 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/04_arrays.yaml @@ -0,0 +1,20 @@ +- labels: + - app + - prod + - web +- ports: + - 8080 + - 8081 + - 8082 +- labels2: + - app + - prod + - web +- ports2: + - 8080 + - 8081 + - 8082 +- object: + - foo: bar +- object: + - baz: qux diff --git a/syntax/encoding/alloyyaml/testdata/05_multiple_blocks.alloy b/syntax/encoding/alloyyaml/testdata/05_multiple_blocks.alloy new file mode 100644 index 0000000000..448cbf05a0 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/05_multiple_blocks.alloy @@ -0,0 +1,11 @@ +endpoint { + url = "http://primary.com" +} + +endpoint { + url = "http://secondary.com" +} + +single_endpoint { + url = "http://single.com" +} diff --git a/syntax/encoding/alloyyaml/testdata/05_multiple_blocks.yaml 
b/syntax/encoding/alloyyaml/testdata/05_multiple_blocks.yaml
new file mode 100644
index 0000000000..b5d07a405e
--- /dev/null
+++ b/syntax/encoding/alloyyaml/testdata/05_multiple_blocks.yaml
@@ -0,0 +1,6 @@
+- endpoint:
+  - url: http://primary.com
+- endpoint:
+  - url: http://secondary.com
+- single_endpoint:
+  - url: http://single.com
diff --git a/syntax/encoding/alloyyaml/testdata/06_prometheus.alloy b/syntax/encoding/alloyyaml/testdata/06_prometheus.alloy
new file mode 100644
index 0000000000..f579f46d8f
--- /dev/null
+++ b/syntax/encoding/alloyyaml/testdata/06_prometheus.alloy
@@ -0,0 +1,6 @@
+prometheus.scrape "default" {
+	forward_to = [prometheus.remote_write.default.receiver]
+	scrape_interval = "15s"
+	targets = ["localhost:9090", "localhost:9091"]
+}
+
diff --git a/syntax/encoding/alloyyaml/testdata/06_prometheus.yaml b/syntax/encoding/alloyyaml/testdata/06_prometheus.yaml
new file mode 100644
index 0000000000..42c12d5e5e
--- /dev/null
+++ b/syntax/encoding/alloyyaml/testdata/06_prometheus.yaml
@@ -0,0 +1,7 @@
+- prometheus.scrape/default:
+  - forward_to:
+    - expr(prometheus.remote_write.default.receiver)
+  - scrape_interval: 15s
+  - targets:
+    - localhost:9090
+    - localhost:9091
diff --git a/syntax/encoding/alloyyaml/testdata/07_object_literal.alloy b/syntax/encoding/alloyyaml/testdata/07_object_literal.alloy
new file mode 100644
index 0000000000..7850e482e1
--- /dev/null
+++ b/syntax/encoding/alloyyaml/testdata/07_object_literal.alloy
@@ -0,0 +1,2 @@
+labels = { app = "myapp", env = "prod", team = "platform" }
+
diff --git a/syntax/encoding/alloyyaml/testdata/07_object_literal.yaml b/syntax/encoding/alloyyaml/testdata/07_object_literal.yaml
new file mode 100644
index 0000000000..ee8ff18aa0
--- /dev/null
+++ b/syntax/encoding/alloyyaml/testdata/07_object_literal.yaml
@@ -0,0 +1,4 @@
+- labels:
+    app: myapp
+    env: prod
+    team: platform
diff --git a/syntax/encoding/alloyyaml/testdata/08_expr.alloy b/syntax/encoding/alloyyaml/testdata/08_expr.alloy
new file mode 100644
index 0000000000..c9dc8b83fc
--- /dev/null
+++ b/syntax/encoding/alloyyaml/testdata/08_expr.alloy
@@ -0,0 +1,11 @@
+// Simple expressions
+password = env("PASSWORD")
+targets = discovery.kubernetes.pods.targets
+computed = (base + 100) * factor
+
+// Multi-line expression (preserved in YAML as a multi-line |- block scalar)
+config = array.concat(
+	[1, 2, 3],
+	[4, 5, 6],
+)
+
diff --git a/syntax/encoding/alloyyaml/testdata/08_expr.yaml b/syntax/encoding/alloyyaml/testdata/08_expr.yaml
new file mode 100644
index 0000000000..69e424dd2c
--- /dev/null
+++ b/syntax/encoding/alloyyaml/testdata/08_expr.yaml
@@ -0,0 +1,8 @@
+- password: expr(env("PASSWORD"))
+- targets: expr(discovery.kubernetes.pods.targets)
+- computed: expr((base + 100) * factor)
+- config: |-
+    expr(array.concat(
+      [1, 2, 3],
+      [4, 5, 6],
+    ))
diff --git a/syntax/encoding/alloyyaml/testdata/09_complete.alloy b/syntax/encoding/alloyyaml/testdata/09_complete.alloy
new file mode 100644
index 0000000000..49acf51d36
--- /dev/null
+++ b/syntax/encoding/alloyyaml/testdata/09_complete.alloy
@@ -0,0 +1,26 @@
+logging {
+	format = "logfmt"
+	level = "debug"
+}
+
+discovery.kubernetes "pods" {
+	role = "pod"
+}
+
+prometheus.scrape "default" {
+	forward_to = [prometheus.remote_write.default.receiver]
+	scrape_interval = "15s"
+	targets = discovery.kubernetes.pods.targets
+}
+
+prometheus.remote_write "default" {
+	endpoint {
+		url = "http://localhost:9009/api/prom/push"
+
+		basic_auth {
+			password = env("PASSWORD")
+			username = "admin"
+		}
+	}
+}
+
diff --git
a/syntax/encoding/alloyyaml/testdata/09_complete.yaml b/syntax/encoding/alloyyaml/testdata/09_complete.yaml new file mode 100644 index 0000000000..9c4bada48d --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/09_complete.yaml @@ -0,0 +1,16 @@ +- logging: + - format: logfmt + - level: debug +- discovery.kubernetes/pods: + - role: pod +- prometheus.scrape/default: + - forward_to: + - expr(prometheus.remote_write.default.receiver) + - scrape_interval: 15s + - targets: expr(discovery.kubernetes.pods.targets) +- prometheus.remote_write/default: + - endpoint: + - url: http://localhost:9009/api/prom/push + - basic_auth: + - password: expr(env("PASSWORD")) + - username: admin diff --git a/syntax/encoding/alloyyaml/testdata/10_filter.alloy b/syntax/encoding/alloyyaml/testdata/10_filter.alloy new file mode 100644 index 0000000000..c47526ca48 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/10_filter.alloy @@ -0,0 +1,45 @@ +otelcol.receiver.otlp "default" { + grpc { + endpoint = "localhost:4317" + } + + http { + endpoint = "localhost:4318" + } + + output { + metrics = [otelcol.processor.filter.default_ottl.input] + logs = [otelcol.processor.filter.default_ottl.input] + traces = [otelcol.processor.filter.default_ottl.input] + } +} + +otelcol.processor.filter "default_ottl" { + error_mode = "ignore" + + traces { + span = ["attributes[\"container.name\"] == \"app_container_1\"", "resource.attributes[\"host.name\"] == \"localhost\"", "name == \"app_3\""] + spanevent = ["attributes[\"grpc\"] == true", "IsMatch(name, \".*grpc.*\")"] + } + + metrics { + metric = ["name == \"my.metric\" and resource.attributes[\"my_label\"] == \"abc123\"", "type == METRIC_DATA_TYPE_HISTOGRAM"] + datapoint = ["metric.type == METRIC_DATA_TYPE_SUMMARY", "resource.attributes[\"service.name\"] == \"my_service_name\""] + } + + logs { + log_record = ["IsMatch(body, \".*password.*\")", "severity_number < SEVERITY_NUMBER_WARN"] + } + + output { + metrics = [otelcol.exporter.otlp.default.input] + logs = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} + +otelcol.exporter.otlp "default" { + client { + endpoint = "database:4317" + } +} diff --git a/syntax/encoding/alloyyaml/testdata/10_filter.yaml b/syntax/encoding/alloyyaml/testdata/10_filter.yaml new file mode 100644 index 0000000000..9ecd7f0f5a --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/10_filter.yaml @@ -0,0 +1,43 @@ +- otelcol.receiver.otlp/default: + - grpc: + - endpoint: localhost:4317 + - http: + - endpoint: localhost:4318 + - output: + - metrics: + - expr(otelcol.processor.filter.default_ottl.input) + - logs: + - expr(otelcol.processor.filter.default_ottl.input) + - traces: + - expr(otelcol.processor.filter.default_ottl.input) +- otelcol.processor.filter/default_ottl: + - error_mode: ignore + - traces: + - span: + - attributes["container.name"] == "app_container_1" + - resource.attributes["host.name"] == "localhost" + - name == "app_3" + - spanevent: + - attributes["grpc"] == true + - IsMatch(name, ".*grpc.*") + - metrics: + - metric: + - name == "my.metric" and resource.attributes["my_label"] == "abc123" + - type == METRIC_DATA_TYPE_HISTOGRAM + - datapoint: + - metric.type == METRIC_DATA_TYPE_SUMMARY + - resource.attributes["service.name"] == "my_service_name" + - logs: + - log_record: + - IsMatch(body, ".*password.*") + - severity_number < SEVERITY_NUMBER_WARN + - output: + - metrics: + - expr(otelcol.exporter.otlp.default.input) + - logs: + - expr(otelcol.exporter.otlp.default.input) + - traces: + - 
expr(otelcol.exporter.otlp.default.input) +- otelcol.exporter.otlp/default: + - client: + - endpoint: database:4317 diff --git a/syntax/encoding/alloyyaml/testdata/11_transform.alloy b/syntax/encoding/alloyyaml/testdata/11_transform.alloy new file mode 100644 index 0000000000..f57a766a53 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/11_transform.alloy @@ -0,0 +1,66 @@ +otelcol.receiver.otlp "default" { + grpc { + endpoint = "localhost:4317" + } + + http { + endpoint = "localhost:4318" + } + + output { + metrics = [otelcol.processor.transform.default.input] + logs = [otelcol.processor.transform.default.input] + traces = [otelcol.processor.transform.default.input] + } +} + +otelcol.processor.transform "default" { + error_mode = "ignore" + + trace_statements { + context = "resource" + statements = ["keep_keys(attributes, [\"service.name\", \"service.namespace\", \"cloud.region\", \"process.command_line\"])", "replace_pattern(attributes[\"process.command_line\"], \"password\\\\=[^\\\\s]*(\\\\s?)\", \"password=***\")", "limit(attributes, 100, [])", "truncate_all(attributes, 4096)"] + } + + trace_statements { + context = "span" + statements = ["set(status.code, 1) where attributes[\"http.path\"] == \"/health\"", "set(name, attributes[\"http.route\"])", "replace_match(attributes[\"http.target\"], \"/user/*/list/*\", \"/user/{userId}/list/{listId}\")", "limit(attributes, 100, [])", "truncate_all(attributes, 4096)"] + } + + metric_statements { + context = "resource" + statements = ["keep_keys(attributes, [\"host.name\"])", "truncate_all(attributes, 4096)"] + } + + metric_statements { + context = "metric" + statements = ["set(description, \"Sum\") where type == \"Sum\"", "convert_sum_to_gauge() where name == \"system.processes.count\"", "convert_gauge_to_sum(\"cumulative\", false) where name == \"prometheus_metric\"", "aggregate_on_attributes(\"sum\") where name == \"system.memory.usage\""] + } + + metric_statements { + context = "datapoint" + statements = ["limit(attributes, 100, [\"host.name\"])", "truncate_all(attributes, 4096)"] + } + + log_statements { + context = "resource" + statements = ["keep_keys(attributes, [\"service.name\", \"service.namespace\", \"cloud.region\"])"] + } + + log_statements { + context = "log" + statements = ["set(severity_text, \"FAIL\") where body == \"request failed\"", "replace_all_matches(attributes, \"/user/*/list/*\", \"/user/{userId}/list/{listId}\")", "replace_all_patterns(attributes, \"value\", \"/account/\\\\d{4}\", \"/account/{accountId}\")", "set(body, attributes[\"http.route\"])"] + } + + output { + metrics = [otelcol.exporter.otlp.default.input] + logs = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} + +otelcol.exporter.otlp "default" { + client { + endpoint = "database:4317" + } +} diff --git a/syntax/encoding/alloyyaml/testdata/11_transform.yaml b/syntax/encoding/alloyyaml/testdata/11_transform.yaml new file mode 100644 index 0000000000..550350d5fd --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/11_transform.yaml @@ -0,0 +1,67 @@ +- otelcol.receiver.otlp/default: + - grpc: + - endpoint: localhost:4317 + - http: + - endpoint: localhost:4318 + - output: + - metrics: + - expr(otelcol.processor.transform.default.input) + - logs: + - expr(otelcol.processor.transform.default.input) + - traces: + - expr(otelcol.processor.transform.default.input) +- otelcol.processor.transform/default: + - error_mode: ignore + - trace_statements: + - context: resource + - statements: + - keep_keys(attributes, 
["service.name", "service.namespace", "cloud.region", "process.command_line"]) + - replace_pattern(attributes["process.command_line"], "password\\=[^\\s]*(\\s?)", "password=***") + - limit(attributes, 100, []) + - truncate_all(attributes, 4096) + - trace_statements: + - context: span + - statements: + - set(status.code, 1) where attributes["http.path"] == "/health" + - set(name, attributes["http.route"]) + - replace_match(attributes["http.target"], "/user/*/list/*", "/user/{userId}/list/{listId}") + - limit(attributes, 100, []) + - truncate_all(attributes, 4096) + - metric_statements: + - context: resource + - statements: + - keep_keys(attributes, ["host.name"]) + - truncate_all(attributes, 4096) + - metric_statements: + - context: metric + - statements: + - set(description, "Sum") where type == "Sum" + - convert_sum_to_gauge() where name == "system.processes.count" + - convert_gauge_to_sum("cumulative", false) where name == "prometheus_metric" + - aggregate_on_attributes("sum") where name == "system.memory.usage" + - metric_statements: + - context: datapoint + - statements: + - limit(attributes, 100, ["host.name"]) + - truncate_all(attributes, 4096) + - log_statements: + - context: resource + - statements: + - keep_keys(attributes, ["service.name", "service.namespace", "cloud.region"]) + - log_statements: + - context: log + - statements: + - set(severity_text, "FAIL") where body == "request failed" + - replace_all_matches(attributes, "/user/*/list/*", "/user/{userId}/list/{listId}") + - replace_all_patterns(attributes, "value", "/account/\\d{4}", "/account/{accountId}") + - set(body, attributes["http.route"]) + - output: + - metrics: + - expr(otelcol.exporter.otlp.default.input) + - logs: + - expr(otelcol.exporter.otlp.default.input) + - traces: + - expr(otelcol.exporter.otlp.default.input) +- otelcol.exporter.otlp/default: + - client: + - endpoint: database:4317 diff --git a/syntax/encoding/alloyyaml/testdata/12_tail_sampling.alloy b/syntax/encoding/alloyyaml/testdata/12_tail_sampling.alloy new file mode 100644 index 0000000000..48f98817d8 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/12_tail_sampling.alloy @@ -0,0 +1,191 @@ +otelcol.exporter.otlp "default" { + client { + endpoint = "database:4317" + } +} + +otelcol.processor.tail_sampling "default" { + decision_wait = "10s" + expected_new_traces_per_sec = 10 + num_traces = 100 + output { + traces = [otelcol.exporter.otlp.default.input] + } + policy { + name = "test-policy-1" + type = "always_sample" + } + policy { + latency { + threshold_ms = 5000 + } + name = "test-policy-2" + type = "latency" + } + policy { + name = "test-policy-3" + numeric_attribute { + key = "key1" + max_value = 100 + min_value = 50 + } + type = "numeric_attribute" + } + policy { + name = "test-policy-4" + probabilistic { + sampling_percentage = 10 + } + type = "probabilistic" + } + policy { + name = "test-policy-5" + status_code { + status_codes = ["ERROR", "UNSET"] + } + type = "status_code" + } + policy { + name = "test-policy-6" + string_attribute { + key = "key2" + values = ["value1", "value2"] + } + type = "string_attribute" + } + policy { + name = "test-policy-7" + string_attribute { + cache_max_size = 10 + enabled_regex_matching = true + key = "key2" + values = ["value1", "val*"] + } + type = "string_attribute" + } + policy { + name = "test-policy-8" + rate_limiting { + spans_per_second = 35 + } + type = "rate_limiting" + } + policy { + name = "test-policy-9" + string_attribute { + enabled_regex_matching = true + invert_match = true + key = 
"http.url" + values = ["\\/health", "\\/metrics"] + } + type = "string_attribute" + } + policy { + name = "test-policy-10" + span_count { + max_spans = 20 + min_spans = 2 + } + type = "span_count" + } + policy { + name = "test-policy-11" + trace_state { + key = "key3" + values = ["value1", "value2"] + } + type = "trace_state" + } + policy { + boolean_attribute { + invert_match = true + key = "key4" + value = true + } + name = "test-policy-12" + type = "boolean_attribute" + } + policy { + name = "test-policy-13" + ottl_condition { + error_mode = "ignore" + span = ["attributes[\"test_attr_key_1\"] == \"test_attr_val_1\"", "attributes[\"test_attr_key_2\"] != \"test_attr_val_1\""] + spanevent = ["name != \"test_span_event_name\"", "attributes[\"test_event_attr_key_2\"] != \"test_event_attr_val_1\""] + } + type = "ottl_condition" + } + policy { + and { + and_sub_policy { + name = "test-and-policy-1" + numeric_attribute { + key = "key1" + max_value = 100 + min_value = 50 + } + type = "numeric_attribute" + } + and_sub_policy { + name = "test-and-policy-2" + string_attribute { + key = "key2" + values = ["value1", "value2"] + } + type = "string_attribute" + } + } + name = "and-policy-1" + type = "and" + } + policy { + composite { + composite_sub_policy { + name = "test-composite-policy-1" + numeric_attribute { + key = "key1" + max_value = 100 + min_value = 50 + } + type = "numeric_attribute" + } + composite_sub_policy { + name = "test-composite-policy-2" + string_attribute { + key = "key2" + values = ["value1", "value2"] + } + type = "string_attribute" + } + composite_sub_policy { + name = "test-composite-policy-3" + type = "always_sample" + } + max_total_spans_per_second = 1000 + policy_order = ["test-composite-policy-1", "test-composite-policy-2", "test-composite-policy-3"] + rate_allocation { + percent = 50 + policy = "test-composite-policy-1" + } + rate_allocation { + percent = 25 + policy = "test-composite-policy-2" + } + } + name = "composite-policy-1" + type = "composite" + } +} + +otelcol.receiver.otlp "default" { + grpc { + endpoint = "localhost:4317" + } + http { + endpoint = "localhost:4318" + } + output { + logs = [otelcol.processor.tail_sampling.default.input] + metrics = [otelcol.processor.tail_sampling.default.input] + traces = [otelcol.processor.tail_sampling.default.input] + } +} diff --git a/syntax/encoding/alloyyaml/testdata/12_tail_sampling.yaml b/syntax/encoding/alloyyaml/testdata/12_tail_sampling.yaml new file mode 100644 index 0000000000..a6a7f23b0b --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/12_tail_sampling.yaml @@ -0,0 +1,166 @@ +- otelcol.exporter.otlp/default: + - client: + - endpoint: database:4317 +- otelcol.processor.tail_sampling/default: + - decision_wait: 10s + - expected_new_traces_per_sec: 10 + - num_traces: 100 + - output: + - traces: + - expr(otelcol.exporter.otlp.default.input) + - policy: + - name: test-policy-1 + - type: always_sample + - policy: + - latency: + - threshold_ms: 5000 + - name: test-policy-2 + - type: latency + - policy: + - name: test-policy-3 + - numeric_attribute: + - key: key1 + - max_value: 100 + - min_value: 50 + - type: numeric_attribute + - policy: + - name: test-policy-4 + - probabilistic: + - sampling_percentage: 10 + - type: probabilistic + - policy: + - name: test-policy-5 + - status_code: + - status_codes: + - ERROR + - UNSET + - type: status_code + - policy: + - name: test-policy-6 + - string_attribute: + - key: key2 + - values: + - value1 + - value2 + - type: string_attribute + - policy: + - name: test-policy-7 + - 
string_attribute: + - cache_max_size: 10 + - enabled_regex_matching: true + - key: key2 + - values: + - value1 + - val* + - type: string_attribute + - policy: + - name: test-policy-8 + - rate_limiting: + - spans_per_second: 35 + - type: rate_limiting + - policy: + - name: test-policy-9 + - string_attribute: + - enabled_regex_matching: true + - invert_match: true + - key: http.url + - values: + - \/health + - \/metrics + - type: string_attribute + - policy: + - name: test-policy-10 + - span_count: + - max_spans: 20 + - min_spans: 2 + - type: span_count + - policy: + - name: test-policy-11 + - trace_state: + - key: key3 + - values: + - value1 + - value2 + - type: trace_state + - policy: + - boolean_attribute: + - invert_match: true + - key: key4 + - value: true + - name: test-policy-12 + - type: boolean_attribute + - policy: + - name: test-policy-13 + - ottl_condition: + - error_mode: ignore + - span: + - attributes["test_attr_key_1"] == "test_attr_val_1" + - attributes["test_attr_key_2"] != "test_attr_val_1" + - spanevent: + - name != "test_span_event_name" + - attributes["test_event_attr_key_2"] != "test_event_attr_val_1" + - type: ottl_condition + - policy: + - and: + - and_sub_policy: + - name: test-and-policy-1 + - numeric_attribute: + - key: key1 + - max_value: 100 + - min_value: 50 + - type: numeric_attribute + - and_sub_policy: + - name: test-and-policy-2 + - string_attribute: + - key: key2 + - values: + - value1 + - value2 + - type: string_attribute + - name: and-policy-1 + - type: and + - policy: + - composite: + - composite_sub_policy: + - name: test-composite-policy-1 + - numeric_attribute: + - key: key1 + - max_value: 100 + - min_value: 50 + - type: numeric_attribute + - composite_sub_policy: + - name: test-composite-policy-2 + - string_attribute: + - key: key2 + - values: + - value1 + - value2 + - type: string_attribute + - composite_sub_policy: + - name: test-composite-policy-3 + - type: always_sample + - max_total_spans_per_second: 1000 + - policy_order: + - test-composite-policy-1 + - test-composite-policy-2 + - test-composite-policy-3 + - rate_allocation: + - percent: 50 + - policy: test-composite-policy-1 + - rate_allocation: + - percent: 25 + - policy: test-composite-policy-2 + - name: composite-policy-1 + - type: composite +- otelcol.receiver.otlp/default: + - grpc: + - endpoint: localhost:4317 + - http: + - endpoint: localhost:4318 + - output: + - logs: + - expr(otelcol.processor.tail_sampling.default.input) + - metrics: + - expr(otelcol.processor.tail_sampling.default.input) + - traces: + - expr(otelcol.processor.tail_sampling.default.input) diff --git a/syntax/encoding/alloyyaml/testdata/13_traces.alloy b/syntax/encoding/alloyyaml/testdata/13_traces.alloy new file mode 100644 index 0000000000..4847ac663a --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/13_traces.alloy @@ -0,0 +1,238 @@ +prometheus.remote_write "metrics_remote_write_name" { + endpoint { + name = "remote_write_name-04b53d" + url = "http://localhost:9009/api/prom/push" + + queue_config { } + + metadata_config { } + } +} + +otelcol.extension.jaeger_remote_sampling "default_0" { + grpc { + endpoint = "localhost:14250" + } + + http { + endpoint = "localhost:5778" + } + + source { + remote { + endpoint = "jaeger-collector:14250" + compression = "" + write_buffer_size = "0B" + } + reload_interval = "30s" + } +} + +otelcol.receiver.otlp "_0_default" { + grpc { + endpoint = "localhost:4317" + include_metadata = true + } + + http { + endpoint = "localhost:4318" + include_metadata = true + } + + output { + 
traces = [otelcol.processor.discovery._0_default.input] + } +} + +discovery.azure "_0_default_prometheus1" { + subscription_id = "subscription1" + + oauth { + client_id = "client1" + tenant_id = "tenant1" + client_secret = "secret1" + } + + managed_identity { + client_id = "client1" + } +} + +discovery.lightsail "_0_default_prometheus1" { + region = "us-east-1" + access_key = "YOUR_ACCESS_KEY" + secret_key = "YOUR_SECRET_KEY" + port = 8080 +} + +discovery.relabel "_0_default_prometheus1" { + targets = array.concat( + discovery.azure._0_default_prometheus1.targets, + discovery.lightsail._0_default_prometheus1.targets, + ) + + rule { + source_labels = ["__address1__"] + target_label = "__param_target1" + } + + rule { + source_labels = ["__address2__"] + target_label = "__param_target2" + } +} + +otelcol.processor.discovery "_0_default" { + targets = discovery.relabel._0_default_prometheus1.output + operation_type = "insert" + pod_associations = ["ip", "net.host.ip"] + + output { + traces = [otelcol.processor.attributes._0_default.input] + } +} + +otelcol.processor.attributes "_0_default" { + action { + key = "db.table" + action = "delete" + } + + output { + traces = [otelcol.exporter.loadbalancing._0_default.input, otelcol.exporter.debug._0_default.input, otelcol.connector.spanmetrics._0_default.input] + } +} + +prometheus.relabel "_0_default" { + forward_to = [prometheus.remote_write.metrics_remote_write_name.receiver] + + rule { + target_label = "fizz" + replacement = "buzz" + } + + rule { + target_label = "foo" + replacement = "bar" + } +} + +otelcol.exporter.prometheus "_0_default" { + gc_frequency = "0s" + forward_to = [prometheus.relabel._0_default.receiver] +} + +otelcol.exporter.loadbalancing "_0_default" { + protocol { + otlp { + retry { + max_elapsed_time = "1m0s" + } + + client { + compression = "none" + } + } + } + + resolver { + static { + hostnames = ["tempo1.example.com", "tempo2.example.com"] + } + } +} + +otelcol.exporter.debug "_0_default" { + verbosity = "Basic" +} + +otelcol.connector.spanmetrics "_0_default" { + histogram { + explicit { } + } + namespace = "metrics_prefix" + + output { + metrics = [otelcol.exporter.prometheus._0_default.input] + } +} + +otelcol.receiver.otlp "_1_lb" { + grpc { + endpoint = "0.0.0.0:4318" + } + + output { + traces = [otelcol.processor.tail_sampling._1_default.input] + } +} + +otelcol.processor.tail_sampling "_1_default" { + policy { + name = "test-policy-1" + type = "always_sample" + } + decision_wait = "5s" + + output { + traces = [otelcol.processor.batch._1_default.input] + } +} + +otelcol.processor.batch "_1_default" { + timeout = "5s" + send_batch_size = 2048 + send_batch_max_size = 4096 + + output { + traces = [otelcol.exporter.otlp._1_0.input, otelcol.exporter.debug._1_default.input, otelcol.connector.spanmetrics._1_default.input] + } +} + +prometheus.relabel "_1_default" { + forward_to = [prometheus.remote_write.metrics_remote_write_name.receiver] + + rule { + target_label = "fizz" + replacement = "buzz" + } + + rule { + target_label = "foo" + replacement = "bar" + } +} + +otelcol.exporter.prometheus "_1_default" { + gc_frequency = "0s" + forward_to = [prometheus.relabel._1_default.receiver] +} + +otelcol.exporter.otlp "_1_0" { + retry_on_failure { + max_elapsed_time = "1m0s" + } + + client { + endpoint = "tempo.example.com:14250" + + tls { + insecure = true + } + } +} + +otelcol.exporter.debug "_1_default" { + verbosity = "Basic" +} + +otelcol.connector.spanmetrics "_1_default" { + histogram { + explicit { } + } + namespace = 
"metrics_prefix" + + output { + metrics = [otelcol.exporter.prometheus._1_default.input] + } +} diff --git a/syntax/encoding/alloyyaml/testdata/13_traces.yaml b/syntax/encoding/alloyyaml/testdata/13_traces.yaml new file mode 100644 index 0000000000..6804167ee8 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/13_traces.yaml @@ -0,0 +1,158 @@ +- prometheus.remote_write/metrics_remote_write_name: + - endpoint: + - name: remote_write_name-04b53d + - url: http://localhost:9009/api/prom/push + - queue_config: [] + - metadata_config: [] +- otelcol.extension.jaeger_remote_sampling/default_0: + - grpc: + - endpoint: localhost:14250 + - http: + - endpoint: localhost:5778 + - source: + - remote: + - endpoint: jaeger-collector:14250 + - compression: "" + - write_buffer_size: 0B + - reload_interval: 30s +- otelcol.receiver.otlp/_0_default: + - grpc: + - endpoint: localhost:4317 + - include_metadata: true + - http: + - endpoint: localhost:4318 + - include_metadata: true + - output: + - traces: + - expr(otelcol.processor.discovery._0_default.input) +- discovery.azure/_0_default_prometheus1: + - subscription_id: subscription1 + - oauth: + - client_id: client1 + - tenant_id: tenant1 + - client_secret: secret1 + - managed_identity: + - client_id: client1 +- discovery.lightsail/_0_default_prometheus1: + - region: us-east-1 + - access_key: YOUR_ACCESS_KEY + - secret_key: YOUR_SECRET_KEY + - port: 8080 +- discovery.relabel/_0_default_prometheus1: + - targets: |- + expr(array.concat( + discovery.azure._0_default_prometheus1.targets, + discovery.lightsail._0_default_prometheus1.targets, + )) + - rule: + - source_labels: + - __address1__ + - target_label: __param_target1 + - rule: + - source_labels: + - __address2__ + - target_label: __param_target2 +- otelcol.processor.discovery/_0_default: + - targets: expr(discovery.relabel._0_default_prometheus1.output) + - operation_type: insert + - pod_associations: + - ip + - net.host.ip + - output: + - traces: + - expr(otelcol.processor.attributes._0_default.input) +- otelcol.processor.attributes/_0_default: + - action: + - key: db.table + - action: delete + - output: + - traces: + - expr(otelcol.exporter.loadbalancing._0_default.input) + - expr(otelcol.exporter.debug._0_default.input) + - expr(otelcol.connector.spanmetrics._0_default.input) +- prometheus.relabel/_0_default: + - forward_to: + - expr(prometheus.remote_write.metrics_remote_write_name.receiver) + - rule: + - target_label: fizz + - replacement: buzz + - rule: + - target_label: foo + - replacement: bar +- otelcol.exporter.prometheus/_0_default: + - gc_frequency: 0s + - forward_to: + - expr(prometheus.relabel._0_default.receiver) +- otelcol.exporter.loadbalancing/_0_default: + - protocol: + - otlp: + - retry: + - max_elapsed_time: 1m0s + - client: + - compression: none + - resolver: + - static: + - hostnames: + - tempo1.example.com + - tempo2.example.com +- otelcol.exporter.debug/_0_default: + - verbosity: Basic +- otelcol.connector.spanmetrics/_0_default: + - histogram: + - explicit: [] + - namespace: metrics_prefix + - output: + - metrics: + - expr(otelcol.exporter.prometheus._0_default.input) +- otelcol.receiver.otlp/_1_lb: + - grpc: + - endpoint: 0.0.0.0:4318 + - output: + - traces: + - expr(otelcol.processor.tail_sampling._1_default.input) +- otelcol.processor.tail_sampling/_1_default: + - policy: + - name: test-policy-1 + - type: always_sample + - decision_wait: 5s + - output: + - traces: + - expr(otelcol.processor.batch._1_default.input) +- otelcol.processor.batch/_1_default: + - timeout: 5s + - 
send_batch_size: 2048 + - send_batch_max_size: 4096 + - output: + - traces: + - expr(otelcol.exporter.otlp._1_0.input) + - expr(otelcol.exporter.debug._1_default.input) + - expr(otelcol.connector.spanmetrics._1_default.input) +- prometheus.relabel/_1_default: + - forward_to: + - expr(prometheus.remote_write.metrics_remote_write_name.receiver) + - rule: + - target_label: fizz + - replacement: buzz + - rule: + - target_label: foo + - replacement: bar +- otelcol.exporter.prometheus/_1_default: + - gc_frequency: 0s + - forward_to: + - expr(prometheus.relabel._1_default.receiver) +- otelcol.exporter.otlp/_1_0: + - retry_on_failure: + - max_elapsed_time: 1m0s + - client: + - endpoint: tempo.example.com:14250 + - tls: + - insecure: true +- otelcol.exporter.debug/_1_default: + - verbosity: Basic +- otelcol.connector.spanmetrics/_1_default: + - histogram: + - explicit: [] + - namespace: metrics_prefix + - output: + - metrics: + - expr(otelcol.exporter.prometheus._1_default.input) diff --git a/syntax/encoding/alloyyaml/testdata/14_prom_scrape.alloy b/syntax/encoding/alloyyaml/testdata/14_prom_scrape.alloy new file mode 100644 index 0000000000..b31a610b24 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/14_prom_scrape.alloy @@ -0,0 +1,125 @@ +discovery.azure "metrics_agent_promobee" { + subscription_id = "subscription1" + + oauth { + client_id = "client" + tenant_id = "tenant" + client_secret = "secret" + } + + managed_identity { + client_id = "client" + } + proxy_url = "proxy" +} + +discovery.azure "metrics_agent_promobee_2" { + subscription_id = "subscription2" + + oauth { + client_id = "client" + tenant_id = "tenant" + client_secret = "secret" + } + + managed_identity { + client_id = "client" + } + proxy_url = "proxy" +} + +discovery.relabel "metrics_agent_promobee" { + targets = array.concat( + discovery.azure.metrics_agent_promobee.targets, + discovery.azure.metrics_agent_promobee_2.targets, + [{ + __address__ = "localhost:9090", + }], + ) + + rule { + source_labels = ["__address__"] + target_label = "__param_id" + } + + rule { + source_labels = ["__param_id"] + target_label = "thermostat" + } + + rule { + target_label = "__address__" + replacement = "localhost:8099" + } +} + +prometheus.scrape "metrics_agent_prometheus" { + targets = array.concat( + [{ + __address__ = "localhost:9099", + }], + [{ + __address__ = "localhost:9101", + }], + ) + forward_to = [prometheus.remote_write.metrics_agent.receiver] + job_name = "prometheus" + scrape_timeout = "45s" +} + +prometheus.scrape "metrics_agent_promobee" { + targets = discovery.relabel.metrics_agent_promobee.output + forward_to = [prometheus.relabel.metrics_agent_promobee.receiver] + job_name = "promobee" + scrape_timeout = "45s" + metrics_path = "/thermostat" +} + +prometheus.relabel "metrics_agent_promobee" { + forward_to = [prometheus.remote_write.metrics_agent.receiver] + + rule { + source_labels = ["__metric_address1__"] + target_label = "__metric_param_target1" + } + + rule { + source_labels = ["__metric_address2__"] + target_label = "__metric_param_target2" + } +} + +prometheus.remote_write "metrics_agent" { + endpoint { + name = "agent-f9a4a6" + url = "https://prometheus-us-central1.grafana.net/api/prom/push" + + basic_auth { + username = "11111" + password = "my-secret-password-here" + } + + queue_config { + max_shards = 10 + batch_send_deadline = "3m0s" + max_backoff = "10s" + sample_age_limit = "50s" + } + + metadata_config { } + } +} + +logging { + level = "debug" + format = "json" +} + +http { + tls { + cert_file = 
"./testdata/example-cert.pem" + key_file = "./testdata/example-key.pem" + client_ca_file = "./testdata/example-cert.pem" + client_auth_type = "VerifyClientCertIfGiven" + } +} diff --git a/syntax/encoding/alloyyaml/testdata/14_prom_scrape.yaml b/syntax/encoding/alloyyaml/testdata/14_prom_scrape.yaml new file mode 100644 index 0000000000..5427e18855 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/14_prom_scrape.yaml @@ -0,0 +1,92 @@ +- discovery.azure/metrics_agent_promobee: + - subscription_id: subscription1 + - oauth: + - client_id: client + - tenant_id: tenant + - client_secret: secret + - managed_identity: + - client_id: client + - proxy_url: proxy +- discovery.azure/metrics_agent_promobee_2: + - subscription_id: subscription2 + - oauth: + - client_id: client + - tenant_id: tenant + - client_secret: secret + - managed_identity: + - client_id: client + - proxy_url: proxy +- discovery.relabel/metrics_agent_promobee: + - targets: |- + expr(array.concat( + discovery.azure.metrics_agent_promobee.targets, + discovery.azure.metrics_agent_promobee_2.targets, + [{ + __address__ = "localhost:9090", + }], + )) + - rule: + - source_labels: + - __address__ + - target_label: __param_id + - rule: + - source_labels: + - __param_id + - target_label: thermostat + - rule: + - target_label: __address__ + - replacement: localhost:8099 +- prometheus.scrape/metrics_agent_prometheus: + - targets: |- + expr(array.concat( + [{ + __address__ = "localhost:9099", + }], + [{ + __address__ = "localhost:9101", + }], + )) + - forward_to: + - expr(prometheus.remote_write.metrics_agent.receiver) + - job_name: prometheus + - scrape_timeout: 45s +- prometheus.scrape/metrics_agent_promobee: + - targets: expr(discovery.relabel.metrics_agent_promobee.output) + - forward_to: + - expr(prometheus.relabel.metrics_agent_promobee.receiver) + - job_name: promobee + - scrape_timeout: 45s + - metrics_path: /thermostat +- prometheus.relabel/metrics_agent_promobee: + - forward_to: + - expr(prometheus.remote_write.metrics_agent.receiver) + - rule: + - source_labels: + - __metric_address1__ + - target_label: __metric_param_target1 + - rule: + - source_labels: + - __metric_address2__ + - target_label: __metric_param_target2 +- prometheus.remote_write/metrics_agent: + - endpoint: + - name: agent-f9a4a6 + - url: https://prometheus-us-central1.grafana.net/api/prom/push + - basic_auth: + - username: "11111" + - password: my-secret-password-here + - queue_config: + - max_shards: 10 + - batch_send_deadline: 3m0s + - max_backoff: 10s + - sample_age_limit: 50s + - metadata_config: [] +- logging: + - level: debug + - format: json +- http: + - tls: + - cert_file: ./testdata/example-cert.pem + - key_file: ./testdata/example-key.pem + - client_ca_file: ./testdata/example-cert.pem + - client_auth_type: VerifyClientCertIfGiven diff --git a/syntax/encoding/alloyyaml/testdata/15_oauth2.alloy b/syntax/encoding/alloyyaml/testdata/15_oauth2.alloy new file mode 100644 index 0000000000..82c12ed7b9 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/15_oauth2.alloy @@ -0,0 +1,47 @@ +otelcol.auth.oauth2 "default" { + client_id = "someclientid" + client_secret = "someclientsecret" + token_url = "https://example.com/oauth2/default/v1/token" + endpoint_params = { + audience = ["someaudience"], + } + scopes = ["api.metrics"] + + tls { + ca_file = "/var/lib/mycert.pem" + cert_file = "certfile" + key_file = "keyfile" + insecure = true + } + timeout = "2s" +} + +otelcol.receiver.otlp "default" { + grpc { + endpoint = "localhost:4317" + } + + output { + metrics 
= [otelcol.exporter.otlp.default_withauth.input, otelcol.exporter.otlphttp.default_noauth.input] + logs = [otelcol.exporter.otlp.default_withauth.input, otelcol.exporter.otlphttp.default_noauth.input] + traces = [otelcol.exporter.otlp.default_withauth.input, otelcol.exporter.otlphttp.default_noauth.input] + } +} + +otelcol.exporter.otlp "default_withauth" { + client { + endpoint = "database:4317" + + tls { + ca_file = "/tmp/certs/ca.pem" + } + auth = otelcol.auth.oauth2.default.handler + } +} + +otelcol.exporter.otlphttp "default_noauth" { + client { + endpoint = "database:4318" + http2_ping_timeout = "0s" + } +} diff --git a/syntax/encoding/alloyyaml/testdata/15_oauth2.yaml b/syntax/encoding/alloyyaml/testdata/15_oauth2.yaml new file mode 100644 index 0000000000..d00dff81b8 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/15_oauth2.yaml @@ -0,0 +1,38 @@ +- otelcol.auth.oauth2/default: + - client_id: someclientid + - client_secret: someclientsecret + - token_url: https://example.com/oauth2/default/v1/token + - endpoint_params: + audience: + - someaudience + - scopes: + - api.metrics + - tls: + - ca_file: /var/lib/mycert.pem + - cert_file: certfile + - key_file: keyfile + - insecure: true + - timeout: 2s +- otelcol.receiver.otlp/default: + - grpc: + - endpoint: localhost:4317 + - output: + - metrics: + - expr(otelcol.exporter.otlp.default_withauth.input) + - expr(otelcol.exporter.otlphttp.default_noauth.input) + - logs: + - expr(otelcol.exporter.otlp.default_withauth.input) + - expr(otelcol.exporter.otlphttp.default_noauth.input) + - traces: + - expr(otelcol.exporter.otlp.default_withauth.input) + - expr(otelcol.exporter.otlphttp.default_noauth.input) +- otelcol.exporter.otlp/default_withauth: + - client: + - endpoint: database:4317 + - tls: + - ca_file: /tmp/certs/ca.pem + - auth: expr(otelcol.auth.oauth2.default.handler) +- otelcol.exporter.otlphttp/default_noauth: + - client: + - endpoint: database:4318 + - http2_ping_timeout: 0s diff --git a/syntax/encoding/alloyyaml/testdata/16_loadbalancing.alloy b/syntax/encoding/alloyyaml/testdata/16_loadbalancing.alloy new file mode 100644 index 0000000000..98f89b9187 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/16_loadbalancing.alloy @@ -0,0 +1,47 @@ +otelcol.storage.file "default_fs" { + directory = "/var/lib/otelcol/file_storage" + + compaction { + directory = "/var/lib/otelcol/file_storage" + rebound_needed_threshold_mib = 100 + rebound_trigger_threshold_mib = 10 + max_transaction_size = 65536 + check_interval = "5s" + } + create_directory = false +} + +otelcol.receiver.otlp "default" { + grpc { + endpoint = "localhost:4317" + } + + output { + metrics = [otelcol.exporter.loadbalancing.default.input] + logs = [otelcol.exporter.loadbalancing.default.input] + traces = [otelcol.exporter.loadbalancing.default.input] + } +} + +otelcol.exporter.loadbalancing "default" { + protocol { + otlp { + client { } + } + } + + resolver { + static { + hostnames = ["backend-1:4317", "backend-2:4317", "backend-3:4317"] + } + } + routing_key = "service" + + sending_queue { + enabled = false + num_consumers = 0 + queue_size = 0 + sizer = "" + storage = otelcol.storage.file.default_fs.handler + } +} diff --git a/syntax/encoding/alloyyaml/testdata/16_loadbalancing.yaml b/syntax/encoding/alloyyaml/testdata/16_loadbalancing.yaml new file mode 100644 index 0000000000..47f6295413 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/16_loadbalancing.yaml @@ -0,0 +1,36 @@ +- otelcol.storage.file/default_fs: + - directory: /var/lib/otelcol/file_storage + - 
compaction: + - directory: /var/lib/otelcol/file_storage + - rebound_needed_threshold_mib: 100 + - rebound_trigger_threshold_mib: 10 + - max_transaction_size: 65536 + - check_interval: 5s + - create_directory: false +- otelcol.receiver.otlp/default: + - grpc: + - endpoint: localhost:4317 + - output: + - metrics: + - expr(otelcol.exporter.loadbalancing.default.input) + - logs: + - expr(otelcol.exporter.loadbalancing.default.input) + - traces: + - expr(otelcol.exporter.loadbalancing.default.input) +- otelcol.exporter.loadbalancing/default: + - protocol: + - otlp: + - client: [] + - resolver: + - static: + - hostnames: + - backend-1:4317 + - backend-2:4317 + - backend-3:4317 + - routing_key: service + - sending_queue: + - enabled: false + - num_consumers: 0 + - queue_size: 0 + - sizer: "" + - storage: expr(otelcol.storage.file.default_fs.handler) diff --git a/syntax/encoding/alloyyaml/testdata/17_headerssetter.alloy b/syntax/encoding/alloyyaml/testdata/17_headerssetter.alloy new file mode 100644 index 0000000000..22c0dda909 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/17_headerssetter.alloy @@ -0,0 +1,52 @@ +otelcol.auth.headers "default" { + header { + key = "X-Scope-OrgID" + from_context = "tenant_id" + action = "insert" + } + + header { + key = "X-Scope-OrgID-Fake" + from_attribute = "tenant_id" + action = "insert" + } + + header { + key = "User-ID" + value = "user_id" + } + + header { + key = "User-ID" + value = "user_id" + action = "update" + } + + header { + key = "Some-Header" + action = "delete" + } +} + +otelcol.receiver.otlp "default" { + grpc { + endpoint = "localhost:4317" + } + + http { + endpoint = "localhost:4318" + } + + output { + metrics = [otelcol.exporter.otlp.default.input] + logs = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} + +otelcol.exporter.otlp "default" { + client { + endpoint = "database:4317" + auth = otelcol.auth.headers.default.handler + } +} diff --git a/syntax/encoding/alloyyaml/testdata/17_headerssetter.yaml b/syntax/encoding/alloyyaml/testdata/17_headerssetter.yaml new file mode 100644 index 0000000000..88d9005f42 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/17_headerssetter.yaml @@ -0,0 +1,35 @@ +- otelcol.auth.headers/default: + - header: + - key: X-Scope-OrgID + - from_context: tenant_id + - action: insert + - header: + - key: X-Scope-OrgID-Fake + - from_attribute: tenant_id + - action: insert + - header: + - key: User-ID + - value: user_id + - header: + - key: User-ID + - value: user_id + - action: update + - header: + - key: Some-Header + - action: delete +- otelcol.receiver.otlp/default: + - grpc: + - endpoint: localhost:4317 + - http: + - endpoint: localhost:4318 + - output: + - metrics: + - expr(otelcol.exporter.otlp.default.input) + - logs: + - expr(otelcol.exporter.otlp.default.input) + - traces: + - expr(otelcol.exporter.otlp.default.input) +- otelcol.exporter.otlp/default: + - client: + - endpoint: database:4317 + - auth: expr(otelcol.auth.headers.default.handler) diff --git a/syntax/encoding/alloyyaml/testdata/18_servicegraph.alloy b/syntax/encoding/alloyyaml/testdata/18_servicegraph.alloy new file mode 100644 index 0000000000..c7d9db0a0d --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/18_servicegraph.alloy @@ -0,0 +1,41 @@ +otelcol.receiver.otlp "default" { + grpc { + endpoint = "localhost:4317" + } + + http { + endpoint = "localhost:4318" + } + + output { + traces = [otelcol.connector.servicegraph.default.input] + } +} + +otelcol.exporter.otlp "default" { + 
sending_queue { + queue_size = 5000 + } + + client { + endpoint = "database:4317" + } +} + +otelcol.connector.servicegraph "default" { + latency_histogram_buckets = ["100ms", "250ms", "1s", "5s", "10s"] + dimensions = ["dimension-1", "dimension-2"] + + store { + max_items = 10 + ttl = "1s" + } + cache_loop = "2m0s" + store_expiration_loop = "5s" + metrics_flush_interval = "3m0s" + database_name_attributes = ["db_name3", "db_name4"] + + output { + metrics = [otelcol.exporter.otlp.default.input] + } +} diff --git a/syntax/encoding/alloyyaml/testdata/18_servicegraph.yaml b/syntax/encoding/alloyyaml/testdata/18_servicegraph.yaml new file mode 100644 index 0000000000..5a3c55510f --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/18_servicegraph.yaml @@ -0,0 +1,35 @@ +- otelcol.receiver.otlp/default: + - grpc: + - endpoint: localhost:4317 + - http: + - endpoint: localhost:4318 + - output: + - traces: + - expr(otelcol.connector.servicegraph.default.input) +- otelcol.exporter.otlp/default: + - sending_queue: + - queue_size: 5000 + - client: + - endpoint: database:4317 +- otelcol.connector.servicegraph/default: + - latency_histogram_buckets: + - 100ms + - 250ms + - 1s + - 5s + - 10s + - dimensions: + - dimension-1 + - dimension-2 + - store: + - max_items: 10 + - ttl: 1s + - cache_loop: 2m0s + - store_expiration_loop: 5s + - metrics_flush_interval: 3m0s + - database_name_attributes: + - db_name3 + - db_name4 + - output: + - metrics: + - expr(otelcol.exporter.otlp.default.input) diff --git a/syntax/encoding/alloyyaml/testdata/19_example.alloy b/syntax/encoding/alloyyaml/testdata/19_example.alloy new file mode 100644 index 0000000000..9eb477b790 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/19_example.alloy @@ -0,0 +1,37 @@ +// This config file is designed to send traces and metrics to the docker +// compose environment from example/docker-compose. + +logging { + level = "debug" + format = "logfmt" +} + +tracing { + // Sample all traces. This value should be lower for production configs! 
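+	// For example, sampling_fraction = 0.1 keeps roughly 10% of traces.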
+ sampling_fraction = 1 + + write_to = [otelcol.exporter.otlp.tempo.input] +} + +otelcol.exporter.otlp "tempo" { + client { + endpoint = "localhost:4317" + + tls { + insecure = true + } + } +} + +prometheus.exporter.unix "default" { /* use defaults */ } + +prometheus.scrape "default" { + targets = prometheus.exporter.unix.default.targets + forward_to = [prometheus.remote_write.default.receiver] +} + +prometheus.remote_write "default" { + endpoint { + url = "http://localhost:9009/api/prom/push" + } +} diff --git a/syntax/encoding/alloyyaml/testdata/19_example.yaml b/syntax/encoding/alloyyaml/testdata/19_example.yaml new file mode 100644 index 0000000000..4981759777 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/19_example.yaml @@ -0,0 +1,20 @@ +- logging: + - level: debug + - format: logfmt +- tracing: + - sampling_fraction: 1 + - write_to: + - expr(otelcol.exporter.otlp.tempo.input) +- otelcol.exporter.otlp/tempo: + - client: + - endpoint: localhost:4317 + - tls: + - insecure: true +- prometheus.exporter.unix/default: [] +- prometheus.scrape/default: + - targets: expr(prometheus.exporter.unix.default.targets) + - forward_to: + - expr(prometheus.remote_write.default.receiver) +- prometheus.remote_write/default: + - endpoint: + - url: http://localhost:9009/api/prom/push diff --git a/syntax/encoding/alloyyaml/testdata/20_k8sattributes.alloy b/syntax/encoding/alloyyaml/testdata/20_k8sattributes.alloy new file mode 100644 index 0000000000..eaca28cd9c --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/20_k8sattributes.alloy @@ -0,0 +1,35 @@ +otelcol.receiver.otlp "default" { + grpc { + endpoint = "localhost:4317" + } + + http { + endpoint = "localhost:4318" + } + + output { + metrics = [otelcol.processor.k8sattributes.default.input] + logs = [otelcol.processor.k8sattributes.default.input] + traces = [otelcol.processor.k8sattributes.default.input] + } +} + +otelcol.processor.k8sattributes "default" { + auth_type = "serviceAccount" + + extract { + metadata = ["container.image.name", "container.image.tag", "k8s.deployment.name", "k8s.namespace.name", "k8s.node.name", "k8s.pod.name", "k8s.pod.start_time", "k8s.pod.uid"] + } + + output { + metrics = [otelcol.exporter.otlp.default.input] + logs = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} + +otelcol.exporter.otlp "default" { + client { + endpoint = "database:4317" + } +} diff --git a/syntax/encoding/alloyyaml/testdata/20_k8sattributes.yaml b/syntax/encoding/alloyyaml/testdata/20_k8sattributes.yaml new file mode 100644 index 0000000000..a66d2cdb58 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/20_k8sattributes.yaml @@ -0,0 +1,34 @@ +- otelcol.receiver.otlp/default: + - grpc: + - endpoint: localhost:4317 + - http: + - endpoint: localhost:4318 + - output: + - metrics: + - expr(otelcol.processor.k8sattributes.default.input) + - logs: + - expr(otelcol.processor.k8sattributes.default.input) + - traces: + - expr(otelcol.processor.k8sattributes.default.input) +- otelcol.processor.k8sattributes/default: + - auth_type: serviceAccount + - extract: + - metadata: + - container.image.name + - container.image.tag + - k8s.deployment.name + - k8s.namespace.name + - k8s.node.name + - k8s.pod.name + - k8s.pod.start_time + - k8s.pod.uid + - output: + - metrics: + - expr(otelcol.exporter.otlp.default.input) + - logs: + - expr(otelcol.exporter.otlp.default.input) + - traces: + - expr(otelcol.exporter.otlp.default.input) +- otelcol.exporter.otlp/default: + - client: + - endpoint: database:4317 diff --git 
a/syntax/encoding/alloyyaml/testdata/21_memorylimiter.alloy b/syntax/encoding/alloyyaml/testdata/21_memorylimiter.alloy new file mode 100644 index 0000000000..edd73a7fe1 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/21_memorylimiter.alloy @@ -0,0 +1,32 @@ +otelcol.receiver.otlp "default" { + grpc { + endpoint = "localhost:4317" + } + + http { + endpoint = "localhost:4318" + } + + output { + metrics = [otelcol.processor.memory_limiter.default.input] + logs = [otelcol.processor.memory_limiter.default.input] + traces = [otelcol.processor.memory_limiter.default.input] + } +} + +otelcol.processor.memory_limiter "default" { + check_interval = "1s" + limit_percentage = 90 + + output { + metrics = [otelcol.exporter.otlp.default.input] + logs = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} + +otelcol.exporter.otlp "default" { + client { + endpoint = "database:4317" + } +} diff --git a/syntax/encoding/alloyyaml/testdata/21_memorylimiter.yaml b/syntax/encoding/alloyyaml/testdata/21_memorylimiter.yaml new file mode 100644 index 0000000000..0d65715fb2 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/21_memorylimiter.yaml @@ -0,0 +1,25 @@ +- otelcol.receiver.otlp/default: + - grpc: + - endpoint: localhost:4317 + - http: + - endpoint: localhost:4318 + - output: + - metrics: + - expr(otelcol.processor.memory_limiter.default.input) + - logs: + - expr(otelcol.processor.memory_limiter.default.input) + - traces: + - expr(otelcol.processor.memory_limiter.default.input) +- otelcol.processor.memory_limiter/default: + - check_interval: 1s + - limit_percentage: 90 + - output: + - metrics: + - expr(otelcol.exporter.otlp.default.input) + - logs: + - expr(otelcol.exporter.otlp.default.input) + - traces: + - expr(otelcol.exporter.otlp.default.input) +- otelcol.exporter.otlp/default: + - client: + - endpoint: database:4317 diff --git a/syntax/encoding/alloyyaml/testdata/22_groupbyattrs.alloy b/syntax/encoding/alloyyaml/testdata/22_groupbyattrs.alloy new file mode 100644 index 0000000000..48ebc9b079 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/22_groupbyattrs.alloy @@ -0,0 +1,31 @@ +otelcol.receiver.otlp "default" { + grpc { + endpoint = "localhost:4317" + } + + http { + endpoint = "localhost:4318" + } + + output { + metrics = [otelcol.processor.groupbyattrs.default.input] + logs = [otelcol.processor.groupbyattrs.default.input] + traces = [otelcol.processor.groupbyattrs.default.input] + } +} + +otelcol.processor.groupbyattrs "default" { + keys = ["k8s.namespace.name", "k8s.deployment.name"] + + output { + metrics = [otelcol.exporter.otlp.default.input] + logs = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} + +otelcol.exporter.otlp "default" { + client { + endpoint = "database:4317" + } +} diff --git a/syntax/encoding/alloyyaml/testdata/22_groupbyattrs.yaml b/syntax/encoding/alloyyaml/testdata/22_groupbyattrs.yaml new file mode 100644 index 0000000000..381ad73d8c --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/22_groupbyattrs.yaml @@ -0,0 +1,26 @@ +- otelcol.receiver.otlp/default: + - grpc: + - endpoint: localhost:4317 + - http: + - endpoint: localhost:4318 + - output: + - metrics: + - expr(otelcol.processor.groupbyattrs.default.input) + - logs: + - expr(otelcol.processor.groupbyattrs.default.input) + - traces: + - expr(otelcol.processor.groupbyattrs.default.input) +- otelcol.processor.groupbyattrs/default: + - keys: + - k8s.namespace.name + - k8s.deployment.name + - output: + - metrics: + - 
expr(otelcol.exporter.otlp.default.input) + - logs: + - expr(otelcol.exporter.otlp.default.input) + - traces: + - expr(otelcol.exporter.otlp.default.input) +- otelcol.exporter.otlp/default: + - client: + - endpoint: database:4317 diff --git a/syntax/encoding/alloyyaml/testdata/23_interval.alloy b/syntax/encoding/alloyyaml/testdata/23_interval.alloy new file mode 100644 index 0000000000..32d21305eb --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/23_interval.alloy @@ -0,0 +1,30 @@ +otelcol.receiver.otlp "default" { + grpc { + endpoint = "localhost:4317" + } + + http { + endpoint = "localhost:4318" + } + + output { + metrics = [otelcol.processor.interval.default.input] + } +} + +otelcol.processor.interval "default" { + passthrough { + gauge = true + summary = true + } + + output { + metrics = [otelcol.exporter.otlp.default.input] + } +} + +otelcol.exporter.otlp "default" { + client { + endpoint = "database:4317" + } +} diff --git a/syntax/encoding/alloyyaml/testdata/23_interval.yaml b/syntax/encoding/alloyyaml/testdata/23_interval.yaml new file mode 100644 index 0000000000..843feffa13 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/23_interval.yaml @@ -0,0 +1,18 @@ +- otelcol.receiver.otlp/default: + - grpc: + - endpoint: localhost:4317 + - http: + - endpoint: localhost:4318 + - output: + - metrics: + - expr(otelcol.processor.interval.default.input) +- otelcol.processor.interval/default: + - passthrough: + - gauge: true + - summary: true + - output: + - metrics: + - expr(otelcol.exporter.otlp.default.input) +- otelcol.exporter.otlp/default: + - client: + - endpoint: database:4317 diff --git a/syntax/encoding/alloyyaml/testdata/24_promtail_scrape.alloy b/syntax/encoding/alloyyaml/testdata/24_promtail_scrape.alloy new file mode 100644 index 0000000000..22ee8576ed --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/24_promtail_scrape.alloy @@ -0,0 +1,28 @@ +loki.relabel "logs_log_config_fun" { + forward_to = [loki.write.logs_log_config.receiver] + + rule { + source_labels = ["__trail__"] + target_label = "__path__" + } +} + +loki.source.cloudflare "logs_log_config_fun" { + api_token = "dont_look_at_me_please" + zone_id = "area51" + labels = { + identity = "unidentified", + object_type = "flying", + } + workers = 42 + pull_range = "1h0m0s" + fields_type = "all" + forward_to = [loki.relabel.logs_log_config_fun.receiver] +} + +loki.write "logs_log_config" { + endpoint { + url = "http://localhost/loki/api/v1/push" + } + external_labels = {} +} diff --git a/syntax/encoding/alloyyaml/testdata/24_promtail_scrape.yaml b/syntax/encoding/alloyyaml/testdata/24_promtail_scrape.yaml new file mode 100644 index 0000000000..d47c7f0c49 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/24_promtail_scrape.yaml @@ -0,0 +1,22 @@ +- loki.relabel/logs_log_config_fun: + - forward_to: + - expr(loki.write.logs_log_config.receiver) + - rule: + - source_labels: + - __trail__ + - target_label: __path__ +- loki.source.cloudflare/logs_log_config_fun: + - api_token: dont_look_at_me_please + - zone_id: area51 + - labels: + identity: unidentified + object_type: flying + - workers: 42 + - pull_range: 1h0m0s + - fields_type: all + - forward_to: + - expr(loki.relabel.logs_log_config_fun.receiver) +- loki.write/logs_log_config: + - endpoint: + - url: http://localhost/loki/api/v1/push + - external_labels: {} diff --git a/syntax/encoding/alloyyaml/testdata/25_spanmetrics.alloy b/syntax/encoding/alloyyaml/testdata/25_spanmetrics.alloy new file mode 100644 index 0000000000..274878737f --- /dev/null +++ 
b/syntax/encoding/alloyyaml/testdata/25_spanmetrics.alloy @@ -0,0 +1,30 @@ +otelcol.receiver.otlp "default" { + grpc { + endpoint = "localhost:4317" + } + + http { + endpoint = "localhost:4318" + } + + output { + logs = [otelcol.exporter.otlp.default.input] + traces = [otelcol.connector.spanmetrics.default.input] + } +} + +otelcol.exporter.otlp "default" { + client { + endpoint = "database:4317" + } +} + +otelcol.connector.spanmetrics "default" { + histogram { + explicit { } + } + + output { + metrics = [otelcol.exporter.otlp.default.input] + } +} diff --git a/syntax/encoding/alloyyaml/testdata/25_spanmetrics.yaml b/syntax/encoding/alloyyaml/testdata/25_spanmetrics.yaml new file mode 100644 index 0000000000..130ea8fa43 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/25_spanmetrics.yaml @@ -0,0 +1,19 @@ +- otelcol.receiver.otlp/default: + - grpc: + - endpoint: localhost:4317 + - http: + - endpoint: localhost:4318 + - output: + - logs: + - expr(otelcol.exporter.otlp.default.input) + - traces: + - expr(otelcol.connector.spanmetrics.default.input) +- otelcol.exporter.otlp/default: + - client: + - endpoint: database:4317 +- otelcol.connector.spanmetrics/default: + - histogram: + - explicit: [] + - output: + - metrics: + - expr(otelcol.exporter.otlp.default.input) diff --git a/syntax/encoding/alloyyaml/testdata/26_alloy_logs.alloy b/syntax/encoding/alloyyaml/testdata/26_alloy_logs.alloy new file mode 100644 index 0000000000..37f40cb9c0 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/26_alloy_logs.alloy @@ -0,0 +1,846 @@ +livedebugging { + enabled = true +} + +// Set up discovery for all pods on the node Alloy is running on. +discovery.kubernetes "pods" { + role = "pod" + // Limit discovery to pods on the same node. + selectors { + role = "pod" + field = "spec.nodeName=" + coalesce(sys.env("HOSTNAME"), constants.hostname) + } +} + +// Pods using "name" label. +discovery.relabel "pods_name" { + targets = discovery.kubernetes.pods.targets + + rule { + source_labels = ["__meta_kubernetes_pod_label_name"] + target_label = "__service__" + } +} + +// Pods using "app" label. +// Drop pods using "name" label. +discovery.relabel "pods_app" { + targets = discovery.kubernetes.pods.targets + + rule { + source_labels = ["__meta_kubernetes_pod_label_name"] + regex = ".+" + action = "drop" + } + + rule { + source_labels = ["__meta_kubernetes_pod_label_app"] + target_label = "__service__" + } +} + +// Pods using a direct controller e.g. StatefulSet. +// Drop pods using either "name" or "app" label, or if they are using an indirect controller. +discovery.relabel "pods_direct_controllers" { + targets = discovery.kubernetes.pods.targets + + rule { + source_labels = ["__meta_kubernetes_pod_label_name", "__meta_kubernetes_pod_label_app"] + separator = "" + regex = ".+" + action = "drop" + } + + rule { + source_labels = ["__meta_kubernetes_pod_controller_name"] + regex = "[0-9a-z-.]+-[0-9a-f]{8,10}" + action = "drop" + } + + rule { + source_labels = ["__meta_kubernetes_pod_controller_name"] + target_label = "__service__" + } +} + +// Pods using an indirect controller e.g. Deployments (Deployments -> ReplicaSet -> Pods). +// Drop pods using either "name" or "app" label, or if they are using a direct controller. 
+discovery.relabel "pods_indirect_controllers" { + targets = discovery.kubernetes.pods.targets + + rule { + source_labels = ["__meta_kubernetes_pod_label_name", "__meta_kubernetes_pod_label_app"] + separator = "" + regex = ".+" + action = "drop" + } + + rule { + source_labels = ["__meta_kubernetes_pod_controller_name"] + regex = "[0-9a-z-.]+-[0-9a-f]{8,10}" + action = "keep" + } + + rule { + source_labels = ["__meta_kubernetes_pod_controller_name"] + regex = "([0-9a-z-.]+)-[0-9a-f]{8,10}" + target_label = "__service__" + } +} + +// Any control plane static pods (e.g. kube-apiserver, etcd, kube-controller-manager & kube-scheduler) +discovery.relabel "pods_static" { + targets = discovery.kubernetes.pods.targets + + rule { + source_labels = ["__meta_kubernetes_pod_annotation_kubernetes_io_config_mirror"] + regex = "" + action = "drop" + } + + // FIXME(kalleep): These are using "__meta_kubernetes_pod_annotation_kubernetes_io_config_mirror" instead of + // "__meta_kubernetes_pod_uid" when creating the path, not sure if it's ok to overwrite "__meta_kubernetes_pod_uid" here. + rule { + source_labels = ["__meta_kubernetes_pod_annotation_kubernetes_io_config_mirror"] + target_label = "__meta_kubernetes_pod_uid" + } + + rule { + source_labels = ["__meta_kubernetes_pod_label_component"] + target_label = "__service__" + } +} + +discovery.relabel "default" { + targets = array.concat( + discovery.relabel.pods_name.output, + discovery.relabel.pods_app.output, + discovery.relabel.pods_static.output, + discovery.relabel.pods_direct_controllers.output, + discovery.relabel.pods_indirect_controllers.output, + ) + + rule { + source_labels = ["__service__"] + regex = "" + action = "drop" + } + + rule { + source_labels = ["__meta_kubernetes_pod_node_name"] + target_label = "__host__" + } + + rule { + regex = "__meta_kubernetes_pod_label_(.+)" + action = "labelmap" + } + + rule { + source_labels = ["__meta_kubernetes_namespace", "__service__"] + separator = "/" + target_label = "job" + } + + rule { + source_labels = ["__meta_kubernetes_namespace"] + target_label = "namespace" + } + + rule { + source_labels = ["__meta_kubernetes_pod_name"] + target_label = "pod" + } + + rule { + source_labels = ["__meta_kubernetes_pod_container_name"] + target_label = "container" + } + + rule { + source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"] + separator = "/" + target_label = "__path__" + replacement = "/var/log/pods/*$1/*.log" + } +} + +loki.source.file "logs" { + targets = discovery.relabel.default.output + forward_to = [loki.process.logs.receiver] + + file_match { + enabled = true + } +} + +loki.process "logs" { + forward_to = [loki.write.logs.receiver] + + stage.cri { + max_partial_line_size = 262142 + max_partial_line_size_truncate = true + } + + stage.match { + selector = "{job=~\"loki-.*/ingester.*\"} |= \"metrics.go\"" + action = "drop" + } + + stage.drop { + longer_than = "262142B" + } + + stage.regex { + expression = "(level|lvl|severity)=(?P<debug>debug)" + } + + stage.drop { + drop_counter_reason = "noisy_logs_from_querier" + expression = "caching_index_client.go:308.*caching index entries" + } + + stage.template { + source = "debug" + template = "true" + } + + stage.labels { + values = { + debug = "", + } + } + + stage.metrics { + metric.counter { + prefix = "promtail_custom_" + name = "byte_count_total" + description = "A running counter of all bytes per stream with their corresponding labels" + action = "add" + match_all = true + count_entry_bytes = true + } + + metric.counter { + 
prefix = "promtail_custom_" + name = "line_count_total" + description = "A running counter of all lines with their corresponding labels" + action = "inc" + match_all = true + } + } + + stage.regex { + expression = "(?Ppanic:)" + } + + stage.regex { + expression = "(?i)(?Ppanic:|core_dumped|failure|error|attack| bad |illegal |denied|refused|unauthorized|fatal|failed|Segmentation Fault|Corrupted)" + } + + stage.metrics { + metric.counter { + prefix = "promtail_custom_" + name = "bad_words_total" + description = "total count of bad words found in log lines" + action = "inc" + source = "bad_words" + } + + metric.counter { + prefix = "promtail_custom_" + name = "panic_total" + description = "total count of panic: found in log lines" + action = "inc" + source = "panic" + } + } + + stage.match { + selector = "{container=\"ingress-nginx\"} |= \"hosted-grafana-\"" + + stage.template { + source = "service" + template = "hosted-grafana" + } + + stage.labels { + values = { + service = "", + } + } + } + + stage.match { + selector = "{name=\"eventrouter\"}" + + stage.json { + expressions = { + component = "event.source.component", + host = "event.source.host", + message = "event.message", + namespace = "event.metadata.namespace", + object_kind = "event.involvedObject.kind", + object_name = "event.involvedObject.name", + reason = "event.reason", + type = "event.type", + } + } + + stage.labels { + values = { + namespace = "", + } + } + + stage.template { + source = "newoutput" + template = "test" + } + + stage.output { + source = "newoutput" + } + } + + stage.match { + selector = "{name=\"kube-diff-logger\"}" + + stage.json { + expressions = { + timestamp = "timestamp", + } + } + + stage.timestamp { + source = "timestamp" + format = "RFC3339" + } + } + + stage.match { + selector = "{container=\"loki-canary\",stream=\"stdout\"}" + + stage.regex { + expression = "(?P\\d+) p+" + } + + stage.timestamp { + source = "ts" + format = "UnixNs" + action_on_failure = "skip" + } + } + + stage.label_drop { + values = ["filename"] + } + + stage.match { + selector = "{name=\"amixr-engine\"}" + + stage.multiline { + firstline = "^\\d{4}-\\d{2}-\\d{2} \\d{1,2}:\\d{2}:\\d{2}" + } + } + + stage.match { + selector = "{name=~\"amixr-engine-celery-.*\"}" + + stage.multiline { + firstline = "^\\d{4}-\\d{2}-\\d{2} \\d{1,2}:\\d{2}:\\d{2}" + } + } + + stage.match { + selector = "{container=\"cortex-gw\"} |= \"agent id\"" + + stage.static_labels { + values = { + agent_log = "id", + } + } + } + + stage.match { + selector = "{container=\"cortex-gw\"} |= \"sample remote write\"" + + stage.static_labels { + values = { + agent_log = "write", + } + } + } + + stage.match { + selector = "{namespace=~\".*(alertmanager|amixr|cortex|fire|ge-metrics|grafana|loki|mimir|tempo).*\"} |~ \"(insight_logs|insight|insights)=true\"" + + stage.static_labels { + values = { + insight = "true", + } + } + } + + stage.match { + selector = "{namespace=\"asserts\", container=~\"api-server|assertion-detector|model-builder\"}" + + stage.multiline { + firstline = "^[A-Z]{4,5}[ ]{1,2}\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}" + } + + stage.regex { + expression = "^(?P\\S+)\\s+\\S+\\s+\\S+\\s+(?P\\S+)\\s+(?P\\S+)(\\s+)?(?:.*tenantId=(?P\\d+))?" 
+ } + + stage.labels { + values = { + level = "log_level", + loggerName = "logger_name", + tenantId = "tenant_id", + threadName = "thread_name", + } + } + } + + stage.match { + selector = "{namespace=~\".*(grafana|loki|cortex|mimir|tempo|pyroscope|phlare|fire|profiles|ge-logs|ge-metrics|ge-traces|promtail|machine-learning|agent|amixir|asserts|chatops|faro|incident).*\"}" + + stage.regex { + expression = ".*(?P<team>grafana|loki|cortex|mimir|tempo|pyroscope|phlare|fire|profiles|ge-logs|ge-metrics|ge-traces|promtail|machine-learning|agent|amixir|asserts|chatops|faro|incident).*" + source = "namespace" + } + + stage.template { + source = "team" + template = "loki" + } + + stage.template { + source = "service_name" + template = "loki/container" + } + + stage.labels { + values = { + service_name = "", + } + } + } + + stage.match { + selector = "{namespace=\"machine-learning\",container=~\"modelapi|worker\"}" + + stage.multiline { + firstline = "^({\"text|\\[\\d{4})" + } + } + + stage.match { + selector = "{name=~\"hosted-grafana-api|grafana\"}" + + stage.logfmt { + mapping = { + instance_audit = "", + } + } + + stage.labels { + values = { + instance_audit = "", + } + } + } + + stage.match { + selector = "{namespace=\"securityops\",job=\"securityops/cloudtrail-exporter-workloads-prod\"}" + + stage.json { + expressions = { + eventCategory = "event.eventCategory", + eventName = "event.eventName", + eventSource = "event.eventSource", + timestamp = "time", + } + } + + stage.timestamp { + source = "timestamp" + format = "RFC3339" + } + + stage.labels { + values = { + eventCategory = "", + eventName = "", + eventSource = "", + } + } + + stage.structured_metadata { + values = { + container = "", + pod = "", + pod_template_hash = "", + stream = "", + } + } + } + + stage.match { + selector = "{namespace=\"securityops\",job=\"securityops/cloudtrail-exporter-workloads-ops\"}" + + stage.json { + expressions = { + eventCategory = "event.eventCategory", + eventName = "event.eventName", + eventSource = "event.eventSource", + timestamp = "time", + } + } + + stage.timestamp { + source = "timestamp" + format = "RFC3339" + } + + stage.labels { + values = { + eventCategory = "", + eventName = "", + eventSource = "", + } + } + + stage.structured_metadata { + values = { + container = "", + pod = "", + pod_template_hash = "", + stream = "", + } + } + } + + stage.match { + selector = "{namespace=\"securityops\",job=\"securityops/cloudtrail-exporter-workloads-dev\"}" + + stage.json { + expressions = { + eventCategory = "event.eventCategory", + eventName = "event.eventName", + eventSource = "event.eventSource", + timestamp = "time", + } + } + + stage.timestamp { + source = "timestamp" + format = "RFC3339" + } + + stage.labels { + values = { + eventCategory = "", + eventName = "", + eventSource = "", + } + } + + stage.structured_metadata { + values = { + container = "", + pod = "", + pod_template_hash = "", + stream = "", + } + } + } + + stage.match { + selector = "{namespace=\"securityops\",job=\"securityops/gh-audit-logs\"}" + + stage.json { + expressions = { + action = "event.action", + actorIsBot = "event.actor_is_bot", + eventType = "event.event", + operationType = "event.operation_type", + repo = "event.repo", + repoId = "event.repo_id", + timestamp = "time", + } + } + + stage.timestamp { + source = "timestamp" + format = "RFC3339" + } + + stage.labels { + values = { + action = "", + actorIsBot = "", + eventType = "", + operationType = "", + repo = "", + repoId = "", + } + } + + stage.structured_metadata { + values = { + 
container = "", + pod = "", + pod_template_hash = "", + stream = "", + } + } + } + + stage.match { + selector = "{job=\"notification-historian/notification-historian\"}" + + stage.json { + expressions = { + alerts = "alerts", + commonAnnotations = "commonAnnotations", + commonLabels = "commonLabels", + externalURL = "externalURL", + groupKey = "groupKey", + groupLabels = "groupLabels", + level = "level", + message = "message", + orgID = "orgID", + receiver = "receiver", + state = "state", + status = "status", + timestamp = "time", + title = "title", + truncatedAlerts = "truncatedAlerts", + version = "version", + } + } + + stage.timestamp { + source = "timestamp" + format = "RFC3339" + } + + stage.labels { + values = { + alertname = "", + level = "", + orgID = "", + receiver = "", + state = "", + status = "", + } + } + + stage.structured_metadata { + values = { + alerts = "", + commonAnnotations = "", + commonLabels = "", + container = "", + externalURL = "", + groupKey = "", + groupLabels = "", + message = "", + pod = "", + pod_template_hash = "", + stream = "", + title = "", + truncatedAlerts = "", + version = "", + } + } + } + + stage.match { + selector = "{job=\"default/tetragon\"}" + + stage.json { + expressions = { + processExecArguments = "process_exec.process.arguments", + processExecBinary = "process_exec.process.binary", + processExecPodNamespace = "process_exec.process.pod.namespace", + } + } + + stage.labels { + values = { + processExecPodNamespace = "", + } + } + + stage.structured_metadata { + values = { + processExecArguments = "", + processExecBinary = "", + } + } + + stage.label_drop { + values = ["pod", "container", "stream"] + } + } + + stage.match { + selector = "{name=\"okta-logs\"}" + + stage.json { + expressions = { + alternateId = "event.actor.alternateId", + eventType = "event.eventType", + ipAddress = "event.client.ipAddress", + level = "event.severity", + timestamp = "time", + } + } + + stage.timestamp { + source = "timestamp" + format = "RFC3339" + } + + stage.labels { + values = { + eventType = "", + level = "", + } + } + + stage.structured_metadata { + values = { + alternateId = "", + container = "", + ipAddress = "", + pod = "", + pod_template_hash = "", + stream = "", + } + } + + stage.label_drop { + values = ["pod", "pod_template_hash", "container", "stream"] + } + } + + stage.match { + selector = "{namespace=\"hosted-grafana\", app=\"grafana\"}" + + stage.structured_metadata { + values = { + pod = "", + pod_template_hash = "", + resource_version = "", + } + } + + stage.label_drop { + values = ["pod", "resource_version", "pod_template_hash"] + } + } + + stage.match { + selector = "{namespace=~\"loki.*\"}" + + stage.structured_metadata { + values = { + controller_revision_hash = "", + pod = "", + pod_template_hash = "", + } + } + + stage.label_drop { + values = ["pod", "pod_template_hash", "controller_revision_hash"] + } + } + + stage.match { + selector = "{namespace=~\"(loki-live|loki-dev-.*|loki-prod.*|loki-ops.*)\", container!~\"memcached|static-exporter|archiver-operator|cortex-gw|eventrouter|loki-canary|rollout-operator\"}" + + stage.logfmt { + mapping = { + orgID = "", + org_id = "", + traceID = "", + user_id = "", + } + } + + stage.structured_metadata { + values = { + orgID = "", + org_id = "", + traceID = "", + user_id = "", + } + } + } + + stage.match { + selector = "{job=~\"cicd-o11y/grafana-.*\"} |= \"service=bench\"" + + stage.static_labels { + values = { + bench = "true", + } + } + } + + stage.match { + selector = 
"{namespace=\"hosted-grafana-cd\"} |= \"service=bench\"" + + stage.static_labels { + values = { + bench = "true", + } + } + } +} + +discovery.relabel "systemd_journal" { + targets = [] + + rule { + source_labels = ["__journal__hostname"] + target_label = "nodename" + } + + rule { + source_labels = ["__journal_syslog_identifier"] + target_label = "syslog_identifier" + } + + rule { + source_labels = ["__journal_priority_keyword"] + target_label = "priority" + } +} + +loki.source.journal "logs_default_systemd_journal" { + path = "/var/log/journal" + relabel_rules = discovery.relabel.systemd_journal.rules + forward_to = [loki.write.logs.receiver] + labels = { + job = "default/systemd-journal", + } +} + +loki.write "logs" { + endpoint { + url = "https://logs-us-central1.grafana.net/loki/api/v1/push" + basic_auth { + username = sys.env("TENANT_ID") + password = sys.env("API_KEY") + } + } + + external_labels = { + cluster = sys.env("CLUSTER_NAME"), + } +} diff --git a/syntax/encoding/alloyyaml/testdata/26_alloy_logs.yaml b/syntax/encoding/alloyyaml/testdata/26_alloy_logs.yaml new file mode 100644 index 0000000000..67e7311f9f --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/26_alloy_logs.yaml @@ -0,0 +1,557 @@ +- livedebugging: + - enabled: true +- discovery.kubernetes/pods: + - role: pod + - selectors: + - role: pod + - field: expr("spec.nodeName=" + coalesce(sys.env("HOSTNAME"), constants.hostname)) +- discovery.relabel/pods_name: + - targets: expr(discovery.kubernetes.pods.targets) + - rule: + - source_labels: + - __meta_kubernetes_pod_label_name + - target_label: __service__ +- discovery.relabel/pods_app: + - targets: expr(discovery.kubernetes.pods.targets) + - rule: + - source_labels: + - __meta_kubernetes_pod_label_name + - regex: .+ + - action: drop + - rule: + - source_labels: + - __meta_kubernetes_pod_label_app + - target_label: __service__ +- discovery.relabel/pods_direct_controllers: + - targets: expr(discovery.kubernetes.pods.targets) + - rule: + - source_labels: + - __meta_kubernetes_pod_label_name + - __meta_kubernetes_pod_label_app + - separator: "" + - regex: .+ + - action: drop + - rule: + - source_labels: + - __meta_kubernetes_pod_controller_name + - regex: '[0-9a-z-.]+-[0-9a-f]{8,10}' + - action: drop + - rule: + - source_labels: + - __meta_kubernetes_pod_controller_name + - target_label: __service__ +- discovery.relabel/pods_indirect_controllers: + - targets: expr(discovery.kubernetes.pods.targets) + - rule: + - source_labels: + - __meta_kubernetes_pod_label_name + - __meta_kubernetes_pod_label_app + - separator: "" + - regex: .+ + - action: drop + - rule: + - source_labels: + - __meta_kubernetes_pod_controller_name + - regex: '[0-9a-z-.]+-[0-9a-f]{8,10}' + - action: keep + - rule: + - source_labels: + - __meta_kubernetes_pod_controller_name + - regex: ([0-9a-z-.]+)-[0-9a-f]{8,10} + - target_label: __service__ +- discovery.relabel/pods_static: + - targets: expr(discovery.kubernetes.pods.targets) + - rule: + - source_labels: + - __meta_kubernetes_pod_annotation_kubernetes_io_config_mirror + - regex: "" + - action: drop + - rule: + - source_labels: + - __meta_kubernetes_pod_annotation_kubernetes_io_config_mirror + - target_label: __meta_kubernetes_pod_uid + - rule: + - source_labels: + - __meta_kubernetes_pod_label_component + - target_label: __service__ +- discovery.relabel/default: + - targets: |- + expr(array.concat( + discovery.relabel.pods_name.output, + discovery.relabel.pods_app.output, + discovery.relabel.pods_static.output, + 
discovery.relabel.pods_direct_controllers.output, + discovery.relabel.pods_indirect_controllers.output, + )) + - rule: + - source_labels: + - __service__ + - regex: "" + - action: drop + - rule: + - source_labels: + - __meta_kubernetes_pod_node_name + - target_label: __host__ + - rule: + - regex: __meta_kubernetes_pod_label_(.+) + - action: labelmap + - rule: + - source_labels: + - __meta_kubernetes_namespace + - __service__ + - separator: / + - target_label: job + - rule: + - source_labels: + - __meta_kubernetes_namespace + - target_label: namespace + - rule: + - source_labels: + - __meta_kubernetes_pod_name + - target_label: pod + - rule: + - source_labels: + - __meta_kubernetes_pod_container_name + - target_label: container + - rule: + - source_labels: + - __meta_kubernetes_pod_uid + - __meta_kubernetes_pod_container_name + - separator: / + - target_label: __path__ + - replacement: /var/log/pods/*$1/*.log +- loki.source.file/logs: + - targets: expr(discovery.relabel.default.output) + - forward_to: + - expr(loki.process.logs.receiver) + - file_match: + - enabled: true +- loki.process/logs: + - forward_to: + - expr(loki.write.logs.receiver) + - stage.cri: + - max_partial_line_size: 262142 + - max_partial_line_size_truncate: true + - stage.match: + - selector: '{job=~"loki-.*/ingester.*"} |= "metrics.go"' + - action: drop + - stage.drop: + - longer_than: 262142B + - stage.regex: + - expression: (level|lvl|severity)=(?P<debug>debug) + - stage.drop: + - drop_counter_reason: noisy_logs_from_querier + - expression: caching_index_client.go:308.*caching index entries + - stage.template: + - source: debug + - template: "true" + - stage.labels: + - values: + debug: "" + - stage.metrics: + - metric.counter: + - prefix: promtail_custom_ + - name: byte_count_total + - description: A running counter of all bytes per stream with their corresponding labels + - action: add + - match_all: true + - count_entry_bytes: true + - metric.counter: + - prefix: promtail_custom_ + - name: line_count_total + - description: A running counter of all lines with their corresponding labels + - action: inc + - match_all: true + - stage.regex: + - expression: (?P<panic>panic:) + - stage.regex: + - expression: (?i)(?P<bad_words>panic:|core_dumped|failure|error|attack| bad |illegal |denied|refused|unauthorized|fatal|failed|Segmentation Fault|Corrupted) + - stage.metrics: + - metric.counter: + - prefix: promtail_custom_ + - name: bad_words_total + - description: total count of bad words found in log lines + - action: inc + - source: bad_words + - metric.counter: + - prefix: promtail_custom_ + - name: panic_total + - description: 'total count of panic: found in log lines' + - action: inc + - source: panic + - stage.match: + - selector: '{container="ingress-nginx"} |= "hosted-grafana-"' + - stage.template: + - source: service + - template: hosted-grafana + - stage.labels: + - values: + service: "" + - stage.match: + - selector: '{name="eventrouter"}' + - stage.json: + - expressions: + component: event.source.component + host: event.source.host + message: event.message + namespace: event.metadata.namespace + object_kind: event.involvedObject.kind + object_name: event.involvedObject.name + reason: event.reason + type: event.type + - stage.labels: + - values: + namespace: "" + - stage.template: + - source: newoutput + - template: test + - stage.output: + - source: newoutput + - stage.match: + - selector: '{name="kube-diff-logger"}' + - stage.json: + - expressions: + timestamp: timestamp + - stage.timestamp: + - source: timestamp + - format: RFC3339 + - 
stage.match: + - selector: '{container="loki-canary",stream="stdout"}' + - stage.regex: + - expression: (?P<ts>\d+) p+ + - stage.timestamp: + - source: ts + - format: UnixNs + - action_on_failure: skip + - stage.label_drop: + - values: + - filename + - stage.match: + - selector: '{name="amixr-engine"}' + - stage.multiline: + - firstline: ^\d{4}-\d{2}-\d{2} \d{1,2}:\d{2}:\d{2} + - stage.match: + - selector: '{name=~"amixr-engine-celery-.*"}' + - stage.multiline: + - firstline: ^\d{4}-\d{2}-\d{2} \d{1,2}:\d{2}:\d{2} + - stage.match: + - selector: '{container="cortex-gw"} |= "agent id"' + - stage.static_labels: + - values: + agent_log: id + - stage.match: + - selector: '{container="cortex-gw"} |= "sample remote write"' + - stage.static_labels: + - values: + agent_log: write + - stage.match: + - selector: '{namespace=~".*(alertmanager|amixr|cortex|fire|ge-metrics|grafana|loki|mimir|tempo).*"} |~ "(insight_logs|insight|insights)=true"' + - stage.static_labels: + - values: + insight: "true" + - stage.match: + - selector: '{namespace="asserts", container=~"api-server|assertion-detector|model-builder"}' + - stage.multiline: + - firstline: ^[A-Z]{4,5}[ ]{1,2}\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3} + - stage.regex: + - expression: ^(?P<log_level>\S+)\s+\S+\s+\S+\s+(?P<thread_name>\S+)\s+(?P<logger_name>\S+)(\s+)?(?:.*tenantId=(?P<tenant_id>\d+))? + - stage.labels: + - values: + level: log_level + loggerName: logger_name + tenantId: tenant_id + threadName: thread_name + - stage.match: + - selector: '{namespace=~".*(grafana|loki|cortex|mimir|tempo|pyroscope|phlare|fire|profiles|ge-logs|ge-metrics|ge-traces|promtail|machine-learning|agent|amixir|asserts|chatops|faro|incident).*"}' + - stage.regex: + - expression: .*(?P<team>grafana|loki|cortex|mimir|tempo|pyroscope|phlare|fire|profiles|ge-logs|ge-metrics|ge-traces|promtail|machine-learning|agent|amixir|asserts|chatops|faro|incident).* + - source: namespace + - stage.template: + - source: team + - template: loki + - stage.template: + - source: service_name + - template: loki/container + - stage.labels: + - values: + service_name: "" + - stage.match: + - selector: '{namespace="machine-learning",container=~"modelapi|worker"}' + - stage.multiline: + - firstline: ^({"text|\[\d{4}) + - stage.match: + - selector: '{name=~"hosted-grafana-api|grafana"}' + - stage.logfmt: + - mapping: + instance_audit: "" + - stage.labels: + - values: + instance_audit: "" + - stage.match: + - selector: '{namespace="securityops",job="securityops/cloudtrail-exporter-workloads-prod"}' + - stage.json: + - expressions: + eventCategory: event.eventCategory + eventName: event.eventName + eventSource: event.eventSource + timestamp: time + - stage.timestamp: + - source: timestamp + - format: RFC3339 + - stage.labels: + - values: + eventCategory: "" + eventName: "" + eventSource: "" + - stage.structured_metadata: + - values: + container: "" + pod: "" + pod_template_hash: "" + stream: "" + - stage.match: + - selector: '{namespace="securityops",job="securityops/cloudtrail-exporter-workloads-ops"}' + - stage.json: + - expressions: + eventCategory: event.eventCategory + eventName: event.eventName + eventSource: event.eventSource + timestamp: time + - stage.timestamp: + - source: timestamp + - format: RFC3339 + - stage.labels: + - values: + eventCategory: "" + eventName: "" + eventSource: "" + - stage.structured_metadata: + - values: + container: "" + pod: "" + pod_template_hash: "" + stream: "" + - stage.match: + - selector: '{namespace="securityops",job="securityops/cloudtrail-exporter-workloads-dev"}' + - stage.json: + - expressions: + 
eventCategory: event.eventCategory + eventName: event.eventName + eventSource: event.eventSource + timestamp: time + - stage.timestamp: + - source: timestamp + - format: RFC3339 + - stage.labels: + - values: + eventCategory: "" + eventName: "" + eventSource: "" + - stage.structured_metadata: + - values: + container: "" + pod: "" + pod_template_hash: "" + stream: "" + - stage.match: + - selector: '{namespace="securityops",job="securityops/gh-audit-logs"}' + - stage.json: + - expressions: + action: event.action + actorIsBot: event.actor_is_bot + eventType: event.event + operationType: event.operation_type + repo: event.repo + repoId: event.repo_id + timestamp: time + - stage.timestamp: + - source: timestamp + - format: RFC3339 + - stage.labels: + - values: + action: "" + actorIsBot: "" + eventType: "" + operationType: "" + repo: "" + repoId: "" + - stage.structured_metadata: + - values: + container: "" + pod: "" + pod_template_hash: "" + stream: "" + - stage.match: + - selector: '{job="notification-historian/notification-historian"}' + - stage.json: + - expressions: + alerts: alerts + commonAnnotations: commonAnnotations + commonLabels: commonLabels + externalURL: externalURL + groupKey: groupKey + groupLabels: groupLabels + level: level + message: message + orgID: orgID + receiver: receiver + state: state + status: status + timestamp: time + title: title + truncatedAlerts: truncatedAlerts + version: version + - stage.timestamp: + - source: timestamp + - format: RFC3339 + - stage.labels: + - values: + alertname: "" + level: "" + orgID: "" + receiver: "" + state: "" + status: "" + - stage.structured_metadata: + - values: + alerts: "" + commonAnnotations: "" + commonLabels: "" + container: "" + externalURL: "" + groupKey: "" + groupLabels: "" + message: "" + pod: "" + pod_template_hash: "" + stream: "" + title: "" + truncatedAlerts: "" + version: "" + - stage.match: + - selector: '{job="default/tetragon"}' + - stage.json: + - expressions: + processExecArguments: process_exec.process.arguments + processExecBinary: process_exec.process.binary + processExecPodNamespace: process_exec.process.pod.namespace + - stage.labels: + - values: + processExecPodNamespace: "" + - stage.structured_metadata: + - values: + processExecArguments: "" + processExecBinary: "" + - stage.label_drop: + - values: + - pod + - container + - stream + - stage.match: + - selector: '{name="okta-logs"}' + - stage.json: + - expressions: + alternateId: event.actor.alternateId + eventType: event.eventType + ipAddress: event.client.ipAddress + level: event.severity + timestamp: time + - stage.timestamp: + - source: timestamp + - format: RFC3339 + - stage.labels: + - values: + eventType: "" + level: "" + - stage.structured_metadata: + - values: + alternateId: "" + container: "" + ipAddress: "" + pod: "" + pod_template_hash: "" + stream: "" + - stage.label_drop: + - values: + - pod + - pod_template_hash + - container + - stream + - stage.match: + - selector: '{namespace="hosted-grafana", app="grafana"}' + - stage.structured_metadata: + - values: + pod: "" + pod_template_hash: "" + resource_version: "" + - stage.label_drop: + - values: + - pod + - resource_version + - pod_template_hash + - stage.match: + - selector: '{namespace=~"loki.*"}' + - stage.structured_metadata: + - values: + controller_revision_hash: "" + pod: "" + pod_template_hash: "" + - stage.label_drop: + - values: + - pod + - pod_template_hash + - controller_revision_hash + - stage.match: + - selector: '{namespace=~"(loki-live|loki-dev-.*|loki-prod.*|loki-ops.*)", 
container!~"memcached|static-exporter|archiver-operator|cortex-gw|eventrouter|loki-canary|rollout-operator"}' + - stage.logfmt: + - mapping: + org_id: "" + orgID: "" + traceID: "" + user_id: "" + - stage.structured_metadata: + - values: + org_id: "" + orgID: "" + traceID: "" + user_id: "" + - stage.match: + - selector: '{job=~"cicd-o11y/grafana-.*"} |= "service=bench"' + - stage.static_labels: + - values: + bench: "true" + - stage.match: + - selector: '{namespace="hosted-grafana-cd"} |= "service=bench"' + - stage.static_labels: + - values: + bench: "true" +- discovery.relabel/systemd_journal: + - targets: + $array: [] + - rule: + - source_labels: + - __journal__hostname + - target_label: nodename + - rule: + - source_labels: + - __journal_syslog_identifier + - target_label: syslog_identifier + - rule: + - source_labels: + - __journal_priority_keyword + - target_label: priority +- loki.source.journal/logs_default_systemd_journal: + - path: /var/log/journal + - relabel_rules: expr(discovery.relabel.systemd_journal.rules) + - forward_to: + - expr(loki.write.logs.receiver) + - labels: + job: default/systemd-journal +- loki.write/logs: + - endpoint: + - url: https://logs-us-central1.grafana.net/loki/api/v1/push + - basic_auth: + - username: expr(sys.env("TENANT_ID")) + - password: expr(sys.env("API_KEY")) + - external_labels: + cluster: expr(sys.env("CLUSTER_NAME")) diff --git a/syntax/encoding/alloyyaml/testdata/27_foreach.alloy b/syntax/encoding/alloyyaml/testdata/27_foreach.alloy new file mode 100644 index 0000000000..d547fd69b9 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/27_foreach.alloy @@ -0,0 +1,51 @@ +discovery.kubernetes "default" { + role = "pod" +} + +discovery.relabel "redis" { + targets = discovery.kubernetes.default.targets + + rule { + source_labels = ["__meta_kubernetes_pod_container_name"] + regex = "redis-cont" + action = "keep" + } +} + +foreach "redis" { + collection = discovery.relabel.redis.output + var = "each" + + template { + prometheus.exporter.redis "default" { + redis_addr = each["__address__"] + } + + prometheus.scrape "default" { + targets = prometheus.exporter.redis.default.targets + forward_to = [prometheus.relabel.default.receiver] + } + + prometheus.relabel "default" { + rule { + replacement = each["__meta_kubernetes_namespace"] + target_label = "k8s_namespace" + action = "replace" + } + + forward_to = [prometheus.remote_write.mimir.receiver] + } + } +} + +prometheus.remote_write "mimir" { + endpoint { + url = "https://prometheus-xxx.grafana.net/api/prom/push" + + basic_auth { + username = sys.env("PROMETHEUS_USERNAME") + password = sys.env("GRAFANA_CLOUD_API_KEY") + } + } +} + diff --git a/syntax/encoding/alloyyaml/testdata/27_foreach.yaml b/syntax/encoding/alloyyaml/testdata/27_foreach.yaml new file mode 100644 index 0000000000..72cb43a999 --- /dev/null +++ b/syntax/encoding/alloyyaml/testdata/27_foreach.yaml @@ -0,0 +1,32 @@ +- discovery.kubernetes/default: + - role: pod +- discovery.relabel/redis: + - targets: expr(discovery.kubernetes.default.targets) + - rule: + - source_labels: + - __meta_kubernetes_pod_container_name + - regex: redis-cont + - action: keep +- foreach/redis: + - collection: expr(discovery.relabel.redis.output) + - var: each + - template: + - prometheus.exporter.redis/default: + - redis_addr: expr(each["__address__"]) + - prometheus.scrape/default: + - targets: expr(prometheus.exporter.redis.default.targets) + - forward_to: + - expr(prometheus.relabel.default.receiver) + - prometheus.relabel/default: + - rule: + - replacement: 
expr(each["__meta_kubernetes_namespace"]) + - target_label: k8s_namespace + - action: replace + - forward_to: + - expr(prometheus.remote_write.mimir.receiver) +- prometheus.remote_write/mimir: + - endpoint: + - url: https://prometheus-xxx.grafana.net/api/prom/push + - basic_auth: + - username: expr(sys.env("PROMETHEUS_USERNAME")) + - password: expr(sys.env("GRAFANA_CLOUD_API_KEY")) diff --git a/syntax/encoding/alloyyaml/to_alloy.go b/syntax/encoding/alloyyaml/to_alloy.go new file mode 100644 index 0000000000..1cb0ec6fc8 --- /dev/null +++ b/syntax/encoding/alloyyaml/to_alloy.go @@ -0,0 +1,414 @@ +package alloyyaml + +import ( + "bytes" + "fmt" + "io" + "sort" + "strings" + + "github.com/grafana/alloy/syntax/scanner" + "gopkg.in/yaml.v3" +) + +// ToAlloy converts YAML to Alloy configuration syntax. +// See tests for example YAML and Alloy input files. +func ToAlloy(yamlData []byte) ([]byte, error) { + var data interface{} + if err := yaml.Unmarshal(yamlData, &data); err != nil { + return nil, fmt.Errorf("invalid YAML: %w", err) + } + + var buf bytes.Buffer + if err := writeValue(&buf, data, true); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// writeValue writes any YAML value in the appropriate Alloy format. +// The isTopLevel flag indicates whether this is a top-level body (affects formatting). +func writeValue(w io.Writer, value interface{}, isTopLevel bool) error { + switch v := value.(type) { + case nil: + if isTopLevel { + // Top-level null doesn't make sense + return nil + } + _, err := fmt.Fprint(w, "null") + return err + + case map[string]interface{}: + return writeMap(w, v, 0, isTopLevel) + + case []interface{}: + if isTopLevel { + // Top-level array should be treated as multiple statements + return writeTopLevelArray(w, v, 0) + } + return writeArray(w, v) + + case string: + return writeString(w, v) + + case int: + _, err := fmt.Fprintf(w, "%d", v) + return err + case int64: + _, err := fmt.Fprintf(w, "%d", v) + return err + case float32: + return writeFloat(w, float64(v)) + case float64: + return writeFloat(w, v) + + case bool: + _, err := fmt.Fprintf(w, "%v", v) + return err + + default: + return fmt.Errorf("unsupported value type: %T", v) + } +} + +// writeMap writes a YAML map as either a block body (top-level) or object literal (nested). +func writeMap(w io.Writer, m map[string]interface{}, indent int, isTopLevel bool) error { + if len(m) == 0 { + if isTopLevel { + return nil + } + _, err := fmt.Fprint(w, "{}") + return err + } + + if isTopLevel { + // Top-level map represents a body with blocks and attributes + return writeBody(w, m, indent) + } + + // Nested maps (when used as attribute values) are always object literals + return writeObjectLiteral(w, m) +} + +// writeBody is a compatibility wrapper that handles both old map format and new array format. +func writeBody(w io.Writer, body interface{}, indent int) error { + switch b := body.(type) { + case []interface{}: + return writeBodyArray(w, b, indent) + case map[string]interface{}: + // Old format compatibility - should not happen in new design + return writeBodyMap(w, b, indent) + default: + return fmt.Errorf("invalid body type: %T", body) + } +} + +// writeBodyArray writes an array of single-key maps as Alloy statements. +// Each element in the array is a map with exactly one key (the statement name). 
+func writeBodyArray(w io.Writer, body []interface{}, indent int) error { + indentStr := strings.Repeat(" ", indent) + + for i, item := range body { + itemMap, ok := item.(map[string]interface{}) + if !ok { + return fmt.Errorf("body element %d must be a map, got %T", i, item) + } + + if len(itemMap) != 1 { + return fmt.Errorf("body element %d must have exactly one key, got %d keys", i, len(itemMap)) + } + + // Extract the single key-value pair + var key string + var value interface{} + for k, v := range itemMap { + key = k + value = v + break + } + + // Add blank line between top-level blocks (but not before first item) + if i > 0 && indent == 0 && isStructuralValue(value) { + if _, err := fmt.Fprintln(w); err != nil { + return err + } + } + + // Check if key contains "/" separator for block/label syntax + blockName, label := splitBlockLabel(key) + + // Determine if this is a block or attribute based on value type + if err := writeStatement(w, blockName, label, value, indentStr, indent); err != nil { + return err + } + } + + return nil +} + +// writeBodyMap writes the old map-based body format (for compatibility). +func writeBodyMap(w io.Writer, body map[string]interface{}, indent int) error { + indentStr := strings.Repeat(" ", indent) + + // Sort keys for deterministic output + keys := make([]string, 0, len(body)) + for k := range body { + keys = append(keys, k) + } + sort.Strings(keys) + + firstItem := true + for _, key := range keys { + value := body[key] + + // Add blank line between top-level blocks (but not first item) + if !firstItem && indent == 0 && isStructural(value) { + if _, err := fmt.Fprintln(w); err != nil { + return err + } + } + firstItem = false + + // Check if key contains "/" separator for block/label syntax + blockName, label := splitBlockLabel(key) + + if err := writeStatement(w, blockName, label, value, indentStr, indent); err != nil { + return err + } + } + + return nil +} + +// writeStatement writes a single statement (attribute or block). 
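+// +// Dispatch sketch: an []interface{} whose first element is a single-key map is written as a block body; any other value becomes an attribute. A map carrying the "$array" marker is unwrapped into an array-valued attribute, so a key "targets" with the value map[string]interface{}{"$array": []interface{}{}} is expected to emit: targets = [] (cf. the systemd_journal block in the 26_alloy_logs testdata). 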
+func writeStatement(w io.Writer, blockName, label string, value interface{}, indentStr string, indent int) error { + switch v := value.(type) { + case map[string]interface{}: + // Check for $array marker - convert to array literal attribute + if arrValue, hasArray := v["$array"]; hasArray { + if err := writeAttribute(w, blockName, arrValue, indentStr); err != nil { + return fmt.Errorf("attribute %s: %w", blockName, err) + } + return nil + } + + // In array-based format, plain maps are object literals (not blocks) + // Blocks always have array bodies in the new format + if err := writeAttribute(w, blockName, value, indentStr); err != nil { + return fmt.Errorf("attribute %s: %w", blockName, err) + } + + case []interface{}: + // In new format, arrays are block bodies + // Empty arrays or arrays of single-key maps are blocks + if len(v) == 0 { + // Empty block + if label != "" { + _, err := fmt.Fprintf(w, "%s%s %q { }\n", indentStr, blockName, label) + return err + } + _, err := fmt.Fprintf(w, "%s%s { }\n", indentStr, blockName) + return err + } + + if firstElem, ok := v[0].(map[string]interface{}); ok && len(firstElem) == 1 { + // This is a block body + if label != "" { + if _, err := fmt.Fprintf(w, "%s%s %q {\n", indentStr, blockName, label); err != nil { + return err + } + } else { + if _, err := fmt.Fprintf(w, "%s%s {\n", indentStr, blockName); err != nil { + return err + } + } + if err := writeBodyArray(w, v, indent+1); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "%s}\n", indentStr); err != nil { + return err + } + return nil + } + + // Otherwise it's an array attribute + if err := writeAttribute(w, blockName, value, indentStr); err != nil { + return fmt.Errorf("attribute %s: %w", blockName, err) + } + + default: + // Simple values are attributes + if err := writeAttribute(w, blockName, value, indentStr); err != nil { + return fmt.Errorf("attribute %s: %w", blockName, err) + } + } + + return nil +} + +// splitBlockLabel splits a key into block name and label if it contains "/". +// Returns (blockName, label) where label is empty if no "/" is present. +func splitBlockLabel(key string) (string, string) { + parts := strings.SplitN(key, "/", 2) + if len(parts) == 2 { + return parts[0], parts[1] + } + return key, "" +} + +// isStructural returns true if the value is a structural element (nested map). +// Arrays are not considered structural because they can be inline attribute values. +func isStructural(value interface{}) bool { + switch value.(type) { + case map[string]interface{}: + return true + } + return false +} + +// isStructuralValue returns true if the value represents a block (not an attribute). +// In the new format, blocks are represented as arrays of single-key maps. +func isStructuralValue(value interface{}) bool { + switch v := value.(type) { + case []interface{}: + // Check if it's an array of single-key maps (block body) + if len(v) > 0 { + if firstElem, ok := v[0].(map[string]interface{}); ok && len(firstElem) == 1 { + return true + } + } + return false + case map[string]interface{}: + // In array-based format, maps are object literals (not blocks) + // Only $array marker indicates it's not structural + if _, hasArray := v["$array"]; hasArray { + return false + } + // Maps without markers are object literals, not structural + return false + } + return false +} + +// writeAttribute writes an attribute assignment. 
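+// +// For example, writeAttribute(w, "endpoint", "localhost:4317", "") should emit the line: endpoint = "localhost:4317" (string values pass through writeString, which quotes them and unwraps expr(...) values so they are written unquoted). 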
+func writeAttribute(w io.Writer, name string, value interface{}, indentStr string) error { + if _, err := fmt.Fprintf(w, "%s%s = ", indentStr, name); err != nil { + return err + } + + if err := writeValue(w, value, false); err != nil { + return err + } + + _, err := fmt.Fprintln(w) + return err +} + +// writeString writes a string value, handling expr() wrapper. +func writeString(w io.Writer, s string) error { + // Check for expr() wrapper + if strings.HasPrefix(s, "expr(") && strings.HasSuffix(s, ")") { + // Unwrap and write expression as-is (unquoted) + expr := s[5 : len(s)-1] + _, err := fmt.Fprint(w, expr) + return err + } + + // Regular string - quote it + // TODO: Handle escaping properly (newlines, quotes, etc.) + _, err := fmt.Fprintf(w, "%q", s) + return err +} + +// writeFloat writes a float, avoiding unnecessary decimal points for whole numbers. +func writeFloat(w io.Writer, f float64) error { + if f == float64(int64(f)) { + // Whole number + _, err := fmt.Fprintf(w, "%d", int64(f)) + return err + } + _, err := fmt.Fprintf(w, "%v", f) + return err +} + +// writeArray writes an array in Alloy syntax. +func writeArray(w io.Writer, arr []interface{}) error { + if len(arr) == 0 { + _, err := fmt.Fprint(w, "[]") + return err + } + + if _, err := fmt.Fprint(w, "["); err != nil { + return err + } + for i, elem := range arr { + if i > 0 { + if _, err := fmt.Fprint(w, ", "); err != nil { + return err + } + } + if err := writeValue(w, elem, false); err != nil { + return err + } + } + _, err := fmt.Fprint(w, "]") + return err +} + +// writeTopLevelArray handles an array at the top level. +// In the new format, this is an array of single-key maps representing statements. +func writeTopLevelArray(w io.Writer, arr []interface{}, indent int) error { + return writeBodyArray(w, arr, indent) +} + +// writeObjectLiteral writes an object literal in Alloy syntax. +func writeObjectLiteral(w io.Writer, obj map[string]interface{}) error { + if len(obj) == 0 { + _, err := fmt.Fprint(w, "{}") + return err + } + + if _, err := fmt.Fprint(w, "{ "); err != nil { + return err + } + + // Sort keys for deterministic output + keys := make([]string, 0, len(obj)) + for k := range obj { + keys = append(keys, k) + } + sort.Strings(keys) + + for i, key := range keys { + if i > 0 { + if _, err := fmt.Fprint(w, ", "); err != nil { + return err + } + } + + // Write key (quote if needed) + if needsQuoting(key) { + if _, err := fmt.Fprintf(w, "%q = ", key); err != nil { + return err + } + } else { + if _, err := fmt.Fprintf(w, "%s = ", key); err != nil { + return err + } + } + + // Write value + if err := writeValue(w, obj[key], false); err != nil { + return err + } + } + + _, err := fmt.Fprint(w, " }") + return err +} + +// needsQuoting returns true if a string needs to be quoted as an identifier. +func needsQuoting(s string) bool { + return !scanner.IsValidIdentifier(s) +} diff --git a/syntax/encoding/alloyyaml/to_yaml.go b/syntax/encoding/alloyyaml/to_yaml.go new file mode 100644 index 0000000000..49f25e0acd --- /dev/null +++ b/syntax/encoding/alloyyaml/to_yaml.go @@ -0,0 +1,224 @@ +package alloyyaml + +import ( + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/parser" + "github.com/grafana/alloy/syntax/printer" + "github.com/grafana/alloy/syntax/token" + "gopkg.in/yaml.v3" +) + +// ToYAML converts Alloy configuration syntax to YAML. +// See tests for example Alloy and YAML input files. 
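+// +// As a rough sketch, mirroring the 19_example golden files, the Alloy block +// logging { +// level = "debug" +// } +// converts to the ordered single-key-map form: +// - logging: +// - level: debug 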
+
+// ToYAML converts Alloy configuration syntax to YAML.
+// See the tests for example Alloy and YAML input files.
+func ToYAML(alloyData []byte) ([]byte, error) {
+	// Parse the Alloy file.
+	file, err := parser.ParseFile("config.alloy", alloyData)
+	if err != nil {
+		return nil, fmt.Errorf("invalid Alloy syntax: %w", err)
+	}
+
+	// Convert the AST body to a YAML-compatible data structure.
+	data, err := convertBody(file.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Marshal to YAML.
+	yamlData, err := yaml.Marshal(data)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal YAML: %w", err)
+	}
+
+	return yamlData, nil
+}
+
+// convertBody converts an AST body to a YAML-compatible array of single-key
+// maps. Each statement (attribute or block) becomes a single-key map in the
+// array. This preserves order and allows duplicate block names.
+func convertBody(body ast.Body) (interface{}, error) {
+	if len(body) == 0 {
+		return []interface{}{}, nil
+	}
+
+	result := make([]interface{}, 0, len(body))
+
+	for _, stmt := range body {
+		switch s := stmt.(type) {
+		case *ast.AttributeStmt:
+			// An attribute becomes a single-key map.
+			name := s.Name.Name
+			value, err := convertExpr(s.Value)
+			if err != nil {
+				return nil, fmt.Errorf("attribute %s: %w", name, err)
+			}
+			result = append(result, map[string]interface{}{
+				name: value,
+			})
+
+		case *ast.BlockStmt:
+			// A block becomes a single-key map; a label, if present, is
+			// appended to the key after a "/".
+			name := strings.Join(s.Name, ".")
+			key := name
+			if s.Label != "" {
+				key = name + "/" + s.Label
+			}
+
+			blockData, err := convertBody(s.Body)
+			if err != nil {
+				return nil, fmt.Errorf("block %s: %w", key, err)
+			}
+
+			result = append(result, map[string]interface{}{
+				key: blockData,
+			})
+		}
+	}
+
+	return result, nil
+}
+
+// convertExpr converts an AST expression to a YAML-compatible value.
+func convertExpr(expr ast.Expr) (interface{}, error) {
+	switch e := expr.(type) {
+	case *ast.LiteralExpr:
+		return convertLiteral(e)
+
+	case *ast.ArrayExpr:
+		return convertArray(e)
+
+	case *ast.ObjectExpr:
+		return convertObject(e)
+
+	case *ast.IdentifierExpr, *ast.AccessExpr, *ast.IndexExpr, *ast.CallExpr,
+		*ast.UnaryExpr, *ast.BinaryExpr, *ast.ParenExpr:
+		// Complex expressions: render as Alloy syntax and wrap in expr().
+		return renderExprAsString(expr)
+
+	default:
+		return nil, fmt.Errorf("unsupported expression type: %T", expr)
+	}
+}
+
+// convertLiteral converts a literal expression to its Go value.
+func convertLiteral(lit *ast.LiteralExpr) (interface{}, error) {
+	switch lit.Kind {
+	case token.NULL:
+		return nil, nil
+
+	case token.BOOL:
+		return lit.Value == "true", nil
+
+	case token.NUMBER:
+		// Try parsing as int64 first, then uint64, then float64.
+		if intVal, err := strconv.ParseInt(lit.Value, 0, 64); err == nil {
+			return intVal, nil
+		}
+		if uintVal, err := strconv.ParseUint(lit.Value, 0, 64); err == nil {
+			// Prefer int64 when the value fits; only values that overflow
+			// int64 stay uint64.
+			if uintVal <= math.MaxInt64 {
+				return int64(uintVal), nil
+			}
+			return uintVal, nil
+		}
+		if floatVal, err := strconv.ParseFloat(lit.Value, 64); err == nil {
+			return floatVal, nil
+		}
+		return nil, fmt.Errorf("invalid number literal: %s", lit.Value)
+
+	case token.FLOAT:
+		floatVal, err := strconv.ParseFloat(lit.Value, 64)
+		if err != nil {
+			return nil, fmt.Errorf("invalid float literal: %s", lit.Value)
+		}
+		return floatVal, nil
+
+	case token.STRING:
+		strVal, err := strconv.Unquote(lit.Value)
+		if err != nil {
+			return nil, fmt.Errorf("invalid string literal: %s", lit.Value)
+		}
+		return strVal, nil
+
+	default:
+		return nil, fmt.Errorf("unsupported literal kind: %v", lit.Kind)
+	}
+}
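+
+// A sketch of the $array disambiguation handled below (names and exact YAML
+// indentation are illustrative): the Alloy attribute
+//
+//	targets = [{ job = "app" }]
+//
+// is emitted as
+//
+//	- targets:
+//	    $array:
+//	      - job: app
+//
+// so that the array of maps is not mistaken for a block body when it is
+// converted back to Alloy.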
+
+// convertArray converts an array expression to a slice.
+func convertArray(arr *ast.ArrayExpr) (interface{}, error) {
+	// Determine whether every element is an object literal. This is
+	// vacuously true for empty arrays, which must also receive the $array
+	// marker below so that they round-trip as empty array attributes
+	// rather than empty blocks.
+	allObjects := true
+	for _, elem := range arr.Elements {
+		if _, ok := elem.(*ast.ObjectExpr); !ok {
+			allObjects = false
+			break
+		}
+	}
+
+	result := make([]interface{}, len(arr.Elements))
+	for i, elem := range arr.Elements {
+		val, err := convertExpr(elem)
+		if err != nil {
+			return nil, fmt.Errorf("array element %d: %w", i, err)
+		}
+		result[i] = val
+	}
+
+	// Arrays of object literals are wrapped in a $array marker to
+	// distinguish them from block bodies, which are also arrays of maps;
+	// mixed arrays are emitted unwrapped.
+	if allObjects {
+		return map[string]interface{}{
+			"$array": result,
+		}, nil
+	}
+
+	return result, nil
+}
+
+// convertObject converts an object expression to a map. In the array-based
+// format, plain maps represent object literals while arrays represent block
+// bodies, so no marker is needed.
+func convertObject(obj *ast.ObjectExpr) (interface{}, error) {
+	objData := make(map[string]interface{})
+	for _, field := range obj.Fields {
+		val, err := convertExpr(field.Value)
+		if err != nil {
+			return nil, fmt.Errorf("object field %s: %w", field.Name.Name, err)
+		}
+		objData[field.Name.Name] = val
+	}
+
+	return objData, nil
+}
+
+// renderExprAsString renders an expression as Alloy syntax and wraps it in
+// expr().
+func renderExprAsString(expr ast.Expr) (string, error) {
+	var buf bytes.Buffer
+	if err := printer.Fprint(&buf, expr); err != nil {
+		return "", fmt.Errorf("failed to render expression: %w", err)
+	}
+
+	// Trim surrounding whitespace from the printer output before wrapping.
+	output := strings.TrimSpace(buf.String())
+
+	return "expr(" + output + ")", nil
+}
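+
+// Usage sketch (a minimal, hypothetical example; the golden-file tests in
+// alloyyaml_test.go exercise both conversion directions):
+//
+//	yamlData, err := ToYAML([]byte(`logging {
+//		level = "debug"
+//	}`))
+//	if err != nil {
+//		// handle the conversion error
+//	}
+//	// yamlData now contains:
+//	//   - logging:
+//	//       - level: debug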