diff --git a/model/labels/labels.go b/model/labels/labels.go index 5697196324..510b8a32ed 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -36,13 +36,13 @@ func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name } // Encoding may change over time or between runs of Prometheus. func (ls Labels) Bytes(buf []byte) []byte { b := bytes.NewBuffer(buf[:0]) - b.WriteByte(labelSep) + b.WriteByte(LabelSep) for i, l := range ls { if i > 0 { - b.WriteByte(sep) + b.WriteByte(Sep) } b.WriteString(l.Name) - b.WriteByte(sep) + b.WriteByte(Sep) b.WriteString(l.Value) } return b.Bytes() @@ -79,17 +79,17 @@ func (ls Labels) Hash() uint64 { _, _ = h.Write(b) for _, v := range ls[i:] { _, _ = h.WriteString(v.Name) - _, _ = h.Write(seps) + _, _ = h.Write(Seps) _, _ = h.WriteString(v.Value) - _, _ = h.Write(seps) + _, _ = h.Write(Seps) } return h.Sum64() } b = append(b, v.Name...) - b = append(b, sep) + b = append(b, Sep) b = append(b, v.Value...) - b = append(b, sep) + b = append(b, Sep) } return xxhash.Sum64(b) } @@ -107,9 +107,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { i++ default: b = append(b, ls[i].Name...) - b = append(b, sep) + b = append(b, Sep) b = append(b, ls[i].Value...) - b = append(b, sep) + b = append(b, Sep) i++ j++ } @@ -131,9 +131,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { continue } b = append(b, ls[i].Name...) - b = append(b, sep) + b = append(b, Sep) b = append(b, ls[i].Value...) - b = append(b, sep) + b = append(b, Sep) } return xxhash.Sum64(b), b } @@ -142,7 +142,7 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { // 'names' have to be sorted in ascending order. 
func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { b := bytes.NewBuffer(buf[:0]) - b.WriteByte(labelSep) + b.WriteByte(LabelSep) i, j := 0, 0 for i < len(ls) && j < len(names) { switch { @@ -152,10 +152,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { i++ default: if b.Len() > 1 { - b.WriteByte(sep) + b.WriteByte(Sep) } b.WriteString(ls[i].Name) - b.WriteByte(sep) + b.WriteByte(Sep) b.WriteString(ls[i].Value) i++ j++ @@ -168,7 +168,7 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { // 'names' have to be sorted in ascending order. func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { b := bytes.NewBuffer(buf[:0]) - b.WriteByte(labelSep) + b.WriteByte(LabelSep) j := 0 for i := range ls { for j < len(names) && names[j] < ls[i].Name { @@ -178,10 +178,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { continue } if b.Len() > 1 { - b.WriteByte(sep) + b.WriteByte(Sep) } b.WriteString(ls[i].Name) - b.WriteByte(sep) + b.WriteByte(Sep) b.WriteString(ls[i].Value) } return b.Bytes() @@ -279,6 +279,10 @@ func New(ls ...Label) Labels { return set } +func NewFromSorted(ls []Label) Labels { + return ls +} + // FromStrings creates new labels from pairs of strings. func FromStrings(ss ...string) Labels { if len(ss)%2 != 0 { @@ -423,7 +427,7 @@ func (b *Builder) Labels() Labels { } res := make(Labels, 0, expectedSize) for _, l := range b.base { - if slices.Contains(b.del, l.Name) || contains(b.add, l.Name) { + if slices.Contains(b.del, l.Name) || Contains(b.add, l.Name) { continue } res = append(res, l) diff --git a/model/labels/labels_common.go b/model/labels/labels_common.go index 5f46d6c35f..249803f76a 100644 --- a/model/labels/labels_common.go +++ b/model/labels/labels_common.go @@ -31,11 +31,11 @@ const ( AlertName = "alertname" BucketLabel = "le" - labelSep = '\xfe' // Used at beginning of `Bytes` return. 
- sep = '\xff' // Used between labels in `Bytes` and `Hash`. + LabelSep = '\xfe' // Used at beginning of `Bytes` return. + Sep = '\xff' // Used between labels in `Bytes` and `Hash`. ) -var seps = []byte{sep} // Used with Hash, which has no WriteByte method. +var Seps = []byte{Sep} // Used with Hash, which has no WriteByte method. // Label is a key/value a pair of strings. type Label struct { @@ -215,7 +215,7 @@ func (b *Builder) Range(f func(l Label)) { // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) b.base.Range(func(l Label) { - if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) { + if !slices.Contains(origDel, l.Name) && !Contains(origAdd, l.Name) { f(l) } }) @@ -224,7 +224,7 @@ func (b *Builder) Range(f func(l Label)) { } } -func contains(s []Label, n string) bool { +func Contains(s []Label, n string) bool { for _, a := range s { if a.Name == n { return true diff --git a/model/labels/labels_dedupelabels.go b/model/labels/labels_dedupelabels.go index edc6ff8e82..66ca6252cb 100644 --- a/model/labels/labels_dedupelabels.go +++ b/model/labels/labels_dedupelabels.go @@ -146,13 +146,13 @@ func (ls Labels) Bytes(buf []byte) []byte { b := bytes.NewBuffer(buf[:0]) for i := 0; i < len(ls.data); { if i > 0 { - b.WriteByte(sep) + b.WriteByte(Sep) } var name, value string name, i = decodeString(ls.syms, ls.data, i) value, i = decodeString(ls.syms, ls.data, i) b.WriteString(name) - b.WriteByte(sep) + b.WriteByte(Sep) b.WriteString(value) } return b.Bytes() @@ -193,17 +193,17 @@ func (ls Labels) Hash() uint64 { name, pos = decodeString(ls.syms, ls.data, pos) value, pos = decodeString(ls.syms, ls.data, pos) _, _ = h.WriteString(name) - _, _ = h.Write(seps) + _, _ = h.Write(Seps) _, _ = h.WriteString(value) - _, _ = h.Write(seps) + _, _ = h.Write(Seps) } return h.Sum64() } b = append(b, name...) 
- b = append(b, sep) + b = append(b, Sep) b = append(b, value...) - b = append(b, sep) + b = append(b, Sep) pos = newPos } return xxhash.Sum64(b) @@ -226,9 +226,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { } if name == names[j] { b = append(b, name...) - b = append(b, sep) + b = append(b, Sep) b = append(b, value...) - b = append(b, sep) + b = append(b, Sep) } } @@ -252,9 +252,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { continue } b = append(b, name...) - b = append(b, sep) + b = append(b, Sep) b = append(b, value...) - b = append(b, sep) + b = append(b, Sep) } return xxhash.Sum64(b), b } @@ -275,10 +275,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { } if lName == names[j] { if b.Len() > 1 { - b.WriteByte(sep) + b.WriteByte(Sep) } b.WriteString(lName) - b.WriteByte(sep) + b.WriteByte(Sep) b.WriteString(lValue) } pos = newPos @@ -299,10 +299,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { } if j == len(names) || lName != names[j] { if b.Len() > 1 { - b.WriteByte(sep) + b.WriteByte(Sep) } b.WriteString(lName) - b.WriteByte(sep) + b.WriteByte(Sep) b.WriteString(lValue) } pos = newPos @@ -464,6 +464,15 @@ func New(ls ...Label) Labels { return Labels{syms: syms.nameTable, data: yoloString(buf)} } +func NewFromSorted(ls []Label) Labels { + syms := NewSymbolTable() + var stackSpace [16]int + size, nums := mapLabelsToNumbers(syms, ls, stackSpace[:]) + buf := make([]byte, size) + marshalNumbersToSizedBuffer(nums, buf) + return Labels{syms: syms.nameTable, data: yoloString(buf)} +} + // FromStrings creates new labels from pairs of strings. 
func FromStrings(ss ...string) Labels { if len(ss)%2 != 0 { diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index ce4f26eb2a..1efbf5164a 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -105,9 +105,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { } if name == names[j] { b = append(b, name...) - b = append(b, sep) + b = append(b, Sep) b = append(b, value...) - b = append(b, sep) + b = append(b, Sep) } } @@ -131,9 +131,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { continue } b = append(b, name...) - b = append(b, sep) + b = append(b, Sep) b = append(b, value...) - b = append(b, sep) + b = append(b, Sep) } return xxhash.Sum64(b), b } @@ -314,6 +314,13 @@ func New(ls ...Label) Labels { return Labels{data: yoloString(buf)} } +func NewFromSorted(ls []Label) Labels { + size := labelsSize(ls) + buf := make([]byte, size) + marshalLabelsToSizedBuffer(ls, buf) + return Labels{data: yoloString(buf)} +} + // FromStrings creates new labels from pairs of strings. func FromStrings(ss ...string) Labels { if len(ss)%2 != 0 { diff --git a/model/labels/sharding.go b/model/labels/sharding.go index 8b3a369397..668e03bd5c 100644 --- a/model/labels/sharding.go +++ b/model/labels/sharding.go @@ -31,17 +31,17 @@ func StableHash(ls Labels) uint64 { _, _ = h.Write(b) for _, v := range ls[i:] { _, _ = h.WriteString(v.Name) - _, _ = h.Write(seps) + _, _ = h.Write(Seps) _, _ = h.WriteString(v.Value) - _, _ = h.Write(seps) + _, _ = h.Write(Seps) } return h.Sum64() } b = append(b, v.Name...) - b = append(b, sep) + b = append(b, Sep) b = append(b, v.Value...) 
- b = append(b, sep) + b = append(b, Sep) } return xxhash.Sum64(b) } diff --git a/model/labels/sharding_dedupelabels.go b/model/labels/sharding_dedupelabels.go index 5bf41b05d6..fc984b76b7 100644 --- a/model/labels/sharding_dedupelabels.go +++ b/model/labels/sharding_dedupelabels.go @@ -35,17 +35,17 @@ func StableHash(ls Labels) uint64 { name, pos = decodeString(ls.syms, ls.data, pos) value, pos = decodeString(ls.syms, ls.data, pos) _, _ = h.WriteString(name) - _, _ = h.Write(seps) + _, _ = h.Write(Seps) _, _ = h.WriteString(value) - _, _ = h.Write(seps) + _, _ = h.Write(Seps) } return h.Sum64() } b = append(b, name...) - b = append(b, sep) + b = append(b, Sep) b = append(b, value...) - b = append(b, sep) + b = append(b, Sep) pos = newPos } return xxhash.Sum64(b) diff --git a/model/labels/sharding_stringlabels.go b/model/labels/sharding_stringlabels.go index 798f268eb9..20119df392 100644 --- a/model/labels/sharding_stringlabels.go +++ b/model/labels/sharding_stringlabels.go @@ -36,16 +36,16 @@ func StableHash(ls Labels) uint64 { } if h != nil { _, _ = h.WriteString(v.Name) - _, _ = h.Write(seps) + _, _ = h.Write(Seps) _, _ = h.WriteString(v.Value) - _, _ = h.Write(seps) + _, _ = h.Write(Seps) continue } b = append(b, v.Name...) - b = append(b, sep) + b = append(b, Sep) b = append(b, v.Value...) - b = append(b, sep) + b = append(b, Sep) } if h != nil { return h.Sum64() diff --git a/storage/remote/otlptranslator/prometheusremotewrite/combined_appender.go b/storage/remote/otlptranslator/prometheusremotewrite/combined_appender.go new file mode 100644 index 0000000000..e7efb39dd7 --- /dev/null +++ b/storage/remote/otlptranslator/prometheusremotewrite/combined_appender.go @@ -0,0 +1,162 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheusremotewrite + +import ( + "errors" + "fmt" + "log/slog" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + modelLabels "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/labels" +) + +// NewCombinedAppender creates a combined appender that sets start times and +// updates metadata for each series only once, and appends samples and +// exemplars for each call. 
+func NewCombinedAppender(app storage.Appender, logger *slog.Logger, reg prometheus.Registerer, ingestCTZeroSample bool) CombinedAppender { + return &combinedAppender{ + app: app, + logger: logger, + ingestCTZeroSample: ingestCTZeroSample, + refs: make(map[uint64]storage.SeriesRef), + samplesAppendedWithoutMetadata: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "otlp_without_metadata_appended_samples_total", + Help: "The total number of received OTLP data points which were ingested without corresponding metadata.", + }), + outOfOrderExemplars: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "otlp_out_of_order_exemplars_total", + Help: "The total number of received OTLP exemplars which were rejected because they were out of order.", + }), + } +} + +// CombinedAppender is similar to storage.Appender, but combines updates to +// metadata, created timestamps, exemplars and samples into a single call. +type CombinedAppender interface { + // AppendSample appends a sample and related exemplars, metadata, and + // created timestamp to the storage. + AppendSample(metricFamily string, ls labels.Labels, meta metadata.Metadata, t, ct int64, v float64, es []exemplar.Exemplar) error + // AppendHistogram appends a histogram and related exemplars, metadata, and + // created timestamp to the storage. + AppendHistogram(metricFamily string, ls labels.Labels, meta metadata.Metadata, t, ct int64, h *histogram.Histogram, es []exemplar.Exemplar) error +} + +type combinedAppender struct { + app storage.Appender + logger *slog.Logger + samplesAppendedWithoutMetadata prometheus.Counter + outOfOrderExemplars prometheus.Counter + ingestCTZeroSample bool + // Used to ensure we only update metadata and created timestamps once, and to share storage.SeriesRefs. 
+ refs map[uint64]storage.SeriesRef +} + +func (b *combinedAppender) AppendSample(_ string, rawls labels.Labels, meta metadata.Metadata, t, ct int64, v float64, es []exemplar.Exemplar) (err error) { + ls := rawls.MoveToModel() + hash := ls.Hash() + ref, exists := b.refs[hash] + if !exists { + ref, err = b.app.UpdateMetadata(0, ls, meta) + if err != nil { + b.samplesAppendedWithoutMetadata.Add(1) + b.logger.Debug("error while updating metadata from OTLP", "err", err) + } + if ct != 0 && b.ingestCTZeroSample { + ref, err = b.app.AppendCTZeroSample(ref, ls, t, ct) + if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { + // Even for the first sample OOO is a common scenario because + // we can't tell if a CT was already ingested in a previous request. + // We ignore the error. + b.logger.Debug("Error when appending CT in OTLP request", "err", err, "series", ls.String(), "created_timestamp", ct, "timestamp", t) + } + } + } + ref, err = b.app.Append(ref, ls, t, v) + if err != nil { + // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is + // a note indicating its inclusion in the future. 
+ if errors.Is(err, storage.ErrOutOfOrderSample) || + errors.Is(err, storage.ErrOutOfBounds) || + errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { + b.logger.Error("Out of order sample from OTLP", "err", err.Error(), "series", ls.String(), "timestamp", t) + } + } + ref = b.appendExemplars(ref, ls, es) + b.refs[hash] = ref + return +} + +func (b *combinedAppender) AppendHistogram(_ string, rawls labels.Labels, meta metadata.Metadata, t, ct int64, h *histogram.Histogram, es []exemplar.Exemplar) (err error) { + ls := rawls.MoveToModel() + hash := ls.Hash() + ref, exists := b.refs[hash] + if !exists { + ref, err = b.app.UpdateMetadata(0, ls, meta) + if err != nil { + b.samplesAppendedWithoutMetadata.Add(1) + b.logger.Debug("error while updating metadata from OTLP", "err", err) + } + if ct != 0 && b.ingestCTZeroSample { + ref, err = b.app.AppendHistogramCTZeroSample(ref, ls, t, ct, h, nil) + if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { + // Even for the first sample OOO is a common scenario because + // we can't tell if a CT was already ingested in a previous request. + // We ignore the error. + b.logger.Debug("Error when appending Histogram CT in OTLP request", "err", err, "series", ls.String(), "created_timestamp", ct, "timestamp", t) + } + } + } + ref, err = b.app.AppendHistogram(ref, ls, t, h, nil) + if err != nil { + // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is + // a note indicating its inclusion in the future. 
+ if errors.Is(err, storage.ErrOutOfOrderSample) || + errors.Is(err, storage.ErrOutOfBounds) || + errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { + b.logger.Error("Out of order histogram from OTLP", "err", err.Error(), "series", ls.String(), "timestamp", t) + } + } + ref = b.appendExemplars(ref, ls, es) + b.refs[hash] = ref + return +} + +func (b *combinedAppender) appendExemplars(ref storage.SeriesRef, ls modelLabels.Labels, es []exemplar.Exemplar) storage.SeriesRef { + var err error + for _, e := range es { + if ref, err = b.app.AppendExemplar(ref, ls, e); err != nil { + switch { + case errors.Is(err, storage.ErrOutOfOrderExemplar): + b.outOfOrderExemplars.Add(1) + b.logger.Debug("Out of order exemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + default: + // Since exemplar storage is still experimental, we don't fail the request on ingestion errors + b.logger.Debug("Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err) + } + } + } + return ref +} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go b/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go new file mode 100644 index 0000000000..d129f54d04 --- /dev/null +++ b/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go @@ -0,0 +1,77 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheusremotewrite + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/labels" + "github.com/prometheus/prometheus/util/testutil" +) + +type mockCombinedAppender struct { + samples []combinedSample + histograms []combinedHistogram +} + +type combinedSample struct { + ls labels.Labels + meta metadata.Metadata + t int64 + ct int64 + v float64 + es []exemplar.Exemplar +} + +type combinedHistogram struct { + ls labels.Labels + meta metadata.Metadata + t int64 + ct int64 + h *histogram.Histogram + es []exemplar.Exemplar +} + +func (m *mockCombinedAppender) AppendSample(_ string, ls labels.Labels, meta metadata.Metadata, t, ct int64, v float64, es []exemplar.Exemplar) error { + m.samples = append(m.samples, combinedSample{ + ls: ls, + meta: meta, + t: t, + ct: ct, + v: v, + es: es, + }) + return nil +} + +func (m *mockCombinedAppender) AppendHistogram(_ string, ls labels.Labels, meta metadata.Metadata, t, ct int64, h *histogram.Histogram, es []exemplar.Exemplar) error { + m.histograms = append(m.histograms, combinedHistogram{ + ls: ls, + meta: meta, + t: t, + ct: ct, + h: h, + es: es, + }) + return nil +} + +func requireEqual(t testing.TB, expected, actual interface{}, msgAndArgs ...interface{}) { + testutil.RequireEqualWithOptions(t, expected, actual, []cmp.Option{cmp.AllowUnexported(combinedSample{}, combinedHistogram{})}, msgAndArgs...) 
+} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index b763b3e2b4..3e27ad657c 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -21,24 +21,24 @@ import ( "encoding/hex" "fmt" "log" - "log/slog" "math" "slices" - "sort" "strconv" "time" "unicode/utf8" - "github.com/cespare/xxhash/v2" "github.com/prometheus/common/model" "github.com/prometheus/otlptranslator" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + "github.com/prometheus/prometheus/model/exemplar" + modelLabels "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/value" - "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/labels" ) const ( @@ -62,149 +62,83 @@ const ( defaultLookbackDelta = 5 * time.Minute ) -type bucketBoundsData struct { - ts *prompb.TimeSeries - bound float64 -} - -// byBucketBoundsData enables the usage of sort.Sort() with a slice of bucket bounds. -type byBucketBoundsData []bucketBoundsData - -func (m byBucketBoundsData) Len() int { return len(m) } -func (m byBucketBoundsData) Less(i, j int) bool { return m[i].bound < m[j].bound } -func (m byBucketBoundsData) Swap(i, j int) { m[i], m[j] = m[j], m[i] } - -// ByLabelName enables the usage of sort.Sort() with a slice of labels. -type ByLabelName []prompb.Label - -func (a ByLabelName) Len() int { return len(a) } -func (a ByLabelName) Less(i, j int) bool { return a[i].Name < a[j].Name } -func (a ByLabelName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// timeSeriesSignature returns a hashed label set signature. 
-// The label slice should not contain duplicate label names; this method sorts the slice by label name before creating -// the signature. -// The algorithm is the same as in Prometheus' labels.StableHash function. -func timeSeriesSignature(labels []prompb.Label) uint64 { - sort.Sort(ByLabelName(labels)) - - // Use xxhash.Sum64(b) for fast path as it's faster. - b := make([]byte, 0, 1024) - for i, v := range labels { - if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) { - // If labels entry is 1KB+ do not allocate whole entry. - h := xxhash.New() - _, _ = h.Write(b) - for _, v := range labels[i:] { - _, _ = h.WriteString(v.Name) - _, _ = h.Write(seps) - _, _ = h.WriteString(v.Value) - _, _ = h.Write(seps) - } - return h.Sum64() - } - - b = append(b, v.Name...) - b = append(b, seps[0]) - b = append(b, v.Value...) - b = append(b, seps[0]) - } - return xxhash.Sum64(b) -} - -var seps = []byte{'\xff'} - // createAttributes creates a slice of Prometheus Labels with OTLP attributes and pairs of string values. // Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen and // if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized. // If settings.PromoteResourceAttributes is not empty, it's a set of resource attributes that should be promoted to labels. 
-func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope scope, settings Settings, +func (c *PrometheusConverter) createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope scope, settings Settings, ignoreAttrs []string, logOnOverwrite bool, extras ...string, -) []prompb.Label { +) labels.Labels { resourceAttrs := resource.Attributes() serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName) instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID) - promotedAttrs := settings.PromoteResourceAttributes.promotedAttributes(resourceAttrs) - promoteScope := settings.PromoteScopeMetadata && scope.name != "" - scopeLabelCount := 0 - if promoteScope { - // Include name, version and schema URL. - scopeLabelCount = scope.attributes.Len() + 3 - } - // Calculate the maximum possible number of labels we could return so we can preallocate l. - maxLabelCount := attributes.Len() + len(settings.ExternalLabels) + len(promotedAttrs) + scopeLabelCount + len(extras)/2 - - if haveServiceName { - maxLabelCount++ - } - if haveInstanceID { - maxLabelCount++ - } // Ensure attributes are sorted by key for consistent merging of keys which // collide when sanitized. - labels := make([]prompb.Label, 0, maxLabelCount) + c.scratchBuilder.Reset() + // XXX: Should we always drop service namespace/service name/service instance ID from the labels // (as they get mapped to other Prometheus labels)? attributes.Range(func(key string, value pcommon.Value) bool { if !slices.Contains(ignoreAttrs, key) { - labels = append(labels, prompb.Label{Name: key, Value: value.AsString()}) + c.scratchBuilder.Add(key, value.AsString()) } return true }) - sort.Stable(ByLabelName(labels)) + c.scratchBuilder.Sort() + sortedLabels := c.scratchBuilder.Labels() - // map ensures no duplicate label names. 
- l := make(map[string]string, maxLabelCount) labelNamer := otlptranslator.LabelNamer{UTF8Allowed: settings.AllowUTF8} - for _, label := range labels { - finalKey := labelNamer.Build(label.Name) - if existingValue, alreadyExists := l[finalKey]; alreadyExists { - l[finalKey] = existingValue + ";" + label.Value - } else { - l[finalKey] = label.Value - } + if settings.AllowUTF8 { + // UTF8 is allowed, so conflicts aren't possible. + c.builder.Reset(sortedLabels) + } else { + // Now that we have sorted and filtered the labels, build the actual list + // of labels, and handle conflicts by appending values. + c.builder.Reset(labels.EmptyLabels()) + sortedLabels.Range(func(l modelLabels.Label) { + finalKey := labelNamer.Build(l.Name) + if existingValue := c.builder.Get(finalKey); existingValue != "" { + c.builder.Set(finalKey, existingValue+";"+l.Value) + } else { + c.builder.Set(finalKey, l.Value) + } + }) } - for _, lbl := range promotedAttrs { - normalized := labelNamer.Build(lbl.Name) - if _, exists := l[normalized]; !exists { - l[normalized] = lbl.Value - } - } + settings.PromoteResourceAttributes.addPromotedAttributes(c.builder, resourceAttrs, settings.AllowUTF8) if promoteScope { - l["otel_scope_name"] = scope.name - l["otel_scope_version"] = scope.version - l["otel_scope_schema_url"] = scope.schemaURL + c.builder.Set("otel_scope_name", scope.name) + c.builder.Set("otel_scope_version", scope.version) + c.builder.Set("otel_scope_schema_url", scope.schemaURL) scope.attributes.Range(func(k string, v pcommon.Value) bool { name := "otel_scope_" + k name = labelNamer.Build(name) - l[name] = v.AsString() + c.builder.Set(name, v.AsString()) return true }) } - // Map service.name + service.namespace to job. 
if haveServiceName { val := serviceName.AsString() if serviceNamespace, ok := resourceAttrs.Get(conventions.AttributeServiceNamespace); ok { val = fmt.Sprintf("%s/%s", serviceNamespace.AsString(), val) } - l[model.JobLabel] = val + c.builder.Set(model.JobLabel, val) } // Map service.instance.id to instance. if haveInstanceID { - l[model.InstanceLabel] = instance.AsString() + c.builder.Set(model.InstanceLabel, instance.AsString()) } for key, value := range settings.ExternalLabels { // External labels have already been sanitized. - if _, alreadyExists := l[key]; alreadyExists { + if existingValue := c.builder.Get(key); existingValue != "" { // Skip external labels if they are overridden by metric attributes. continue } - l[key] = value + c.builder.Set(key, value) } for i := 0; i < len(extras); i += 2 { @@ -213,23 +147,17 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope s } name := extras[i] - _, found := l[name] - if found && logOnOverwrite { + if existingValue := c.builder.Get(name); existingValue != "" && logOnOverwrite { log.Println("label " + name + " is overwritten. Check if Prometheus reserved labels are used.") } // internal labels should be maintained. if len(name) <= 4 || name[:2] != "__" || name[len(name)-2:] != "__" { name = labelNamer.Build(name) } - l[name] = extras[i+1] - } - - labels = labels[:0] - for k, v := range l { - labels = append(labels, prompb.Label{Name: k, Value: v}) + c.builder.Set(name, extras[i+1]) } - return labels + return c.builder.Labels() } func aggregationTemporality(metric pmetric.Metric) (pmetric.AggregationTemporality, bool, error) { @@ -255,7 +183,7 @@ func aggregationTemporality(metric pmetric.Metric) (pmetric.AggregationTemporali // However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets: // https://github.com/prometheus/prometheus/issues/13485. 
func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice, - resource pcommon.Resource, settings Settings, baseName string, scope scope, logger *slog.Logger, + resource pcommon.Resource, settings Settings, baseName string, scope scope, meta metadata.Metadata, ) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { @@ -264,45 +192,43 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo pt := dataPoints.At(x) timestamp := convertTimeStamp(pt.Timestamp()) - startTimestampNs := pt.StartTimestamp() - startTimestampMs := convertTimeStamp(startTimestampNs) - baseLabels := createAttributes(resource, pt.Attributes(), scope, settings, nil, false) + startTimestamp := convertTimeStamp(pt.StartTimestamp()) + baseLabels := c.createAttributes(resource, pt.Attributes(), scope, settings, nil, false) // If the sum is unset, it indicates the _sum metric point should be // omitted if pt.HasSum() { // treat sum as a sample in an individual TimeSeries - sumlabels := createLabels(baseName+sumStr, baseLabels) - sum := &prompb.Sample{ - Value: pt.Sum(), - Timestamp: timestamp, - } + val := pt.Sum() if pt.Flags().NoRecordedValue() { - sum.Value = math.Float64frombits(value.StaleNaN) + val = math.Float64frombits(value.StaleNaN) } - c.handleStartTime(startTimestampMs, timestamp, sumlabels, settings, "histogram_sum", sum.Value, logger) - c.addSample(sum, sumlabels) + sumlabels := c.addLabels(baseName+sumStr, baseLabels) + if err := c.appender.AppendSample(baseName, sumlabels, meta, timestamp, startTimestamp, val, nil); err != nil { + return err + } } // treat count as a sample in an individual TimeSeries - count := &prompb.Sample{ - Value: float64(pt.Count()), - Timestamp: timestamp, - } + val := float64(pt.Count()) if pt.Flags().NoRecordedValue() { - count.Value = math.Float64frombits(value.StaleNaN) + val = math.Float64frombits(value.StaleNaN) } - countlabels 
:= createLabels(baseName+countStr, baseLabels) - c.handleStartTime(startTimestampMs, timestamp, countlabels, settings, "histogram_count", count.Value, logger) - c.addSample(count, countlabels) + countlabels := c.addLabels(baseName+countStr, baseLabels) + if err := c.appender.AppendSample(baseName, countlabels, meta, timestamp, startTimestamp, val, nil); err != nil { + return err + } + exemplars, err := c.getPromExemplars(ctx, pt.Exemplars()) + if err != nil { + return err + } + nextExemplarIdx := 0 // cumulative count for conversion to cumulative histogram var cumulativeCount uint64 - var bucketBounds []bucketBoundsData - // process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1 for i := 0; i < pt.ExplicitBounds().Len() && i < pt.BucketCounts().Len(); i++ { if err := c.everyN.checkContext(ctx); err != nil { @@ -311,118 +237,110 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo bound := pt.ExplicitBounds().At(i) cumulativeCount += pt.BucketCounts().At(i) - bucket := &prompb.Sample{ - Value: float64(cumulativeCount), - Timestamp: timestamp, + + // Find exemplars that belong to this bucket. Both exemplars and + // buckets are sorted in ascending order. + var currentBucketExemplars []exemplar.Exemplar + for ; nextExemplarIdx < len(exemplars); nextExemplarIdx++ { + ex := exemplars[nextExemplarIdx] + if ex.Value > bound { + // This exemplar belongs in a higher bucket. 
+ break + } + currentBucketExemplars = append(currentBucketExemplars, ex) } + val := float64(cumulativeCount) if pt.Flags().NoRecordedValue() { - bucket.Value = math.Float64frombits(value.StaleNaN) + val = math.Float64frombits(value.StaleNaN) } boundStr := strconv.FormatFloat(bound, 'f', -1, 64) - labels := createLabels(baseName+bucketStr, baseLabels, leStr, boundStr) - c.handleStartTime(startTimestampMs, timestamp, labels, settings, "histogram_bucket", bucket.Value, logger) - ts := c.addSample(bucket, labels) - - bucketBounds = append(bucketBounds, bucketBoundsData{ts: ts, bound: bound}) + labels := c.addLabels(baseName+bucketStr, baseLabels, leStr, boundStr) + if err := c.appender.AppendSample(baseName, labels, meta, timestamp, startTimestamp, val, currentBucketExemplars); err != nil { + return err + } } // add le=+Inf bucket - infBucket := &prompb.Sample{ - Timestamp: timestamp, - } + val = float64(pt.Count()) if pt.Flags().NoRecordedValue() { - infBucket.Value = math.Float64frombits(value.StaleNaN) - } else { - infBucket.Value = float64(pt.Count()) + val = math.Float64frombits(value.StaleNaN) } - infLabels := createLabels(baseName+bucketStr, baseLabels, leStr, pInfStr) - c.handleStartTime(startTimestampMs, timestamp, infLabels, settings, "histogram_inf_bucket", infBucket.Value, logger) - ts := c.addSample(infBucket, infLabels) - - bucketBounds = append(bucketBounds, bucketBoundsData{ts: ts, bound: math.Inf(1)}) - if err := c.addExemplars(ctx, pt, bucketBounds); err != nil { + infLabels := c.addLabels(baseName+bucketStr, baseLabels, leStr, pInfStr) + if err := c.appender.AppendSample(baseName, infLabels, meta, timestamp, startTimestamp, val, exemplars[nextExemplarIdx:]); err != nil { return err } - if settings.ExportCreatedMetric && startTimestampNs != 0 { - labels := createLabels(baseName+createdSuffix, baseLabels) - c.addTimeSeriesIfNeeded(labels, startTimestampMs, pt.Timestamp()) + if settings.ExportCreatedMetric && pt.StartTimestamp() != 0 { + labels := 
c.addLabels(baseName+createdSuffix, baseLabels) + if c.timeSeriesIsNew(labels) { + if err := c.appender.AppendSample(baseName, labels, meta, timestamp, 0, float64(startTimestamp), nil); err != nil { + return err + } + } } - logger.Debug("addHistogramDataPoints", "labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "histogram") } return nil } -type exemplarType interface { - pmetric.ExponentialHistogramDataPoint | pmetric.HistogramDataPoint | pmetric.NumberDataPoint - Exemplars() pmetric.ExemplarSlice -} - -func getPromExemplars[T exemplarType](ctx context.Context, everyN *everyNTimes, pt T) ([]prompb.Exemplar, error) { - promExemplars := make([]prompb.Exemplar, 0, pt.Exemplars().Len()) - for i := 0; i < pt.Exemplars().Len(); i++ { - if err := everyN.checkContext(ctx); err != nil { +func (c *PrometheusConverter) getPromExemplars(ctx context.Context, exemplars pmetric.ExemplarSlice) ([]exemplar.Exemplar, error) { + if exemplars.Len() == 0 { + return nil, nil + } + outputExemplars := make([]exemplar.Exemplar, 0, exemplars.Len()) + for i := 0; i < exemplars.Len(); i++ { + if err := c.everyN.checkContext(ctx); err != nil { return nil, err } - exemplar := pt.Exemplars().At(i) + ex := exemplars.At(i) exemplarRunes := 0 - promExemplar := prompb.Exemplar{ - Timestamp: timestamp.FromTime(exemplar.Timestamp().AsTime()), + ts := timestamp.FromTime(ex.Timestamp().AsTime()) + newExemplar := exemplar.Exemplar{ + Ts: ts, + HasTs: ts != 0, } - switch exemplar.ValueType() { + c.scratchBuilder.Reset() + switch ex.ValueType() { case pmetric.ExemplarValueTypeInt: - promExemplar.Value = float64(exemplar.IntValue()) + newExemplar.Value = float64(ex.IntValue()) case pmetric.ExemplarValueTypeDouble: - promExemplar.Value = exemplar.DoubleValue() + newExemplar.Value = ex.DoubleValue() default: - return nil, fmt.Errorf("unsupported exemplar value type: %v", exemplar.ValueType()) + return nil, fmt.Errorf("unsupported 
exemplar value type: %v", ex.ValueType()) } - if traceID := exemplar.TraceID(); !traceID.IsEmpty() { + if traceID := ex.TraceID(); !traceID.IsEmpty() { val := hex.EncodeToString(traceID[:]) exemplarRunes += utf8.RuneCountInString(traceIDKey) + utf8.RuneCountInString(val) - promLabel := prompb.Label{ - Name: traceIDKey, - Value: val, - } - promExemplar.Labels = append(promExemplar.Labels, promLabel) + c.scratchBuilder.Add(traceIDKey, val) } - if spanID := exemplar.SpanID(); !spanID.IsEmpty() { + if spanID := ex.SpanID(); !spanID.IsEmpty() { val := hex.EncodeToString(spanID[:]) exemplarRunes += utf8.RuneCountInString(spanIDKey) + utf8.RuneCountInString(val) - promLabel := prompb.Label{ - Name: spanIDKey, - Value: val, - } - promExemplar.Labels = append(promExemplar.Labels, promLabel) + c.scratchBuilder.Add(spanIDKey, val) } - attrs := exemplar.FilteredAttributes() - labelsFromAttributes := make([]prompb.Label, 0, attrs.Len()) + attrs := ex.FilteredAttributes() attrs.Range(func(key string, value pcommon.Value) bool { - val := value.AsString() - exemplarRunes += utf8.RuneCountInString(key) + utf8.RuneCountInString(val) - promLabel := prompb.Label{ - Name: key, - Value: val, - } - - labelsFromAttributes = append(labelsFromAttributes, promLabel) - + exemplarRunes += utf8.RuneCountInString(key) + utf8.RuneCountInString(value.AsString()) return true }) + + // Only append filtered attributes if it does not cause exemplar + // labels to exceed the max number of runes. if exemplarRunes <= maxExemplarRunes { - // only append filtered attributes if it does not cause exemplar - // labels to exceed the max number of runes - promExemplar.Labels = append(promExemplar.Labels, labelsFromAttributes...) 
+ attrs.Range(func(key string, value pcommon.Value) bool { + c.scratchBuilder.Add(key, value.AsString()) + return true + }) } - - promExemplars = append(promExemplars, promExemplar) + c.scratchBuilder.Sort() + newExemplar.Labels = c.scratchBuilder.Labels().MoveToModel() + outputExemplars = append(outputExemplars, newExemplar) } - return promExemplars, nil + return outputExemplars, nil } // findMinAndMaxTimestamps returns the minimum of minTimestamp and the earliest timestamp in metric and @@ -471,7 +389,7 @@ func findMinAndMaxTimestamps(metric pmetric.Metric, minTimestamp, maxTimestamp p } func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource, - settings Settings, baseName string, scope scope, logger *slog.Logger, + settings Settings, baseName string, scope scope, meta metadata.Metadata, ) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { @@ -480,175 +398,105 @@ func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoin pt := dataPoints.At(x) timestamp := convertTimeStamp(pt.Timestamp()) - startTimestampMs := convertTimeStamp(pt.StartTimestamp()) - baseLabels := createAttributes(resource, pt.Attributes(), scope, settings, nil, false) + startTimestamp := convertTimeStamp(pt.StartTimestamp()) + baseLabels := c.createAttributes(resource, pt.Attributes(), scope, settings, nil, false) // treat sum as a sample in an individual TimeSeries - sum := &prompb.Sample{ - Value: pt.Sum(), - Timestamp: timestamp, - } + val := pt.Sum() if pt.Flags().NoRecordedValue() { - sum.Value = math.Float64frombits(value.StaleNaN) + val = math.Float64frombits(value.StaleNaN) } // sum and count of the summary should append suffix to baseName - sumlabels := createLabels(baseName+sumStr, baseLabels) - c.handleStartTime(startTimestampMs, timestamp, sumlabels, settings, "summary_sum", sum.Value, logger) - c.addSample(sum, sumlabels) + 
sumlabels := c.addLabels(baseName+sumStr, baseLabels) + if err := c.appender.AppendSample(baseName, sumlabels, meta, timestamp, startTimestamp, val, nil); err != nil { + return err + } // treat count as a sample in an individual TimeSeries - count := &prompb.Sample{ - Value: float64(pt.Count()), - Timestamp: timestamp, - } + val = float64(pt.Count()) if pt.Flags().NoRecordedValue() { - count.Value = math.Float64frombits(value.StaleNaN) + val = math.Float64frombits(value.StaleNaN) + } + countlabels := c.addLabels(baseName+countStr, baseLabels) + if err := c.appender.AppendSample(baseName, countlabels, meta, timestamp, startTimestamp, val, nil); err != nil { + return err } - countlabels := createLabels(baseName+countStr, baseLabels) - c.handleStartTime(startTimestampMs, timestamp, countlabels, settings, "summary_count", count.Value, logger) - c.addSample(count, countlabels) // process each percentile/quantile for i := 0; i < pt.QuantileValues().Len(); i++ { qt := pt.QuantileValues().At(i) - quantile := &prompb.Sample{ - Value: qt.Value(), - Timestamp: timestamp, - } + val = qt.Value() if pt.Flags().NoRecordedValue() { - quantile.Value = math.Float64frombits(value.StaleNaN) + val = math.Float64frombits(value.StaleNaN) } percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64) - qtlabels := createLabels(baseName, baseLabels, quantileStr, percentileStr) - c.handleStartTime(startTimestampMs, timestamp, qtlabels, settings, "summary_quantile", quantile.Value, logger) - c.addSample(quantile, qtlabels) + qtlabels := c.addLabels(baseName, baseLabels, quantileStr, percentileStr) + if err := c.appender.AppendSample(baseName, qtlabels, meta, timestamp, startTimestamp, val, nil); err != nil { + return err + } } - if settings.ExportCreatedMetric && startTimestampMs != 0 { - createdLabels := createLabels(baseName+createdSuffix, baseLabels) - c.addTimeSeriesIfNeeded(createdLabels, startTimestampMs, pt.Timestamp()) + if settings.ExportCreatedMetric && pt.StartTimestamp() != 0 
{ + createdLabels := c.addLabels(baseName+createdSuffix, baseLabels) + if c.timeSeriesIsNew(createdLabels) { + if err := c.appender.AppendSample(baseName, createdLabels, meta, timestamp, 0, float64(startTimestamp), nil); err != nil { + return err + } + } } - - logger.Debug("addSummaryDataPoints", "labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "summary") } return nil } -// createLabels returns a copy of baseLabels, adding to it the pair model.MetricNameLabel=name. +// addLabels returns a copy of baseLabels, adding to it the pair model.MetricNameLabel=name. // If extras are provided, corresponding label pairs are also added to the returned slice. // If extras is uneven length, the last (unpaired) extra will be ignored. -func createLabels(name string, baseLabels []prompb.Label, extras ...string) []prompb.Label { - extraLabelCount := len(extras) / 2 - labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name - copy(labels, baseLabels) +func (c *PrometheusConverter) addLabels(name string, baseLabels labels.Labels, extras ...string) labels.Labels { + c.builder.Reset(baseLabels) n := len(extras) n -= n % 2 for extrasIdx := 0; extrasIdx < n; extrasIdx += 2 { - labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]}) + c.builder.Set(extras[extrasIdx], extras[extrasIdx+1]) } - - labels = append(labels, prompb.Label{Name: model.MetricNameLabel, Value: name}) - return labels + c.builder.Set(model.MetricNameLabel, name) + return c.builder.Labels() } // getOrCreateTimeSeries returns the time series corresponding to the label set if existent, and false. // Otherwise it creates a new one and returns that, and true. 
-func (c *PrometheusConverter) getOrCreateTimeSeries(lbls []prompb.Label) (*prompb.TimeSeries, bool) { - h := timeSeriesSignature(lbls) - ts := c.unique[h] - if ts != nil { - if isSameMetric(ts, lbls) { +func (c *PrometheusConverter) timeSeriesIsNew(lbls labels.Labels) bool { + h := lbls.Hash() + uLabels, ok := c.unique[h] + if ok { + if labels.Equal(uLabels, lbls) { // We already have this metric - return ts, false + return false } // Look for a matching conflict - for _, cTS := range c.conflicts[h] { - if isSameMetric(cTS, lbls) { + for _, cLabels := range c.conflicts[h] { + if labels.Equal(cLabels, lbls) { // We already have this metric - return cTS, false + return false } } // New conflict - ts = &prompb.TimeSeries{ - Labels: lbls, - } - c.conflicts[h] = append(c.conflicts[h], ts) - return ts, true + c.conflicts[h] = append(c.conflicts[h], uLabels) + return true } // This metric is new - ts = &prompb.TimeSeries{ - Labels: lbls, - } - c.unique[h] = ts - return ts, true -} - -// addTimeSeriesIfNeeded adds a corresponding time series if it doesn't already exist. -// If the time series doesn't already exist, it gets added with startTimestamp for its value and timestamp for its timestamp, -// both converted to milliseconds. -func (c *PrometheusConverter) addTimeSeriesIfNeeded(lbls []prompb.Label, startTimestamp int64, timestamp pcommon.Timestamp) { - ts, created := c.getOrCreateTimeSeries(lbls) - if created { - ts.Samples = []prompb.Sample{ - { - Value: float64(startTimestamp), - Timestamp: convertTimeStamp(timestamp), - }, - } - } -} - -// defaultIntervalForStartTimestamps is hardcoded to 5 minutes in milliseconds. -// Assuming a DPM of 1 and knowing that Grafana's $__rate_interval is typically 4 times the write interval that would give -// us 4 minutes. We add an extra minute for delays. 
-const defaultIntervalForStartTimestamps = int64(300_000) - -// handleStartTime adds a zero sample at startTs only if startTs is within validIntervalForStartTimestamps of the sample timestamp. -// The reason for doing this is that PRW v1 doesn't support Created Timestamps. After switching to PRW v2's direct CT support, -// make use of its direct support fort Created Timestamps instead. -// See https://github.com/prometheus/prometheus/issues/14600 for context. -// See https://opentelemetry.io/docs/specs/otel/metrics/data-model/#resets-and-gaps to know more about how OTel handles -// resets for cumulative metrics. -func (c *PrometheusConverter) handleStartTime(startTs, ts int64, labels []prompb.Label, settings Settings, typ string, val float64, logger *slog.Logger) { - if !settings.EnableCreatedTimestampZeroIngestion { - return - } - // We want to ignore the write in three cases. - // - We've seen samples with the start timestamp set to epoch meaning it wasn't set by the sender so we skip those. - // - If StartTimestamp equals Timestamp ist means we don't know at which time the metric restarted according to the spec. - // - StartTimestamp can never be greater than the sample timestamp. - if startTs <= 0 || startTs == ts || startTs > ts { - return - } - - threshold := defaultIntervalForStartTimestamps - if settings.ValidIntervalCreatedTimestampZeroIngestion != 0 { - threshold = settings.ValidIntervalCreatedTimestampZeroIngestion.Milliseconds() - } - - // The difference between the start and the actual timestamp is more than a reasonable time, so we skip this sample. 
- if ts-startTs > threshold { - return - } - - logger.Debug("adding zero value at start_ts", "type", typ, "labels", labelsStringer(labels), "start_ts", startTs, "sample_ts", ts, "sample_value", val) - - var createdTimeValue float64 - if settings.EnableStartTimeQuietZero { - createdTimeValue = math.Float64frombits(value.QuietZeroNaN) - } - c.addSample(&prompb.Sample{Timestamp: startTs, Value: createdTimeValue}, labels) + c.unique[h] = uLabels + return true } // addResourceTargetInfo converts the resource to the target info metric. -func addResourceTargetInfo(resource pcommon.Resource, settings Settings, earliestTimestamp, latestTimestamp time.Time, converter *PrometheusConverter) { +func (c *PrometheusConverter) addResourceTargetInfo(resource pcommon.Resource, settings Settings, earliestTimestamp, latestTimestamp time.Time) error { if settings.DisableTargetInfo { - return + return nil } attributes := resource.Attributes() @@ -666,7 +514,7 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, earlies } if nonIdentifyingAttrsCount == 0 { // If we only have job + instance, then target_info isn't useful, so don't add it. - return + return nil } name := targetMetricName @@ -679,18 +527,21 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, earlies // Do not pass identifying attributes as ignoreAttrs below. identifyingAttrs = nil } - labels := createAttributes(resource, attributes, scope{}, settings, identifyingAttrs, false, model.MetricNameLabel, name) + lbls := c.createAttributes(resource, attributes, scope{}, settings, identifyingAttrs, false, model.MetricNameLabel, name) haveIdentifier := false - for _, l := range labels { + lbls.Range(func(l modelLabels.Label) { if l.Name == model.JobLabel || l.Name == model.InstanceLabel { haveIdentifier = true - break } - } + }) if !haveIdentifier { // We need at least one identifying label to generate target_info. 
- return + return nil + } + meta := metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: "Target metadata", } // Generate target_info samples starting at earliestTimestamp and ending at latestTimestamp, @@ -701,19 +552,13 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, earlies settings.LookbackDelta = defaultLookbackDelta } interval := settings.LookbackDelta / 2 - ts, _ := converter.getOrCreateTimeSeries(labels) - for timestamp := earliestTimestamp; timestamp.Before(latestTimestamp); timestamp = timestamp.Add(interval) { - ts.Samples = append(ts.Samples, prompb.Sample{ - Value: float64(1), - Timestamp: timestamp.UnixMilli(), - }) - } - if len(ts.Samples) == 0 || ts.Samples[len(ts.Samples)-1].Timestamp < latestTimestamp.UnixMilli() { - ts.Samples = append(ts.Samples, prompb.Sample{ - Value: float64(1), - Timestamp: latestTimestamp.UnixMilli(), - }) + timestamp := earliestTimestamp + for ; timestamp.Before(latestTimestamp); timestamp = timestamp.Add(interval) { + if err := c.appender.AppendSample(targetMetricName, lbls, meta, timestamp.UnixMilli(), 0, float64(1), nil); err != nil { + return err + } } + return c.appender.AppendSample(targetMetricName, lbls, meta, latestTimestamp.UnixMilli(), 0, float64(1), nil) } // convertTimeStamp converts OTLP timestamp in ns to timestamp in ms. 
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go index a78179cec5..b65fb5ac58 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go @@ -18,20 +18,18 @@ package prometheusremotewrite import ( "context" - "math" "testing" "time" - "github.com/google/go-cmp/cmp" "github.com/prometheus/common/model" - "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/model/value" - "github.com/prometheus/prometheus/prompb" + //"github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/labels" "github.com/prometheus/prometheus/util/testutil" ) @@ -75,111 +73,51 @@ func TestCreateAttributes(t *testing.T) { promoteScope bool ignoreResourceAttributes []string ignoreAttrs []string - expectedLabels []prompb.Label + expectedLabels labels.Labels }{ { name: "Successful conversion without resource attribute promotion and without scope promotion", scope: defaultScope, promoteResourceAttributes: nil, promoteScope: false, - expectedLabels: []prompb.Label{ - { - Name: "__name__", - Value: "test_metric", - }, - { - Name: "instance", - Value: "service ID", - }, - { - Name: "job", - Value: "service name", - }, - { - Name: "metric_attr", - Value: "metric value", - }, - { - Name: "metric_attr_other", - Value: "metric value other", - }, - }, + expectedLabels: labels.FromStrings( + "__name__", "test_metric", + "instance", "service ID", + "job", "service name", + "metric_attr", "metric value", + "metric_attr_other", "metric value other", + ), }, { name: "Successful conversion without 
resource attribute promotion and with scope promotion", scope: defaultScope, promoteResourceAttributes: nil, promoteScope: true, - expectedLabels: []prompb.Label{ - { - Name: "__name__", - Value: "test_metric", - }, - { - Name: "instance", - Value: "service ID", - }, - { - Name: "job", - Value: "service name", - }, - { - Name: "metric_attr", - Value: "metric value", - }, - { - Name: "metric_attr_other", - Value: "metric value other", - }, - { - Name: "otel_scope_name", - Value: defaultScope.name, - }, - { - Name: "otel_scope_schema_url", - Value: defaultScope.schemaURL, - }, - { - Name: "otel_scope_version", - Value: defaultScope.version, - }, - { - Name: "otel_scope_attr1", - Value: "value1", - }, - { - Name: "otel_scope_attr2", - Value: "value2", - }, - }, + expectedLabels: labels.FromStrings( + "__name__", "test_metric", + "instance", "service ID", + "job", "service name", + "metric_attr", "metric value", + "metric_attr_other", "metric value other", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ), }, { name: "Successful conversion without resource attribute promotion and with scope promotion, but without scope", scope: scope{}, promoteResourceAttributes: nil, promoteScope: true, - expectedLabels: []prompb.Label{ - { - Name: "__name__", - Value: "test_metric", - }, - { - Name: "instance", - Value: "service ID", - }, - { - Name: "job", - Value: "service name", - }, - { - Name: "metric_attr", - Value: "metric value", - }, - { - Name: "metric_attr_other", - Value: "metric value other", - }, - }, + expectedLabels: labels.FromStrings( + "__name__", "test_metric", + "instance", "service ID", + "job", "service name", + "metric_attr", "metric value", + "metric_attr_other", "metric value other", + ), }, { name: "Successful conversion with some attributes ignored and with scope promotion", @@ -187,260 +125,95 @@ func 
TestCreateAttributes(t *testing.T) { promoteResourceAttributes: nil, promoteScope: true, ignoreAttrs: []string{"metric-attr-other"}, - expectedLabels: []prompb.Label{ - { - Name: "__name__", - Value: "test_metric", - }, - { - Name: "instance", - Value: "service ID", - }, - { - Name: "job", - Value: "service name", - }, - { - Name: "metric_attr", - Value: "metric value", - }, - { - Name: "otel_scope_name", - Value: defaultScope.name, - }, - { - Name: "otel_scope_schema_url", - Value: defaultScope.schemaURL, - }, - { - Name: "otel_scope_version", - Value: defaultScope.version, - }, - { - Name: "otel_scope_attr1", - Value: "value1", - }, - { - Name: "otel_scope_attr2", - Value: "value2", - }, - }, + expectedLabels: labels.FromStrings( + "__name__", "test_metric", + "instance", "service ID", + "job", "service name", + "metric_attr", "metric value", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ), }, { name: "Successful conversion with resource attribute promotion and with scope promotion", scope: defaultScope, promoteResourceAttributes: []string{"non-existent-attr", "existent-attr"}, promoteScope: true, - expectedLabels: []prompb.Label{ - { - Name: "__name__", - Value: "test_metric", - }, - { - Name: "instance", - Value: "service ID", - }, - { - Name: "job", - Value: "service name", - }, - { - Name: "metric_attr", - Value: "metric value", - }, - { - Name: "metric_attr_other", - Value: "metric value other", - }, - { - Name: "existent_attr", - Value: "resource value", - }, - { - Name: "otel_scope_name", - Value: defaultScope.name, - }, - { - Name: "otel_scope_schema_url", - Value: defaultScope.schemaURL, - }, - { - Name: "otel_scope_version", - Value: defaultScope.version, - }, - { - Name: "otel_scope_attr1", - Value: "value1", - }, - { - Name: "otel_scope_attr2", - Value: "value2", - }, - }, + expectedLabels: 
labels.FromStrings( + "__name__", "test_metric", + "instance", "service ID", + "job", "service name", + "metric_attr", "metric value", + "metric_attr_other", "metric value other", + "existent_attr", "resource value", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ), }, { name: "Successful conversion with resource attribute promotion and with scope promotion, conflicting resource attributes are ignored", scope: defaultScope, promoteResourceAttributes: []string{"non-existent-attr", "existent-attr", "metric-attr", "job", "instance"}, promoteScope: true, - expectedLabels: []prompb.Label{ - { - Name: "__name__", - Value: "test_metric", - }, - { - Name: "instance", - Value: "service ID", - }, - { - Name: "job", - Value: "service name", - }, - { - Name: "existent_attr", - Value: "resource value", - }, - { - Name: "metric_attr", - Value: "metric value", - }, - { - Name: "metric_attr_other", - Value: "metric value other", - }, - { - Name: "otel_scope_name", - Value: defaultScope.name, - }, - { - Name: "otel_scope_schema_url", - Value: defaultScope.schemaURL, - }, - { - Name: "otel_scope_version", - Value: defaultScope.version, - }, - { - Name: "otel_scope_attr1", - Value: "value1", - }, - { - Name: "otel_scope_attr2", - Value: "value2", - }, - }, + expectedLabels: labels.FromStrings( + "__name__", "test_metric", + "instance", "service ID", + "job", "service name", + "existent_attr", "resource value", + "metric_attr", "metric value", + "metric_attr_other", "metric value other", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ), }, { name: "Successful conversion with resource attribute promotion and with scope promotion, attributes are only promoted once", scope: 
defaultScope, promoteResourceAttributes: []string{"existent-attr", "existent-attr"}, promoteScope: true, - expectedLabels: []prompb.Label{ - { - Name: "__name__", - Value: "test_metric", - }, - { - Name: "instance", - Value: "service ID", - }, - { - Name: "job", - Value: "service name", - }, - { - Name: "existent_attr", - Value: "resource value", - }, - { - Name: "metric_attr", - Value: "metric value", - }, - { - Name: "metric_attr_other", - Value: "metric value other", - }, - { - Name: "otel_scope_name", - Value: defaultScope.name, - }, - { - Name: "otel_scope_schema_url", - Value: defaultScope.schemaURL, - }, - { - Name: "otel_scope_version", - Value: defaultScope.version, - }, - { - Name: "otel_scope_attr1", - Value: "value1", - }, - { - Name: "otel_scope_attr2", - Value: "value2", - }, - }, + expectedLabels: labels.FromStrings( + "__name__", "test_metric", + "instance", "service ID", + "job", "service name", + "existent_attr", "resource value", + "metric_attr", "metric value", + "metric_attr_other", "metric value other", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ), }, { name: "Successful conversion promoting all resource attributes and with scope promotion", scope: defaultScope, promoteAllResourceAttributes: true, promoteScope: true, - expectedLabels: []prompb.Label{ - { - Name: "__name__", - Value: "test_metric", - }, - { - Name: "instance", - Value: "service ID", - }, - { - Name: "job", - Value: "service name", - }, - { - Name: "existent_attr", - Value: "resource value", - }, - { - Name: "metric_attr", - Value: "metric value", - }, - { - Name: "metric_attr_other", - Value: "metric value other", - }, - { - Name: "service_name", - Value: "service name", - }, - { - Name: "service_instance_id", - Value: "service ID", - }, - { - Name: "otel_scope_name", - Value: defaultScope.name, - }, - { - Name: 
"otel_scope_schema_url", - Value: defaultScope.schemaURL, - }, - { - Name: "otel_scope_version", - Value: defaultScope.version, - }, - { - Name: "otel_scope_attr1", - Value: "value1", - }, - { - Name: "otel_scope_attr2", - Value: "value2", - }, - }, + expectedLabels: labels.FromStrings( + "__name__", "test_metric", + "instance", "service ID", + "job", "service name", + "existent_attr", "resource value", + "metric_attr", "metric value", + "metric_attr_other", "metric value other", + "service_name", "service name", + "service_instance_id", "service ID", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ), }, { name: "Successful conversion promoting all resource attributes and with scope promotion, ignoring 'service.instance.id'", @@ -450,60 +223,25 @@ func TestCreateAttributes(t *testing.T) { ignoreResourceAttributes: []string{ "service.instance.id", }, - expectedLabels: []prompb.Label{ - { - Name: "__name__", - Value: "test_metric", - }, - { - Name: "instance", - Value: "service ID", - }, - { - Name: "job", - Value: "service name", - }, - { - Name: "existent_attr", - Value: "resource value", - }, - { - Name: "metric_attr", - Value: "metric value", - }, - { - Name: "metric_attr_other", - Value: "metric value other", - }, - { - Name: "service_name", - Value: "service name", - }, - { - Name: "otel_scope_name", - Value: defaultScope.name, - }, - { - Name: "otel_scope_schema_url", - Value: defaultScope.schemaURL, - }, - { - Name: "otel_scope_version", - Value: defaultScope.version, - }, - { - Name: "otel_scope_attr1", - Value: "value1", - }, - { - Name: "otel_scope_attr2", - Value: "value2", - }, - }, + expectedLabels: labels.FromStrings( + "__name__", "test_metric", + "instance", "service ID", + "job", "service name", + "existent_attr", "resource value", + "metric_attr", "metric value", + "metric_attr_other", "metric 
value other", + "service_name", "service name", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ), }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { + c := NewPrometheusConverter(&mockCombinedAppender{}) settings := Settings{ PromoteResourceAttributes: NewPromoteResourceAttributes(config.OTLPConfig{ PromoteAllResourceAttributes: tc.promoteAllResourceAttributes, @@ -512,9 +250,9 @@ func TestCreateAttributes(t *testing.T) { }), PromoteScopeMetadata: tc.promoteScope, } - lbls := createAttributes(resource, attrs, tc.scope, settings, tc.ignoreAttrs, false, model.MetricNameLabel, "test_metric") + lbls := c.createAttributes(resource, attrs, tc.scope, settings, tc.ignoreAttrs, false, model.MetricNameLabel, "test_metric") - require.ElementsMatch(t, lbls, tc.expectedLabels) + testutil.RequireEqual(t, lbls, tc.expectedLabels) }) } } @@ -550,18 +288,13 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) { attributes: scopeAttrs, } - now := time.Now() - nowUnixNano := pcommon.Timestamp(now.UnixNano()) - nowMinus2m30s := pcommon.Timestamp(now.Add(-2 * time.Minute).Add(-30 * time.Second).UnixNano()) - nowMinus6m := pcommon.Timestamp(now.Add(-20 * time.Second).UnixNano()) - nowMinus1h := pcommon.Timestamp(now.Add(-1 * time.Hour).UnixNano()) + ts := pcommon.Timestamp(time.Now().UnixNano()) tests := []struct { - name string - metric func() pmetric.Metric - scope scope - promoteScope bool - overrideValidInterval time.Duration - want func() map[uint64]*prompb.TimeSeries + name string + metric func() pmetric.Metric + scope scope + promoteScope bool + want func() []combinedSample }{ { name: "summary with start time and without scope promotion", @@ -571,228 +304,37 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) { metric.SetEmptySummary() dp := 
metric.Summary().DataPoints().AppendEmpty() - dp.SetTimestamp(nowUnixNano) - dp.SetStartTimestamp(nowUnixNano) + dp.SetTimestamp(ts) + dp.SetStartTimestamp(ts) return metric }, scope: defaultScope, promoteScope: false, - want: func() map[uint64]*prompb.TimeSeries { - countLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + countStr}, - } - sumLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + sumStr}, - } - createdLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + createdSuffix}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(countLabels): { - Labels: countLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(nowUnixNano)}, - }, - }, - timeSeriesSignature(sumLabels): { - Labels: sumLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(nowUnixNano)}, - }, - }, - timeSeriesSignature(createdLabels): { - Labels: createdLabels, - Samples: []prompb.Sample{ - {Value: float64(convertTimeStamp(nowUnixNano)), Timestamp: convertTimeStamp(nowUnixNano)}, - }, - }, - } - }, - }, - { - name: "summary with start time equal to sample timestamp", - metric: func() pmetric.Metric { - metric := pmetric.NewMetric() - metric.SetName("test_summary") - metric.SetEmptySummary() - - dp := metric.Summary().DataPoints().AppendEmpty() - dp.SetTimestamp(nowUnixNano) - dp.SetStartTimestamp(nowUnixNano) - - return metric - }, - scope: defaultScope, - promoteScope: false, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + countStr}, - } - createdLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + createdSuffix}, - } - sumLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + sumStr}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - 
{Value: 0, Timestamp: convertTimeStamp(nowUnixNano)}, - }, - }, - timeSeriesSignature(sumLabels): { - Labels: sumLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(nowUnixNano)}, - }, - }, - timeSeriesSignature(createdLabels): { - Labels: createdLabels, - Samples: []prompb.Sample{ - {Value: float64(convertTimeStamp(nowUnixNano)), Timestamp: convertTimeStamp(nowUnixNano)}, - }, - }, - } - }, - }, - { - name: "summary with start time within default valid interval to sample timestamp", - metric: func() pmetric.Metric { - metric := pmetric.NewMetric() - metric.SetName("test_summary") - metric.SetEmptySummary() - - dp := metric.Summary().DataPoints().AppendEmpty() - dp.SetTimestamp(nowUnixNano) - dp.SetStartTimestamp(nowMinus2m30s) - - return metric - }, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + countStr}, - } - createdLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + createdSuffix}, - } - sumLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + sumStr}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - {Value: math.Float64frombits(value.QuietZeroNaN), Timestamp: convertTimeStamp(nowMinus2m30s)}, - {Value: 0, Timestamp: convertTimeStamp(nowUnixNano)}, - }, - }, - timeSeriesSignature(sumLabels): { - Labels: sumLabels, - Samples: []prompb.Sample{ - {Value: math.Float64frombits(value.QuietZeroNaN), Timestamp: convertTimeStamp(nowMinus2m30s)}, - {Value: 0, Timestamp: convertTimeStamp(nowUnixNano)}, - }, - }, - timeSeriesSignature(createdLabels): { - Labels: createdLabels, - Samples: []prompb.Sample{ - {Value: float64(convertTimeStamp(nowMinus2m30s)), Timestamp: convertTimeStamp(nowUnixNano)}, - }, - }, - } - }, - overrideValidInterval: 10 * time.Minute, - }, - { - name: "summary with start time within overiden valid interval to 
sample timestamp", - metric: func() pmetric.Metric { - metric := pmetric.NewMetric() - metric.SetName("test_summary") - metric.SetEmptySummary() - - dp := metric.Summary().DataPoints().AppendEmpty() - dp.SetTimestamp(nowUnixNano) - dp.SetStartTimestamp(nowMinus6m) - - return metric - }, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + countStr}, - } - createdLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + createdSuffix}, - } - sumLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + sumStr}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - {Value: math.Float64frombits(value.QuietZeroNaN), Timestamp: convertTimeStamp(nowMinus6m)}, - {Value: 0, Timestamp: convertTimeStamp(nowUnixNano)}, - }, - }, - timeSeriesSignature(sumLabels): { - Labels: sumLabels, - Samples: []prompb.Sample{ - {Value: math.Float64frombits(value.QuietZeroNaN), Timestamp: convertTimeStamp(nowMinus6m)}, - {Value: 0, Timestamp: convertTimeStamp(nowUnixNano)}, - }, - }, - timeSeriesSignature(createdLabels): { - Labels: createdLabels, - Samples: []prompb.Sample{ - {Value: float64(convertTimeStamp(nowMinus6m)), Timestamp: convertTimeStamp(nowUnixNano)}, - }, - }, - } - }, - }, - { - name: "summary with start time and with scope conversion", - metric: func() pmetric.Metric { - metric := pmetric.NewMetric() - metric.SetName("test_summary") - metric.SetEmptySummary() - - dp := metric.Summary().DataPoints().AppendEmpty() - dp.SetTimestamp(nowUnixNano) - dp.SetStartTimestamp(nowMinus1h) - - return metric - }, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + countStr}, - } - createdLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + createdSuffix}, - } - sumLabels := []prompb.Label{ 
- {Name: model.MetricNameLabel, Value: "test_summary" + sumStr}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(nowUnixNano)}, - }, + want: func() []combinedSample { + return []combinedSample{ + { + ls: labels.FromStrings( + model.MetricNameLabel, "test_summary"+sumStr, + ), + t: convertTimeStamp(ts), + ct: convertTimeStamp(ts), + v: 0, }, - timeSeriesSignature(sumLabels): { - Labels: sumLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(nowUnixNano)}, - }, + { + ls: labels.FromStrings( + model.MetricNameLabel, "test_summary"+countStr, + ), + t: convertTimeStamp(ts), + ct: convertTimeStamp(ts), + v: 0, }, - timeSeriesSignature(createdLabels): { - Labels: createdLabels, - Samples: []prompb.Sample{ - {Value: float64(convertTimeStamp(nowMinus1h)), Timestamp: convertTimeStamp(nowUnixNano)}, - }, + { + ls: labels.FromStrings( + model.MetricNameLabel, "test_summary"+createdSuffix, + ), + t: convertTimeStamp(ts), + v: float64(convertTimeStamp(ts)), }, } }, @@ -805,66 +347,42 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) { metric.SetEmptySummary() dp := metric.Summary().DataPoints().AppendEmpty() - dp.SetTimestamp(nowUnixNano) - dp.SetStartTimestamp(nowUnixNano) + dp.SetTimestamp(ts) + dp.SetStartTimestamp(ts) return metric }, scope: defaultScope, promoteScope: true, - want: func() map[uint64]*prompb.TimeSeries { - scopeLabels := []prompb.Label{ + want: func() []combinedSample { + scopeLabels := []string{ + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + } + return []combinedSample{ { - Name: "otel_scope_attr1", - Value: "value1", + ls: labels.FromStrings(append(scopeLabels, + model.MetricNameLabel, "test_summary"+sumStr)...), + t: convertTimeStamp(ts), + ct: 
convertTimeStamp(ts), + v: 0, }, { - Name: "otel_scope_attr2", - Value: "value2", + ls: labels.FromStrings(append(scopeLabels, + model.MetricNameLabel, "test_summary"+countStr)...), + t: convertTimeStamp(ts), + ct: convertTimeStamp(ts), + v: 0, }, { - Name: "otel_scope_name", - Value: defaultScope.name, - }, - { - Name: "otel_scope_schema_url", - Value: defaultScope.schemaURL, - }, - { - Name: "otel_scope_version", - Value: defaultScope.version, - }, - } - countLabels := append([]prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + countStr}, - }, scopeLabels...) - sumLabels := append([]prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + sumStr}, - }, scopeLabels...) - createdLabels := append([]prompb.Label{ - { - Name: model.MetricNameLabel, - Value: "test_summary" + createdSuffix, - }, - }, scopeLabels...) - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(countLabels): { - Labels: countLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(nowUnixNano)}, - }, - }, - timeSeriesSignature(sumLabels): { - Labels: sumLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(nowUnixNano)}, - }, - }, - timeSeriesSignature(createdLabels): { - Labels: createdLabels, - Samples: []prompb.Sample{ - {Value: float64(convertTimeStamp(nowUnixNano)), Timestamp: convertTimeStamp(nowUnixNano)}, - }, + ls: labels.FromStrings(append(scopeLabels, + model.MetricNameLabel, "test_summary"+createdSuffix, + )...), + t: convertTimeStamp(ts), + v: float64(convertTimeStamp(ts)), }, } }, @@ -877,30 +395,26 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) { metric.SetEmptySummary() dp := metric.Summary().DataPoints().AppendEmpty() - dp.SetTimestamp(nowUnixNano) + dp.SetTimestamp(ts) return metric }, promoteScope: false, - want: func() map[uint64]*prompb.TimeSeries { - countLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + countStr}, - } - sumLabels := 
[]prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + sumStr}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(countLabels): { - Labels: countLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(nowUnixNano)}, - }, + want: func() []combinedSample { + return []combinedSample{ + { + ls: labels.FromStrings( + model.MetricNameLabel, "test_summary"+sumStr, + ), + t: convertTimeStamp(ts), + v: 0, }, - timeSeriesSignature(sumLabels): { - Labels: sumLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(nowUnixNano)}, - }, + { + ls: labels.FromStrings( + model.MetricNameLabel, "test_summary"+countStr, + ), + t: convertTimeStamp(ts), + v: 0, }, } }, @@ -909,36 +423,28 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { metric := tt.metric() - converter := NewPrometheusConverter() + mockAppender := &mockCombinedAppender{} + converter := NewPrometheusConverter(mockAppender) - err := converter.addSummaryDataPoints( + converter.addSummaryDataPoints( context.Background(), metric.Summary().DataPoints(), pcommon.NewResource(), Settings{ - ExportCreatedMetric: true, - PromoteScopeMetadata: tt.promoteScope, - EnableCreatedTimestampZeroIngestion: true, - EnableStartTimeQuietZero: true, - ValidIntervalCreatedTimestampZeroIngestion: tt.overrideValidInterval, + PromoteScopeMetadata: tt.promoteScope, + ExportCreatedMetric: true, }, metric.Name(), tt.scope, - promslog.NewNopLogger(), + metadata.Metadata{}, ) - require.NoError(t, err) - testutil.RequireEqualWithOptions(t, tt.want(), converter.unique, []cmp.Option{cmp.Comparer(equalSamples)}) + requireEqual(t, tt.want(), mockAppender.samples) require.Empty(t, converter.conflicts) }) } } -func equalSamples(a, b prompb.Sample) bool { - // Compare Float64bits so NaN values which are exactly the same will compare equal. 
- return a.Timestamp == b.Timestamp && math.Float64bits(a.Value) == math.Float64bits(b.Value) -} - func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { scopeAttrs := pcommon.NewMap() scopeAttrs.FromRaw(map[string]any{ @@ -958,7 +464,7 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { metric func() pmetric.Metric scope scope promoteScope bool - want func() map[uint64]*prompb.TimeSeries + want func() []combinedSample }{ { name: "histogram with start time and without scope promotion", @@ -975,35 +481,31 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - want: func() map[uint64]*prompb.TimeSeries { - countLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist" + countStr}, - } - createdLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist" + createdSuffix}, - } - infLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist_bucket"}, - {Name: model.BucketLabel, Value: "+Inf"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(countLabels): { - Labels: countLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + want: func() []combinedSample { + return []combinedSample{ + { + ls: labels.FromStrings( + model.MetricNameLabel, "test_hist"+countStr, + ), + t: convertTimeStamp(ts), + ct: convertTimeStamp(ts), + v: 0, }, - timeSeriesSignature(infLabels): { - Labels: infLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + { + ls: labels.FromStrings( + model.MetricNameLabel, "test_hist_bucket", + model.BucketLabel, "+Inf", + ), + t: convertTimeStamp(ts), + ct: convertTimeStamp(ts), + v: 0, }, - timeSeriesSignature(createdLabels): { - Labels: createdLabels, - Samples: []prompb.Sample{ - {Value: float64(convertTimeStamp(ts)), Timestamp: convertTimeStamp(ts)}, - }, + { + ls: labels.FromStrings( + model.MetricNameLabel, 
"test_hist"+createdSuffix, + ), + t: convertTimeStamp(ts), + v: float64(convertTimeStamp(ts)), }, } }, @@ -1023,57 +525,35 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: true, - want: func() map[uint64]*prompb.TimeSeries { - scopeLabels := []prompb.Label{ - { - Name: "otel_scope_attr1", - Value: "value1", - }, + want: func() []combinedSample { + scopeLabels := []string{ + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + } + return []combinedSample{ { - Name: "otel_scope_attr2", - Value: "value2", + ls: labels.FromStrings(append(scopeLabels, + model.MetricNameLabel, "test_hist"+countStr)...), + t: convertTimeStamp(ts), + ct: convertTimeStamp(ts), + v: 0, }, { - Name: "otel_scope_name", - Value: defaultScope.name, + ls: labels.FromStrings(append(scopeLabels, + model.MetricNameLabel, "test_hist_bucket", + model.BucketLabel, "+Inf")...), + t: convertTimeStamp(ts), + ct: convertTimeStamp(ts), + v: 0, }, { - Name: "otel_scope_schema_url", - Value: defaultScope.schemaURL, - }, - { - Name: "otel_scope_version", - Value: defaultScope.version, - }, - } - countLabels := append([]prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist" + countStr}, - }, scopeLabels...) - infLabels := append([]prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist_bucket"}, - {Name: model.BucketLabel, Value: "+Inf"}, - }, scopeLabels...) - createdLabels := append([]prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist" + createdSuffix}, - }, scopeLabels...) 
- return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(countLabels): { - Labels: countLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, - }, - timeSeriesSignature(infLabels): { - Labels: infLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, - }, - timeSeriesSignature(createdLabels): { - Labels: createdLabels, - Samples: []prompb.Sample{ - {Value: float64(convertTimeStamp(ts)), Timestamp: convertTimeStamp(ts)}, - }, + ls: labels.FromStrings(append(scopeLabels, + model.MetricNameLabel, "test_hist"+createdSuffix)...), + t: convertTimeStamp(ts), + v: float64(convertTimeStamp(ts)), }, } }, @@ -1090,26 +570,22 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { return metric }, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist" + countStr}, - } - infLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist_bucket"}, - {Name: model.BucketLabel, Value: "+Inf"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(infLabels): { - Labels: infLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + want: func() []combinedSample { + return []combinedSample{ + { + ls: labels.FromStrings( + model.MetricNameLabel, "test_hist"+countStr, + ), + t: convertTimeStamp(ts), + v: 0, }, - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + { + ls: labels.FromStrings( + model.MetricNameLabel, "test_hist_bucket", + model.BucketLabel, "+Inf", + ), + t: convertTimeStamp(ts), + v: 0, }, } }, @@ -1118,24 +594,23 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { metric := tt.metric() - converter := NewPrometheusConverter() + mockAppender := &mockCombinedAppender{} + converter := 
NewPrometheusConverter(mockAppender) - err := converter.addHistogramDataPoints( + converter.addHistogramDataPoints( context.Background(), metric.Histogram().DataPoints(), pcommon.NewResource(), Settings{ - ExportCreatedMetric: true, - PromoteScopeMetadata: tt.promoteScope, - EnableCreatedTimestampZeroIngestion: true, + ExportCreatedMetric: true, + PromoteScopeMetadata: tt.promoteScope, }, metric.Name(), tt.scope, - promslog.NewNopLogger(), + metadata.Metadata{}, ) - require.NoError(t, err) - require.Equal(t, tt.want(), converter.unique) + requireEqual(t, tt.want(), mockAppender.samples) require.Empty(t, converter.conflicts) }) } @@ -1143,35 +618,35 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { func TestGetPromExemplars(t *testing.T) { ctx := context.Background() - everyN := &everyNTimes{n: 1} + c := NewPrometheusConverter(&mockCombinedAppender{}) t.Run("Exemplars with int value", func(t *testing.T) { - pt := pmetric.NewNumberDataPoint() - exemplar := pt.Exemplars().AppendEmpty() + es := pmetric.NewExemplarSlice() + exemplar := es.AppendEmpty() exemplar.SetTimestamp(pcommon.Timestamp(time.Now().UnixNano())) exemplar.SetIntValue(42) - exemplars, err := getPromExemplars(ctx, everyN, pt) + exemplars, err := c.getPromExemplars(ctx, es) require.NoError(t, err) require.Len(t, exemplars, 1) require.Equal(t, float64(42), exemplars[0].Value) }) t.Run("Exemplars with double value", func(t *testing.T) { - pt := pmetric.NewNumberDataPoint() - exemplar := pt.Exemplars().AppendEmpty() + es := pmetric.NewExemplarSlice() + exemplar := es.AppendEmpty() exemplar.SetTimestamp(pcommon.Timestamp(time.Now().UnixNano())) exemplar.SetDoubleValue(69.420) - exemplars, err := getPromExemplars(ctx, everyN, pt) + exemplars, err := c.getPromExemplars(ctx, es) require.NoError(t, err) require.Len(t, exemplars, 1) require.Equal(t, 69.420, exemplars[0].Value) }) t.Run("Exemplars with unsupported value type", func(t *testing.T) { - pt := pmetric.NewNumberDataPoint() - 
exemplar := pt.Exemplars().AppendEmpty() + es := pmetric.NewExemplarSlice() + exemplar := es.AppendEmpty() exemplar.SetTimestamp(pcommon.Timestamp(time.Now().UnixNano())) - _, err := getPromExemplars(ctx, everyN, pt) + _, err := c.getPromExemplars(ctx, es) require.Error(t, err) }) } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go index 855e122213..6e00232bfa 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -26,8 +26,8 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric" "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/value" - "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/util/annotations" ) @@ -37,7 +37,7 @@ const defaultZeroThreshold = 1e-128 // as native histogram samples. 
func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Context, dataPoints pmetric.ExponentialHistogramDataPointSlice, resource pcommon.Resource, settings Settings, promName string, temporality pmetric.AggregationTemporality, - scope scope, + scope scope, meta metadata.Metadata, ) (annotations.Annotations, error) { var annots annotations.Annotations for x := 0; x < dataPoints.Len(); x++ { @@ -47,13 +47,13 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont pt := dataPoints.At(x) - histogram, ws, err := exponentialToNativeHistogram(pt, temporality) + hp, ws, err := exponentialToNativeHistogram(pt, temporality) annots.Merge(ws) if err != nil { return annots, err } - lbls := createAttributes( + lbls := c.createAttributes( resource, pt.Attributes(), scope, @@ -63,14 +63,16 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont model.MetricNameLabel, promName, ) - ts, _ := c.getOrCreateTimeSeries(lbls) - ts.Histograms = append(ts.Histograms, histogram) - - exemplars, err := getPromExemplars[pmetric.ExponentialHistogramDataPoint](ctx, &c.everyN, pt) + ts := convertTimeStamp(pt.Timestamp()) + ct := convertTimeStamp(pt.StartTimestamp()) + exemplars, err := c.getPromExemplars(ctx, pt.Exemplars()) if err != nil { return annots, err } - ts.Exemplars = append(ts.Exemplars, exemplars...) + // OTel exponential histograms are always Int Histograms. + if err = c.appender.AppendHistogram(promName, lbls, meta, ts, ct, hp, exemplars); err != nil { + return annots, err + } } return annots, nil @@ -78,11 +80,11 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont // exponentialToNativeHistogram translates an OTel Exponential Histogram data point // to a Prometheus Native Histogram. 
-func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint, temporality pmetric.AggregationTemporality) (prompb.Histogram, annotations.Annotations, error) { +func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint, temporality pmetric.AggregationTemporality) (*histogram.Histogram, annotations.Annotations, error) { var annots annotations.Annotations scale := p.Scale() if scale < -4 { - return prompb.Histogram{}, annots, + return nil, annots, fmt.Errorf("cannot convert exponential to native histogram."+ " Scale must be >= -4, was %d", scale) } @@ -105,41 +107,36 @@ func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint, tempo // need to know here if it was used for the detection. // Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28663#issuecomment-1810577303 // Counter reset detection in Prometheus: https://github.com/prometheus/prometheus/blob/f997c72f294c0f18ca13fa06d51889af04135195/tsdb/chunkenc/histogram.go#L232 - resetHint := prompb.Histogram_UNKNOWN + resetHint := histogram.UnknownCounterReset if temporality == pmetric.AggregationTemporalityDelta { // If the histogram has delta temporality, set the reset hint to gauge to avoid unnecessary chunk cutting. // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/). // This might be changed to a different hint name as gauge type might be misleading for samples that should be // summed over time. 
- resetHint = prompb.Histogram_GAUGE + resetHint = histogram.GaugeType } - - h := prompb.Histogram{ - ResetHint: resetHint, - Schema: scale, - - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: p.ZeroCount()}, + h := &histogram.Histogram{ + CounterResetHint: resetHint, + Schema: scale, // TODO use zero_threshold, if set, see // https://github.com/open-telemetry/opentelemetry-proto/pull/441 - ZeroThreshold: defaultZeroThreshold, - - PositiveSpans: pSpans, - PositiveDeltas: pDeltas, - NegativeSpans: nSpans, - NegativeDeltas: nDeltas, - - Timestamp: convertTimeStamp(p.Timestamp()), + ZeroThreshold: defaultZeroThreshold, + ZeroCount: p.ZeroCount(), + PositiveSpans: pSpans, + PositiveBuckets: pDeltas, + NegativeSpans: nSpans, + NegativeBuckets: nDeltas, } if p.Flags().NoRecordedValue() { h.Sum = math.Float64frombits(value.StaleNaN) - h.Count = &prompb.Histogram_CountInt{CountInt: value.StaleNaN} + h.Count = value.StaleNaN } else { if p.HasSum() { h.Sum = p.Sum() } - h.Count = &prompb.Histogram_CountInt{CountInt: p.Count()} + h.Count = p.Count() if p.Count() == 0 && h.Sum != 0 { annots.Add(fmt.Errorf("exponential histogram data point has zero count, but non-zero sum: %f", h.Sum)) } @@ -164,13 +161,13 @@ func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint, tempo // // When converting from OTel Explicit Histograms to Native Histograms with Custom Buckets, // the bucket indexes are not scaled, and the indices are not adjusted by 1. 
-func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjustOffset bool) ([]prompb.BucketSpan, []int64) { +func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjustOffset bool) ([]histogram.Span, []int64) { if len(bucketCounts) == 0 { return nil, nil } var ( - spans []prompb.BucketSpan + spans []histogram.Span deltas []int64 count int64 prevCount int64 @@ -193,7 +190,7 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust initialOffset = initialOffset>>scaleDown + 1 } - spans = append(spans, prompb.BucketSpan{ + spans = append(spans, histogram.Span{ Offset: initialOffset, Length: 0, }) @@ -214,7 +211,7 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust // We have to create a new span, because we have found a gap // of more than two buckets. The constant 2 is copied from the logic in // https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296 - spans = append(spans, prompb.BucketSpan{ + spans = append(spans, histogram.Span{ Offset: gap, Length: 0, }) @@ -236,7 +233,7 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust // We have to create a new span, because we have found a gap // of more than two buckets. 
The constant 2 is copied from the logic in // https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296 - spans = append(spans, prompb.BucketSpan{ + spans = append(spans, histogram.Span{ Offset: gap, Length: 0, }) @@ -254,7 +251,7 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice, resource pcommon.Resource, settings Settings, promName string, temporality pmetric.AggregationTemporality, - scope scope, + scope scope, meta metadata.Metadata, ) (annotations.Annotations, error) { var annots annotations.Annotations @@ -265,13 +262,13 @@ func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Co pt := dataPoints.At(x) - histogram, ws, err := explicitHistogramToCustomBucketsHistogram(pt, temporality) + hp, ws, err := explicitHistogramToCustomBucketsHistogram(pt, temporality) annots.Merge(ws) if err != nil { return annots, err } - lbls := createAttributes( + lbls := c.createAttributes( resource, pt.Attributes(), scope, @@ -281,21 +278,21 @@ func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Co model.MetricNameLabel, promName, ) - - ts, _ := c.getOrCreateTimeSeries(lbls) - ts.Histograms = append(ts.Histograms, histogram) - - exemplars, err := getPromExemplars[pmetric.HistogramDataPoint](ctx, &c.everyN, pt) + ts := convertTimeStamp(pt.Timestamp()) + ct := convertTimeStamp(pt.StartTimestamp()) + exemplars, err := c.getPromExemplars(ctx, pt.Exemplars()) if err != nil { return annots, err } - ts.Exemplars = append(ts.Exemplars, exemplars...) 
+ if err = c.appender.AppendHistogram(promName, lbls, meta, ts, ct, hp, exemplars); err != nil { + return annots, err + } } return annots, nil } -func explicitHistogramToCustomBucketsHistogram(p pmetric.HistogramDataPoint, temporality pmetric.AggregationTemporality) (prompb.Histogram, annotations.Annotations, error) { +func explicitHistogramToCustomBucketsHistogram(p pmetric.HistogramDataPoint, temporality pmetric.AggregationTemporality) (*histogram.Histogram, annotations.Annotations, error) { var annots annotations.Annotations buckets := p.BucketCounts().AsRaw() @@ -312,23 +309,22 @@ func explicitHistogramToCustomBucketsHistogram(p pmetric.HistogramDataPoint, tem // need to know here if it was used for the detection. // Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28663#issuecomment-1810577303 // Counter reset detection in Prometheus: https://github.com/prometheus/prometheus/blob/f997c72f294c0f18ca13fa06d51889af04135195/tsdb/chunkenc/histogram.go#L232 - resetHint := prompb.Histogram_UNKNOWN + resetHint := histogram.UnknownCounterReset if temporality == pmetric.AggregationTemporalityDelta { // If the histogram has delta temporality, set the reset hint to gauge to avoid unnecessary chunk cutting. // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/). // This might be changed to a different hint name as gauge type might be misleading for samples that should be // summed over time. 
- resetHint = prompb.Histogram_GAUGE + resetHint = histogram.GaugeType } // TODO(carrieedwards): Add setting to limit maximum bucket count - h := prompb.Histogram{ - ResetHint: resetHint, - Schema: histogram.CustomBucketsSchema, - - PositiveSpans: positiveSpans, - PositiveDeltas: positiveDeltas, + h := &histogram.Histogram{ + CounterResetHint: resetHint, + Schema: histogram.CustomBucketsSchema, + PositiveSpans: positiveSpans, + PositiveBuckets: positiveDeltas, // Note: OTel explicit histograms have an implicit +Inf bucket, which has a lower bound // of the last element in the explicit_bounds array. // This is similar to the custom_values array in native histograms with custom buckets. @@ -336,18 +332,16 @@ func explicitHistogramToCustomBucketsHistogram(p pmetric.HistogramDataPoint, tem // can be mapped directly to the custom_values array. // See: https://github.com/open-telemetry/opentelemetry-proto/blob/d7770822d70c7bd47a6891fc9faacc66fc4af3d3/opentelemetry/proto/metrics/v1/metrics.proto#L469 CustomValues: p.ExplicitBounds().AsRaw(), - - Timestamp: convertTimeStamp(p.Timestamp()), } if p.Flags().NoRecordedValue() { h.Sum = math.Float64frombits(value.StaleNaN) - h.Count = &prompb.Histogram_CountInt{CountInt: value.StaleNaN} + h.Count = value.StaleNaN } else { if p.HasSum() { h.Sum = p.Sum() } - h.Count = &prompb.Histogram_CountInt{CountInt: p.Count()} + h.Count = p.Count() if p.Count() == 0 && h.Sum != 0 { annots.Add(fmt.Errorf("histogram data point has zero count, but non-zero sum: %f", h.Sum)) } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go index 7f10a0df91..0fe4492108 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go @@ -28,11 +28,15 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" - 
"github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + //"github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/labels" ) type expectedBucketLayout struct { - wantSpans []prompb.BucketSpan + wantSpans []histogram.Span wantDeltas []int64 } @@ -52,7 +56,7 @@ func TestConvertBucketsLayout(t *testing.T) { }, wantLayout: map[int32]expectedBucketLayout{ 0: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 1, Length: 4, @@ -61,7 +65,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{4, -1, -1, -1}, }, 1: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 1, Length: 2, @@ -71,7 +75,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{7, -4}, }, 2: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 1, Length: 1, @@ -92,7 +96,7 @@ func TestConvertBucketsLayout(t *testing.T) { }, wantLayout: map[int32]expectedBucketLayout{ 0: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 2, Length: 4, @@ -101,7 +105,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{4, -1, -1, -1}, }, 1: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 1, Length: 3, @@ -110,7 +114,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{4, 1, -4}, // 0+4, 3+2, 1+0 = 4, 5, 1 }, 2: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 1, Length: 2, @@ -130,7 +134,7 @@ func TestConvertBucketsLayout(t *testing.T) { }, wantLayout: map[int32]expectedBucketLayout{ 0: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 5, Length: 4, @@ -143,7 +147,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{4, -2, -2, 2, -1}, }, 1: 
{ - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 3, Length: 2, @@ -158,7 +162,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{6, -4, -1}, }, 2: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 2, Length: 1, @@ -185,7 +189,7 @@ func TestConvertBucketsLayout(t *testing.T) { }, wantLayout: map[int32]expectedBucketLayout{ 0: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 5, Length: 4, @@ -198,7 +202,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{4, -2, -2, 2, -1}, }, 1: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 3, Length: 2, @@ -213,7 +217,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{6, -4, -1}, }, 2: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 2, Length: 4, @@ -236,7 +240,7 @@ func TestConvertBucketsLayout(t *testing.T) { }, wantLayout: map[int32]expectedBucketLayout{ 0: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: -1, Length: 2, @@ -249,7 +253,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{3, -2, 0}, }, 1: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 0, Length: 3, @@ -260,7 +264,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{4, -4, 1}, }, 2: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 0, Length: 2, @@ -282,7 +286,7 @@ func TestConvertBucketsLayout(t *testing.T) { }, wantLayout: map[int32]expectedBucketLayout{ 0: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: -1, Length: 6, @@ -291,7 +295,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{3, -2, -1, 1, -1, 1}, }, 1: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 0, Length: 3, @@ -302,7 +306,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{4, -3, 0}, }, 
2: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 0, Length: 2, @@ -324,7 +328,7 @@ func TestConvertBucketsLayout(t *testing.T) { }, wantLayout: map[int32]expectedBucketLayout{ 0: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: -1, Length: 7, @@ -333,7 +337,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{3, -3, 0, 1, -1, 0, 1}, }, 1: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 0, Length: 4, @@ -344,7 +348,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{3, -2, -1, 1}, }, 2: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 0, Length: 3, @@ -379,8 +383,8 @@ func TestConvertBucketsLayout(t *testing.T) { for scaleDown, wantLayout := range tt.wantLayout { t.Run(fmt.Sprintf("%s-scaleby-%d", tt.name, scaleDown), func(t *testing.T) { gotSpans, gotDeltas := convertBucketsLayout(tt.buckets().BucketCounts().AsRaw(), tt.buckets().Offset(), scaleDown, true) - require.Equal(t, wantLayout.wantSpans, gotSpans) - require.Equal(t, wantLayout.wantDeltas, gotDeltas) + requireEqual(t, wantLayout.wantSpans, gotSpans) + requireEqual(t, wantLayout.wantDeltas, gotDeltas) }) } } @@ -418,7 +422,7 @@ func TestExponentialToNativeHistogram(t *testing.T) { tests := []struct { name string exponentialHist func() pmetric.ExponentialHistogramDataPoint - wantNativeHist func() prompb.Histogram + wantNativeHist func() *histogram.Histogram wantErrMessage string }{ { @@ -440,18 +444,17 @@ func TestExponentialToNativeHistogram(t *testing.T) { return pt }, - wantNativeHist: func() prompb.Histogram { - return prompb.Histogram{ - Count: &prompb.Histogram_CountInt{CountInt: 4}, - Sum: 10.1, - Schema: 1, - ZeroThreshold: defaultZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 1}, - NegativeSpans: []prompb.BucketSpan{{Offset: 2, Length: 2}}, - NegativeDeltas: []int64{1, 0}, - PositiveSpans: []prompb.BucketSpan{{Offset: 2, 
Length: 2}}, - PositiveDeltas: []int64{1, 0}, - Timestamp: 500, + wantNativeHist: func() *histogram.Histogram { + return &histogram.Histogram{ + Count: 4, + Sum: 10.1, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 1, + NegativeSpans: []histogram.Span{{Offset: 2, Length: 2}}, + NegativeBuckets: []int64{1, 0}, + PositiveSpans: []histogram.Span{{Offset: 2, Length: 2}}, + PositiveBuckets: []int64{1, 0}, } }, }, @@ -474,17 +477,16 @@ func TestExponentialToNativeHistogram(t *testing.T) { return pt }, - wantNativeHist: func() prompb.Histogram { - return prompb.Histogram{ - Count: &prompb.Histogram_CountInt{CountInt: 4}, - Schema: 1, - ZeroThreshold: defaultZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 1}, - NegativeSpans: []prompb.BucketSpan{{Offset: 2, Length: 2}}, - NegativeDeltas: []int64{1, 0}, - PositiveSpans: []prompb.BucketSpan{{Offset: 2, Length: 2}}, - PositiveDeltas: []int64{1, 0}, - Timestamp: 500, + wantNativeHist: func() *histogram.Histogram { + return &histogram.Histogram{ + Count: 4, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 1, + NegativeSpans: []histogram.Span{{Offset: 2, Length: 2}}, + NegativeBuckets: []int64{1, 0}, + PositiveSpans: []histogram.Span{{Offset: 2, Length: 2}}, + PositiveBuckets: []int64{1, 0}, } }, }, @@ -515,18 +517,17 @@ func TestExponentialToNativeHistogram(t *testing.T) { pt.Negative().SetOffset(2) return pt }, - wantNativeHist: func() prompb.Histogram { - return prompb.Histogram{ - Count: &prompb.Histogram_CountInt{CountInt: 6}, - Sum: 10.1, - Schema: 8, - ZeroThreshold: defaultZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 1}, - PositiveSpans: []prompb.BucketSpan{{Offset: 2, Length: 3}}, - PositiveDeltas: []int64{1, 0, 0}, // 1, 1, 1 - NegativeSpans: []prompb.BucketSpan{{Offset: 3, Length: 3}}, - NegativeDeltas: []int64{1, 0, 0}, // 1, 1, 1 - Timestamp: 500, + wantNativeHist: func() *histogram.Histogram { + return &histogram.Histogram{ + 
Count: 6, + Sum: 10.1, + Schema: 8, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 1, + PositiveSpans: []histogram.Span{{Offset: 2, Length: 3}}, + PositiveBuckets: []int64{1, 0, 0}, // 1, 1, 1 + NegativeSpans: []histogram.Span{{Offset: 3, Length: 3}}, + NegativeBuckets: []int64{1, 0, 0}, // 1, 1, 1 } }, }, @@ -547,18 +548,17 @@ func TestExponentialToNativeHistogram(t *testing.T) { pt.Negative().SetOffset(2) return pt }, - wantNativeHist: func() prompb.Histogram { - return prompb.Histogram{ - Count: &prompb.Histogram_CountInt{CountInt: 6}, - Sum: 10.1, - Schema: 8, - ZeroThreshold: defaultZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 1}, - PositiveSpans: []prompb.BucketSpan{{Offset: 1, Length: 2}}, - PositiveDeltas: []int64{1, 1}, // 0+1, 1+1 = 1, 2 - NegativeSpans: []prompb.BucketSpan{{Offset: 2, Length: 2}}, - NegativeDeltas: []int64{2, -1}, // 1+1, 1+0 = 2, 1 - Timestamp: 500, + wantNativeHist: func() *histogram.Histogram { + return &histogram.Histogram{ + Count: 6, + Sum: 10.1, + Schema: 8, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 1, + PositiveSpans: []histogram.Span{{Offset: 1, Length: 2}}, + PositiveBuckets: []int64{1, 1}, // 0+1, 1+1 = 1, 2 + NegativeSpans: []histogram.Span{{Offset: 2, Length: 2}}, + NegativeBuckets: []int64{2, -1}, // 1+1, 1+0 = 2, 1 } }, }, @@ -599,20 +599,18 @@ func validateExponentialHistogramCount(t *testing.T, h pmetric.ExponentialHistog require.Equal(t, h.Count(), actualCount, "exponential histogram count mismatch") } -func validateNativeHistogramCount(t *testing.T, h prompb.Histogram) { - require.NotNil(t, h.Count) - require.IsType(t, &prompb.Histogram_CountInt{}, h.Count) - want := h.Count.(*prompb.Histogram_CountInt).CountInt +func validateNativeHistogramCount(t *testing.T, h *histogram.Histogram) { + want := h.Count var ( actualCount uint64 prevBucket int64 ) - for _, delta := range h.PositiveDeltas { + for _, delta := range h.PositiveBuckets { prevBucket += delta actualCount += 
uint64(prevBucket) } prevBucket = 0 - for _, delta := range h.NegativeDeltas { + for _, delta := range h.NegativeBuckets { prevBucket += delta actualCount += uint64(prevBucket) } @@ -631,19 +629,13 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { schemaURL: "https://schema.com", attributes: scopeAttrs, } - now := time.Now() - nowUnixNano := pcommon.Timestamp(now.UnixNano()) - // nowMinus2m30s := pcommon.Timestamp(now.Add(-2 * time.Minute).Add(-30 * time.Second).UnixNano()) - // nowMinus6m := pcommon.Timestamp(now.Add(-6 * time.Minute).UnixNano()) - nowMinus1h := pcommon.Timestamp(now.Add(-1 * time.Hour).UnixNano()) tests := []struct { - name string - metric func() pmetric.Metric - scope scope - promoteScope bool - overrideValidInterval time.Duration - wantSeries func() map[uint64]*prompb.TimeSeries + name string + metric func() pmetric.Metric + scope scope + promoteScope bool + wantSeries func() []combinedHistogram }{ { name: "histogram data points with same labels and without scope promotion", @@ -672,36 +664,41 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - wantSeries: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist"}, - {Name: "attr", Value: "test_attr"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 7}, - Schema: 1, - ZeroThreshold: defaultZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}}, - PositiveDeltas: []int64{4, -2}, - }, - { - Count: &prompb.Histogram_CountInt{CountInt: 4}, - Schema: 1, - ZeroThreshold: defaultZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}}, - 
PositiveDeltas: []int64{4, -2, -1}, - }, + wantSeries: func() []combinedHistogram { + lbls := labels.FromStrings( + model.MetricNameLabel, "test_hist", + "attr", "test_attr", + ) + return []combinedHistogram{ + { + ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 7, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{4, -2}, }, - Exemplars: []prompb.Exemplar{ - {Value: 1}, - {Value: 2}, + es: []exemplar.Exemplar{{Value: 1}}, + }, + { + ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 4, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, + PositiveBuckets: []int64{4, -2, -1}, }, + es: []exemplar.Exemplar{{Value: 2}}, }, } }, @@ -733,41 +730,46 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: true, - wantSeries: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist"}, - {Name: "attr", Value: "test_attr"}, - {Name: "otel_scope_name", Value: defaultScope.name}, - {Name: "otel_scope_schema_url", Value: defaultScope.schemaURL}, - {Name: "otel_scope_version", Value: defaultScope.version}, - {Name: "otel_scope_attr1", Value: "value1"}, - {Name: "otel_scope_attr2", Value: "value2"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 7}, - Schema: 1, - ZeroThreshold: defaultZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}}, - PositiveDeltas: []int64{4, -2}, - }, - { - Count: &prompb.Histogram_CountInt{CountInt: 4}, - Schema: 1, - ZeroThreshold: defaultZeroThreshold, - 
ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}}, - PositiveDeltas: []int64{4, -2, -1}, - }, + wantSeries: func() []combinedHistogram { + lbls := labels.FromStrings( + model.MetricNameLabel, "test_hist", + "attr", "test_attr", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ) + return []combinedHistogram{ + { + ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 7, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{4, -2}, }, - Exemplars: []prompb.Exemplar{ - {Value: 1}, - {Value: 2}, + es: []exemplar.Exemplar{{Value: 1}}, + }, + { + ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 4, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, + PositiveBuckets: []int64{4, -2, -1}, }, + es: []exemplar.Exemplar{{Value: 2}}, }, } }, @@ -799,245 +801,46 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - wantSeries: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist"}, - {Name: "attr", Value: "test_attr"}, - } - labelsAnother := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist"}, - {Name: "attr", Value: "test_attr_two"}, - } - - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 7}, - Schema: 1, - ZeroThreshold: defaultZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, - PositiveSpans: 
[]prompb.BucketSpan{{Offset: 0, Length: 2}}, - PositiveDeltas: []int64{4, -2}, - }, - }, - Exemplars: []prompb.Exemplar{ - {Value: 1}, - }, - }, - timeSeriesSignature(labelsAnother): { - Labels: labelsAnother, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 4}, - Schema: 1, - ZeroThreshold: defaultZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, - NegativeSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}}, - NegativeDeltas: []int64{4, -2, -1}, - }, - }, - Exemplars: []prompb.Exemplar{ - {Value: 2}, - }, - }, - } - }, - }, - { - name: "histogram with start time and without scope conversion", - metric: func() pmetric.Metric { - metric := pmetric.NewMetric() - metric.SetName("test_exponential_hist") - metric.SetEmptyExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - - pt := metric.ExponentialHistogram().DataPoints().AppendEmpty() - pt.SetTimestamp(nowUnixNano) - pt.SetStartTimestamp(nowUnixNano) - - return metric - }, - scope: defaultScope, - promoteScope: false, - wantSeries: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_exponential_hist"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Histograms: []prompb.Histogram{ - { - Timestamp: convertTimeStamp(nowUnixNano), - Count: &prompb.Histogram_CountInt{ - CountInt: 0, - }, - ZeroCount: &prompb.Histogram_ZeroCountInt{ - ZeroCountInt: 0, - }, - ZeroThreshold: defaultZeroThreshold, - }, - }, - }, - } - }, - }, - { - name: "histogram without start time and without scope conversion", - metric: func() pmetric.Metric { - metric := pmetric.NewMetric() - metric.SetName("test_exponential_hist") - metric.SetEmptyExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - - pt := metric.ExponentialHistogram().DataPoints().AppendEmpty() - pt.SetTimestamp(nowUnixNano) - - return 
metric - }, - scope: defaultScope, - promoteScope: false, - wantSeries: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_exponential_hist"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Histograms: []prompb.Histogram{ - { - Timestamp: convertTimeStamp(nowUnixNano), - Count: &prompb.Histogram_CountInt{ - CountInt: 0, - }, - ZeroCount: &prompb.Histogram_ZeroCountInt{ - ZeroCountInt: 0, - }, - ZeroThreshold: defaultZeroThreshold, - }, + wantSeries: func() []combinedHistogram { + lbls := labels.FromStrings( + model.MetricNameLabel, "test_hist", + "attr", "test_attr", + ) + labelsAnother := labels.FromStrings( + model.MetricNameLabel, "test_hist", + "attr", "test_attr_two", + ) + + return []combinedHistogram{ + { + ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 7, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{4, -2}, }, + es: []exemplar.Exemplar{{Value: 1}}, }, - } - }, - }, - // TODO(@jesusvazquez) Reenable after OOO NH is stable - // { - // name: "histogram with start time within default valid interval to sample timestamp", - // metric: func() pmetric.Metric { - // metric := pmetric.NewMetric() - // metric.SetName("test_exponential_hist") - // metric.SetEmptyExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - - // pt := metric.ExponentialHistogram().DataPoints().AppendEmpty() - // pt.SetTimestamp(nowUnixNano) - // pt.SetStartTimestamp(nowMinus2m30s) - - // return metric - // }, - // scope: defaultScope, - // promoteScope: false, - // wantSeries: func() map[uint64]*prompb.TimeSeries { - // labels := []prompb.Label{ - // {Name: model.MetricNameLabel, Value: "test_exponential_hist"}, - // } - // return map[uint64]*prompb.TimeSeries{ - // timeSeriesSignature(labels): { 
- // Labels: labels, - // Histograms: []prompb.Histogram{ - // { - // Timestamp: convertTimeStamp(nowMinus2m30s), - // }, - // { - // Timestamp: convertTimeStamp(nowUnixNano), - // Count: &prompb.Histogram_CountInt{ - // CountInt: 0, - // }, - // ZeroCount: &prompb.Histogram_ZeroCountInt{ - // ZeroCountInt: 0, - // }, - // ZeroThreshold: defaultZeroThreshold, - // }, - // }, - // }, - // } - // }, - // }, - // { - // name: "histogram with start time within overiden valid interval to sample timestamp", - // metric: func() pmetric.Metric { - // metric := pmetric.NewMetric() - // metric.SetName("test_exponential_hist") - // metric.SetEmptyExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - - // pt := metric.ExponentialHistogram().DataPoints().AppendEmpty() - // pt.SetTimestamp(nowUnixNano) - // pt.SetStartTimestamp(nowMinus6m) - - // return metric - // }, - // scope: defaultScope, - // promoteScope: false, - // wantSeries: func() map[uint64]*prompb.TimeSeries { - // labels := []prompb.Label{ - // {Name: model.MetricNameLabel, Value: "test_exponential_hist"}, - // } - // return map[uint64]*prompb.TimeSeries{ - // timeSeriesSignature(labels): { - // Labels: labels, - // Histograms: []prompb.Histogram{ - // { - // Timestamp: convertTimeStamp(nowMinus6m), - // }, - // { - // Timestamp: convertTimeStamp(nowUnixNano), - // Count: &prompb.Histogram_CountInt{ - // CountInt: 0, - // }, - // ZeroCount: &prompb.Histogram_ZeroCountInt{ - // ZeroCountInt: 0, - // }, - // ZeroThreshold: defaultZeroThreshold, - // }, - // }, - // }, - // } - // }, - // overrideValidInterval: 10 * time.Minute, - // }, - { - name: "histogram with start time older than default valid interval to sample timestamp and without scope conversion", - metric: func() pmetric.Metric { - metric := pmetric.NewMetric() - metric.SetName("test_exponential_hist") - metric.SetEmptyExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - - pt 
:= metric.ExponentialHistogram().DataPoints().AppendEmpty() - pt.SetTimestamp(nowUnixNano) - pt.SetStartTimestamp(nowMinus1h) - - return metric - }, - scope: defaultScope, - promoteScope: false, - wantSeries: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_exponential_hist"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Histograms: []prompb.Histogram{ - { - Timestamp: convertTimeStamp(nowUnixNano), - Count: &prompb.Histogram_CountInt{ - CountInt: 0, - }, - ZeroCount: &prompb.Histogram_ZeroCountInt{ - ZeroCountInt: 0, - }, - ZeroThreshold: defaultZeroThreshold, - }, + { + ls: labelsAnother, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 4, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 0, + NegativeSpans: []histogram.Span{{Offset: 0, Length: 3}}, + NegativeBuckets: []int64{4, -2, -1}, }, + es: []exemplar.Exemplar{{Value: 2}}, }, } }, @@ -1046,7 +849,9 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { metric := tt.metric() - converter := NewPrometheusConverter() + + mockAppender := &mockCombinedAppender{} + converter := NewPrometheusConverter(mockAppender) namer := otlptranslator.MetricNamer{ WithMetricSuffixes: true, } @@ -1055,19 +860,18 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { metric.ExponentialHistogram().DataPoints(), pcommon.NewResource(), Settings{ - ExportCreatedMetric: true, - PromoteScopeMetadata: tt.promoteScope, - EnableCreatedTimestampZeroIngestion: true, - ValidIntervalCreatedTimestampZeroIngestion: tt.overrideValidInterval, + ExportCreatedMetric: true, + PromoteScopeMetadata: tt.promoteScope, }, namer.Build(TranslatorMetricFromOtelMetric(metric)), pmetric.AggregationTemporalityCumulative, tt.scope, + metadata.Metadata{}, ) require.NoError(t, err) 
require.Empty(t, annots) - require.Equal(t, tt.wantSeries(), converter.unique) + requireEqual(t, tt.wantSeries(), mockAppender.histograms) require.Empty(t, converter.conflicts) }) } @@ -1083,7 +887,7 @@ func TestConvertExplicitHistogramBucketsToNHCBLayout(t *testing.T) { name: "zero offset", buckets: []uint64{4, 3, 2, 1}, wantLayout: expectedBucketLayout{ - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 0, Length: 4, @@ -1096,7 +900,7 @@ func TestConvertExplicitHistogramBucketsToNHCBLayout(t *testing.T) { name: "leading empty buckets", buckets: []uint64{0, 0, 1, 1, 2, 3}, wantLayout: expectedBucketLayout{ - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 2, Length: 4, @@ -1109,7 +913,7 @@ func TestConvertExplicitHistogramBucketsToNHCBLayout(t *testing.T) { name: "trailing empty buckets", buckets: []uint64{0, 0, 1, 1, 2, 3, 0, 0}, // TODO: add tests for 3 trailing buckets wantLayout: expectedBucketLayout{ - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 2, Length: 6, @@ -1122,7 +926,7 @@ func TestConvertExplicitHistogramBucketsToNHCBLayout(t *testing.T) { name: "bucket gap of 2", buckets: []uint64{1, 2, 0, 0, 2}, wantLayout: expectedBucketLayout{ - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 0, Length: 5, @@ -1135,7 +939,7 @@ func TestConvertExplicitHistogramBucketsToNHCBLayout(t *testing.T) { name: "bucket gap > 2", buckets: []uint64{1, 2, 0, 0, 0, 2, 4, 4}, wantLayout: expectedBucketLayout{ - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 0, Length: 2, @@ -1152,7 +956,7 @@ func TestConvertExplicitHistogramBucketsToNHCBLayout(t *testing.T) { name: "multiple bucket gaps", buckets: []uint64{0, 0, 1, 2, 0, 0, 0, 2, 4, 4, 0, 0}, wantLayout: expectedBucketLayout{ - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 2, Length: 2, @@ -1211,7 +1015,7 @@ func TestHistogramToCustomBucketsHistogram(t *testing.T) { tests := 
[]struct { name string hist func() pmetric.HistogramDataPoint - wantNativeHist func() prompb.Histogram + wantNativeHist func() *histogram.Histogram wantErrMessage string }{ { @@ -1227,15 +1031,14 @@ func TestHistogramToCustomBucketsHistogram(t *testing.T) { pt.ExplicitBounds().FromRaw([]float64{0, 1}) return pt }, - wantNativeHist: func() prompb.Histogram { - return prompb.Histogram{ - Count: &prompb.Histogram_CountInt{CountInt: 2}, - Sum: 10.1, - Schema: -53, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}}, - PositiveDeltas: []int64{1, 0}, - CustomValues: []float64{0, 1}, - Timestamp: 500, + wantNativeHist: func() *histogram.Histogram { + return &histogram.Histogram{ + Count: 2, + Sum: 10.1, + Schema: -53, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{1, 0}, + CustomValues: []float64{0, 1}, } }, }, @@ -1251,14 +1054,13 @@ func TestHistogramToCustomBucketsHistogram(t *testing.T) { pt.ExplicitBounds().FromRaw([]float64{0, 1}) return pt }, - wantNativeHist: func() prompb.Histogram { - return prompb.Histogram{ - Count: &prompb.Histogram_CountInt{CountInt: 4}, - Schema: -53, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}}, - PositiveDeltas: []int64{2, 0}, - CustomValues: []float64{0, 1}, - Timestamp: 500, + wantNativeHist: func() *histogram.Histogram { + return &histogram.Histogram{ + Count: 4, + Schema: -53, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{2, 0}, + CustomValues: []float64{0, 1}, } }, }, @@ -1298,7 +1100,7 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) { metric func() pmetric.Metric scope scope promoteScope bool - wantSeries func() map[uint64]*prompb.TimeSeries + wantSeries func() []combinedHistogram }{ { name: "histogram data points with same labels and without scope promotion", @@ -1327,36 +1129,41 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) { }, scope: defaultScope, 
promoteScope: false, - wantSeries: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist_to_nhcb"}, - {Name: "attr", Value: "test_attr"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 3}, - Sum: 3, - Schema: -53, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}}, - PositiveDeltas: []int64{2, -2, 1}, - CustomValues: []float64{5, 10}, - }, - { - Count: &prompb.Histogram_CountInt{CountInt: 11}, - Sum: 5, - Schema: -53, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}}, - PositiveDeltas: []int64{3, 5, -8}, - CustomValues: []float64{0, 1}, - }, + wantSeries: func() []combinedHistogram { + lbls := labels.FromStrings( + model.MetricNameLabel, "test_hist_to_nhcb", + "attr", "test_attr", + ) + return []combinedHistogram{ + { + ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 3, + Sum: 3, + Schema: -53, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, + PositiveBuckets: []int64{2, -2, 1}, + CustomValues: []float64{5, 10}, }, - Exemplars: []prompb.Exemplar{ - {Value: 1}, - {Value: 2}, + es: []exemplar.Exemplar{{Value: 1}}, + }, + { + ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 11, + Sum: 5, + Schema: -53, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, + PositiveBuckets: []int64{3, 5, -8}, + CustomValues: []float64{0, 1}, }, + es: []exemplar.Exemplar{{Value: 2}}, }, } }, @@ -1388,41 +1195,46 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: true, - wantSeries: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist_to_nhcb"}, - {Name: "attr", Value: "test_attr"}, - {Name: "otel_scope_name", Value: defaultScope.name}, - 
{Name: "otel_scope_schema_url", Value: defaultScope.schemaURL}, - {Name: "otel_scope_version", Value: defaultScope.version}, - {Name: "otel_scope_attr1", Value: "value1"}, - {Name: "otel_scope_attr2", Value: "value2"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 3}, - Sum: 3, - Schema: -53, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}}, - PositiveDeltas: []int64{2, -2, 1}, - CustomValues: []float64{5, 10}, - }, - { - Count: &prompb.Histogram_CountInt{CountInt: 11}, - Sum: 5, - Schema: -53, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}}, - PositiveDeltas: []int64{3, 5, -8}, - CustomValues: []float64{0, 1}, - }, + wantSeries: func() []combinedHistogram { + lbls := labels.FromStrings( + model.MetricNameLabel, "test_hist_to_nhcb", + "attr", "test_attr", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ) + return []combinedHistogram{ + { + ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 3, + Sum: 3, + Schema: -53, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, + PositiveBuckets: []int64{2, -2, 1}, + CustomValues: []float64{5, 10}, }, - Exemplars: []prompb.Exemplar{ - {Value: 1}, - {Value: 2}, + es: []exemplar.Exemplar{{Value: 1}}, + }, + { + ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 11, + Sum: 5, + Schema: -53, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, + PositiveBuckets: []int64{3, 5, -8}, + CustomValues: []float64{0, 1}, }, + es: []exemplar.Exemplar{{Value: 2}}, }, } }, @@ -1454,48 +1266,46 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - wantSeries: 
func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist_to_nhcb"}, - {Name: "attr", Value: "test_attr"}, - } - labelsAnother := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist_to_nhcb"}, - {Name: "attr", Value: "test_attr_two"}, - } - - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 6}, - Sum: 3, - Schema: -53, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}}, - PositiveDeltas: []int64{4, -2}, - CustomValues: []float64{0, 1}, - }, - }, - Exemplars: []prompb.Exemplar{ - {Value: 1}, + wantSeries: func() []combinedHistogram { + lbls := labels.FromStrings( + model.MetricNameLabel, "test_hist_to_nhcb", + "attr", "test_attr", + ) + labelsAnother := labels.FromStrings( + model.MetricNameLabel, "test_hist_to_nhcb", + "attr", "test_attr_two", + ) + + return []combinedHistogram{ + { + ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 6, + Sum: 3, + Schema: -53, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{4, -2}, + CustomValues: []float64{0, 1}, }, + es: []exemplar.Exemplar{{Value: 1}}, }, - timeSeriesSignature(labelsAnother): { - Labels: labelsAnother, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 11}, - Sum: 5, - Schema: -53, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}}, - PositiveDeltas: []int64{3, 5}, - CustomValues: []float64{0, 1}, - }, - }, - Exemplars: []prompb.Exemplar{ - {Value: 2}, + { + ls: labelsAnother, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 11, + Sum: 5, + Schema: -53, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{3, 5}, + CustomValues: []float64{0, 1}, }, + es: []exemplar.Exemplar{{Value: 2}}, }, } }, @@ -1505,7 +1315,8 @@ func 
TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) { t.Run(tt.name, func(t *testing.T) { metric := tt.metric() - converter := NewPrometheusConverter() + mockAppender := &mockCombinedAppender{} + converter := NewPrometheusConverter(mockAppender) namer := otlptranslator.MetricNamer{ WithMetricSuffixes: true, } @@ -1521,12 +1332,13 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) { namer.Build(TranslatorMetricFromOtelMetric(metric)), pmetric.AggregationTemporalityCumulative, tt.scope, + metadata.Metadata{}, ) require.NoError(t, err) require.Empty(t, annots) - require.Equal(t, tt.wantSeries(), converter.unique) + requireEqual(t, tt.wantSeries(), mockAppender.histograms) require.Empty(t, converter.conflicts) }) } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/labels/labels.go b/storage/remote/otlptranslator/prometheusremotewrite/labels/labels.go new file mode 100644 index 0000000000..2466309eb2 --- /dev/null +++ b/storage/remote/otlptranslator/prometheusremotewrite/labels/labels.go @@ -0,0 +1,601 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file is copied from model/labels. + +package labels + +import ( + //"bytes" + "slices" + "strings" + "unsafe" + + "github.com/cespare/xxhash/v2" + + common "github.com/prometheus/prometheus/model/labels" +) + +// Labels is a sorted set of labels. Order has to be guaranteed upon +// instantiation. 
+type Labels []common.Label + +func (ls Labels) Len() int { return len(ls) } +func (ls Labels) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] } +func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name } + +// ToModel returns the common Labels type from pre-sorted Labels. +// The original is no longer valid after this call. +func (ls Labels) MoveToModel() common.Labels { + return common.NewFromSorted(ls) +} + +// // Bytes returns an opaque, not-human-readable, encoding of ls, usable as a map key. +// // Encoding may change over time or between runs of Prometheus. +// func (ls Labels) Bytes(buf []byte) []byte { +// b := bytes.NewBuffer(buf[:0]) +// b.WriteByte(model.LabelSep) +// for i, l := range ls { +// if i > 0 { +// b.WriteByte(model.Sep) +// } +// b.WriteString(l.Name) +// b.WriteByte(model.Sep) +// b.WriteString(l.Value) +// } +// return b.Bytes() +// } + +// // MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean. +// // If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. +// func (ls Labels) MatchLabels(on bool, names ...string) Labels { +// matchedLabels := Labels{} + +// nameSet := make(map[string]struct{}, len(names)) +// for _, n := range names { +// nameSet[n] = struct{}{} +// } + +// for _, v := range ls { +// if _, ok := nameSet[v.Name]; on == ok && (on || v.Name != MetricName) { +// matchedLabels = append(matchedLabels, v) +// } +// } + +// return matchedLabels +// } + +// Hash returns a hash value for the label set. +// Note: the result is not guaranteed to be consistent across different runs of Prometheus. +func (ls Labels) Hash() uint64 { + // Use xxhash.Sum64(b) for fast path as it's faster. + b := make([]byte, 0, 1024) + for i, v := range ls { + if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) { + // If labels entry is 1KB+ do not allocate whole entry. 
+ h := xxhash.New() + _, _ = h.Write(b) + for _, v := range ls[i:] { + _, _ = h.WriteString(v.Name) + _, _ = h.Write(common.Seps) + _, _ = h.WriteString(v.Value) + _, _ = h.Write(common.Seps) + } + return h.Sum64() + } + + b = append(b, v.Name...) + b = append(b, common.Sep) + b = append(b, v.Value...) + b = append(b, common.Sep) + } + return xxhash.Sum64(b) +} + +// // HashForLabels returns a hash value for the labels matching the provided names. +// // 'names' have to be sorted in ascending order. +// func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { +// b = b[:0] +// i, j := 0, 0 +// for i < len(ls) && j < len(names) { +// switch { +// case names[j] < ls[i].Name: +// j++ +// case ls[i].Name < names[j]: +// i++ +// default: +// b = append(b, ls[i].Name...) +// b = append(b, sep) +// b = append(b, ls[i].Value...) +// b = append(b, sep) +// i++ +// j++ +// } +// } +// return xxhash.Sum64(b), b +// } + +// // HashWithoutLabels returns a hash value for all labels except those matching +// // the provided names. +// // 'names' have to be sorted in ascending order. +// func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { +// b = b[:0] +// j := 0 +// for i := range ls { +// for j < len(names) && names[j] < ls[i].Name { +// j++ +// } +// if ls[i].Name == MetricName || (j < len(names) && ls[i].Name == names[j]) { +// continue +// } +// b = append(b, ls[i].Name...) +// b = append(b, sep) +// b = append(b, ls[i].Value...) +// b = append(b, sep) +// } +// return xxhash.Sum64(b), b +// } + +// // BytesWithLabels is just as Bytes(), but only for labels matching names. +// // 'names' have to be sorted in ascending order. 
+// func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { +// b := bytes.NewBuffer(buf[:0]) +// b.WriteByte(labelSep) +// i, j := 0, 0 +// for i < len(ls) && j < len(names) { +// switch { +// case names[j] < ls[i].Name: +// j++ +// case ls[i].Name < names[j]: +// i++ +// default: +// if b.Len() > 1 { +// b.WriteByte(sep) +// } +// b.WriteString(ls[i].Name) +// b.WriteByte(sep) +// b.WriteString(ls[i].Value) +// i++ +// j++ +// } +// } +// return b.Bytes() +// } + +// // BytesWithoutLabels is just as Bytes(), but only for labels not matching names. +// // 'names' have to be sorted in ascending order. +// func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { +// b := bytes.NewBuffer(buf[:0]) +// b.WriteByte(labelSep) +// j := 0 +// for i := range ls { +// for j < len(names) && names[j] < ls[i].Name { +// j++ +// } +// if j < len(names) && ls[i].Name == names[j] { +// continue +// } +// if b.Len() > 1 { +// b.WriteByte(sep) +// } +// b.WriteString(ls[i].Name) +// b.WriteByte(sep) +// b.WriteString(ls[i].Value) +// } +// return b.Bytes() +// } + +// Copy returns a copy of the labels. +func (ls Labels) Copy() Labels { + res := make(Labels, len(ls)) + copy(res, ls) + return res +} + +// Get returns the value for the label with the given name. +// Returns an empty string if the label doesn't exist. +func (ls Labels) Get(name string) string { + for _, l := range ls { + if l.Name == name { + return l.Value + } + } + return "" +} + +// Has returns true if the label with the given name is present. +func (ls Labels) Has(name string) bool { + for _, l := range ls { + if l.Name == name { + return true + } + } + return false +} + +// HasDuplicateLabelNames returns whether ls has duplicate label names. +// It assumes that the labelset is sorted. 
+func (ls Labels) HasDuplicateLabelNames() (string, bool) { + for i, l := range ls { + if i == 0 { + continue + } + if l.Name == ls[i-1].Name { + return l.Name, true + } + } + return "", false +} + +// WithoutEmpty returns the labelset without empty labels. +// May return the same labelset. +func (ls Labels) WithoutEmpty() Labels { + for _, v := range ls { + if v.Value != "" { + continue + } + // Do not copy the slice until it's necessary. + els := make(Labels, 0, len(ls)-1) + for _, v := range ls { + if v.Value != "" { + els = append(els, v) + } + } + return els + } + return ls +} + +// ByteSize returns the approximate size of the labels in bytes including +// the two string headers size for name and value. +// Slice header size is ignored because it should be amortized to zero. +func (ls Labels) ByteSize() uint64 { + var size uint64 + for _, l := range ls { + size += uint64(len(l.Name)+len(l.Value)) + 2*uint64(unsafe.Sizeof("")) + } + return size +} + +// Equal returns whether the two label sets are equal. +func Equal(ls, o Labels) bool { + return slices.Equal(ls, o) +} + +// EmptyLabels returns n empty Labels value, for convenience. +func EmptyLabels() Labels { + return Labels{} +} + +// New returns a sorted Labels from the given labels. +// The caller has to guarantee that all label names are unique. +func New(ls ...common.Label) Labels { + set := make(Labels, 0, len(ls)) + set = append(set, ls...) + slices.SortFunc(set, func(a, b common.Label) int { return strings.Compare(a.Name, b.Name) }) + + return set +} + +// FromStrings creates new labels from pairs of strings. +func FromStrings(ss ...string) Labels { + if len(ss)%2 != 0 { + panic("invalid number of strings") + } + res := make(Labels, 0, len(ss)/2) + for i := 0; i < len(ss); i += 2 { + res = append(res, common.Label{Name: ss[i], Value: ss[i+1]}) + } + + slices.SortFunc(res, func(a, b common.Label) int { return strings.Compare(a.Name, b.Name) }) + return res +} + +// Compare compares the two label sets. 
+// The result will be 0 if a==b, <0 if a < b, and >0 if a > b. +func Compare(a, b Labels) int { + l := len(a) + if len(b) < l { + l = len(b) + } + + for i := 0; i < l; i++ { + if a[i].Name != b[i].Name { + if a[i].Name < b[i].Name { + return -1 + } + return 1 + } + if a[i].Value != b[i].Value { + if a[i].Value < b[i].Value { + return -1 + } + return 1 + } + } + // If all labels so far were in common, the set with fewer labels comes first. + return len(a) - len(b) +} + +// CopyFrom copies labels from b on top of whatever was in ls previously, +// reusing memory or expanding if needed. +func (ls *Labels) CopyFrom(b Labels) { + (*ls) = append((*ls)[:0], b...) +} + +// IsEmpty returns true if ls represents an empty set of labels. +func (ls Labels) IsEmpty() bool { + return len(ls) == 0 +} + +// Range calls f on each label. +func (ls Labels) Range(f func(l common.Label)) { + for _, l := range ls { + f(l) + } +} + +// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration. +func (ls Labels) Validate(f func(l common.Label) error) error { + for _, l := range ls { + if err := f(l); err != nil { + return err + } + } + return nil +} + +// DropMetricName returns Labels with the "__name__" removed. +// Deprecated: Use DropReserved instead. +func (ls Labels) DropMetricName() Labels { + return ls.DropReserved(func(n string) bool { return n == common.MetricName }) +} + +// DropReserved returns Labels without the chosen (via shouldDropFn) reserved (starting with underscore) labels. +func (ls Labels) DropReserved(shouldDropFn func(name string) bool) Labels { + rm := 0 + for i, l := range ls { + if l.Name[0] > '_' { // Stop looking if we've gone past special labels. + break + } + if shouldDropFn(l.Name) { + i := i - rm // Offsetting after removals. + if i == 0 { // Make common case fast with no allocations. 
+ ls = ls[1:] + } else { + // Avoid modifying original Labels - use [:i:i] so that left slice would not + // have any spare capacity and append would have to allocate a new slice for the result. + ls = append(ls[:i:i], ls[i+1:]...) + } + rm++ + } + } + return ls +} + +// InternStrings calls intern on every string value inside ls, replacing them with what it returns. +func (ls *Labels) InternStrings(intern func(string) string) { + for i, l := range *ls { + (*ls)[i].Name = intern(l.Name) + (*ls)[i].Value = intern(l.Value) + } +} + +// ReleaseStrings calls release on every string value inside ls. +func (ls Labels) ReleaseStrings(release func(string)) { + for _, l := range ls { + release(l.Name) + release(l.Value) + } +} + +// Builder allows modifying Labels. +type Builder struct { + base Labels + del []string + add []common.Label +} + +// NewBuilder returns a new LabelsBuilder. +func NewBuilder(base Labels) *Builder { + b := &Builder{ + del: make([]string, 0, 5), + add: make([]common.Label, 0, 5), + } + b.Reset(base) + return b +} + +// Reset clears all current state for the builder. +func (b *Builder) Reset(base Labels) { + b.base = base + b.del = b.del[:0] + b.add = b.add[:0] + b.base.Range(func(l common.Label) { + if l.Value == "" { + b.del = append(b.del, l.Name) + } + }) +} + +// Del deletes the label of the given name. +func (b *Builder) Del(ns ...string) *Builder { + for _, n := range ns { + for i, a := range b.add { + if a.Name == n { + b.add = append(b.add[:i], b.add[i+1:]...) + } + } + b.del = append(b.del, n) + } + return b +} + +// Keep removes all labels from the base except those with the given names. +func (b *Builder) Keep(ns ...string) *Builder { + b.base.Range(func(l common.Label) { + if slices.Contains(ns, l.Name) { + return + } + b.del = append(b.del, l.Name) + }) + return b +} + +// Set the name/value pair as a label. A value of "" means delete that label. 
+func (b *Builder) Set(n, v string) *Builder { + if v == "" { + // Empty labels are the same as missing labels. + return b.Del(n) + } + for i, a := range b.add { + if a.Name == n { + b.add[i].Value = v + return b + } + } + b.add = append(b.add, common.Label{Name: n, Value: v}) + + return b +} + +func (b *Builder) Get(n string) string { + // Del() removes entries from .add but Set() does not remove from .del, so check .add first. + for _, a := range b.add { + if a.Name == n { + return a.Value + } + } + if slices.Contains(b.del, n) { + return "" + } + return b.base.Get(n) +} + +// Range calls f on each label in the Builder. +func (b *Builder) Range(f func(l common.Label)) { + // Stack-based arrays to avoid heap allocation in most cases. + var addStack [128]common.Label + var delStack [128]string + // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). + origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) + b.base.Range(func(l common.Label) { + if !slices.Contains(origDel, l.Name) && !common.Contains(origAdd, l.Name) { + f(l) + } + }) + for _, a := range origAdd { + f(a) + } +} + +// Labels returns the labels from the builder. +// If no modifications were made, the original labels are returned. +func (b *Builder) Labels() Labels { + if len(b.del) == 0 && len(b.add) == 0 { + return b.base + } + + expectedSize := len(b.base) + len(b.add) - len(b.del) + if expectedSize < 1 { + expectedSize = 1 + } + res := make(Labels, 0, expectedSize) + for _, l := range b.base { + if slices.Contains(b.del, l.Name) || common.Contains(b.add, l.Name) { + continue + } + res = append(res, l) + } + if len(b.add) > 0 { // Base is already in order, so we only need to sort if we add to it. + res = append(res, b.add...) + slices.SortFunc(res, func(a, b common.Label) int { return strings.Compare(a.Name, b.Name) }) + } + return res +} + +// ScratchBuilder allows efficient construction of a Labels from scratch. 
+type ScratchBuilder struct { + add Labels +} + +// SymbolTable is no-op, just for api parity with dedupelabels. +type SymbolTable struct{} + +func NewSymbolTable() *SymbolTable { return nil } + +func (t *SymbolTable) Len() int { return 0 } + +// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries. +func NewScratchBuilder(n int) ScratchBuilder { + return ScratchBuilder{add: make([]common.Label, 0, n)} +} + +// NewBuilderWithSymbolTable creates a Builder, for api parity with dedupelabels. +func NewBuilderWithSymbolTable(_ *SymbolTable) *Builder { + return NewBuilder(EmptyLabels()) +} + +// NewScratchBuilderWithSymbolTable creates a ScratchBuilder, for api parity with dedupelabels. +func NewScratchBuilderWithSymbolTable(_ *SymbolTable, n int) ScratchBuilder { + return NewScratchBuilder(n) +} + +func (b *ScratchBuilder) SetSymbolTable(_ *SymbolTable) { + // no-op +} + +func (b *ScratchBuilder) Reset() { + b.add = b.add[:0] +} + +// Add a name/value pair. +// Note if you Add the same name twice you will get a duplicate label, which is invalid. +func (b *ScratchBuilder) Add(name, value string) { + b.add = append(b.add, common.Label{Name: name, Value: value}) +} + +// UnsafeAddBytes adds a name/value pair, using []byte instead of string. +// The '-tags stringlabels' version of this function is unsafe, hence the name. +// This version is safe - it copies the strings immediately - but we keep the same name so everything compiles. +func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { + b.add = append(b.add, common.Label{Name: string(name), Value: string(value)}) +} + +// Sort the labels added so far by name. +func (b *ScratchBuilder) Sort() { + slices.SortFunc(b.add, func(a, b common.Label) int { return strings.Compare(a.Name, b.Name) }) +} + +// Assign is for when you already have a Labels which you want this ScratchBuilder to return. +func (b *ScratchBuilder) Assign(ls Labels) { + b.add = append(b.add[:0], ls...) 
// Copy on top of our slice, so we don't retain the input slice. +} + +// Labels returns the name/value pairs added so far as a Labels object. +// Note: if you want them sorted, call Sort() first. +func (b *ScratchBuilder) Labels() Labels { + // Copy the slice, so the next use of ScratchBuilder doesn't overwrite. + return append([]common.Label{}, b.add...) +} + +// Overwrite the newly-built Labels out to ls. +// Callers must ensure that there are no other references to ls, or any strings fetched from it. +func (b *ScratchBuilder) Overwrite(ls *Labels) { + *ls = append((*ls)[:0], b.add...) +} + +// SizeOfLabels returns the approximate space required for n copies of a label. +func SizeOfLabels(name, value string, n uint64) uint64 { + return (uint64(len(name)) + uint64(unsafe.Sizeof(name)) + uint64(len(value)) + uint64(unsafe.Sizeof(value))) * n +} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/labels/labels_test.go b/storage/remote/otlptranslator/prometheusremotewrite/labels/labels_test.go new file mode 100644 index 0000000000..060917df8b --- /dev/null +++ b/storage/remote/otlptranslator/prometheusremotewrite/labels/labels_test.go @@ -0,0 +1,1035 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package labels + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + "strings" + "testing" + + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" + + common "github.com/prometheus/prometheus/model/labels" +) + +// var ( +// s254 = strings.Repeat("x", 254) // Edge cases for stringlabels encoding. +// s255 = strings.Repeat("x", 255) +// ) + +// var testCaseLabels = []Labels{ +// FromStrings("t1", "t1", "t2", "t2"), +// {}, +// FromStrings("service.name", "t1", "whatever\\whatever", "t2"), +// FromStrings("aaa", "111", "xx", s254), +// FromStrings("aaa", "111", "xx", s255), +// FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"), +// } + +// func TestLabels_String(t *testing.T) { +// expected := []string{ // Values must line up with testCaseLabels. +// "{t1=\"t1\", t2=\"t2\"}", +// "{}", +// `{"service.name"="t1", "whatever\\whatever"="t2"}`, +// `{aaa="111", xx="` + s254 + `"}`, +// `{aaa="111", xx="` + s255 + `"}`, +// `{" container"="prometheus", " namespace"="observability-prometheus", __name__="kube_pod_container_status_last_terminated_exitcode", cluster="prod-af-north-0", instance="kube-state-metrics-0:kube-state-metrics:ksm", job="kube-state-metrics/kube-state-metrics", pod="observability-prometheus-0", uid="d3ec90b2-4975-4607-b45d-b9ad64bb417e"}`, +// } +// require.Len(t, expected, len(testCaseLabels)) +// for i, c := range expected { +// str := testCaseLabels[i].String() +// require.Equal(t, c, str) +// } +// } + +// func BenchmarkString(b *testing.B) { +// ls := New(benchmarkLabels...) 
+// for i := 0; i < b.N; i++ { +// _ = ls.String() +// } +// } + +// func TestSizeOfLabels(t *testing.T) { +// require.Len(t, expectedSizeOfLabels, len(testCaseLabels)) +// for i, c := range expectedSizeOfLabels { // Declared in build-tag-specific files, e.g. labels_slicelabels_test.go. +// var total uint64 +// testCaseLabels[i].Range(func(l Label) { +// total += SizeOfLabels(l.Name, l.Value, 1) +// }) +// require.Equal(t, c, total) +// } +// } + +// func TestByteSize(t *testing.T) { +// require.Len(t, expectedByteSize, len(testCaseLabels)) +// for i, c := range expectedByteSize { // Declared in build-tag-specific files, e.g. labels_slicelabels_test.go. +// require.Equal(t, c, testCaseLabels[i].ByteSize()) +// } +// } + +// var GlobalTotal uint64 // Encourage the compiler not to elide the benchmark computation. + +// func BenchmarkSize(b *testing.B) { +// lb := New(benchmarkLabels...) +// b.Run("SizeOfLabels", func(b *testing.B) { +// for i := 0; i < b.N; i++ { +// var total uint64 +// lb.Range(func(l Label) { +// total += SizeOfLabels(l.Name, l.Value, 1) +// }) +// GlobalTotal = total +// } +// }) +// b.Run("ByteSize", func(b *testing.B) { +// for i := 0; i < b.N; i++ { +// GlobalTotal = lb.ByteSize() +// } +// }) +// } + +// func TestLabels_MatchLabels(t *testing.T) { +// labels := FromStrings( +// "__name__", "ALERTS", +// "alertname", "HTTPRequestRateLow", +// "alertstate", "pending", +// "instance", "0", +// "job", "app-server", +// "severity", "critical") + +// tests := []struct { +// providedNames []string +// on bool +// expected Labels +// }{ +// // on = true, explicitly including metric name in matching. +// { +// providedNames: []string{ +// "__name__", +// "alertname", +// "alertstate", +// "instance", +// }, +// on: true, +// expected: FromStrings( +// "__name__", "ALERTS", +// "alertname", "HTTPRequestRateLow", +// "alertstate", "pending", +// "instance", "0"), +// }, +// // on = false, explicitly excluding metric name from matching. 
+// { +// providedNames: []string{ +// "__name__", +// "alertname", +// "alertstate", +// "instance", +// }, +// on: false, +// expected: FromStrings( +// "job", "app-server", +// "severity", "critical"), +// }, +// // on = true, explicitly excluding metric name from matching. +// { +// providedNames: []string{ +// "alertname", +// "alertstate", +// "instance", +// }, +// on: true, +// expected: FromStrings( +// "alertname", "HTTPRequestRateLow", +// "alertstate", "pending", +// "instance", "0"), +// }, +// // on = false, implicitly excluding metric name from matching. +// { +// providedNames: []string{ +// "alertname", +// "alertstate", +// "instance", +// }, +// on: false, +// expected: FromStrings( +// "job", "app-server", +// "severity", "critical"), +// }, +// } + +// for i, test := range tests { +// got := labels.MatchLabels(test.on, test.providedNames...) +// require.True(t, Equal(test.expected, got), "unexpected labelset for test case %d", i) +// } +// } + +// func TestLabels_HasDuplicateLabelNames(t *testing.T) { +// cases := []struct { +// Input Labels +// Duplicate bool +// LabelName string +// }{ +// { +// Input: FromMap(map[string]string{"__name__": "up", "hostname": "localhost"}), +// Duplicate: false, +// }, { +// Input: FromStrings("__name__", "up", "hostname", "localhost", "hostname", "127.0.0.1"), +// Duplicate: true, +// LabelName: "hostname", +// }, +// } + +// for i, c := range cases { +// l, d := c.Input.HasDuplicateLabelNames() +// require.Equal(t, c.Duplicate, d, "test %d: incorrect duplicate bool", i) +// require.Equal(t, c.LabelName, l, "test %d: incorrect label name", i) +// } +// } + +// func TestLabels_WithoutEmpty(t *testing.T) { +// for _, test := range []struct { +// input Labels +// expected Labels +// }{ +// { +// input: FromStrings( +// "foo", "", +// "bar", ""), +// expected: EmptyLabels(), +// }, +// { +// input: FromStrings( +// "foo", "", +// "bar", "", +// "baz", ""), +// expected: EmptyLabels(), +// }, +// { +// input: 
FromStrings( +// "__name__", "test", +// "hostname", "localhost", +// "job", "check"), +// expected: FromStrings( +// "__name__", "test", +// "hostname", "localhost", +// "job", "check"), +// }, +// { +// input: FromStrings( +// "__name__", "test", +// "hostname", "localhost", +// "bar", "", +// "job", "check"), +// expected: FromStrings( +// "__name__", "test", +// "hostname", "localhost", +// "job", "check"), +// }, +// { +// input: FromStrings( +// "__name__", "test", +// "foo", "", +// "hostname", "localhost", +// "bar", "", +// "job", "check"), +// expected: FromStrings( +// "__name__", "test", +// "hostname", "localhost", +// "job", "check"), +// }, +// { +// input: FromStrings( +// "__name__", "test", +// "foo", "", +// "baz", "", +// "hostname", "localhost", +// "bar", "", +// "job", "check"), +// expected: FromStrings( +// "__name__", "test", +// "hostname", "localhost", +// "job", "check"), +// }, +// } { +// t.Run("", func(t *testing.T) { +// require.True(t, Equal(test.expected, test.input.WithoutEmpty())) +// }) +// } +// } + +// func TestLabels_IsValid(t *testing.T) { +// for _, test := range []struct { +// input Labels +// expected bool +// }{ +// { +// input: FromStrings( +// "__name__", "test", +// "hostname", "localhost", +// "job", "check", +// ), +// expected: true, +// }, +// { +// input: FromStrings( +// "__name__", "test:ms", +// "hostname_123", "localhost", +// "_job", "check", +// ), +// expected: true, +// }, +// { +// input: FromStrings("__name__", "test-ms"), +// expected: false, +// }, +// { +// input: FromStrings("__name__", "0zz"), +// expected: false, +// }, +// { +// input: FromStrings("abc:xyz", "invalid"), +// expected: false, +// }, +// { +// input: FromStrings("123abc", "invalid"), +// expected: false, +// }, +// { +// input: FromStrings("中文abc", "invalid"), +// expected: false, +// }, +// { +// input: FromStrings("invalid", "aa\xe2"), +// expected: false, +// }, +// { +// input: FromStrings("invalid", "\xF7\xBF\xBF\xBF"), +// 
expected: false, +// }, +// } { +// t.Run("", func(t *testing.T) { +// require.Equal(t, test.expected, test.input.IsValid(model.LegacyValidation)) +// }) +// } +// } + +// func TestLabels_ValidationModes(t *testing.T) { +// for _, test := range []struct { +// input Labels +// callMode model.ValidationScheme +// expected bool +// }{ +// { +// input: FromStrings( +// "__name__", "test.metric", +// "hostname", "localhost", +// "job", "check", +// ), +// callMode: model.UTF8Validation, +// expected: true, +// }, +// { +// input: FromStrings( +// "__name__", "test", +// "\xc5 bad utf8", "localhost", +// "job", "check", +// ), +// callMode: model.UTF8Validation, +// expected: false, +// }, +// { +// input: FromStrings( +// "__name__", "test.utf8.metric", +// "hostname", "localhost", +// "job", "check", +// ), +// callMode: model.LegacyValidation, +// expected: false, +// }, +// { +// input: FromStrings( +// "__name__", "test", +// "host.name", "localhost", +// "job", "check", +// ), +// callMode: model.LegacyValidation, +// expected: false, +// }, +// } { +// require.Equal(t, test.expected, test.input.IsValid(test.callMode)) +// } +// } + +func TestLabels_Equal(t *testing.T) { + labels := FromStrings( + "aaa", "111", + "bbb", "222") + + tests := []struct { + compared Labels + expected bool + }{ + { + compared: FromStrings( + "aaa", "111", + "bbb", "222", + "ccc", "333"), + expected: false, + }, + { + compared: FromStrings( + "aaa", "111", + "bar", "222"), + expected: false, + }, + { + compared: FromStrings( + "aaa", "111", + "bbb", "233"), + expected: false, + }, + { + compared: FromStrings( + "aaa", "111", + "bbb", "222"), + expected: true, + }, + } + + for i, test := range tests { + got := Equal(labels, test.compared) + require.Equal(t, test.expected, got, "unexpected comparison result for test case %d", i) + } +} + +func TestLabels_FromStrings(t *testing.T) { + labels := FromStrings("aaa", "111", "bbb", "222") + x := 0 + labels.Range(func(l common.Label) { + switch x 
{ + case 0: + require.Equal(t, common.Label{Name: "aaa", Value: "111"}, l, "unexpected value") + case 1: + require.Equal(t, common.Label{Name: "bbb", Value: "222"}, l, "unexpected value") + default: + t.Fatalf("unexpected labelset value %d: %v", x, l) + } + x++ + }) + + require.Panics(t, func() { FromStrings("aaa", "111", "bbb") }) //nolint:staticcheck // Ignore SA5012, error is intentional test. +} + +func TestLabels_Compare(t *testing.T) { + labels := FromStrings( + "aaa", "111", + "bbb", "222") + + tests := []struct { + compared Labels + expected int + }{ + { + compared: FromStrings( + "aaa", "110", + "bbb", "222"), + expected: 1, + }, + { + compared: FromStrings( + "aaa", "111", + "bbb", "233"), + expected: -1, + }, + { + compared: FromStrings( + "aaa", "111", + "bar", "222"), + expected: 1, + }, + { + compared: FromStrings( + "aaa", "111", + "bbc", "222"), + expected: -1, + }, + { + compared: FromStrings( + "aaa", "111", + "bb", "222"), + expected: 1, + }, + { + compared: FromStrings( + "aaa", "111", + "bbbb", "222"), + expected: -1, + }, + { + compared: FromStrings( + "aaa", "111"), + expected: 1, + }, + { + compared: FromStrings( + "aaa", "111", + "bbb", "222", + "ccc", "333", + "ddd", "444"), + expected: -2, + }, + { + compared: FromStrings( + "aaa", "111", + "bbb", "222"), + expected: 0, + }, + { + compared: EmptyLabels(), + expected: 1, + }, + } + + sign := func(a int) int { + switch { + case a < 0: + return -1 + case a > 0: + return 1 + } + return 0 + } + + for i, test := range tests { + got := Compare(labels, test.compared) + require.Equal(t, sign(test.expected), sign(got), "unexpected comparison result for test case %d", i) + got = Compare(test.compared, labels) + require.Equal(t, -sign(test.expected), sign(got), "unexpected comparison result for reverse test case %d", i) + } +} + +func TestLabels_Has(t *testing.T) { + tests := []struct { + input string + expected bool + }{ + { + input: "foo", + expected: false, + }, + { + input: "aaa", + expected: 
true, + }, + } + + labelsSet := FromStrings( + "aaa", "111", + "bbb", "222") + + for i, test := range tests { + got := labelsSet.Has(test.input) + require.Equal(t, test.expected, got, "unexpected comparison result for test case %d", i) + } +} + +func TestLabels_Get(t *testing.T) { + require.Empty(t, FromStrings("aaa", "111", "bbb", "222").Get("foo")) + require.Equal(t, "111", FromStrings("aaaa", "111", "bbb", "222").Get("aaaa")) + require.Equal(t, "222", FromStrings("aaaa", "111", "bbb", "222").Get("bbb")) +} + +func TestLabels_DropMetricName(t *testing.T) { + require.True(t, Equal(FromStrings("aaa", "111", "bbb", "222"), FromStrings("aaa", "111", "bbb", "222").DropMetricName())) + require.True(t, Equal(FromStrings("aaa", "111"), FromStrings(common.MetricName, "myname", "aaa", "111").DropMetricName())) + + original := FromStrings("__aaa__", "111", common.MetricName, "myname", "bbb", "222") + check := original.Copy() + require.True(t, Equal(FromStrings("__aaa__", "111", "bbb", "222"), check.DropMetricName())) + require.True(t, Equal(original, check)) +} + +func TestLabels_DropReserved(t *testing.T) { + shouldDropFn := func(n string) bool { + return n == common.MetricName || n == "__something__" + } + require.True(t, Equal(FromStrings("aaa", "111", "bbb", "222"), FromStrings("aaa", "111", "bbb", "222").DropReserved(shouldDropFn))) + require.True(t, Equal(FromStrings("aaa", "111"), FromStrings(common.MetricName, "myname", "aaa", "111").DropReserved(shouldDropFn))) + require.True(t, Equal(FromStrings("aaa", "111"), FromStrings(common.MetricName, "myname", "__something__", string(model.MetricTypeCounter), "aaa", "111").DropReserved(shouldDropFn))) + + original := FromStrings("__aaa__", "111", common.MetricName, "myname", "bbb", "222") + check := original.Copy() + require.True(t, Equal(FromStrings("__aaa__", "111", "bbb", "222"), check.DropReserved(shouldDropFn))) + require.True(t, Equal(original, check)) +} + +func ScratchBuilderForBenchmark() ScratchBuilder { + // 
(Only relevant to -tags dedupelabels: stuff the symbol table before adding the real labels, to avoid having everything fitting into 1 byte.) + b := NewScratchBuilder(256) + for i := 0; i < 256; i++ { + b.Add(fmt.Sprintf("name%d", i), fmt.Sprintf("value%d", i)) + } + b.Labels() + b.Reset() + return b +} + +func NewForBenchmark(ls ...common.Label) Labels { + b := ScratchBuilderForBenchmark() + for _, l := range ls { + b.Add(l.Name, l.Value) + } + b.Sort() + return b.Labels() +} + +func FromStringsForBenchmark(ss ...string) Labels { + if len(ss)%2 != 0 { + panic("invalid number of strings") + } + b := ScratchBuilderForBenchmark() + for i := 0; i < len(ss); i += 2 { + b.Add(ss[i], ss[i+1]) + } + b.Sort() + return b.Labels() +} + +// BenchmarkLabels_Get was written to check whether a binary search can improve the performance vs the linear search implementation +// The results have shown that binary search would only be better when searching last labels in scenarios with more than 10 labels. +// In the following list, `old` is the linear search while `new` is the binary search implementation (without calling sort.Search, which performs even worse here) +// +// name old time/op new time/op delta +// Labels_Get/with_5_labels/get_first_label 5.12ns ± 0% 14.24ns ± 0% ~ (p=1.000 n=1+1) +// Labels_Get/with_5_labels/get_middle_label 13.5ns ± 0% 18.5ns ± 0% ~ (p=1.000 n=1+1) +// Labels_Get/with_5_labels/get_last_label 21.9ns ± 0% 18.9ns ± 0% ~ (p=1.000 n=1+1) +// Labels_Get/with_10_labels/get_first_label 5.11ns ± 0% 19.47ns ± 0% ~ (p=1.000 n=1+1) +// Labels_Get/with_10_labels/get_middle_label 26.2ns ± 0% 19.3ns ± 0% ~ (p=1.000 n=1+1) +// Labels_Get/with_10_labels/get_last_label 42.8ns ± 0% 23.4ns ± 0% ~ (p=1.000 n=1+1) +// Labels_Get/with_30_labels/get_first_label 5.10ns ± 0% 24.63ns ± 0% ~ (p=1.000 n=1+1) +// Labels_Get/with_30_labels/get_middle_label 75.8ns ± 0% 29.7ns ± 0% ~ (p=1.000 n=1+1) +// Labels_Get/with_30_labels/get_last_label 169ns ± 0% 29ns ± 0% ~ (p=1.000 n=1+1) 
+func BenchmarkLabels_Get(b *testing.B) { + maxLabels := 30 + allLabels := make([]common.Label, maxLabels) + for i := 0; i < maxLabels; i++ { + allLabels[i] = common.Label{Name: strings.Repeat(string('a'+byte(i)), 5+(i%5))} + } + for _, size := range []int{5, 10, maxLabels} { + b.Run(fmt.Sprintf("with %d labels", size), func(b *testing.B) { + labels := NewForBenchmark(allLabels[:size]...) + for _, scenario := range []struct { + desc, label string + }{ + {"first label", allLabels[0].Name}, + {"middle label", allLabels[size/2].Name}, + {"last label", allLabels[size-1].Name}, + {"not-found label", "benchmark"}, + } { + b.Run(scenario.desc, func(b *testing.B) { + b.Run("get", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = labels.Get(scenario.label) + } + }) + b.Run("has", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = labels.Has(scenario.label) + } + }) + }) + } + }) + } +} + +var comparisonBenchmarkScenarios = []struct { + desc string + base, other Labels +}{ + { + "equal", + FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + }, + { + "not equal", + FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "a_different_label_value"), + }, + { + "different sizes", + FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStringsForBenchmark("a_label_name", "a_label_value"), + }, + { + "lots", + FromStringsForBenchmark("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrz"), + FromStringsForBenchmark("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrr"), + }, + { + "real long 
equal", + FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", "container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", "namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"), + FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", "container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", "namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"), + }, + { + "real long different end", + FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", "container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", "namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"), + FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", "container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", "namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "deadbeef-0000-1111-2222-b9ad64bb417e"), + }, +} + +func BenchmarkLabels_Equals(b *testing.B) { + for _, scenario := range comparisonBenchmarkScenarios { + b.Run(scenario.desc, func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = Equal(scenario.base, scenario.other) + } + }) + } +} + +func BenchmarkLabels_Compare(b *testing.B) { + for _, scenario := range comparisonBenchmarkScenarios { + b.Run(scenario.desc, func(b *testing.B) { + 
b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = Compare(scenario.base, scenario.other) + } + }) + } +} + +func TestLabels_Copy(t *testing.T) { + require.Equal(t, FromStrings("aaa", "111", "bbb", "222"), FromStrings("aaa", "111", "bbb", "222").Copy()) +} + +// func TestLabels_Map(t *testing.T) { +// require.Equal(t, map[string]string{"aaa": "111", "bbb": "222"}, FromStrings("aaa", "111", "bbb", "222").Map()) +// } + +// func TestLabels_BytesWithLabels(t *testing.T) { +// require.Equal(t, FromStrings("aaa", "111", "bbb", "222").Bytes(nil), FromStrings("aaa", "111", "bbb", "222", "ccc", "333").BytesWithLabels(nil, "aaa", "bbb")) +// require.Equal(t, FromStrings().Bytes(nil), FromStrings("aaa", "111", "bbb", "222", "ccc", "333").BytesWithLabels(nil)) +// } + +// func TestLabels_BytesWithoutLabels(t *testing.T) { +// require.Equal(t, FromStrings("aaa", "111").Bytes(nil), FromStrings("aaa", "111", "bbb", "222", "ccc", "333").BytesWithoutLabels(nil, "bbb", "ccc")) +// require.Equal(t, FromStrings(common.MetricName, "333", "aaa", "111").Bytes(nil), FromStrings(common.MetricName, "333", "aaa", "111", "bbb", "222").BytesWithoutLabels(nil, "bbb")) +// require.Equal(t, FromStrings("aaa", "111").Bytes(nil), FromStrings(common.MetricName, "333", "aaa", "111", "bbb", "222").BytesWithoutLabels(nil, common.MetricName, "bbb")) +// } + +func TestBuilder(t *testing.T) { + reuseBuilder := NewBuilderWithSymbolTable(NewSymbolTable()) + for i, tcase := range []struct { + base Labels + del []string + keep []string + set []common.Label + want Labels + }{ + { + base: FromStrings("aaa", "111"), + want: FromStrings("aaa", "111"), + }, + { + base: EmptyLabels(), + set: []common.Label{{"aaa", "444"}, {"bbb", "555"}, {"ccc", "666"}}, + want: FromStrings("aaa", "444", "bbb", "555", "ccc", "666"), + }, + { + base: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"), + set: []common.Label{{"aaa", "444"}, {"bbb", "555"}, {"ccc", "666"}}, + want: FromStrings("aaa", "444", "bbb", "555", "ccc", 
"666"), + }, + { + base: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"), + del: []string{"bbb"}, + want: FromStrings("aaa", "111", "ccc", "333"), + }, + { + set: []common.Label{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}}, + del: []string{"bbb"}, + want: FromStrings("aaa", "111", "ccc", "333"), + }, + { + base: FromStrings("aaa", "111"), + set: []common.Label{{"bbb", "222"}}, + want: FromStrings("aaa", "111", "bbb", "222"), + }, + { + base: FromStrings("aaa", "111"), + set: []common.Label{{"bbb", "222"}, {"bbb", "333"}}, + want: FromStrings("aaa", "111", "bbb", "333"), + }, + { + base: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"), + del: []string{"bbb"}, + set: []common.Label{{"ddd", "444"}}, + want: FromStrings("aaa", "111", "ccc", "333", "ddd", "444"), + }, + { // Blank value is interpreted as delete. + base: FromStrings("aaa", "111", "bbb", "", "ccc", "333"), + want: FromStrings("aaa", "111", "ccc", "333"), + }, + { + base: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"), + set: []common.Label{{"bbb", ""}}, + want: FromStrings("aaa", "111", "ccc", "333"), + }, + { + base: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"), + keep: []string{"bbb"}, + want: FromStrings("bbb", "222"), + }, + { + base: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"), + keep: []string{"aaa", "ccc"}, + want: FromStrings("aaa", "111", "ccc", "333"), + }, + { + base: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"), + del: []string{"bbb"}, + set: []common.Label{{"ddd", "444"}}, + keep: []string{"aaa", "ddd"}, + want: FromStrings("aaa", "111", "ddd", "444"), + }, + } { + test := func(t *testing.T, b *Builder) { + for _, lbl := range tcase.set { + b.Set(lbl.Name, lbl.Value) + } + if len(tcase.keep) > 0 { + b.Keep(tcase.keep...) + } + b.Del(tcase.del...) + require.True(t, Equal(tcase.want, b.Labels())) + + // Check what happens when we call Range and mutate the builder. 
+ b.Range(func(l common.Label) { + if l.Name == "aaa" || l.Name == "bbb" { + b.Del(l.Name) + } + }) + // require.Equal(t, tcase.want.BytesWithoutLabels(nil, "aaa", "bbb"), b.Labels().Bytes(nil)) + } + t.Run(fmt.Sprintf("NewBuilder %d", i), func(t *testing.T) { + test(t, NewBuilder(tcase.base)) + }) + t.Run(fmt.Sprintf("NewSymbolTable %d", i), func(t *testing.T) { + b := NewBuilderWithSymbolTable(NewSymbolTable()) + b.Reset(tcase.base) + test(t, b) + }) + t.Run(fmt.Sprintf("reuseBuilder %d", i), func(t *testing.T) { + reuseBuilder.Reset(tcase.base) + test(t, reuseBuilder) + }) + } + t.Run("set_after_del", func(t *testing.T) { + b := NewBuilder(FromStrings("aaa", "111")) + b.Del("bbb") + b.Set("bbb", "222") + require.Equal(t, FromStrings("aaa", "111", "bbb", "222"), b.Labels()) + require.Equal(t, "222", b.Get("bbb")) + }) +} + +func TestScratchBuilder(t *testing.T) { + for i, tcase := range []struct { + add []common.Label + want Labels + }{ + { + add: []common.Label{}, + want: EmptyLabels(), + }, + { + add: []common.Label{{"aaa", "111"}}, + want: FromStrings("aaa", "111"), + }, + { + add: []common.Label{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}}, + want: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"), + }, + { + add: []common.Label{{"bbb", "222"}, {"aaa", "111"}, {"ccc", "333"}}, + want: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"), + }, + { + add: []common.Label{{"ddd", "444"}}, + want: FromStrings("ddd", "444"), + }, + } { + t.Run(strconv.Itoa(i), func(t *testing.T) { + b := NewScratchBuilder(len(tcase.add)) + for _, lbl := range tcase.add { + b.Add(lbl.Name, lbl.Value) + } + b.Sort() + require.True(t, Equal(tcase.want, b.Labels())) + b.Assign(tcase.want) + require.True(t, Equal(tcase.want, b.Labels())) + }) + } +} + +// func TestLabels_Hash(t *testing.T) { +// lbls := FromStrings("foo", "bar", "baz", "qux") +// hash1, hash2 := lbls.Hash(), lbls.Hash() +// require.Equal(t, hash1, hash2) +// require.NotEqual(t, lbls.Hash(), FromStrings("foo", 
"bar").Hash(), "different labels match.") +// } + +// var benchmarkLabelsResult uint64 + +// func BenchmarkLabels_Hash(b *testing.B) { +// for _, tcase := range []struct { +// name string +// lbls Labels +// }{ +// { +// name: "typical labels under 1KB", +// lbls: func() Labels { +// b := NewBuilder(EmptyLabels()) +// for i := 0; i < 10; i++ { +// // Label ~20B name, 50B value. +// b.Set(fmt.Sprintf("abcdefghijabcdefghijabcdefghij%d", i), fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)) +// } +// return b.Labels() +// }(), +// }, +// { +// name: "bigger labels over 1KB", +// lbls: func() Labels { +// b := NewBuilder(EmptyLabels()) +// for i := 0; i < 10; i++ { +// // Label ~50B name, 50B value. +// b.Set(fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i), fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)) +// } +// return b.Labels() +// }(), +// }, +// { +// name: "extremely large label value 10MB", +// lbls: func() Labels { +// lbl := &strings.Builder{} +// lbl.Grow(1024 * 1024 * 10) // 10MB. 
+// word := "abcdefghij" +// for i := 0; i < lbl.Cap()/len(word); i++ { +// _, _ = lbl.WriteString(word) +// } +// return FromStrings("__name__", lbl.String()) +// }(), +// }, +// } { +// b.Run(tcase.name, func(b *testing.B) { +// var h uint64 + +// b.ReportAllocs() +// b.ResetTimer() +// for i := 0; i < b.N; i++ { +// h = tcase.lbls.Hash() +// } +// benchmarkLabelsResult = h +// }) +// } +// } + +var benchmarkLabels = []common.Label{ + {"job", "node"}, + {"instance", "123.123.1.211:9090"}, + {"path", "/api/v1/namespaces//deployments/"}, + {"method", http.MethodGet}, + {"namespace", "system"}, + {"status", "500"}, + {"prometheus", "prometheus-core-1"}, + {"datacenter", "eu-west-1"}, + {"pod_name", "abcdef-99999-defee"}, +} + +func BenchmarkBuilder(b *testing.B) { + var l Labels + builder := NewBuilder(EmptyLabels()) + for i := 0; i < b.N; i++ { + builder.Reset(EmptyLabels()) + for _, l := range benchmarkLabels { + builder.Set(l.Name, l.Value) + } + l = builder.Labels() + } + require.Equal(b, 9, l.Len()) +} + +func BenchmarkLabels_Copy(b *testing.B) { + l := NewForBenchmark(benchmarkLabels...) 
+ + for i := 0; i < b.N; i++ { + l = l.Copy() + } +} + +func TestMarshaling(t *testing.T) { + lbls := FromStrings("aaa", "111", "bbb", "2222", "ccc", "33333") + expectedJSON := "{\"aaa\":\"111\",\"bbb\":\"2222\",\"ccc\":\"33333\"}" + b, err := json.Marshal(lbls) + require.NoError(t, err) + require.JSONEq(t, expectedJSON, string(b)) + + var gotJ Labels + err = json.Unmarshal(b, &gotJ) + require.NoError(t, err) + require.Equal(t, lbls, gotJ) + + expectedYAML := "aaa: \"111\"\nbbb: \"2222\"\nccc: \"33333\"\n" + b, err = yaml.Marshal(lbls) + require.NoError(t, err) + require.YAMLEq(t, expectedYAML, string(b)) + + var gotY Labels + err = yaml.Unmarshal(b, &gotY) + require.NoError(t, err) + require.Equal(t, lbls, gotY) + + // Now in a struct with a tag + type foo struct { + ALabels Labels `json:"a_labels,omitempty" yaml:"a_labels,omitempty"` + } + + f := foo{ALabels: lbls} + b, err = json.Marshal(f) + require.NoError(t, err) + expectedJSONFromStruct := "{\"a_labels\":" + expectedJSON + "}" + require.JSONEq(t, expectedJSONFromStruct, string(b)) + + var gotFJ foo + err = json.Unmarshal(b, &gotFJ) + require.NoError(t, err) + require.Equal(t, f, gotFJ) + + b, err = yaml.Marshal(f) + require.NoError(t, err) + expectedYAMLFromStruct := "a_labels:\n aaa: \"111\"\n bbb: \"2222\"\n ccc: \"33333\"\n" + require.YAMLEq(t, expectedYAMLFromStruct, string(b)) + + var gotFY foo + err = yaml.Unmarshal(b, &gotFY) + require.NoError(t, err) + require.Equal(t, f, gotFY) +} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 91563bf2c2..eacd5e926a 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -20,10 +20,7 @@ import ( "context" "errors" "fmt" - "log/slog" "math" - "sort" - "strings" "time" "github.com/prometheus/otlptranslator" @@ -32,7 +29,8 @@ import ( 
"go.uber.org/multierr" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/labels" "github.com/prometheus/prometheus/util/annotations" ) @@ -47,7 +45,6 @@ type Settings struct { DisableTargetInfo bool ExportCreatedMetric bool AddMetricSuffixes bool - SendMetadata bool AllowUTF8 bool PromoteResourceAttributes *PromoteResourceAttributes KeepIdentifyingResourceAttributes bool @@ -57,31 +54,25 @@ type Settings struct { PromoteScopeMetadata bool // LookbackDelta is the PromQL engine lookback delta. LookbackDelta time.Duration - - // Mimir specifics. - EnableCreatedTimestampZeroIngestion bool - EnableStartTimeQuietZero bool - ValidIntervalCreatedTimestampZeroIngestion time.Duration -} - -type StartTsAndTs struct { - Labels []prompb.Label - StartTs int64 - Ts int64 } // PrometheusConverter converts from OTel write format to Prometheus remote write format. 
type PrometheusConverter struct { - unique map[uint64]*prompb.TimeSeries - conflicts map[uint64][]*prompb.TimeSeries - everyN everyNTimes - metadata []prompb.MetricMetadata + unique map[uint64]labels.Labels + conflicts map[uint64][]labels.Labels + everyN everyNTimes + scratchBuilder labels.ScratchBuilder + builder *labels.Builder + appender CombinedAppender } -func NewPrometheusConverter() *PrometheusConverter { +func NewPrometheusConverter(appender CombinedAppender) *PrometheusConverter { return &PrometheusConverter{ - unique: map[uint64]*prompb.TimeSeries{}, - conflicts: map[uint64][]*prompb.TimeSeries{}, + unique: map[uint64]labels.Labels{}, + conflicts: map[uint64][]labels.Labels{}, + scratchBuilder: labels.NewScratchBuilder(0), + builder: labels.NewBuilder(labels.EmptyLabels()), + appender: appender, } } @@ -128,12 +119,13 @@ func newScopeFromScopeMetrics(scopeMetrics pmetric.ScopeMetrics) scope { } // FromMetrics converts pmetric.Metrics to Prometheus remote write format. -func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings, logger *slog.Logger) (annots annotations.Annotations, errs error) { +func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings) (annots annotations.Annotations, errs error) { namer := otlptranslator.MetricNamer{ Namespace: settings.Namespace, WithMetricSuffixes: settings.AddMetricSuffixes, UTF8Allowed: settings.AllowUTF8, } + unitNamer := otlptranslator.UnitNamer{} c.everyN = everyNTimes{n: 128} resourceMetricsSlice := md.ResourceMetrics() @@ -144,7 +136,6 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric numMetrics += scopeMetricsSlice.At(j).Metrics().Len() } } - c.metadata = make([]prompb.MetricMetadata, 0, numMetrics) for i := 0; i < resourceMetricsSlice.Len(); i++ { resourceMetrics := resourceMetricsSlice.At(i) @@ -185,12 +176,11 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md 
pmetric.Metric } promName := namer.Build(TranslatorMetricFromOtelMetric(metric)) - c.metadata = append(c.metadata, prompb.MetricMetadata{ - Type: otelMetricTypeToPromMetricType(metric), - MetricFamilyName: promName, - Help: metric.Description(), - Unit: metric.Unit(), - }) + meta := metadata.Metadata{ + Type: otelMetricTypeToPromMetricType(metric), + Unit: unitNamer.Build(metric.Unit()), + Help: metric.Description(), + } // handle individual metrics based on type //exhaustive:enforce @@ -201,7 +191,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) break } - if err := c.addGaugeNumberDataPoints(ctx, dataPoints, resource, settings, promName, scope); err != nil { + if err := c.addGaugeNumberDataPoints(ctx, dataPoints, resource, settings, promName, scope, meta); err != nil { errs = multierr.Append(errs, err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return @@ -213,7 +203,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric errs = multierr.Append(errs, fmt.Errorf("empty data points. 
%s is dropped", metric.Name())) break } - if err := c.addSumNumberDataPoints(ctx, dataPoints, resource, metric, settings, promName, scope, logger); err != nil { + if err := c.addSumNumberDataPoints(ctx, dataPoints, resource, metric, settings, promName, scope, meta); err != nil { errs = multierr.Append(errs, err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return @@ -227,7 +217,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric } if settings.ConvertHistogramsToNHCB { ws, err := c.addCustomBucketsHistogramDataPoints( - ctx, dataPoints, resource, settings, promName, temporality, scope, + ctx, dataPoints, resource, settings, promName, temporality, scope, meta, ) annots.Merge(ws) if err != nil { @@ -237,7 +227,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric } } } else { - if err := c.addHistogramDataPoints(ctx, dataPoints, resource, settings, promName, scope, logger); err != nil { + if err := c.addHistogramDataPoints(ctx, dataPoints, resource, settings, promName, scope, meta); err != nil { errs = multierr.Append(errs, err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return @@ -258,6 +248,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric promName, temporality, scope, + meta, ) annots.Merge(ws) if err != nil { @@ -272,7 +263,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric errs = multierr.Append(errs, fmt.Errorf("empty data points. 
%s is dropped", metric.Name())) break } - if err := c.addSummaryDataPoints(ctx, dataPoints, resource, settings, promName, scope, logger); err != nil { + if err := c.addSummaryDataPoints(ctx, dataPoints, resource, settings, promName, scope, meta); err != nil { errs = multierr.Append(errs, err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return @@ -286,69 +277,13 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric if earliestTimestamp < pcommon.Timestamp(math.MaxUint64) { // We have at least one metric sample for this resource. // Generate a corresponding target_info series. - addResourceTargetInfo(resource, settings, earliestTimestamp.AsTime(), latestTimestamp.AsTime(), c) - } - } - - return annots, errs -} - -func isSameMetric(ts *prompb.TimeSeries, lbls []prompb.Label) bool { - if len(ts.Labels) != len(lbls) { - return false - } - for i, l := range ts.Labels { - if l.Name != ts.Labels[i].Name || l.Value != ts.Labels[i].Value { - return false - } - } - return true -} - -// addExemplars adds exemplars for the dataPoint. For each exemplar, if it can find a bucket bound corresponding to its value, -// the exemplar is added to the bucket bound's time series, provided that the time series' has samples. 
-func (c *PrometheusConverter) addExemplars(ctx context.Context, dataPoint pmetric.HistogramDataPoint, bucketBounds []bucketBoundsData) error { - if len(bucketBounds) == 0 { - return nil - } - - exemplars, err := getPromExemplars(ctx, &c.everyN, dataPoint) - if err != nil { - return err - } - if len(exemplars) == 0 { - return nil - } - - sort.Sort(byBucketBoundsData(bucketBounds)) - for _, exemplar := range exemplars { - for _, bound := range bucketBounds { - if err := c.everyN.checkContext(ctx); err != nil { - return err - } - if len(bound.ts.Samples) > 0 && exemplar.Value <= bound.bound { - bound.ts.Exemplars = append(bound.ts.Exemplars, exemplar) - break + if err := c.addResourceTargetInfo(resource, settings, earliestTimestamp.AsTime(), latestTimestamp.AsTime()); err != nil { + errs = multierr.Append(errs, err) } } } - return nil -} - -// addSample finds a TimeSeries that corresponds to lbls, and adds sample to it. -// If there is no corresponding TimeSeries already, it's created. -// The corresponding TimeSeries is returned. -// If either lbls is nil/empty or sample is nil, nothing is done. -func (c *PrometheusConverter) addSample(sample *prompb.Sample, lbls []prompb.Label) *prompb.TimeSeries { - if sample == nil || len(lbls) == 0 { - // This shouldn't happen - return nil - } - - ts, _ := c.getOrCreateTimeSeries(lbls) - ts.Samples = append(ts.Samples, *sample) - return ts + return } func NewPromoteResourceAttributes(otlpCfg config.OTLPConfig) *PromoteResourceAttributes { @@ -366,45 +301,32 @@ func NewPromoteResourceAttributes(otlpCfg config.OTLPConfig) *PromoteResourceAtt } } -// promotedAttributes returns labels for promoted resourceAttributes. -func (s *PromoteResourceAttributes) promotedAttributes(resourceAttributes pcommon.Map) []prompb.Label { +// addPromotedAttributes adds labels for promoted resourceAttributes to the builder. 
+func (s *PromoteResourceAttributes) addPromotedAttributes(builder *labels.Builder, resourceAttributes pcommon.Map, allowUTF8 bool) { if s == nil { - return nil + return } - var promotedAttrs []prompb.Label + labelNamer := otlptranslator.LabelNamer{UTF8Allowed: allowUTF8} if s.promoteAll { - promotedAttrs = make([]prompb.Label, 0, resourceAttributes.Len()) resourceAttributes.Range(func(name string, value pcommon.Value) bool { if _, exists := s.attrs[name]; !exists { - promotedAttrs = append(promotedAttrs, prompb.Label{Name: name, Value: value.AsString()}) - } - return true - }) - } else { - promotedAttrs = make([]prompb.Label, 0, len(s.attrs)) - resourceAttributes.Range(func(name string, value pcommon.Value) bool { - if _, exists := s.attrs[name]; exists { - promotedAttrs = append(promotedAttrs, prompb.Label{Name: name, Value: value.AsString()}) + normalized := labelNamer.Build(name) + if builder.Get(normalized) == "" { + builder.Set(normalized, value.AsString()) + } } return true }) + return } - sort.Stable(ByLabelName(promotedAttrs)) - return promotedAttrs -} - -type labelsStringer []prompb.Label - -func (ls labelsStringer) String() string { - var seriesBuilder strings.Builder - seriesBuilder.WriteString("{") - for i, l := range ls { - if i > 0 { - seriesBuilder.WriteString(",") + resourceAttributes.Range(func(name string, value pcommon.Value) bool { + if _, exists := s.attrs[name]; exists { + normalized := labelNamer.Build(name) + if builder.Get(normalized) == "" { + builder.Set(normalized, value.AsString()) + } } - seriesBuilder.WriteString(fmt.Sprintf("%s=%s", l.Name, l.Value)) - } - seriesBuilder.WriteString("}") - return seriesBuilder.String() + return true + }) } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go index cff3ba2ac4..897aff6000 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go +++ 
b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go @@ -19,20 +19,20 @@ package prometheusremotewrite import ( "context" "fmt" - "sort" "testing" "time" - "github.com/prometheus/common/promslog" + "github.com/prometheus/common/model" "github.com/prometheus/otlptranslator" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/prompb" - "github.com/prometheus/prometheus/util/testutil" + "github.com/prometheus/prometheus/model/histogram" + modelLabels "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/labels" ) func TestFromMetrics(t *testing.T) { @@ -78,9 +78,9 @@ func TestFromMetrics(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - converter := NewPrometheusConverter() + mockAppender := &mockCombinedAppender{} + converter := NewPrometheusConverter(mockAppender) payload, wantPromMetrics := createExportRequest(5, 128, 128, 2, 0, tc.settings, tc.temporality) - var expMetadata []prompb.MetricMetadata seenFamilyNames := map[string]struct{}{} for _, wantMetric := range wantPromMetrics { if _, exists := seenFamilyNames[wantMetric.familyName]; exists { @@ -91,33 +91,24 @@ func TestFromMetrics(t *testing.T) { } seenFamilyNames[wantMetric.familyName] = struct{}{} - expMetadata = append(expMetadata, prompb.MetricMetadata{ - Type: wantMetric.metricType, - MetricFamilyName: wantMetric.familyName, - Help: wantMetric.description, - Unit: wantMetric.unit, - }) } + // TODO check returned counters annots, err := converter.FromMetrics( context.Background(), payload.Metrics(), tc.settings, - promslog.NewNopLogger(), ) require.NoError(t, err) require.Empty(t, annots) - testutil.RequireEqual(t, 
expMetadata, converter.Metadata()) - - ts := converter.TimeSeries() + ts := mockAppender.samples require.Len(t, ts, 1536+1) // +1 for the target_info. tgtInfoCount := 0 for _, s := range ts { - b := labels.NewScratchBuilder(2) - lbls := s.ToLabels(&b, nil) - if lbls.Get(labels.MetricName) == "target_info" { + lbls := s.ls + if lbls.Get(modelLabels.MetricName) == "target_info" { tgtInfoCount++ require.Equal(t, "test-namespace/test-service", lbls.Get("job")) require.Equal(t, "id1234", lbls.Get("instance")) @@ -158,53 +149,48 @@ func TestFromMetrics(t *testing.T) { generateAttributes(h.Attributes(), "series", 1) - converter := NewPrometheusConverter() + mockAppender := &mockCombinedAppender{} + converter := NewPrometheusConverter(mockAppender) annots, err := converter.FromMetrics( context.Background(), request.Metrics(), Settings{ConvertHistogramsToNHCB: convertHistogramsToNHCB}, - promslog.NewNopLogger(), ) require.NoError(t, err) require.Empty(t, annots) - series := converter.TimeSeries() - if convertHistogramsToNHCB { - require.Len(t, series[0].Histograms, 1) - require.Empty(t, series[0].Samples) + require.Len(t, mockAppender.histograms, 1) + require.Empty(t, mockAppender.samples) } else { - require.Len(t, series, 3) - for i := range series { - require.Len(t, series[i].Samples, 1) - require.Nil(t, series[i].Histograms) - } + require.Empty(t, mockAppender.histograms) + require.Len(t, mockAppender.samples, 3) } }) } t.Run("context cancellation", func(t *testing.T) { settings := Settings{} - converter := NewPrometheusConverter() + converter := NewPrometheusConverter(&mockCombinedAppender{}) ctx, cancel := context.WithCancel(context.Background()) // Verify that converter.FromMetrics respects cancellation. 
cancel() payload, _ := createExportRequest(5, 128, 128, 2, 0, settings, pmetric.AggregationTemporalityCumulative) - annots, err := converter.FromMetrics(ctx, payload.Metrics(), settings, promslog.NewNopLogger()) + annots, err := converter.FromMetrics(ctx, payload.Metrics(), settings) require.ErrorIs(t, err, context.Canceled) require.Empty(t, annots) }) t.Run("context timeout", func(t *testing.T) { settings := Settings{} - converter := NewPrometheusConverter() + converter := NewPrometheusConverter(&mockCombinedAppender{}) // Verify that converter.FromMetrics respects timeout. ctx, cancel := context.WithTimeout(context.Background(), 0) t.Cleanup(cancel) payload, _ := createExportRequest(5, 128, 128, 2, 0, settings, pmetric.AggregationTemporalityCumulative) - annots, err := converter.FromMetrics(ctx, payload.Metrics(), settings, promslog.NewNopLogger()) + annots, err := converter.FromMetrics(ctx, payload.Metrics(), settings) require.ErrorIs(t, err, context.DeadlineExceeded) require.Empty(t, annots) }) @@ -231,8 +217,8 @@ func TestFromMetrics(t *testing.T) { generateAttributes(h.Attributes(), "series", 10) } - converter := NewPrometheusConverter() - annots, err := converter.FromMetrics(context.Background(), request.Metrics(), Settings{}, promslog.NewNopLogger()) + converter := NewPrometheusConverter(&mockCombinedAppender{}) + annots, err := converter.FromMetrics(context.Background(), request.Metrics(), Settings{}) require.NoError(t, err) require.NotEmpty(t, annots) ws, infos := annots.AsStrings("", 0, 0) @@ -264,12 +250,11 @@ func TestFromMetrics(t *testing.T) { generateAttributes(h.Attributes(), "series", 10) } - converter := NewPrometheusConverter() + converter := NewPrometheusConverter(&mockCombinedAppender{}) annots, err := converter.FromMetrics( context.Background(), request.Metrics(), Settings{ConvertHistogramsToNHCB: true}, - promslog.NewNopLogger(), ) require.NoError(t, err) require.NotEmpty(t, annots) @@ -296,7 +281,6 @@ func TestFromMetrics(t *testing.T) { 
metrics := rm.ScopeMetrics().AppendEmpty().Metrics() ts := pcommon.NewTimestampFromTime(time.Now()) - var expMetadata []prompb.MetricMetadata for i := range 3 { m := metrics.AppendEmpty() m.SetEmptyGauge() @@ -312,67 +296,61 @@ func TestFromMetrics(t *testing.T) { generateAttributes(point.Attributes(), "series", 2) curTs = curTs.Add(defaultLookbackDelta / 4) } + } - namer := otlptranslator.MetricNamer{} - expMetadata = append(expMetadata, prompb.MetricMetadata{ - Type: otelMetricTypeToPromMetricType(m), - MetricFamilyName: namer.Build(TranslatorMetricFromOtelMetric(m)), - Help: m.Description(), - Unit: m.Unit(), - }) + mockAppender := &mockCombinedAppender{} + converter := NewPrometheusConverter(mockAppender) + settings := Settings{ + LookbackDelta: defaultLookbackDelta, } - converter := NewPrometheusConverter() - annots, err := converter.FromMetrics( - context.Background(), - request.Metrics(), - Settings{ - LookbackDelta: defaultLookbackDelta, - }, - promslog.NewNopLogger(), - ) + annots, err := converter.FromMetrics(context.Background(), request.Metrics(), settings) require.NoError(t, err) require.Empty(t, annots) - testutil.RequireEqual(t, expMetadata, converter.Metadata()) - - timeSeries := converter.TimeSeries() - tgtInfoCount := 0 - for _, s := range timeSeries { - b := labels.NewScratchBuilder(2) - lbls := s.ToLabels(&b, nil) - if lbls.Get(labels.MetricName) != "target_info" { - continue - } - - tgtInfoCount++ - require.Equal(t, "test-namespace/test-service", lbls.Get("job")) - require.Equal(t, "id1234", lbls.Get("instance")) - require.False(t, lbls.Has("service_name")) - require.False(t, lbls.Has("service_namespace")) - require.False(t, lbls.Has("service_instance_id")) - // There should be a target_info sample at the earliest metric timestamp, then two spaced lookback delta/2 apart, - // then one at the latest metric timestamp. 
- testutil.RequireEqual(t, []prompb.Sample{ - { - Value: 1, - Timestamp: ts.AsTime().UnixMilli(), - }, - { - Value: 1, - Timestamp: ts.AsTime().Add(defaultLookbackDelta / 2).UnixMilli(), - }, - { - Value: 1, - Timestamp: ts.AsTime().Add(defaultLookbackDelta).UnixMilli(), - }, - { - Value: 1, - Timestamp: ts.AsTime().Add(defaultLookbackDelta + defaultLookbackDelta/4).UnixMilli(), - }, - }, s.Samples) + require.Len(t, mockAppender.samples, 22) + // There should be a target_info sample at the earliest metric timestamp, then two spaced lookback delta/2 apart, + // then one at the latest metric timestamp. + targetInfoLabels := labels.FromStrings( + "__name__", "target_info", + "instance", "id1234", + "job", "test-namespace/test-service", + "resource_name_1", "value-1", + "resource_name_2", "value-2", + "resource_name_3", "value-3", + "resource_name_4", "value-4", + "resource_name_5", "value-5", + ) + targetInfoMeta := metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: "Target metadata", } - require.Equal(t, 1, tgtInfoCount) + requireEqual(t, []combinedSample{ + { + v: 1, + t: ts.AsTime().UnixMilli(), + ls: targetInfoLabels, + meta: targetInfoMeta, + }, + { + v: 1, + t: ts.AsTime().Add(defaultLookbackDelta / 2).UnixMilli(), + ls: targetInfoLabels, + meta: targetInfoMeta, + }, + { + v: 1, + t: ts.AsTime().Add(defaultLookbackDelta).UnixMilli(), + ls: targetInfoLabels, + meta: targetInfoMeta, + }, + { + v: 1, + t: ts.AsTime().Add(defaultLookbackDelta + defaultLookbackDelta/4).UnixMilli(), + ls: targetInfoLabels, + meta: targetInfoMeta, + }, + }, mockAppender.samples[len(mockAppender.samples)-4:]) }) } @@ -380,12 +358,13 @@ func TestTemporality(t *testing.T) { ts := time.Unix(100, 0) tests := []struct { - name string - allowDelta bool - convertToNHCB bool - inputSeries []pmetric.Metric - expectedSeries []prompb.TimeSeries - expectedError string + name string + allowDelta bool + convertToNHCB bool + inputSeries []pmetric.Metric + expectedSamples []combinedSample + 
expectedHistograms []combinedHistogram + expectedError string }{ { name: "all cumulative when delta not allowed", @@ -394,9 +373,9 @@ func TestTemporality(t *testing.T) { createOtelSum("test_metric_1", pmetric.AggregationTemporalityCumulative, ts), createOtelSum("test_metric_2", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromFloatSeries("test_metric_1", ts), - createPromFloatSeries("test_metric_2", ts), + expectedSamples: []combinedSample{ + createPromFloatSeries("test_metric_1", ts, model.MetricTypeCounter), + createPromFloatSeries("test_metric_2", ts, model.MetricTypeCounter), }, }, { @@ -406,9 +385,9 @@ func TestTemporality(t *testing.T) { createOtelSum("test_metric_1", pmetric.AggregationTemporalityDelta, ts), createOtelSum("test_metric_2", pmetric.AggregationTemporalityDelta, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromFloatSeries("test_metric_1", ts), - createPromFloatSeries("test_metric_2", ts), + expectedSamples: []combinedSample{ + createPromFloatSeries("test_metric_1", ts, model.MetricTypeUnknown), + createPromFloatSeries("test_metric_2", ts, model.MetricTypeUnknown), }, }, { @@ -418,9 +397,9 @@ func TestTemporality(t *testing.T) { createOtelSum("test_metric_1", pmetric.AggregationTemporalityDelta, ts), createOtelSum("test_metric_2", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromFloatSeries("test_metric_1", ts), - createPromFloatSeries("test_metric_2", ts), + expectedSamples: []combinedSample{ + createPromFloatSeries("test_metric_1", ts, model.MetricTypeUnknown), + createPromFloatSeries("test_metric_2", ts, model.MetricTypeCounter), }, }, { @@ -430,8 +409,8 @@ func TestTemporality(t *testing.T) { createOtelSum("test_metric_1", pmetric.AggregationTemporalityCumulative, ts), createOtelSum("test_metric_2", pmetric.AggregationTemporalityDelta, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromFloatSeries("test_metric_1", ts), + 
expectedSamples: []combinedSample{ + createPromFloatSeries("test_metric_1", ts, model.MetricTypeCounter), }, expectedError: `invalid temporality and type combination for metric "test_metric_2"`, }, @@ -442,8 +421,8 @@ func TestTemporality(t *testing.T) { createOtelSum("test_metric_1", pmetric.AggregationTemporalityCumulative, ts), createOtelSum("test_metric_2", pmetric.AggregationTemporalityUnspecified, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromFloatSeries("test_metric_1", ts), + expectedSamples: []combinedSample{ + createPromFloatSeries("test_metric_1", ts, model.MetricTypeCounter), }, expectedError: `invalid temporality and type combination for metric "test_metric_2"`, }, @@ -453,8 +432,8 @@ func TestTemporality(t *testing.T) { inputSeries: []pmetric.Metric{ createOtelExponentialHistogram("test_histogram", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromNativeHistogramSeries("test_histogram", prompb.Histogram_UNKNOWN, ts), + expectedHistograms: []combinedHistogram{ + createPromNativeHistogramSeries("test_histogram", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram), }, }, { @@ -464,9 +443,9 @@ func TestTemporality(t *testing.T) { createOtelExponentialHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts), createOtelExponentialHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromNativeHistogramSeries("test_histogram_1", prompb.Histogram_GAUGE, ts), - createPromNativeHistogramSeries("test_histogram_2", prompb.Histogram_UNKNOWN, ts), + expectedHistograms: []combinedHistogram{ + createPromNativeHistogramSeries("test_histogram_1", histogram.GaugeType, ts, model.MetricTypeUnknown), + createPromNativeHistogramSeries("test_histogram_2", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram), }, }, { @@ -476,8 +455,8 @@ func TestTemporality(t *testing.T) { 
createOtelExponentialHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts), createOtelExponentialHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromNativeHistogramSeries("test_histogram_2", prompb.Histogram_UNKNOWN, ts), + expectedHistograms: []combinedHistogram{ + createPromNativeHistogramSeries("test_histogram_2", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram), }, expectedError: `invalid temporality and type combination for metric "test_histogram_1"`, }, @@ -488,8 +467,8 @@ func TestTemporality(t *testing.T) { inputSeries: []pmetric.Metric{ createOtelExplicitHistogram("test_histogram", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromNHCBSeries("test_histogram", prompb.Histogram_UNKNOWN, ts), + expectedHistograms: []combinedHistogram{ + createPromNHCBSeries("test_histogram", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram), }, }, { @@ -500,9 +479,9 @@ func TestTemporality(t *testing.T) { createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts), createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromNHCBSeries("test_histogram_1", prompb.Histogram_GAUGE, ts), - createPromNHCBSeries("test_histogram_2", prompb.Histogram_UNKNOWN, ts), + expectedHistograms: []combinedHistogram{ + createPromNHCBSeries("test_histogram_1", histogram.GaugeType, ts, model.MetricTypeUnknown), + createPromNHCBSeries("test_histogram_2", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram), }, }, { @@ -513,8 +492,8 @@ func TestTemporality(t *testing.T) { createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts), createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: []prompb.TimeSeries{ - 
createPromNHCBSeries("test_histogram_2", prompb.Histogram_UNKNOWN, ts), + expectedHistograms: []combinedHistogram{ + createPromNHCBSeries("test_histogram_2", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram), }, expectedError: `invalid temporality and type combination for metric "test_histogram_1"`, }, @@ -526,8 +505,8 @@ func TestTemporality(t *testing.T) { createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts), createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: createPromClassicHistogramSeries("test_histogram_2", ts), - expectedError: `invalid temporality and type combination for metric "test_histogram_1"`, + expectedSamples: createPromClassicHistogramSeries("test_histogram_2", ts, model.MetricTypeHistogram), + expectedError: `invalid temporality and type combination for metric "test_histogram_1"`, }, { name: "delta histogram with buckets and convertToNHCB=false when allowed", @@ -537,9 +516,9 @@ func TestTemporality(t *testing.T) { createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts), createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: append( - createPromClassicHistogramSeries("test_histogram_1", ts), - createPromClassicHistogramSeries("test_histogram_2", ts)..., + expectedSamples: append( + createPromClassicHistogramSeries("test_histogram_1", ts, model.MetricTypeUnknown), + createPromClassicHistogramSeries("test_histogram_2", ts, model.MetricTypeHistogram)..., ), }, { @@ -547,15 +526,15 @@ func TestTemporality(t *testing.T) { inputSeries: []pmetric.Metric{ createOtelSummary("test_summary_1", ts), }, - expectedSeries: createPromSummarySeries("test_summary_1", ts), + expectedSamples: createPromSummarySeries("test_summary_1", ts), }, { name: "gauge does not have temporality", inputSeries: []pmetric.Metric{ createOtelGauge("test_gauge_1", ts), }, - 
expectedSeries: []prompb.TimeSeries{ - createPromFloatSeries("test_gauge_1", ts), + expectedSamples: []combinedSample{ + createPromFloatSeries("test_gauge_1", ts, model.MetricTypeGauge), }, }, { @@ -563,8 +542,7 @@ func TestTemporality(t *testing.T) { inputSeries: []pmetric.Metric{ createOtelEmptyType("test_empty"), }, - expectedSeries: []prompb.TimeSeries{}, - expectedError: `could not get aggregation temporality for test_empty as it has unsupported metric type Empty`, + expectedError: `could not get aggregation temporality for test_empty as it has unsupported metric type Empty`, }, } @@ -578,13 +556,14 @@ func TestTemporality(t *testing.T) { s.CopyTo(sm.Metrics().AppendEmpty()) } - c := NewPrometheusConverter() + mockAppender := &mockCombinedAppender{} + c := NewPrometheusConverter(mockAppender) settings := Settings{ AllowDeltaTemporality: tc.allowDelta, ConvertHistogramsToNHCB: tc.convertToNHCB, } - _, err := c.FromMetrics(context.Background(), metrics, settings, promslog.NewNopLogger()) + _, err := c.FromMetrics(context.Background(), metrics, settings) if tc.expectedError != "" { require.EqualError(t, err, tc.expectedError) @@ -592,10 +571,9 @@ func TestTemporality(t *testing.T) { require.NoError(t, err) } - series := c.TimeSeries() - // Sort series to make the test deterministic. 
- testutil.RequireEqual(t, sortTimeSeries(tc.expectedSeries), sortTimeSeries(series)) + requireEqual(t, tc.expectedSamples, mockAppender.samples) + requireEqual(t, tc.expectedHistograms, mockAppender.histograms) }) } } @@ -606,6 +584,7 @@ func createOtelSum(name string, temporality pmetric.AggregationTemporality, ts t m.SetName(name) sum := m.SetEmptySum() sum.SetAggregationTemporality(temporality) + sum.SetIsMonotonic(true) dp := sum.DataPoints().AppendEmpty() dp.SetDoubleValue(5) dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) @@ -613,16 +592,14 @@ func createOtelSum(name string, temporality pmetric.AggregationTemporality, ts t return m } -func createPromFloatSeries(name string, ts time.Time) prompb.TimeSeries { - return prompb.TimeSeries{ - Labels: []prompb.Label{ - {Name: "__name__", Value: name}, - {Name: "test_label", Value: "test_value"}, +func createPromFloatSeries(name string, ts time.Time, typ model.MetricType) combinedSample { + return combinedSample{ + ls: labels.FromStrings("__name__", name, "test_label", "test_value"), + t: ts.UnixMilli(), + v: 5, + meta: metadata.Metadata{ + Type: typ, }, - Samples: []prompb.Sample{{ - Value: 5, - Timestamp: ts.UnixMilli(), - }}, } } @@ -652,22 +629,20 @@ func createOtelExponentialHistogram(name string, temporality pmetric.Aggregation return m } -func createPromNativeHistogramSeries(name string, hint prompb.Histogram_ResetHint, ts time.Time) prompb.TimeSeries { - return prompb.TimeSeries{ - Labels: []prompb.Label{ - {Name: "__name__", Value: name}, - {Name: "test_label", Value: "test_value"}, +func createPromNativeHistogramSeries(name string, hint histogram.CounterResetHint, ts time.Time, typ model.MetricType) combinedHistogram { + return combinedHistogram{ + ls: labels.FromStrings("__name__", name, "test_label", "test_value"), + t: ts.UnixMilli(), + meta: metadata.Metadata{ + Type: typ, }, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 1}, - Sum: 5, - Schema: 0, - 
ZeroThreshold: 1e-128, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, - Timestamp: ts.UnixMilli(), - ResetHint: hint, - }, + h: &histogram.Histogram{ + Count: 1, + Sum: 5, + Schema: 0, + ZeroThreshold: 1e-128, + ZeroCount: 0, + CounterResetHint: hint, }, } } @@ -688,72 +663,71 @@ func createOtelExplicitHistogram(name string, temporality pmetric.AggregationTem return m } -func createPromNHCBSeries(name string, hint prompb.Histogram_ResetHint, ts time.Time) prompb.TimeSeries { - return prompb.TimeSeries{ - Labels: []prompb.Label{ - {Name: "__name__", Value: name}, - {Name: "test_label", Value: "test_value"}, +func createPromNHCBSeries(name string, hint histogram.CounterResetHint, ts time.Time, typ model.MetricType) combinedHistogram { + return combinedHistogram{ + ls: labels.FromStrings("__name__", name, "test_label", "test_value"), + meta: metadata.Metadata{ + Type: typ, }, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 20}, - Sum: 30, - Schema: -53, - ZeroThreshold: 0, - ZeroCount: nil, - PositiveSpans: []prompb.BucketSpan{ - { - Length: 3, - }, + t: ts.UnixMilli(), + h: &histogram.Histogram{ + Count: 20, + Sum: 30, + Schema: -53, + ZeroThreshold: 0, + PositiveSpans: []histogram.Span{ + { + Length: 3, }, - PositiveDeltas: []int64{10, 0, -10}, - CustomValues: []float64{1, 2}, - Timestamp: ts.UnixMilli(), - ResetHint: hint, }, + PositiveBuckets: []int64{10, 0, -10}, + CustomValues: []float64{1, 2}, + CounterResetHint: hint, }, } } -func createPromClassicHistogramSeries(name string, ts time.Time) []prompb.TimeSeries { - return []prompb.TimeSeries{ +func createPromClassicHistogramSeries(name string, ts time.Time, typ model.MetricType) []combinedSample { + return []combinedSample{ { - Labels: []prompb.Label{ - {Name: "__name__", Value: name + "_bucket"}, - {Name: "le", Value: "1"}, - {Name: "test_label", Value: "test_value"}, + ls: labels.FromStrings("__name__", name+"_sum", "test_label", "test_value"), + t: 
ts.UnixMilli(), + v: 30, + meta: metadata.Metadata{ + Type: typ, }, - Samples: []prompb.Sample{{Value: 10, Timestamp: ts.UnixMilli()}}, }, { - Labels: []prompb.Label{ - {Name: "__name__", Value: name + "_bucket"}, - {Name: "le", Value: "2"}, - {Name: "test_label", Value: "test_value"}, + ls: labels.FromStrings("__name__", name+"_count", "test_label", "test_value"), + t: ts.UnixMilli(), + v: 20, + meta: metadata.Metadata{ + Type: typ, }, - Samples: []prompb.Sample{{Value: 20, Timestamp: ts.UnixMilli()}}, }, { - Labels: []prompb.Label{ - {Name: "__name__", Value: name + "_bucket"}, - {Name: "le", Value: "+Inf"}, - {Name: "test_label", Value: "test_value"}, + ls: labels.FromStrings("__name__", name+"_bucket", "le", "1", "test_label", "test_value"), + t: ts.UnixMilli(), + v: 10, + meta: metadata.Metadata{ + Type: typ, }, - Samples: []prompb.Sample{{Value: 20, Timestamp: ts.UnixMilli()}}, }, { - Labels: []prompb.Label{ - {Name: "__name__", Value: name + "_count"}, - {Name: "test_label", Value: "test_value"}, + ls: labels.FromStrings("__name__", name+"_bucket", "le", "2", "test_label", "test_value"), + t: ts.UnixMilli(), + v: 20, + meta: metadata.Metadata{ + Type: typ, }, - Samples: []prompb.Sample{{Value: 20, Timestamp: ts.UnixMilli()}}, }, { - Labels: []prompb.Label{ - {Name: "__name__", Value: name + "_sum"}, - {Name: "test_label", Value: "test_value"}, + ls: labels.FromStrings("__name__", name+"_bucket", "le", "+Inf", "test_label", "test_value"), + t: ts.UnixMilli(), + v: 20, + meta: metadata.Metadata{ + Type: typ, }, - Samples: []prompb.Sample{{Value: 30, Timestamp: ts.UnixMilli()}}, }, } } @@ -774,38 +748,31 @@ func createOtelSummary(name string, ts time.Time) pmetric.Metric { return m } -func createPromSummarySeries(name string, ts time.Time) []prompb.TimeSeries { - return []prompb.TimeSeries{ +func createPromSummarySeries(name string, ts time.Time) []combinedSample { + return []combinedSample{ { - Labels: []prompb.Label{ - {Name: "__name__", Value: name + 
"_sum"}, - {Name: "test_label", Value: "test_value"}, + ls: labels.FromStrings("__name__", name+"_sum", "test_label", "test_value"), + t: ts.UnixMilli(), + v: 18, + meta: metadata.Metadata{ + Type: model.MetricTypeSummary, }, - Samples: []prompb.Sample{{ - Value: 18, - Timestamp: ts.UnixMilli(), - }}, }, { - Labels: []prompb.Label{ - {Name: "__name__", Value: name + "_count"}, - {Name: "test_label", Value: "test_value"}, + ls: labels.FromStrings("__name__", name+"_count", "test_label", "test_value"), + t: ts.UnixMilli(), + v: 9, + meta: metadata.Metadata{ + Type: model.MetricTypeSummary, }, - Samples: []prompb.Sample{{ - Value: 9, - Timestamp: ts.UnixMilli(), - }}, }, { - Labels: []prompb.Label{ - {Name: "__name__", Value: name}, - {Name: "quantile", Value: "0.5"}, - {Name: "test_label", Value: "test_value"}, + ls: labels.FromStrings("__name__", name, "quantile", "0.5", "test_label", "test_value"), + t: ts.UnixMilli(), + v: 2, + meta: metadata.Metadata{ + Type: model.MetricTypeSummary, }, - Samples: []prompb.Sample{{ - Value: 2, - Timestamp: ts.UnixMilli(), - }}, }, } } @@ -817,20 +784,6 @@ func createOtelEmptyType(name string) pmetric.Metric { return m } -func sortTimeSeries(series []prompb.TimeSeries) []prompb.TimeSeries { - for i := range series { - sort.Slice(series[i].Labels, func(j, k int) bool { - return series[i].Labels[j].Name < series[i].Labels[k].Name - }) - } - - sort.Slice(series, func(i, j int) bool { - return fmt.Sprint(series[i].Labels) < fmt.Sprint(series[j].Labels) - }) - - return series -} - func TestTranslatorMetricFromOtelMetric(t *testing.T) { tests := []struct { name string @@ -971,7 +924,7 @@ func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) { b.Run(fmt.Sprintf("histogram count: %v", histogramCount), func(b *testing.B) { nonHistogramCounts := []int{0, 1000} - if resourceAttributeCount == 0 && histogramCount == 0 { + if histogramCount == 0 { // Don't bother running a scenario where we'll generate no series. 
nonHistogramCounts = []int{1000} } @@ -995,12 +948,12 @@ func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) { b.ResetTimer() for range b.N { - converter := NewPrometheusConverter() - annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), settings, promslog.NewNopLogger()) + mockAppender := &mockCombinedAppender{} + converter := NewPrometheusConverter(mockAppender) + annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), settings) require.NoError(b, err) require.Empty(b, annots) - require.NotNil(b, converter.TimeSeries()) - require.NotNil(b, converter.Metadata()) + require.Positive(b, len(mockAppender.samples)+len(mockAppender.histograms)) } }) } @@ -1017,7 +970,7 @@ func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) { type wantPrometheusMetric struct { name string familyName string - metricType prompb.MetricMetadata_MetricType + metricType model.MetricType description string unit string } @@ -1064,11 +1017,11 @@ func createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCou generateAttributes(h.Attributes(), "series", labelsPerMetric) generateExemplars(h.Exemplars(), exemplarsPerSeries, ts) - metricType := prompb.MetricMetadata_HISTOGRAM + metricType := model.MetricTypeHistogram if temporality != pmetric.AggregationTemporalityCumulative { // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) // We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now. 
- metricType = prompb.MetricMetadata_UNKNOWN + metricType = model.MetricTypeUnknown } wantPromMetrics = append(wantPromMetrics, wantPrometheusMetric{ name: fmt.Sprintf("histogram_%d%s_bucket", i, suffix), @@ -1106,11 +1059,11 @@ func createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCou generateAttributes(point.Attributes(), "series", labelsPerMetric) generateExemplars(point.Exemplars(), exemplarsPerSeries, ts) - metricType := prompb.MetricMetadata_GAUGE + metricType := model.MetricTypeGauge if temporality != pmetric.AggregationTemporalityCumulative { // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) // We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now. - metricType = prompb.MetricMetadata_UNKNOWN + metricType = model.MetricTypeUnknown } wantPromMetrics = append(wantPromMetrics, wantPrometheusMetric{ name: fmt.Sprintf("non_monotonic_sum_%d%s", i, suffix), @@ -1140,11 +1093,11 @@ func createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCou counterSuffix = suffix + "_total" } - metricType := prompb.MetricMetadata_COUNTER + metricType := model.MetricTypeCounter if temporality != pmetric.AggregationTemporalityCumulative { // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) // We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now. 
- metricType = prompb.MetricMetadata_UNKNOWN + metricType = model.MetricTypeUnknown } wantPromMetrics = append(wantPromMetrics, wantPrometheusMetric{ name: fmt.Sprintf("monotonic_sum_%d%s", i, counterSuffix), @@ -1170,7 +1123,7 @@ func createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCou wantPromMetrics = append(wantPromMetrics, wantPrometheusMetric{ name: fmt.Sprintf("gauge_%d%s", i, suffix), familyName: fmt.Sprintf("gauge_%d%s", i, suffix), - metricType: prompb.MetricMetadata_GAUGE, + metricType: model.MetricTypeGauge, unit: "unit", description: "gauge", }) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go index e85c89b340..dbfd384198 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go @@ -18,19 +18,18 @@ package prometheusremotewrite import ( "context" - "log/slog" "math" "github.com/prometheus/common/model" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/value" - "github.com/prometheus/prometheus/prompb" ) func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice, - resource pcommon.Resource, settings Settings, name string, scope scope, + resource pcommon.Resource, settings Settings, name string, scope scope, meta metadata.Metadata, ) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { @@ -38,7 +37,7 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data } pt := dataPoints.At(x) - labels := createAttributes( + labels := c.createAttributes( resource, pt.Attributes(), scope, @@ -48,27 +47,28 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx 
context.Context, data model.MetricNameLabel, name, ) - sample := &prompb.Sample{ - // convert ns to ms - Timestamp: convertTimeStamp(pt.Timestamp()), - } + var val float64 switch pt.ValueType() { case pmetric.NumberDataPointValueTypeInt: - sample.Value = float64(pt.IntValue()) + val = float64(pt.IntValue()) case pmetric.NumberDataPointValueTypeDouble: - sample.Value = pt.DoubleValue() + val = pt.DoubleValue() } if pt.Flags().NoRecordedValue() { - sample.Value = math.Float64frombits(value.StaleNaN) + val = math.Float64frombits(value.StaleNaN) + } + ts := convertTimeStamp(pt.Timestamp()) + ct := convertTimeStamp(pt.StartTimestamp()) + if err := c.appender.AppendSample(name, labels, meta, ts, ct, val, nil); err != nil { + return err } - c.addSample(sample, labels) } return nil } func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice, - resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string, scope scope, logger *slog.Logger, + resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string, scope scope, meta metadata.Metadata, ) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { @@ -76,9 +76,7 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo } pt := dataPoints.At(x) - timestamp := convertTimeStamp(pt.Timestamp()) - startTimestampMs := convertTimeStamp(pt.StartTimestamp()) - lbls := createAttributes( + lbls := c.createAttributes( resource, pt.Attributes(), scope, @@ -88,49 +86,38 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo model.MetricNameLabel, name, ) - sample := &prompb.Sample{ - // convert ns to ms - Timestamp: timestamp, - } + var val float64 switch pt.ValueType() { case pmetric.NumberDataPointValueTypeInt: - sample.Value = float64(pt.IntValue()) + val = float64(pt.IntValue()) case pmetric.NumberDataPointValueTypeDouble: - sample.Value = 
pt.DoubleValue() + val = pt.DoubleValue() } if pt.Flags().NoRecordedValue() { - sample.Value = math.Float64frombits(value.StaleNaN) + val = math.Float64frombits(value.StaleNaN) } - isMonotonic := metric.Sum().IsMonotonic() - if isMonotonic { - c.handleStartTime(startTimestampMs, timestamp, lbls, settings, "sum", sample.Value, logger) + ts := convertTimeStamp(pt.Timestamp()) + ct := convertTimeStamp(pt.StartTimestamp()) + exemplars, err := c.getPromExemplars(ctx, pt.Exemplars()) + if err != nil { + return err } - ts := c.addSample(sample, lbls) - if ts != nil { - exemplars, err := getPromExemplars[pmetric.NumberDataPoint](ctx, &c.everyN, pt) - if err != nil { - return err - } - ts.Exemplars = append(ts.Exemplars, exemplars...) + if err := c.appender.AppendSample(name, lbls, meta, ts, ct, val, exemplars); err != nil { + return err } // add created time series if needed - if settings.ExportCreatedMetric && isMonotonic { - if startTimestampMs == 0 { - return nil - } - - createdLabels := make([]prompb.Label, len(lbls)) - copy(createdLabels, lbls) - for i, l := range createdLabels { - if l.Name == model.MetricNameLabel { - createdLabels[i].Value = name + createdSuffix - break + if settings.ExportCreatedMetric && metric.Sum().IsMonotonic() && pt.StartTimestamp() != 0 { + c.builder.Reset(lbls) + // Add created suffix to the metric name for CT series. 
+ c.builder.Set(model.MetricNameLabel, c.builder.Get(model.MetricNameLabel)+createdSuffix) + ls := c.builder.Labels() + if c.timeSeriesIsNew(ls) { + if err := c.appender.AppendSample(name, ls, meta, ts, 0, float64(ct), nil); err != nil { + return err } } - c.addTimeSeriesIfNeeded(createdLabels, startTimestampMs, pt.Timestamp()) } - logger.Debug("addSumNumberDataPoints", "labels", labelsStringer(lbls), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "sum") } return nil diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go index 4f8802c419..d27b83f836 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go @@ -22,12 +22,13 @@ import ( "time" "github.com/prometheus/common/model" - "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" - "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/labels" ) func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) { @@ -48,7 +49,7 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) { metric func() pmetric.Metric scope scope promoteScope bool - want func() map[uint64]*prompb.TimeSeries + want func() []combinedSample }{ { name: "gauge without scope promotion", @@ -61,19 +62,16 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test"}, - } - return map[uint64]*prompb.TimeSeries{ - 
timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - { - Value: 1, - Timestamp: convertTimeStamp(pcommon.Timestamp(ts)), - }, - }, + want: func() []combinedSample { + lbls := labels.FromStrings( + model.MetricNameLabel, "test", + ) + return []combinedSample{ + { + ls: lbls, + meta: metadata.Metadata{}, + t: convertTimeStamp(pcommon.Timestamp(ts)), + v: 1, }, } }, @@ -89,24 +87,21 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: true, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test"}, - {Name: "otel_scope_name", Value: defaultScope.name}, - {Name: "otel_scope_schema_url", Value: defaultScope.schemaURL}, - {Name: "otel_scope_version", Value: defaultScope.version}, - {Name: "otel_scope_attr1", Value: "value1"}, - {Name: "otel_scope_attr2", Value: "value2"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - { - Value: 1, - Timestamp: convertTimeStamp(pcommon.Timestamp(ts)), - }, - }, + want: func() []combinedSample { + lbls := labels.FromStrings( + model.MetricNameLabel, "test", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ) + return []combinedSample{ + { + ls: lbls, + meta: metadata.Metadata{}, + t: convertTimeStamp(pcommon.Timestamp(ts)), + v: 1, }, } }, @@ -115,22 +110,23 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { metric := tt.metric() - converter := NewPrometheusConverter() + mockAppender := &mockCombinedAppender{} + converter := NewPrometheusConverter(mockAppender) converter.addGaugeNumberDataPoints( context.Background(), metric.Gauge().DataPoints(), pcommon.NewResource(), Settings{ - 
ExportCreatedMetric: true, - PromoteScopeMetadata: tt.promoteScope, - EnableCreatedTimestampZeroIngestion: true, + ExportCreatedMetric: true, + PromoteScopeMetadata: tt.promoteScope, }, metric.Name(), tt.scope, + metadata.Metadata{}, ) - require.Equal(t, tt.want(), converter.unique) + requireEqual(t, tt.want(), mockAppender.samples) require.Empty(t, converter.conflicts) }) } @@ -154,7 +150,7 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { metric func() pmetric.Metric scope scope promoteScope bool - want func() map[uint64]*prompb.TimeSeries + want func() []combinedSample }{ { name: "sum without scope promotion", @@ -168,19 +164,16 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - { - Value: 1, - Timestamp: convertTimeStamp(ts), - }, - }, + want: func() []combinedSample { + lbls := labels.FromStrings( + model.MetricNameLabel, "test", + ) + return []combinedSample{ + { + ls: lbls, + meta: metadata.Metadata{}, + t: convertTimeStamp(ts), + v: 1, }, } }, @@ -197,24 +190,21 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: true, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test"}, - {Name: "otel_scope_name", Value: defaultScope.name}, - {Name: "otel_scope_schema_url", Value: defaultScope.schemaURL}, - {Name: "otel_scope_version", Value: defaultScope.version}, - {Name: "otel_scope_attr1", Value: "value1"}, - {Name: "otel_scope_attr2", Value: "value2"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - { - Value: 1, - Timestamp: 
convertTimeStamp(ts), - }, - }, + want: func() []combinedSample { + lbls := labels.FromStrings( + model.MetricNameLabel, "test", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ) + return []combinedSample{ + { + ls: lbls, + meta: metadata.Metadata{}, + t: convertTimeStamp(ts), + v: 1, }, } }, @@ -233,18 +223,17 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{{ - Value: 1, - Timestamp: convertTimeStamp(ts), - }}, - Exemplars: []prompb.Exemplar{ + want: func() []combinedSample { + lbls := labels.FromStrings( + model.MetricNameLabel, "test", + ) + return []combinedSample{ + { + ls: lbls, + meta: metadata.Metadata{}, + t: convertTimeStamp(ts), + v: 1, + es: []exemplar.Exemplar{ {Value: 2}, }, }, @@ -268,25 +257,26 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_sum"}, - } - createdLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_sum" + createdSuffix}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - {Value: 1, Timestamp: convertTimeStamp(ts)}, - }, + want: func() []combinedSample { + lbls := labels.FromStrings( + model.MetricNameLabel, "test_sum", + ) + createdLabels := labels.FromStrings( + model.MetricNameLabel, "test_sum"+createdSuffix, + ) + return []combinedSample{ + { + ls: lbls, + meta: metadata.Metadata{}, + t: 
convertTimeStamp(ts), + ct: convertTimeStamp(ts), + v: 1, }, - timeSeriesSignature(createdLabels): { - Labels: createdLabels, - Samples: []prompb.Sample{ - {Value: float64(convertTimeStamp(ts)), Timestamp: convertTimeStamp(ts)}, - }, + { + ls: createdLabels, + meta: metadata.Metadata{}, + t: convertTimeStamp(ts), + v: float64(convertTimeStamp(ts)), }, } }, @@ -306,16 +296,16 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_sum"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + want: func() []combinedSample { + lbls := labels.FromStrings( + model.MetricNameLabel, "test_sum", + ) + return []combinedSample{ + { + ls: lbls, + meta: metadata.Metadata{}, + t: convertTimeStamp(ts), + v: 0, }, } }, @@ -335,16 +325,16 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_sum"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + want: func() []combinedSample { + lbls := labels.FromStrings( + model.MetricNameLabel, "test_sum", + ) + return []combinedSample{ + { + ls: lbls, + meta: metadata.Metadata{}, + t: convertTimeStamp(ts), + v: 0, }, } }, @@ -353,7 +343,8 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { metric := tt.metric() - converter := NewPrometheusConverter() + mockAppender := &mockCombinedAppender{} + converter := NewPrometheusConverter(mockAppender) 
converter.addSumNumberDataPoints( context.Background(), @@ -361,16 +352,15 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { pcommon.NewResource(), metric, Settings{ - ExportCreatedMetric: true, - PromoteScopeMetadata: tt.promoteScope, - EnableCreatedTimestampZeroIngestion: true, + ExportCreatedMetric: true, + PromoteScopeMetadata: tt.promoteScope, }, metric.Name(), tt.scope, - promslog.NewNopLogger(), + metadata.Metadata{}, ) - require.Equal(t, tt.want(), converter.unique) + requireEqual(t, tt.want(), mockAppender.samples) require.Empty(t, converter.conflicts) }) } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go b/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go index 716a6cd6f9..49f96e0019 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go @@ -17,42 +17,41 @@ package prometheusremotewrite import ( + "github.com/prometheus/common/model" "go.opentelemetry.io/collector/pdata/pmetric" - - "github.com/prometheus/prometheus/prompb" ) -func otelMetricTypeToPromMetricType(otelMetric pmetric.Metric) prompb.MetricMetadata_MetricType { +func otelMetricTypeToPromMetricType(otelMetric pmetric.Metric) model.MetricType { switch otelMetric.Type() { case pmetric.MetricTypeGauge: - return prompb.MetricMetadata_GAUGE + return model.MetricTypeGauge case pmetric.MetricTypeSum: - metricType := prompb.MetricMetadata_GAUGE + metricType := model.MetricTypeGauge if otelMetric.Sum().IsMonotonic() { - metricType = prompb.MetricMetadata_COUNTER + metricType = model.MetricTypeCounter } // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) // We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now. 
if otelMetric.Sum().AggregationTemporality() == pmetric.AggregationTemporalityDelta { - metricType = prompb.MetricMetadata_UNKNOWN + metricType = model.MetricTypeUnknown } return metricType case pmetric.MetricTypeHistogram: // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) // We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now. if otelMetric.Histogram().AggregationTemporality() == pmetric.AggregationTemporalityDelta { - return prompb.MetricMetadata_UNKNOWN + return model.MetricTypeUnknown } - return prompb.MetricMetadata_HISTOGRAM + return model.MetricTypeHistogram case pmetric.MetricTypeSummary: - return prompb.MetricMetadata_SUMMARY + return model.MetricTypeSummary case pmetric.MetricTypeExponentialHistogram: if otelMetric.ExponentialHistogram().AggregationTemporality() == pmetric.AggregationTemporalityDelta { // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) // We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now. - return prompb.MetricMetadata_UNKNOWN + return model.MetricTypeUnknown } - return prompb.MetricMetadata_HISTOGRAM + return model.MetricTypeHistogram } - return prompb.MetricMetadata_UNKNOWN + return model.MetricTypeUnknown } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go b/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go deleted file mode 100644 index abffbe6105..0000000000 --- a/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Provenance-includes-location: -// https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/metrics_to_prw.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. - -package prometheusremotewrite - -import ( - "github.com/prometheus/prometheus/prompb" -) - -// TimeSeries returns a slice of the prompb.TimeSeries that were converted from OTel format. -func (c *PrometheusConverter) TimeSeries() []prompb.TimeSeries { - conflicts := 0 - for _, ts := range c.conflicts { - conflicts += len(ts) - } - allTS := make([]prompb.TimeSeries, 0, len(c.unique)+conflicts) - for _, ts := range c.unique { - allTS = append(allTS, *ts) - } - for _, cTS := range c.conflicts { - for _, ts := range cTS { - allTS = append(allTS, *ts) - } - } - - return allTS -} - -// Metadata returns a slice of the prompb.Metadata that were converted from OTel format. -func (c *PrometheusConverter) Metadata() []prompb.MetricMetadata { - return c.metadata -} diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index ef180ae4a2..7319dbaad2 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -533,26 +533,27 @@ type OTLPOptions struct { // LookbackDelta is the query lookback delta. // Used to calculate the target_info sample timestamp interval. LookbackDelta time.Duration + // IngestCTZeroSample enables writing zero samples based on the start time + // of metrics. 
+ IngestCTZeroSample bool } // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and // writes them to the provided appendable. -func NewOTLPWriteHandler(logger *slog.Logger, _ prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, enableCTZeroIngestion bool, validIntervalCTZeroIngestion time.Duration, opts OTLPOptions) http.Handler { +func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler { if opts.NativeDelta && opts.ConvertDelta { // This should be validated when iterating through feature flags, so not expected to fail here. panic("cannot enable native delta ingestion and delta2cumulative conversion at the same time") } ex := &rwExporter{ - writeHandler: &writeHandler{ - logger: logger, - appendable: appendable, - }, - config: configFunc, - allowDeltaTemporality: opts.NativeDelta, - lookbackDelta: opts.LookbackDelta, - enableCTZeroIngestion: enableCTZeroIngestion, - validIntervalCTZeroIngestion: validIntervalCTZeroIngestion, + logger: logger, + appendable: appendable, + config: configFunc, + allowDeltaTemporality: opts.NativeDelta, + lookbackDelta: opts.LookbackDelta, + ingestCTZeroSample: opts.IngestCTZeroSample, + reg: reg, } wh := &otlpWriteHandler{logger: logger, defaultConsumer: ex} @@ -586,20 +587,23 @@ func NewOTLPWriteHandler(logger *slog.Logger, _ prometheus.Registerer, appendabl } type rwExporter struct { - *writeHandler + logger *slog.Logger + appendable storage.Appendable config func() config.Config allowDeltaTemporality bool lookbackDelta time.Duration - - // Mimir specifics. 
- enableCTZeroIngestion bool - validIntervalCTZeroIngestion time.Duration + ingestCTZeroSample bool + reg prometheus.Registerer } func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { otlpCfg := rw.config().OTLPConfig - - converter := otlptranslator.NewPrometheusConverter() + app := &timeLimitAppender{ + Appender: rw.appendable.Appender(ctx), + maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), + } + combinedAppender := otlptranslator.NewCombinedAppender(app, rw.logger, rw.reg, rw.ingestCTZeroSample) + converter := otlptranslator.NewPrometheusConverter(combinedAppender) annots, err := converter.FromMetrics(ctx, md, otlptranslator.Settings{ AddMetricSuffixes: otlpCfg.TranslationStrategy != config.NoTranslation, AllowUTF8: otlpCfg.TranslationStrategy != config.UnderscoreEscapingWithSuffixes, @@ -609,23 +613,19 @@ func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) er AllowDeltaTemporality: rw.allowDeltaTemporality, PromoteScopeMetadata: otlpCfg.PromoteScopeMetadata, LookbackDelta: rw.lookbackDelta, + }) - // Mimir specifics. 
- EnableCreatedTimestampZeroIngestion: rw.enableCTZeroIngestion, - ValidIntervalCreatedTimestampZeroIngestion: rw.validIntervalCTZeroIngestion, - }, rw.logger) - if err != nil { - rw.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err) - } + defer func() { + if err != nil { + _ = app.Rollback() + return + } + err = app.Commit() + }() ws, _ := annots.AsStrings("", 0, 0) if len(ws) > 0 { rw.logger.Warn("Warnings translating OTLP metrics to Prometheus write request", "warnings", ws) } - - err = rw.write(ctx, &prompb.WriteRequest{ - Timeseries: converter.TimeSeries(), - Metadata: converter.Metadata(), - }) return err } diff --git a/storage/remote/write_test.go b/storage/remote/write_test.go index 9ad2fab41e..6d62f96dad 100644 --- a/storage/remote/write_test.go +++ b/storage/remote/write_test.go @@ -35,7 +35,6 @@ import ( "github.com/prometheus/client_golang/prometheus" common_config "github.com/prometheus/common/config" "github.com/prometheus/common/model" - "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -502,12 +501,13 @@ func handleOTLP(t *testing.T, exportRequest pmetricotlp.ExportRequest, otlpCfg c require.NoError(t, err) req.Header.Set("Content-Type", "application/x-protobuf") + log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn})) appendable := &mockAppendable{} - handler := NewOTLPWriteHandler(promslog.NewNopLogger(), nil, appendable, func() config.Config { + handler := NewOTLPWriteHandler(log, nil, appendable, func() config.Config { return config.Config{ OTLPConfig: otlpCfg, } - }, false, 0, OTLPOptions{}) + }, OTLPOptions{}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -603,7 +603,7 @@ func TestOTLPDelta(t *testing.T) { cfg := func() config.Config { return config.Config{OTLPConfig: config.DefaultOTLPConfig} } - handler := 
NewOTLPWriteHandler(log, nil, appendable, cfg, false, 0, OTLPOptions{ConvertDelta: true}) + handler := NewOTLPWriteHandler(log, nil, appendable, cfg, OTLPOptions{ConvertDelta: true}) md := pmetric.NewMetrics() ms := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() @@ -857,7 +857,7 @@ func BenchmarkOTLP(b *testing.B) { cfgfn := func() config.Config { return config.Config{OTLPConfig: config.DefaultOTLPConfig} } - handler := NewOTLPWriteHandler(log, nil, appendable, cfgfn, false, 0, cfg.opts) + handler := NewOTLPWriteHandler(log, nil, appendable, cfgfn, cfg.opts) fail := make(chan struct{}) done := make(chan struct{}) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 4f3926a2ea..113cd2158a 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -312,10 +312,11 @@ func NewAPI( a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs, ctZeroIngestionEnabled) } if otlpEnabled { - a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, registerer, ap, configFunc, ctZeroIngestionEnabled, validIntervalCTZeroIngestion, remote.OTLPOptions{ - ConvertDelta: otlpDeltaToCumulative, - NativeDelta: otlpNativeDeltaIngestion, - LookbackDelta: lookbackDelta, + a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, registerer, ap, configFunc, remote.OTLPOptions{ + ConvertDelta: otlpDeltaToCumulative, + NativeDelta: otlpNativeDeltaIngestion, + LookbackDelta: lookbackDelta, + IngestCTZeroSample: ctZeroIngestionEnabled, }) }