
Commit 3b9278f

feat: implement query_metrics (#3074)
# What does this PR do?

`query_metrics` currently has no implementation, meaning that once a metric is emitted there is no way in Llama Stack to query it from the store. This PR implements `query_metrics` for the meta_reference provider, following a similar style to `query_traces`: the trace_store is used to format an SQL query and execute it. In this case the parameters for the query are `metric.METRIC_NAME`, `start_time`, and `end_time`, plus any other matchers if they are provided. Results are ordered by timestamp.

This required client-side changes, since the client had no `query_metrics` or any associated resources, so any tests here will fail until the client is updated; I have provided manual execution logs for the new tests instead. Additionally, `unit` is added to the `MetricDataPoint` class, since it adds much more context to the metric being queried.

Depends on llamastack/llama-stack-client-python#260.

## Test Plan

```python
import time
import uuid


def create_http_client():
    from llama_stack_client import LlamaStackClient

    return LlamaStackClient(base_url="http://localhost:8321")


client = create_http_client()

response = client.telemetry.query_metrics(metric_name="total_tokens", start_time=0)
print(response)
```

```
╰─ python3.12 ~/telemetry.py
INFO:httpx:HTTP Request: POST http://localhost:8322/v1/telemetry/metrics/total_tokens "HTTP/1.1 200 OK"
[TelemetryQueryMetricsResponse(data=None, metric='total_tokens', labels=[], values=[{'timestamp': 1753999514, 'value': 34.0, 'unit': 'tokens'}, {'timestamp': 1753999816, 'value': 34.0, 'unit': 'tokens'}, {'timestamp': 1753999881, 'value': 34.0, 'unit': 'tokens'}, {'timestamp': 1753999956, 'value': 34.0, 'unit': 'tokens'}, {'timestamp': 1754000200, 'value': 34.0, 'unit': 'tokens'}, {'timestamp': 1754000419, 'value': 36.0, 'unit': 'tokens'}, {'timestamp': 1754000714, 'value': 36.0, 'unit': 'tokens'}, {'timestamp': 1754000876, 'value': 36.0, 'unit': 'tokens'}, {'timestamp': 1754000908, 'value': 34.0, 'unit': 'tokens'}, {'timestamp': 1754001309, 'value': 584.0, 'unit': 'tokens'}, {'timestamp': 1754001311, 'value': 138.0, 'unit': 'tokens'}, {'timestamp': 1754001316, 'value': 349.0, 'unit': 'tokens'}, {'timestamp': 1754001318, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754001320, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754001341, 'value': 923.0, 'unit': 'tokens'}, {'timestamp': 1754001350, 'value': 354.0, 'unit': 'tokens'}, {'timestamp': 1754001462, 'value': 417.0, 'unit': 'tokens'}, {'timestamp': 1754001464, 'value': 158.0, 'unit': 'tokens'}, {'timestamp': 1754001475, 'value': 697.0, 'unit': 'tokens'}, {'timestamp': 1754001477, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754001479, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754001489, 'value': 298.0, 'unit': 'tokens'}, {'timestamp': 1754001541, 'value': 615.0, 'unit': 'tokens'}, {'timestamp': 1754001543, 'value': 119.0, 'unit': 'tokens'}, {'timestamp': 1754001548, 'value': 310.0, 'unit': 'tokens'}, {'timestamp': 1754001549, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754001551, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754001568, 'value': 714.0, 'unit': 'tokens'}, {'timestamp': 1754001800, 'value': 437.0, 'unit': 'tokens'}, {'timestamp': 1754001802, 'value': 200.0, 'unit': 'tokens'}, {'timestamp': 1754001806, 'value': 262.0, 'unit': 'tokens'}, {'timestamp': 1754001808, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754001810, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754001816, 'value': 82.0, 'unit': 'tokens'}, {'timestamp': 1754001923, 'value': 61.0, 'unit': 'tokens'}, {'timestamp': 1754001929,
'value': 391.0, 'unit': 'tokens'}, {'timestamp': 1754001939, 'value': 598.0, 'unit': 'tokens'}, {'timestamp': 1754001941, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754001942, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754001952, 'value': 252.0, 'unit': 'tokens'}, {'timestamp': 1754002053, 'value': 251.0, 'unit': 'tokens'}, {'timestamp': 1754002059, 'value': 375.0, 'unit': 'tokens'}, {'timestamp': 1754002062, 'value': 244.0, 'unit': 'tokens'}, {'timestamp': 1754002064, 'value': 111.0, 'unit': 'tokens'}, {'timestamp': 1754002065, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754002083, 'value': 719.0, 'unit': 'tokens'}, {'timestamp': 1754002302, 'value': 279.0, 'unit': 'tokens'}, {'timestamp': 1754002306, 'value': 218.0, 'unit': 'tokens'}, {'timestamp': 1754002308, 'value': 198.0, 'unit': 'tokens'}, {'timestamp': 1754002309, 'value': 69.0, 'unit': 'tokens'}, {'timestamp': 1754002311, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754002324, 'value': 481.0, 'unit': 'tokens'}, {'timestamp': 1754003161, 'value': 579.0, 'unit': 'tokens'}, {'timestamp': 1754003161, 'value': 69.0, 'unit': 'tokens'}, {'timestamp': 1754003169, 'value': 499.0, 'unit': 'tokens'}, {'timestamp': 1754003171, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754003173, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754003185, 'value': 422.0, 'unit': 'tokens'}, {'timestamp': 1754003448, 'value': 579.0, 'unit': 'tokens'}, {'timestamp': 1754003453, 'value': 422.0, 'unit': 'tokens'}, {'timestamp': 1754003589, 'value': 579.0, 'unit': 'tokens'}, {'timestamp': 1754003609, 'value': 279.0, 'unit': 'tokens'}, {'timestamp': 1754003614, 'value': 481.0, 'unit': 'tokens'}, {'timestamp': 1754003706, 'value': 303.0, 'unit': 'tokens'}, {'timestamp': 1754003706, 'value': 51.0, 'unit': 'tokens'}, {'timestamp': 1754003713, 'value': 426.0, 'unit': 'tokens'}, {'timestamp': 1754003714, 'value': 70.0, 'unit': 'tokens'}, {'timestamp': 1754003715, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754003724, 'value': 225.0, 'unit': 'tokens'}, {'timestamp': 1754004226, 'value': 516.0, 'unit': 'tokens'}, {'timestamp': 1754004228, 'value': 127.0, 'unit': 'tokens'}, {'timestamp': 1754004232, 'value': 281.0, 'unit': 'tokens'}, {'timestamp': 1754004234, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754004236, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754004244, 'value': 206.0, 'unit': 'tokens'}, {'timestamp': 1754004683, 'value': 338.0, 'unit': 'tokens'}, {'timestamp': 1754004690, 'value': 481.0, 'unit': 'tokens'}, {'timestamp': 1754004692, 'value': 124.0, 'unit': 'tokens'}, {'timestamp': 1754004692, 'value': 65.0, 'unit': 'tokens'}, {'timestamp': 1754004694, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754004703, 'value': 211.0, 'unit': 'tokens'}, {'timestamp': 1754004743, 'value': 338.0, 'unit': 'tokens'}, {'timestamp': 1754004749, 'value': 211.0, 'unit': 'tokens'}, {'timestamp': 1754005566, 'value': 481.0, 'unit': 'tokens'}, {'timestamp': 1754006101, 'value': 159.0, 'unit': 'tokens'}, {'timestamp': 1754006105, 'value': 272.0, 'unit': 'tokens'}, {'timestamp': 1754006109, 'value': 308.0, 'unit': 'tokens'}, {'timestamp': 1754006110, 'value': 61.0, 'unit': 'tokens'}, {'timestamp': 1754006112, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754006130, 'value': 705.0, 'unit': 'tokens'}, {'timestamp': 1754051825, 'value': 454.0, 'unit': 'tokens'}, {'timestamp': 1754051827, 'value': 152.0, 'unit': 'tokens'}, {'timestamp': 1754051834, 'value': 481.0, 'unit': 'tokens'}, {'timestamp': 1754051835, 'value': 55.0, 'unit': 
'tokens'}, {'timestamp': 1754051837, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754051845, 'value': 102.0, 'unit': 'tokens'}, {'timestamp': 1754099929, 'value': 36.0, 'unit': 'tokens'}, {'timestamp': 1754510050, 'value': 598.0, 'unit': 'tokens'}, {'timestamp': 1754510052, 'value': 160.0, 'unit': 'tokens'}, {'timestamp': 1754510064, 'value': 725.0, 'unit': 'tokens'}, {'timestamp': 1754510065, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754510067, 'value': 133.0, 'unit': 'tokens'}, {'timestamp': 1754510083, 'value': 535.0, 'unit': 'tokens'}, {'timestamp': 1754596582, 'value': 36.0, 'unit': 'tokens'}])]
```

Tests are added for each currently documented metric in Llama Stack using this new function. Attached is also some manual testing; integration tests pass locally with replay mode and the linked client changes:

<img width="1907" height="529" alt="Screenshot 2025-08-08 at 2 49 14 PM" src="https://github.com/user-attachments/assets/d482ab06-dcff-4f0c-a1f1-f870670ee9bc" />

---------

Signed-off-by: Charlie Doern <[email protected]>
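For reference, a hypothetical sketch of how the new parameters compose on the server-side `query_metrics` API. The `model_id` label below is an assumption for illustration, and the exact client-side signature depends on the linked client PR; the matcher fields and operators follow the diff in this commit.

```python
# Hypothetical usage sketch, not part of this commit's test plan.
from llama_stack.apis.telemetry import MetricLabelMatcher, MetricQueryType

response = await telemetry.query_metrics(
    metric_name="total_tokens",  # queried in the store as span event "metric.total_tokens"
    start_time=1753999000,       # Unix timestamp (seconds)
    end_time=None,               # defaults to "now" on the server
    granularity="1d",            # bucket and SUM values per day
    query_type=MetricQueryType.RANGE,
    label_matchers=[
        # "model_id" is an assumed label name for illustration
        MetricLabelMatcher(name="model_id", value="llama3.2:3b-instruct-fp16", operator="="),
    ],
)
for series in response.data:
    for point in series.values:
        print(point.timestamp, point.value, point.unit)
```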
1 parent 3d119a8 commit 3b9278f

File tree: 17 files changed (+921 −6 lines)


docs/_static/llama-stack-spec.html

Lines changed: 5 additions & 1 deletion
```diff
@@ -16067,12 +16067,16 @@
         "value": {
           "type": "number",
           "description": "The numeric value of the metric at this timestamp"
+        },
+        "unit": {
+          "type": "string"
         }
       },
       "additionalProperties": false,
       "required": [
         "timestamp",
-        "value"
+        "value",
+        "unit"
       ],
       "title": "MetricDataPoint",
       "description": "A single data point in a metric time series."
```

docs/_static/llama-stack-spec.yaml

Lines changed: 3 additions & 0 deletions
```diff
@@ -11954,10 +11954,13 @@ components:
           type: number
           description: >-
             The numeric value of the metric at this timestamp
+        unit:
+          type: string
       additionalProperties: false
       required:
         - timestamp
         - value
+        - unit
       title: MetricDataPoint
       description: >-
         A single data point in a metric time series.
```

llama_stack/apis/telemetry/telemetry.py

Lines changed: 2 additions & 1 deletion
```diff
@@ -386,6 +386,7 @@ class MetricDataPoint(BaseModel):

     timestamp: int
     value: float
+    unit: str


 @json_schema_type
@@ -518,7 +519,7 @@ async def query_metrics(
         metric_name: str,
         start_time: int,
         end_time: int | None = None,
-        granularity: str | None = "1d",
+        granularity: str | None = None,
         query_type: MetricQueryType = MetricQueryType.RANGE,
         label_matchers: list[MetricLabelMatcher] | None = None,
     ) -> QueryMetricsResponse:
```
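Since `unit` is now a required field on `MetricDataPoint`, every returned point carries its measurement unit. A minimal sketch of what that looks like, assuming the usual pydantic v2 API:

```python
from llama_stack.apis.telemetry import MetricDataPoint

point = MetricDataPoint(timestamp=1753999514, value=34.0, unit="tokens")
print(point.model_dump())  # {'timestamp': 1753999514, 'value': 34.0, 'unit': 'tokens'}
# Omitting unit now raises a pydantic ValidationError.
```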

llama_stack/providers/inline/telemetry/meta_reference/telemetry.py

Lines changed: 33 additions & 2 deletions
```diff
@@ -4,6 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

+import datetime
 import threading
 from typing import Any

@@ -145,11 +146,41 @@ async def query_metrics(
         metric_name: str,
         start_time: int,
         end_time: int | None = None,
-        granularity: str | None = "1d",
+        granularity: str | None = None,
         query_type: MetricQueryType = MetricQueryType.RANGE,
         label_matchers: list[MetricLabelMatcher] | None = None,
     ) -> QueryMetricsResponse:
-        raise NotImplementedError("Querying metrics is not implemented")
+        """Query metrics from the telemetry store.
+
+        Args:
+            metric_name: The name of the metric to query (e.g., "prompt_tokens")
+            start_time: Start time as Unix timestamp
+            end_time: End time as Unix timestamp (defaults to now if None)
+            granularity: Time granularity for aggregation
+            query_type: Type of query (RANGE or INSTANT)
+            label_matchers: Label filters to apply
+
+        Returns:
+            QueryMetricsResponse with metric time series data
+        """
+        # Convert timestamps to datetime objects
+        start_dt = datetime.datetime.fromtimestamp(start_time, datetime.UTC)
+        end_dt = datetime.datetime.fromtimestamp(end_time, datetime.UTC) if end_time else None
+
+        # Use SQLite trace store if available
+        if hasattr(self, "trace_store") and self.trace_store:
+            return await self.trace_store.query_metrics(
+                metric_name=metric_name,
+                start_time=start_dt,
+                end_time=end_dt,
+                granularity=granularity,
+                query_type=query_type,
+                label_matchers=label_matchers,
+            )
+        else:
+            raise ValueError(
+                f"In order to query_metrics, you must have {TelemetrySink.SQLITE} set in your telemetry sinks"
+            )

     def _log_unstructured(self, event: UnstructuredLogEvent, ttl_seconds: int) -> None:
         with self._lock:
```
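The provider's only responsibilities here are converting the Unix timestamps to timezone-aware UTC datetimes and delegating to the trace store. A quick illustration of that conversion step (`datetime.UTC` requires Python 3.11+):

```python
import datetime

start_time = 1753999514  # Unix seconds, as accepted by the API
start_dt = datetime.datetime.fromtimestamp(start_time, datetime.UTC)
print(start_dt.isoformat())  # 2025-07-31T22:05:14+00:00
```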

llama_stack/providers/utils/telemetry/sqlite_trace_store.py

Lines changed: 194 additions & 2 deletions
```diff
@@ -5,12 +5,23 @@
 # the root directory of this source tree.

 import json
-from datetime import datetime
+from datetime import UTC, datetime
 from typing import Protocol

 import aiosqlite

-from llama_stack.apis.telemetry import QueryCondition, Span, SpanWithStatus, Trace
+from llama_stack.apis.telemetry import (
+    MetricDataPoint,
+    MetricLabel,
+    MetricLabelMatcher,
+    MetricQueryType,
+    MetricSeries,
+    QueryCondition,
+    QueryMetricsResponse,
+    Span,
+    SpanWithStatus,
+    Trace,
+)


 class TraceStore(Protocol):
@@ -29,11 +40,192 @@ async def get_span_tree(
         max_depth: int | None = None,
     ) -> dict[str, SpanWithStatus]: ...

+    async def query_metrics(
+        self,
+        metric_name: str,
+        start_time: datetime,
+        end_time: datetime | None = None,
+        granularity: str | None = "1d",
+        query_type: MetricQueryType = MetricQueryType.RANGE,
+        label_matchers: list[MetricLabelMatcher] | None = None,
+    ) -> QueryMetricsResponse: ...
+

 class SQLiteTraceStore(TraceStore):
     def __init__(self, conn_string: str):
         self.conn_string = conn_string

+    async def query_metrics(
+        self,
+        metric_name: str,
+        start_time: datetime,
+        end_time: datetime | None = None,
+        granularity: str | None = None,
+        query_type: MetricQueryType = MetricQueryType.RANGE,
+        label_matchers: list[MetricLabelMatcher] | None = None,
+    ) -> QueryMetricsResponse:
+        if end_time is None:
+            end_time = datetime.now(UTC)
+
+        # Build base query
+        if query_type == MetricQueryType.INSTANT:
+            query = """
+                SELECT
+                    se.name,
+                    SUM(CAST(json_extract(se.attributes, '$.value') AS REAL)) as value,
+                    json_extract(se.attributes, '$.unit') as unit,
+                    se.attributes
+                FROM span_events se
+                WHERE se.name = ?
+                AND se.timestamp BETWEEN ? AND ?
+            """
+        else:
+            if granularity:
+                time_format = self._get_time_format_for_granularity(granularity)
+                query = f"""
+                    SELECT
+                        se.name,
+                        SUM(CAST(json_extract(se.attributes, '$.value') AS REAL)) as value,
+                        json_extract(se.attributes, '$.unit') as unit,
+                        se.attributes,
+                        strftime('{time_format}', se.timestamp) as bucket_start
+                    FROM span_events se
+                    WHERE se.name = ?
+                    AND se.timestamp BETWEEN ? AND ?
+                """
+            else:
+                query = """
+                    SELECT
+                        se.name,
+                        json_extract(se.attributes, '$.value') as value,
+                        json_extract(se.attributes, '$.unit') as unit,
+                        se.attributes,
+                        se.timestamp
+                    FROM span_events se
+                    WHERE se.name = ?
+                    AND se.timestamp BETWEEN ? AND ?
+                """
+
+        params = [f"metric.{metric_name}", start_time.isoformat(), end_time.isoformat()]
+
+        # Labels that will be attached to the MetricSeries (preserve matcher labels)
+        all_labels: list[MetricLabel] = []
+        matcher_label_names = set()
+        if label_matchers:
+            for matcher in label_matchers:
+                json_path = f"$.{matcher.name}"
+                if matcher.operator == "=":
+                    query += f" AND json_extract(se.attributes, '{json_path}') = ?"
+                    params.append(matcher.value)
+                elif matcher.operator == "!=":
+                    query += f" AND json_extract(se.attributes, '{json_path}') != ?"
+                    params.append(matcher.value)
+                elif matcher.operator == "=~":
+                    query += f" AND json_extract(se.attributes, '{json_path}') LIKE ?"
+                    params.append(f"%{matcher.value}%")
+                elif matcher.operator == "!~":
+                    query += f" AND json_extract(se.attributes, '{json_path}') NOT LIKE ?"
+                    params.append(f"%{matcher.value}%")
+                # Preserve filter context in output
+                all_labels.append(MetricLabel(name=matcher.name, value=str(matcher.value)))
+                matcher_label_names.add(matcher.name)
+
+        # GROUP BY / ORDER BY logic
+        if query_type == MetricQueryType.RANGE and granularity:
+            group_time_format = self._get_time_format_for_granularity(granularity)
+            query += f" GROUP BY strftime('{group_time_format}', se.timestamp), json_extract(se.attributes, '$.unit')"
+            query += " ORDER BY bucket_start"
+        elif query_type == MetricQueryType.INSTANT:
+            query += " GROUP BY json_extract(se.attributes, '$.unit')"
+        else:
+            query += " ORDER BY se.timestamp"
+
+        # Execute query
+        async with aiosqlite.connect(self.conn_string) as conn:
+            conn.row_factory = aiosqlite.Row
+            async with conn.execute(query, params) as cursor:
+                rows = await cursor.fetchall()
+
+        if not rows:
+            return QueryMetricsResponse(data=[])
+
+        data_points = []
+        # We want to add attribute labels, but only those not already present as matcher labels.
+        attr_label_names = set()
+        for row in rows:
+            # Parse JSON attributes safely; if there are no attributes, just don't add labels to the result.
+            try:
+                attributes = json.loads(row["attributes"] or "{}")
+            except (TypeError, json.JSONDecodeError):
+                attributes = {}
+
+            value = row["value"]
+            unit = row["unit"] or ""
+
+            # Add labels from attributes without duplicating matcher labels; otherwise the
+            # result would contain many duplicate labels.
+            for k, v in attributes.items():
+                if k not in ["value", "unit"] and k not in matcher_label_names and k not in attr_label_names:
+                    all_labels.append(MetricLabel(name=k, value=str(v)))
+                    attr_label_names.add(k)
+
+            # Determine timestamp
+            if query_type == MetricQueryType.RANGE and granularity:
+                try:
+                    bucket_start_raw = row["bucket_start"]
+                except KeyError as e:
+                    raise ValueError(
+                        "DB did not have a bucket_start time in row when using granularity, this indicates improper formatting"
+                    ) from e
+                # the column could also be present but NULL
+                if bucket_start_raw is None:
+                    raise ValueError("bucket_start is None, check time format and data")
+                bucket_start = datetime.fromisoformat(bucket_start_raw)
+                timestamp = int(bucket_start.timestamp())
+            elif query_type == MetricQueryType.INSTANT:
+                timestamp = int(datetime.now(UTC).timestamp())
+            else:
+                try:
+                    timestamp_raw = row["timestamp"]
+                except KeyError as e:
+                    raise ValueError(
+                        "DB did not have a timestamp in row, this indicates improper formatting"
+                    ) from e
+                # the column could also be present but NULL
+                if timestamp_raw is None:
+                    raise ValueError("timestamp is None, check time format and data")
+                timestamp_iso = datetime.fromisoformat(timestamp_raw)
+                timestamp = int(timestamp_iso.timestamp())
+
+            data_points.append(
+                MetricDataPoint(
+                    timestamp=timestamp,
+                    value=value,
+                    unit=unit,
+                )
+            )
+
+        metric_series = [MetricSeries(metric=metric_name, labels=all_labels, values=data_points)]
+        return QueryMetricsResponse(data=metric_series)
+
+    def _get_time_format_for_granularity(self, granularity: str | None) -> str:
+        """Get the SQLite strftime format string for a given granularity.
+
+        Args:
+            granularity: Granularity string (e.g., "1m", "5m", "1h", "1d")
+
+        Returns:
+            SQLite strftime format string for the granularity
+        """
+        if granularity is None:
+            raise ValueError("granularity cannot be None for this method - use separate logic for no aggregation")
+
+        if granularity.endswith("d"):
+            return "%Y-%m-%d 00:00:00"
+        elif granularity.endswith("h"):
+            return "%Y-%m-%d %H:00:00"
+        elif granularity.endswith("m"):
+            return "%Y-%m-%d %H:%M:00"
+        else:
+            return "%Y-%m-%d %H:%M:00"  # Default to the most granular format, which gives the most timestamps.
+
     async def query_traces(
         self,
         attribute_filters: list[QueryCondition] | None = None,
```
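The RANGE-with-granularity path relies on SQLite's `strftime` truncating timestamps to a bucket boundary, so that `GROUP BY` sums all events falling in the same minute/hour/day. A self-contained sketch of that trick, assuming a SQLite build with the JSON1 functions; the table here is a made-up stand-in for the real `span_events` schema:

```python
import sqlite3

# Toy demonstration of the strftime-based bucketing used above.
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE span_events (name TEXT, timestamp TEXT, attributes TEXT)")
rows = [
    ("metric.total_tokens", "2025-08-08T14:01:00", '{"value": 34, "unit": "tokens"}'),
    ("metric.total_tokens", "2025-08-08T14:30:00", '{"value": 36, "unit": "tokens"}'),
    ("metric.total_tokens", "2025-08-08T15:10:00", '{"value": 10, "unit": "tokens"}'),
]
conn.executemany("INSERT INTO span_events VALUES (?, ?, ?)", rows)

# "1h" granularity maps to '%Y-%m-%d %H:00:00', so the first two rows share a bucket.
for bucket, total in conn.execute(
    """
    SELECT strftime('%Y-%m-%d %H:00:00', timestamp) AS bucket_start,
           SUM(CAST(json_extract(attributes, '$.value') AS REAL)) AS value
    FROM span_events
    WHERE name = ?
    GROUP BY bucket_start
    ORDER BY bucket_start
    """,
    ("metric.total_tokens",),
):
    print(bucket, total)  # 2025-08-08 14:00:00 70.0 / 2025-08-08 15:00:00 10.0
```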
0 Bytes — Binary file not shown.
Lines changed: 39 additions & 0 deletions
```diff
@@ -0,0 +1,39 @@
+{
+  "request": {
+    "method": "POST",
+    "url": "http://localhost:11434/api/generate",
+    "headers": {},
+    "body": {
+      "model": "llama3.2:3b-instruct-fp16",
+      "raw": true,
+      "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nTest metrics generation 1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
+      "options": {
+        "temperature": 0.0
+      },
+      "stream": false
+    },
+    "endpoint": "/api/generate",
+    "model": "llama3.2:3b-instruct-fp16"
+  },
+  "response": {
+    "body": {
+      "__type__": "ollama._types.GenerateResponse",
+      "__data__": {
+        "model": "llama3.2:3b-instruct-fp16",
+        "created_at": "2025-08-11T15:51:18.170868Z",
+        "done": true,
+        "done_reason": "stop",
+        "total_duration": 5240614083,
+        "load_duration": 9823416,
+        "prompt_eval_count": 21,
+        "prompt_eval_duration": 21000000,
+        "eval_count": 310,
+        "eval_duration": 5209000000,
+        "response": "This is the start of a test. I'll provide some sample data and you can try to generate metrics based on it.\n\n**Data:**\n\nLet's say we have a dataset of user interactions with an e-commerce website. The data includes:\n\n| User ID | Product Name | Purchase Date | Quantity | Price |\n| --- | --- | --- | --- | --- |\n| 1 | iPhone 13 | 2022-01-01 | 2 | 999.99 |\n| 1 | MacBook Air | 2022-01-05 | 1 | 1299.99 |\n| 2 | Samsung TV | 2022-01-10 | 3 | 899.99 |\n| 3 | iPhone 13 | 2022-01-15 | 1 | 999.99 |\n| 4 | MacBook Pro | 2022-01-20 | 2 | 1799.99 |\n\n**Task:**\n\nYour task is to generate the following metrics based on this data:\n\n1. Average order value (AOV)\n2. Conversion rate\n3. Average revenue per user (ARPU)\n4. Customer lifetime value (CLV)\n\nPlease provide your answers in a format like this:\n\n| Metric | Value |\n| --- | --- |\n| AOV | 1234.56 |\n| Conversion Rate | 0.25 |\n| ARPU | 1000.00 |\n| CLV | 5000.00 |\n\nGo ahead and generate the metrics!",
+        "thinking": null,
+        "context": null
+      }
+    },
+    "is_streaming": false
+  }
+}
```
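Assuming the usual mapping of Ollama's counters to Llama Stack token metrics (`prompt_eval_count` → `prompt_tokens`, `eval_count` → `completion_tokens`), replaying this recording would emit:

```python
# Expected token metrics for this recording, under the assumed mapping above.
prompt_tokens = 21                                # "prompt_eval_count"
completion_tokens = 310                           # "eval_count"
total_tokens = prompt_tokens + completion_tokens  # 331
```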
