Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
685 changes: 685 additions & 0 deletions tests/trace_server/query_builder/test_feedback_stats.py

Large diffs are not rendered by default.

20 changes: 16 additions & 4 deletions weave/trace_server/calls_query_builder/stats_query_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,13 +39,21 @@ class StatsQueryTimeBounds:


@dataclass(frozen=True)
class StatsQueryBuildResult:
class SqlQueryResult:
"""Base result for parameterized SQL queries."""

sql: str
columns: list[str]
parameters: dict[str, Any]
granularity_seconds: int
start: datetime.datetime
end: datetime.datetime


@dataclass(frozen=True)
class StatsQueryBuildResult(SqlQueryResult):
    """Query result with time-bucketed granularity metadata."""

    # Sentinel defaults (0 / datetime.min) make the dataclass constructible
    # when bucket metadata is absent. NOTE(review): presumably callers that
    # build bucketed stats always supply real values — confirm no consumer
    # treats datetime.min as a valid bound.
    granularity_seconds: int = 0
    start: datetime.datetime = datetime.datetime.min
    end: datetime.datetime = datetime.datetime.min


def auto_select_granularity_seconds(delta: datetime.timedelta) -> int:
Expand Down Expand Up @@ -142,6 +150,10 @@ def aggregation_selects_for_metric(
results.append((f"maxOrNull({col})", f"max_{metric}"))
elif agg == AggregationType.COUNT:
results.append((f"countOrNull({col})", f"count_{metric}"))
elif agg == AggregationType.COUNT_TRUE:
results.append((f"countIf({col} = 1)", f"count_true_{metric}"))
elif agg == AggregationType.COUNT_FALSE:
results.append((f"countIf({col} = 0)", f"count_false_{metric}"))
else:
raise ValueError(f"Unsupported aggregation type: {agg}")

Expand Down
22 changes: 22 additions & 0 deletions weave/trace_server/clickhouse_trace_server_batched.py
Original file line number Diff line number Diff line change
Expand Up @@ -152,6 +152,12 @@
resolve_and_apply_prompt,
)
from weave.trace_server.methods.evaluation_status import evaluation_status
from weave.trace_server.methods.feedback_stats import (
feedback_payload_schema as feedback_payload_schema_handler,
)
from weave.trace_server.methods.feedback_stats import (
feedback_stats as feedback_stats_handler,
)
from weave.trace_server.model_providers.model_providers import (
VERTEX_PROVIDER_NAMES,
LLMModelProviderInfo,
Expand Down Expand Up @@ -223,6 +229,7 @@
# num_pools: Number of distinct connection pools (for different hosts/configs)
_CH_POOL_MANAGER = get_pool_manager(maxsize=50, num_pools=2)


# Precomputed list of (column_index, field_name) for every sentinel field that appears
# in ALL_CALL_COMPLETE_INSERT_COLUMNS. Used by _insert_call_complete_batch to enforce
# sentinel conversion as a last line of defense — preventing "Invalid None value in
Expand Down Expand Up @@ -1131,6 +1138,21 @@ def call_stats(self, req: tsi.CallStatsReq) -> tsi.CallStatsRes:
call_buckets=call_buckets,
)

def feedback_stats(self, req: tsi.FeedbackStatsReq) -> tsi.FeedbackStatsRes:
    """Return aggregated feedback statistics over time buckets.

    Extracts numeric values from payload_dump via json_path and aggregates
    by time bucket. Filters by project_id, optional feedback_type and trigger_ref.
    Also includes window-level stats (min, max, avg, percentiles) over the full range.
    """
    # Thin delegation: the actual query building/execution lives in the
    # module-level handler (imported as feedback_stats_handler).
    return feedback_stats_handler(self, req)

def feedback_payload_schema(
    self, req: tsi.FeedbackPayloadSchemaReq
) -> tsi.FeedbackPayloadSchemaRes:
    """Discover feedback payload schema from sample rows.

    Thin delegation to the module-level handler (imported as
    feedback_payload_schema_handler).
    """
    return feedback_payload_schema_handler(self, req)

@ddtrace.tracer.wrap(name="clickhouse_trace_server_batched.trace_usage")
def trace_usage(self, req: tsi.TraceUsageReq) -> tsi.TraceUsageRes:
"""Compute per-call usage for a trace, with descendant rollup.
Expand Down
10 changes: 10 additions & 0 deletions weave/trace_server/external_to_internal_trace_server_adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -381,6 +381,16 @@ def feedback_replace(self, req: tsi.FeedbackReplaceReq) -> tsi.FeedbackReplaceRe
res.wb_user_id = original_user_id
return res

def feedback_stats(self, req: tsi.FeedbackStatsReq) -> tsi.FeedbackStatsRes:
    """Map the external project id to its internal form, then delegate."""
    req.project_id = self._idc.ext_to_int_project_id(req.project_id)
    return self._ref_apply(self._internal_trace_server.feedback_stats, req)

def feedback_payload_schema(
    self, req: tsi.FeedbackPayloadSchemaReq
) -> tsi.FeedbackPayloadSchemaRes:
    """Map the external project id to its internal form, then delegate."""
    req.project_id = self._idc.ext_to_int_project_id(req.project_id)
    return self._ref_apply(self._internal_trace_server.feedback_payload_schema, req)

def cost_create(self, req: tsi.CostCreateReq) -> tsi.CostCreateRes:
    """Map the external project id to its internal form, then delegate."""
    req.project_id = self._idc.ext_to_int_project_id(req.project_id)
    return self._ref_apply(self._internal_trace_server.cost_create, req)
Expand Down
152 changes: 152 additions & 0 deletions weave/trace_server/feedback_payload_schema.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,152 @@
"""Feedback payload schema discovery from sample rows."""

from __future__ import annotations

import datetime
import json
import logging
from collections import defaultdict
from typing import Any

from weave.trace_server.calls_query_builder.utils import param_slot, safely_format_sql
from weave.trace_server.feedback_stats_query_builder import (
JSON_PATH_PATTERN,
trigger_ref_where_clause,
)
from weave.trace_server.orm import ParamBuilder
from weave.trace_server.trace_server_interface import (
FeedbackPayloadPath,
FeedbackPayloadSchemaReq,
)

logger = logging.getLogger(__name__)


def _discover_paths(obj: Any, prefix: str = "") -> dict[str, set[type]]:
"""Recursively discover leaf paths and collect value types.

Returns:
Mapping from dot path to set of Python types seen at that path.
"""
out: dict[str, set[type]] = defaultdict(set)
if obj is None:
return out
if isinstance(obj, dict):
for k, v in obj.items():
if not isinstance(k, str) or "." in k or not k.strip():
continue
path = f"{prefix}.{k}" if prefix else k
if isinstance(v, (dict, list)) and v is not None:
out.update(_discover_paths(v, path))
else:
out[path].add(type(v))
return out
if isinstance(obj, list):
for i, v in enumerate(obj):
if isinstance(v, (dict, list)) and v is not None:
out.update(_discover_paths(v, f"{prefix}[{i}]"))
else:
out[prefix].add(type(v))
return out
out[prefix].add(type(obj))
return out


def _infer_value_type(types_seen: set[type]) -> str:
"""Infer value_type from observed Python types."""
if not types_seen:
return "numeric"
# bool must be checked before int (bool is subclass of int)
if types_seen <= {bool, type(None)}:
return "boolean"
if types_seen <= {int, float, type(None)}:
return "numeric"
return "categorical"


def discover_payload_schema(payload_strs: list[str]) -> list[FeedbackPayloadPath]:
    """Discover schema from raw payload JSON strings.

    Parses each string as JSON, recursively discovers leaf paths, infers
    value_type from observed types, and returns unique paths.

    Args:
        payload_strs: List of JSON strings (payload_dump from feedback rows).

    Returns:
        Sorted list of FeedbackPayloadPath, deduplicated by json_path.

    Examples:
        >>> discover_payload_schema(['{"output": {"score": 0.9}}'])
        [FeedbackPayloadPath(json_path='output.score', value_type='numeric')]
    """
    observed: dict[str, set[type]] = defaultdict(set)
    for raw in payload_strs:
        if not raw or not raw.strip():
            continue
        try:
            parsed = json.loads(raw)
        except json.JSONDecodeError:
            continue
        for json_path, leaf_types in _discover_paths(parsed).items():
            # Array-index paths (e.g. "a[0]") are excluded from the schema.
            if "[" in json_path:
                continue
            # Drop paths feedback_stats cannot query (e.g. spaces).
            if not JSON_PATH_PATTERN.match(json_path):
                continue
            observed[json_path].update(leaf_types)

    return [
        FeedbackPayloadPath(
            json_path=json_path,
            value_type=_infer_value_type(observed[json_path]),
        )
        for json_path in sorted(observed)
    ]


def _to_utc_epoch(dt: datetime.datetime) -> float:
    """Convert a datetime to a UTC epoch timestamp.

    Naive datetimes are interpreted as UTC. Timezone-aware datetimes are
    CONVERTED via .timestamp() — not reinterpreted with replace(tzinfo=...),
    which would treat a non-UTC wall-clock time as if it were UTC and shift
    the instant by the offset.
    """
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=datetime.timezone.utc)
    return dt.timestamp()


def build_feedback_payload_sample_query(
    req: FeedbackPayloadSchemaReq,
    pb: ParamBuilder,
) -> tuple[str, dict[str, Any]]:
    """Build parameterized ClickHouse SQL to fetch sample payload_dump.

    Uses same filters as feedback_stats (project_id, created_at, feedback_type,
    trigger_ref). Returns one payload per unique trigger_ref (most recent per
    ref), since each trigger_ref has a unique payload schema.

    Args:
        req: Request carrying project_id, optional start/end bounds,
            optional feedback_type / trigger_ref filters, and sample_limit.
        pb: Parameter builder accumulating query parameters.

    Returns:
        Tuple of (formatted SQL string, parameter dict).
    """
    now_utc = datetime.datetime.now(datetime.timezone.utc)
    start = req.start
    # Open-ended range defaults to "now" for the exclusive upper bound.
    end = req.end if req.end is not None else now_utc
    limit = req.sample_limit

    project_param = pb.add_param(req.project_id)
    # Fix: previously start/end had tzinfo overwritten with UTC before
    # .timestamp(), which corrupted aware non-UTC datetimes. Convert properly.
    start_param = pb.add_param(_to_utc_epoch(start))
    end_param = pb.add_param(_to_utc_epoch(end))
    limit_param = pb.add_param(limit)

    where_clauses: list[str] = [
        f"project_id = {param_slot(project_param, 'String')}",
        f"created_at >= toDateTime({param_slot(start_param, 'Float64')}, 'UTC')",
        f"created_at < toDateTime({param_slot(end_param, 'Float64')}, 'UTC')",
        "payload_dump != ''",
        "payload_dump IS NOT NULL",
    ]
    if req.feedback_type is not None:
        feedback_type_param = pb.add_param(req.feedback_type)
        where_clauses.append(
            f"feedback_type = {param_slot(feedback_type_param, 'String')}"
        )
    if req.trigger_ref is not None:
        where_clauses.append(trigger_ref_where_clause(req.trigger_ref, pb))
    where_sql = " AND ".join(where_clauses)

    # argMax picks the most recent payload per trigger_ref.
    raw_sql = f"""
    SELECT argMax(payload_dump, created_at) AS payload_sample
    FROM feedback
    WHERE {where_sql}
    GROUP BY trigger_ref
    LIMIT {param_slot(limit_param, "Int64")}
    """
    return safely_format_sql(raw_sql, logger), pb.get_params()
Loading