Skip to content

Commit aeca65d

Browse files
authored
Merge branch 'main' into fix/anthropic-stream-helper-auto-instrumentation
2 parents b798263 + e5a81cb commit aeca65d

File tree

9 files changed

+48
-369
lines changed

9 files changed

+48
-369
lines changed

.github/workflows/agent-automations.yaml

Lines changed: 0 additions & 125 deletions
This file was deleted.
Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,54 +1,54 @@
11
[
22
{
33
"hasParent": false,
4-
"name": "ai.generateText.doGenerate"
4+
"name": "ai.generateText"
55
},
66
{
77
"hasParent": false,
88
"name": "ai.generateText"
99
},
1010
{
1111
"hasParent": false,
12-
"name": "ai.streamText.doStream"
12+
"name": "ai.generateText.doGenerate"
1313
},
1414
{
1515
"hasParent": false,
16-
"name": "ai.streamText"
16+
"name": "ai.generateText.doGenerate"
1717
},
1818
{
1919
"hasParent": false,
2020
"name": "ai.generateText.doGenerate"
2121
},
2222
{
2323
"hasParent": false,
24-
"name": "ai.toolCall"
24+
"name": "ai.generateText.doGenerate"
2525
},
2626
{
2727
"hasParent": false,
2828
"name": "ai.generateText.doGenerate"
2929
},
3030
{
3131
"hasParent": false,
32-
"name": "ai.toolCall"
32+
"name": "ai.streamText"
3333
},
3434
{
3535
"hasParent": false,
36-
"name": "ai.generateText.doGenerate"
36+
"name": "ai.streamText.doStream"
3737
},
3838
{
3939
"hasParent": false,
4040
"name": "ai.toolCall"
4141
},
4242
{
4343
"hasParent": false,
44-
"name": "ai.generateText.doGenerate"
44+
"name": "ai.toolCall"
4545
},
4646
{
4747
"hasParent": false,
4848
"name": "ai.toolCall"
4949
},
5050
{
5151
"hasParent": false,
52-
"name": "ai.generateText"
52+
"name": "ai.toolCall"
5353
}
5454
]
Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,54 +1,54 @@
11
[
22
{
33
"hasParent": false,
4-
"name": "ai.generateText.doGenerate"
4+
"name": "ai.generateText"
55
},
66
{
77
"hasParent": false,
88
"name": "ai.generateText"
99
},
1010
{
1111
"hasParent": false,
12-
"name": "ai.streamText.doStream"
12+
"name": "ai.generateText.doGenerate"
1313
},
1414
{
1515
"hasParent": false,
16-
"name": "ai.streamText"
16+
"name": "ai.generateText.doGenerate"
1717
},
1818
{
1919
"hasParent": false,
2020
"name": "ai.generateText.doGenerate"
2121
},
2222
{
2323
"hasParent": false,
24-
"name": "ai.toolCall"
24+
"name": "ai.generateText.doGenerate"
2525
},
2626
{
2727
"hasParent": false,
2828
"name": "ai.generateText.doGenerate"
2929
},
3030
{
3131
"hasParent": false,
32-
"name": "ai.toolCall"
32+
"name": "ai.streamText"
3333
},
3434
{
3535
"hasParent": false,
36-
"name": "ai.generateText.doGenerate"
36+
"name": "ai.streamText.doStream"
3737
},
3838
{
3939
"hasParent": false,
4040
"name": "ai.toolCall"
4141
},
4242
{
4343
"hasParent": false,
44-
"name": "ai.generateText.doGenerate"
44+
"name": "ai.toolCall"
4545
},
4646
{
4747
"hasParent": false,
4848
"name": "ai.toolCall"
4949
},
5050
{
5151
"hasParent": false,
52-
"name": "ai.generateText"
52+
"name": "ai.toolCall"
5353
}
5454
]

e2e/scenarios/ai-sdk-otel-export/scenario.test.ts

Lines changed: 11 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -119,10 +119,17 @@ for (const scenario of scenarios) {
119119

120120
// Snapshot the span names and key structure (not full payloads, since
121121
// response content and token counts are non-deterministic).
122-
const spanSummary = allSpans.map((span) => ({
123-
hasParent: !!span.parentSpanId,
124-
name: span.name,
125-
}));
122+
// Sort for determinism — OTel spans arrive in non-deterministic order.
123+
const spanSummary = allSpans
124+
.map((span) => ({
125+
hasParent: !!span.parentSpanId,
126+
name: span.name,
127+
}))
128+
.sort((a, b) =>
129+
a.name !== b.name
130+
? a.name.localeCompare(b.name)
131+
: Number(a.hasParent) - Number(b.hasParent),
132+
);
126133
await expect(formatJsonFileSnapshot(spanSummary)).toMatchFileSnapshot(
127134
resolveFileSnapshotPath(
128135
import.meta.url,

e2e/scenarios/anthropic-instrumentation/scenario.test.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ import { defineAnthropicInstrumentationAssertions } from "./assertions";
99
const scenarioDir = await prepareScenarioDir({
1010
scenarioDir: resolveScenarioDir(import.meta.url),
1111
});
12-
const TIMEOUT_MS = 90_000;
12+
const TIMEOUT_MS = 150_000;
1313
const anthropicScenarios = await Promise.all(
1414
[
1515
{

e2e/scenarios/wrap-langchain-js-traces/__snapshots__/log-payloads.json

Lines changed: 0 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -126,10 +126,6 @@
126126
"ls_model_type": "chat",
127127
"ls_provider": "openai",
128128
"ls_temperature": 0,
129-
"max_tokens": 16,
130-
"model": "gpt-4o-mini",
131-
"stream": false,
132-
"temperature": 0,
133129
"versions": {
134130
"@langchain/core": "<langchain-version>",
135131
"@langchain/openai": "<langchain-version>"
@@ -704,10 +700,6 @@
704700
"ls_model_type": "chat",
705701
"ls_provider": "openai",
706702
"ls_temperature": 0,
707-
"max_tokens": 32,
708-
"model": "gpt-4o-mini",
709-
"stream": false,
710-
"temperature": 0,
711703
"versions": {
712704
"@langchain/core": "<langchain-version>",
713705
"@langchain/openai": "<langchain-version>"
@@ -973,13 +965,6 @@
973965
"ls_model_type": "chat",
974966
"ls_provider": "openai",
975967
"ls_temperature": 0,
976-
"max_tokens": 32,
977-
"model": "gpt-4o-mini",
978-
"stream": true,
979-
"stream_options": {
980-
"include_usage": true
981-
},
982-
"temperature": 0,
983968
"versions": {
984969
"@langchain/core": "<langchain-version>",
985970
"@langchain/openai": "<langchain-version>"
@@ -1265,10 +1250,6 @@
12651250
"ls_model_type": "chat",
12661251
"ls_provider": "openai",
12671252
"ls_temperature": 0,
1268-
"max_tokens": 128,
1269-
"model": "gpt-4o-mini",
1270-
"stream": false,
1271-
"temperature": 0,
12721253
"versions": {
12731254
"@langchain/core": "<langchain-version>",
12741255
"@langchain/openai": "<langchain-version>"
@@ -1606,10 +1587,6 @@
16061587
"ls_model_type": "chat",
16071588
"ls_provider": "openai",
16081589
"ls_temperature": 0,
1609-
"max_tokens": 128,
1610-
"model": "gpt-4o-mini",
1611-
"stream": false,
1612-
"temperature": 0,
16131590
"versions": {
16141591
"@langchain/core": "<langchain-version>",
16151592
"@langchain/openai": "<langchain-version>"
@@ -1962,10 +1939,6 @@
19621939
"ls_model_type": "chat",
19631940
"ls_provider": "openai",
19641941
"ls_temperature": 0,
1965-
"max_tokens": 128,
1966-
"model": "gpt-4o-mini",
1967-
"stream": false,
1968-
"temperature": 0,
19691942
"versions": {
19701943
"@langchain/core": "<langchain-version>",
19711944
"@langchain/openai": "<langchain-version>"

e2e/scenarios/wrap-langchain-js-traces/assertions.ts

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -137,6 +137,17 @@ function normalizeToolCallIds(obj: unknown): void {
137137
}
138138
}
139139

140+
// Fields that older @langchain/openai included in the ls_* metadata block but
141+
// newer versions removed. Normalize them out so the snapshot is stable across
142+
// both locked and canary (latest) langchain versions.
143+
const LANGCHAIN_LS_VOLATILE_KEYS = new Set([
144+
"max_tokens",
145+
"model",
146+
"stream",
147+
"stream_options",
148+
"temperature",
149+
]);
150+
140151
function normalizeLangchainVersions(obj: unknown): void {
141152
if (!obj || typeof obj !== "object") return;
142153

@@ -161,6 +172,15 @@ function normalizeLangchainVersions(obj: unknown): void {
161172
}
162173
}
163174

175+
// If this object is the ls_* metadata block (identified by the presence of
176+
// any `ls_` key), remove volatile keys that newer langchain drops.
177+
const hasLsKey = Object.keys(record).some((k) => k.startsWith("ls_"));
178+
if (hasLsKey) {
179+
for (const key of LANGCHAIN_LS_VOLATILE_KEYS) {
180+
delete record[key];
181+
}
182+
}
183+
164184
for (const value of Object.values(record)) {
165185
if (typeof value === "object" && value !== null) {
166186
normalizeLangchainVersions(value);

0 commit comments

Comments (0)