Skip to content

Commit c0d3f7d

Browse files
Qard authored and claude committed
fix(e2e): stabilize canary tests for langchain version drift and anthropic timeout
Normalize invocation-parameter fields (max_tokens, model, stream, temperature) that older @langchain/openai included alongside the standardized ls_* metadata keys but newer versions removed. Dropping these from the snapshot makes the wrap-langchain-js-traces canary test stable across both the locked and latest langchain versions. Also increase the anthropic-instrumentation scenario timeout from 90s to 150s to accommodate the additional messages.batches API calls (create, retrieve, list, cancel) that will be exercised once the batches PR merges. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
1 parent c2d24c1 commit c0d3f7d

File tree

3 files changed

+20
-25
lines changed

3 files changed

+20
-25
lines changed

e2e/scenarios/anthropic-instrumentation/scenario.test.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ import { defineAnthropicInstrumentationAssertions } from "./assertions";
99
const scenarioDir = await prepareScenarioDir({
1010
scenarioDir: resolveScenarioDir(import.meta.url),
1111
});
12-
const TIMEOUT_MS = 90_000;
12+
const TIMEOUT_MS = 150_000;
1313
const anthropicScenarios = await Promise.all(
1414
[
1515
{

e2e/scenarios/wrap-langchain-js-traces/__snapshots__/log-payloads.json

Lines changed: 0 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -126,10 +126,6 @@
126126
"ls_model_type": "chat",
127127
"ls_provider": "openai",
128128
"ls_temperature": 0,
129-
"max_tokens": 16,
130-
"model": "gpt-4o-mini",
131-
"stream": false,
132-
"temperature": 0,
133129
"versions": {
134130
"@langchain/core": "<langchain-version>",
135131
"@langchain/openai": "<langchain-version>"
@@ -704,10 +700,6 @@
704700
"ls_model_type": "chat",
705701
"ls_provider": "openai",
706702
"ls_temperature": 0,
707-
"max_tokens": 32,
708-
"model": "gpt-4o-mini",
709-
"stream": false,
710-
"temperature": 0,
711703
"versions": {
712704
"@langchain/core": "<langchain-version>",
713705
"@langchain/openai": "<langchain-version>"
@@ -973,13 +965,9 @@
973965
"ls_model_type": "chat",
974966
"ls_provider": "openai",
975967
"ls_temperature": 0,
976-
"max_tokens": 32,
977-
"model": "gpt-4o-mini",
978-
"stream": true,
979968
"stream_options": {
980969
"include_usage": true
981970
},
982-
"temperature": 0,
983971
"versions": {
984972
"@langchain/core": "<langchain-version>",
985973
"@langchain/openai": "<langchain-version>"
@@ -1265,10 +1253,6 @@
12651253
"ls_model_type": "chat",
12661254
"ls_provider": "openai",
12671255
"ls_temperature": 0,
1268-
"max_tokens": 128,
1269-
"model": "gpt-4o-mini",
1270-
"stream": false,
1271-
"temperature": 0,
12721256
"versions": {
12731257
"@langchain/core": "<langchain-version>",
12741258
"@langchain/openai": "<langchain-version>"
@@ -1606,10 +1590,6 @@
16061590
"ls_model_type": "chat",
16071591
"ls_provider": "openai",
16081592
"ls_temperature": 0,
1609-
"max_tokens": 128,
1610-
"model": "gpt-4o-mini",
1611-
"stream": false,
1612-
"temperature": 0,
16131593
"versions": {
16141594
"@langchain/core": "<langchain-version>",
16151595
"@langchain/openai": "<langchain-version>"
@@ -1962,10 +1942,6 @@
19621942
"ls_model_type": "chat",
19631943
"ls_provider": "openai",
19641944
"ls_temperature": 0,
1965-
"max_tokens": 128,
1966-
"model": "gpt-4o-mini",
1967-
"stream": false,
1968-
"temperature": 0,
19691945
"versions": {
19701946
"@langchain/core": "<langchain-version>",
19711947
"@langchain/openai": "<langchain-version>"

e2e/scenarios/wrap-langchain-js-traces/assertions.ts

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -137,6 +137,16 @@ function normalizeToolCallIds(obj: unknown): void {
137137
}
138138
}
139139

140+
// Fields that older @langchain/openai included in the ls_* metadata block but
141+
// newer versions removed. Normalize them out so the snapshot is stable across
142+
// both locked and canary (latest) langchain versions.
143+
const LANGCHAIN_LS_VOLATILE_KEYS = new Set([
144+
"max_tokens",
145+
"model",
146+
"stream",
147+
"temperature",
148+
]);
149+
140150
function normalizeLangchainVersions(obj: unknown): void {
141151
if (!obj || typeof obj !== "object") return;
142152

@@ -161,6 +171,15 @@ function normalizeLangchainVersions(obj: unknown): void {
161171
}
162172
}
163173

174+
// If this object is the ls_* metadata block (identified by the presence of
175+
// any `ls_` key), remove volatile keys that newer langchain drops.
176+
const hasLsKey = Object.keys(record).some((k) => k.startsWith("ls_"));
177+
if (hasLsKey) {
178+
for (const key of LANGCHAIN_LS_VOLATILE_KEYS) {
179+
delete record[key];
180+
}
181+
}
182+
164183
for (const value of Object.values(record)) {
165184
if (typeof value === "object" && value !== null) {
166185
normalizeLangchainVersions(value);

0 commit comments

Comments (0)