Skip to content

Commit de3e86c

Browse files
committed
server/llm: combine langchain implementations in a single file -- add tests
1 parent a602031 commit de3e86c

File tree

10 files changed

+534
-54
lines changed

10 files changed

+534
-54
lines changed

src/.claude/settings.local.json

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,9 @@
1111
"WebFetch(domain:github.com)",
1212
"WebFetch(domain:cocalc.com)",
1313
"WebFetch(domain:doc.cocalc.com)",
14-
"Bash(npm show:*)"
14+
"Bash(npm show:*)",
15+
"Bash(prettier -w:*)"
1516
],
1617
"deny": []
1718
}
18-
}
19+
}

src/packages/server/llm/anthropic.ts

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -100,9 +100,8 @@ export async function evaluateAnthropic(
100100
inputMessagesKey: "input",
101101
historyMessagesKey: "history",
102102
getMessageHistory: async () => {
103-
const { messageHistory, tokens } = await transformHistoryToMessages(
104-
history,
105-
);
103+
const { messageHistory, tokens } =
104+
await transformHistoryToMessages(history);
106105
historyTokens = tokens;
107106
return messageHistory;
108107
},
@@ -117,7 +116,7 @@ export async function evaluateAnthropic(
117116
if (typeof content !== "string") continue;
118117
output += content;
119118
opts.stream?.(content);
120-
119+
121120
// Collect the final result to check for usage metadata
122121
if (finalResult) {
123122
finalResult = concat(finalResult, chunk);
@@ -139,7 +138,7 @@ export async function evaluateAnthropic(
139138
output_tokens,
140139
total_tokens,
141140
});
142-
141+
143142
return {
144143
output,
145144
total_tokens,

src/packages/server/llm/call-llm.ts

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,11 +2,13 @@ import { delay } from "awaiting";
22
import type OpenAI from "openai";
33
import getLogger from "@cocalc/backend/logger";
44
import { OpenAIMessages, OpenAIModel } from "@cocalc/util/db-schema/llm-utils";
5-
import type { ChatOutput, Stream as StreamFunction } from "@cocalc/util/types/llm";
5+
import type {
6+
ChatOutput,
7+
Stream as StreamFunction,
8+
} from "@cocalc/util/types/llm";
69
import { totalNumTokens } from "./chatgpt-numtokens";
710
import type { Stream } from "openai/streaming";
811

9-
1012
const log = getLogger("llm:call-llm");
1113

1214
interface CallChatGPTOpts {

src/packages/server/llm/custom-openai.ts

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ export async function evaluateCustomOpenAI(
5353

5454
const prompt = ChatPromptTemplate.fromMessages([
5555
["system", system ?? ""],
56-
new MessagesPlaceholder("chat_history"),
56+
new MessagesPlaceholder("history"),
5757
["human", "{input}"],
5858
]);
5959

@@ -65,11 +65,10 @@ export async function evaluateCustomOpenAI(
6565
runnable: chain,
6666
config: { configurable: { sessionId: "ignored" } },
6767
inputMessagesKey: "input",
68-
historyMessagesKey: "chat_history",
68+
historyMessagesKey: "history",
6969
getMessageHistory: async () => {
70-
const { messageHistory, tokens } = await transformHistoryToMessages(
71-
history,
72-
);
70+
const { messageHistory, tokens } =
71+
await transformHistoryToMessages(history);
7372
historyTokens = tokens;
7473
return messageHistory;
7574
},
@@ -86,7 +85,7 @@ export async function evaluateCustomOpenAI(
8685
}
8786
output += content;
8887
opts.stream?.(content);
89-
88+
9089
// Collect the final result to check for usage metadata
9190
if (finalResult) {
9291
finalResult = concat(finalResult, chunk);
@@ -109,7 +108,7 @@ export async function evaluateCustomOpenAI(
109108
output_tokens,
110109
total_tokens,
111110
});
112-
111+
113112
return {
114113
output,
115114
total_tokens,

0 commit comments

Comments (0)