From b86ed2373b5b68383c1991c59c6a4c560c27679e Mon Sep 17 00:00:00 2001
From: Marc Muller <153335452+datainvest23@users.noreply.github.com>
Date: Wed, 24 Sep 2025 02:46:30 +0200
Subject: [PATCH 1/2] Streamline Next.js chat example
---
examples/next-chat/.env.local.example | 5 +
examples/next-chat/README.md | 49 ++
examples/next-chat/app/api/chat/route.ts | 65 +++
examples/next-chat/app/globals.css | 462 ++++++++++++++++++
examples/next-chat/app/layout.tsx | 17 +
examples/next-chat/app/page.tsx | 566 +++++++++++++++++++++++
examples/next-chat/lib/models.ts | 41 ++
examples/next-chat/lib/tools.ts | 82 ++++
examples/next-chat/next-env.d.ts | 5 +
examples/next-chat/next.config.mjs | 6 +
examples/next-chat/package.json | 26 ++
examples/next-chat/tsconfig.json | 19 +
pnpm-workspace.yaml | 1 +
13 files changed, 1344 insertions(+)
create mode 100644 examples/next-chat/.env.local.example
create mode 100644 examples/next-chat/README.md
create mode 100644 examples/next-chat/app/api/chat/route.ts
create mode 100644 examples/next-chat/app/globals.css
create mode 100644 examples/next-chat/app/layout.tsx
create mode 100644 examples/next-chat/app/page.tsx
create mode 100644 examples/next-chat/lib/models.ts
create mode 100644 examples/next-chat/lib/tools.ts
create mode 100644 examples/next-chat/next-env.d.ts
create mode 100644 examples/next-chat/next.config.mjs
create mode 100644 examples/next-chat/package.json
create mode 100644 examples/next-chat/tsconfig.json
diff --git a/examples/next-chat/.env.local.example b/examples/next-chat/.env.local.example
new file mode 100644
index 0000000..bcdf210
--- /dev/null
+++ b/examples/next-chat/.env.local.example
@@ -0,0 +1,5 @@
+# Required: obtain an API key from https://openrouter.ai/keys
+OPENROUTER_API_KEY=sk-or-...
+
+# Optional: override the base URL if you are pointing at a proxy.
+# OPENROUTER_BASE_URL=https://openrouter.ai/api/v1
diff --git a/examples/next-chat/README.md b/examples/next-chat/README.md
new file mode 100644
index 0000000..76a99cc
--- /dev/null
+++ b/examples/next-chat/README.md
@@ -0,0 +1,49 @@
+# OpenRouter Next.js Chat Example
+
+This example demonstrates how to build a streaming chat experience in Next.js using the
+[`@openrouter/ai-sdk-provider`](https://www.npmjs.com/package/@openrouter/ai-sdk-provider)
+and the Vercel AI SDK. The UI lets you:
+
+- pick an OpenRouter model
+- toggle tool usage on or off
+- watch streaming assistant replies
+- inspect tool invocations and their inputs/outputs in real time
+
+## Getting Started
+
+1. Install dependencies:
+
+ ```bash
+ pnpm install
+ ```
+
+ > **Note:** the example is part of the monorepo. You can also `cd examples/next-chat`
+ > and run `pnpm install` followed by `pnpm dev`.
+
+2. Copy the example environment file and add your OpenRouter key:
+
+ ```bash
+ cp examples/next-chat/.env.local.example examples/next-chat/.env.local
+ ```
+
+ At minimum you need `OPENROUTER_API_KEY`. Set `OPENROUTER_BASE_URL` if you proxy requests.
+
+3. Start the development server:
+
+ ```bash
+ pnpm --filter @openrouter/examples-next-chat dev
+ ```
+
+ Visit `http://localhost:3000` to try the chat experience.
+
+## How It Works
+
+- `app/api/chat/route.ts` configures the OpenRouter provider, streams responses with tools, and
+ returns AI SDK UI message streams.
+- `app/page.tsx` implements a small client-side state machine that consumes the stream, renders
+ messages, and keeps track of tool invocations.
+- `lib/tools.ts` defines two sample tools (`getCurrentWeather` and `getCurrentTime`). You can add
+ your own tools or wire in real data sources (see the sketch below).
+
+This example is intentionally lightweight so you can adapt it for your own projects.
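+
+For instance, a custom tool in `lib/tools.ts` follows the same shape as the bundled ones.
+The `lookupOrder` name, schema, and canned result below are illustrative placeholders;
+swap in your real data source.
+
+```ts
+import { tool } from 'ai';
+import { z } from 'zod';
+
+// Hypothetical tool: resolve an order ID against a backend (stubbed here).
+export const lookupOrder = tool({
+  description: 'Look up the status of an order by its ID.',
+  inputSchema: z.object({
+    orderId: z.string().min(1),
+  }),
+  execute: async ({ orderId }) => ({
+    orderId,
+    status: 'shipped', // placeholder value; query your own API here
+  }),
+});
+
+// Then register it alongside the existing tools:
+// export const BASIC_TOOLS = { getCurrentWeather, getCurrentTime, lookupOrder };
+```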
diff --git a/examples/next-chat/app/api/chat/route.ts b/examples/next-chat/app/api/chat/route.ts
new file mode 100644
index 0000000..cf55968
--- /dev/null
+++ b/examples/next-chat/app/api/chat/route.ts
@@ -0,0 +1,65 @@
+import { createOpenRouter } from '@openrouter/ai-sdk-provider';
+import type { ModelMessage } from 'ai';
+import { streamText } from 'ai';
+
+import { BASIC_TOOLS } from '../../../lib/tools';
+import { DEFAULT_SYSTEM_PROMPT } from '../../../lib/models';
+
+interface ChatRequestBody {
+ modelId: string;
+ toolMode?: 'auto' | 'disabled';
+ messages: ModelMessage[];
+}
+
+const openrouter = createOpenRouter({
+ compatibility: 'strict',
+ baseURL: process.env.OPENROUTER_BASE_URL ?? process.env.OPENROUTER_API_BASE,
+});
+
+function normalizeToolMode(toolMode: ChatRequestBody['toolMode']) {
+ return toolMode === 'disabled' ? 'disabled' : 'auto';
+}
+
+export async function POST(request: Request) {
+ const apiKey = process.env.OPENROUTER_API_KEY;
+ if (!apiKey) {
+ return Response.json(
+ { error: 'Missing OPENROUTER_API_KEY environment variable.' },
+ { status: 500 },
+ );
+ }
+
+ let body: ChatRequestBody;
+ try {
+ body = (await request.json()) as ChatRequestBody;
+ } catch (_error) {
+ return Response.json({ error: 'Invalid JSON payload.' }, { status: 400 });
+ }
+
+ if (!body || typeof body.modelId !== 'string') {
+ return Response.json({ error: 'Request must include a modelId string.' }, { status: 400 });
+ }
+
+ if (!Array.isArray(body.messages) || body.messages.some((message) => typeof message !== 'object')) {
+ return Response.json({ error: 'Messages must be an array of chat messages.' }, { status: 400 });
+ }
+
+ const toolMode = normalizeToolMode(body.toolMode);
+ const shouldExposeTools = toolMode !== 'disabled';
+
+ try {
+ const result = streamText({
+ model: openrouter(body.modelId),
+ system: DEFAULT_SYSTEM_PROMPT,
+ messages: body.messages,
+ tools: shouldExposeTools ? BASIC_TOOLS : undefined,
+ toolChoice: shouldExposeTools ? 'auto' : 'none',
+ });
+
+ return result.toUIMessageStreamResponse();
+ } catch (error) {
+ const errorMessage =
+ error instanceof Error ? error.message : 'Unknown error while contacting OpenRouter.';
+ return Response.json({ error: errorMessage }, { status: 500 });
+ }
+}
diff --git a/examples/next-chat/app/globals.css b/examples/next-chat/app/globals.css
new file mode 100644
index 0000000..eedeb41
--- /dev/null
+++ b/examples/next-chat/app/globals.css
@@ -0,0 +1,462 @@
+:root {
+ color-scheme: light dark;
+ font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
+ line-height: 1.5;
+ --surface-light: #ffffff;
+ --surface-dark: #0f172a;
+ --background-light: #f5f6fb;
+ --background-dark: #020617;
+ --text-muted-light: rgba(15, 23, 42, 0.65);
+ --text-muted-dark: rgba(226, 232, 240, 0.75);
+}
+
+* {
+ box-sizing: border-box;
+}
+
+body {
+ margin: 0;
+ min-height: 100vh;
+ background-color: var(--background-light);
+ color: #0f172a;
+}
+
+@media (prefers-color-scheme: dark) {
+ body {
+ background-color: var(--background-dark);
+ color: #f8fafc;
+ }
+}
+
+main {
+ min-height: 100vh;
+ display: flex;
+ justify-content: center;
+ padding: 2rem 1.5rem 3rem;
+}
+
+.container {
+ width: min(960px, 100%);
+ display: flex;
+ flex-direction: column;
+ gap: 1.5rem;
+}
+
+header {
+ text-align: center;
+}
+
+header h1 {
+ margin: 0;
+ font-size: clamp(1.8rem, 2.4vw + 1rem, 2.4rem);
+}
+
+.subtitle {
+ margin: 0.5rem 0 0;
+ color: var(--text-muted-light);
+}
+
+@media (prefers-color-scheme: dark) {
+ .subtitle {
+ color: var(--text-muted-dark);
+ }
+}
+
+.controls {
+ display: grid;
+ gap: 1rem;
+ grid-template-columns: repeat(auto-fit, minmax(220px, 1fr));
+}
+
+.control {
+ background-color: rgba(255, 255, 255, 0.88);
+ border: 1px solid rgba(15, 23, 42, 0.08);
+ border-radius: 12px;
+ padding: 1rem;
+ display: flex;
+ flex-direction: column;
+ gap: 0.5rem;
+ box-shadow: 0 12px 30px rgba(15, 23, 42, 0.08);
+}
+
+@media (prefers-color-scheme: dark) {
+ .control {
+ background-color: rgba(15, 23, 42, 0.55);
+ border-color: rgba(148, 163, 184, 0.18);
+ box-shadow: 0 12px 30px rgba(0, 0, 0, 0.45);
+ }
+}
+
+label,
+.label {
+ font-weight: 600;
+ font-size: 0.8rem;
+ letter-spacing: 0.05em;
+ text-transform: uppercase;
+ color: rgba(15, 23, 42, 0.7);
+}
+
+@media (prefers-color-scheme: dark) {
+ label,
+ .label {
+ color: rgba(226, 232, 240, 0.85);
+ }
+}
+
+select,
+textarea,
+button {
+ font: inherit;
+}
+
+select,
+textarea {
+ width: 100%;
+ border-radius: 10px;
+ border: 1px solid rgba(15, 23, 42, 0.16);
+ padding: 0.6rem 0.75rem;
+ background-color: rgba(255, 255, 255, 0.96);
+ color: inherit;
+ transition: border-color 0.2s ease, box-shadow 0.2s ease;
+}
+
+textarea {
+ min-height: 120px;
+ resize: vertical;
+ line-height: 1.4;
+}
+
+select:focus,
+textarea:focus {
+ outline: none;
+ border-color: #2563eb;
+ box-shadow: 0 0 0 3px rgba(37, 99, 235, 0.25);
+}
+
+select:disabled,
+textarea:disabled {
+ opacity: 0.55;
+ cursor: not-allowed;
+}
+
+@media (prefers-color-scheme: dark) {
+ select,
+ textarea {
+ background-color: rgba(15, 23, 42, 0.78);
+ border-color: rgba(148, 163, 184, 0.2);
+ }
+
+ select:focus,
+ textarea:focus {
+ border-color: #60a5fa;
+ box-shadow: 0 0 0 3px rgba(96, 165, 250, 0.25);
+ }
+}
+
+.badge {
+ display: inline-flex;
+ align-items: center;
+ padding: 0.2rem 0.6rem;
+ border-radius: 999px;
+ font-size: 0.75rem;
+ font-weight: 600;
+ background-color: rgba(37, 99, 235, 0.12);
+ color: #1d4ed8;
+ border: 1px solid rgba(37, 99, 235, 0.18);
+ width: fit-content;
+}
+
+.badge.idle {
+ background-color: rgba(15, 23, 42, 0.08);
+ color: rgba(15, 23, 42, 0.75);
+ border-color: transparent;
+}
+
+.badge.status.collecting {
+ background-color: rgba(16, 185, 129, 0.12);
+ color: #047857;
+ border-color: rgba(16, 185, 129, 0.2);
+}
+
+.badge.status.running,
+.badge.status.complete {
+ background-color: rgba(59, 130, 246, 0.12);
+ color: #1d4ed8;
+ border-color: rgba(59, 130, 246, 0.2);
+}
+
+.badge.status.error {
+ background-color: rgba(239, 68, 68, 0.14);
+ color: #b91c1c;
+ border-color: rgba(239, 68, 68, 0.2);
+}
+
+@media (prefers-color-scheme: dark) {
+ .badge {
+ background-color: rgba(96, 165, 250, 0.18);
+ color: #bfdbfe;
+ border-color: rgba(96, 165, 250, 0.24);
+ }
+
+ .badge.idle {
+ background-color: rgba(148, 163, 184, 0.22);
+ color: rgba(226, 232, 240, 0.85);
+ }
+
+ .badge.status.collecting {
+ background-color: rgba(45, 212, 191, 0.24);
+ color: #5eead4;
+ }
+
+ .badge.status.running,
+ .badge.status.complete {
+ background-color: rgba(96, 165, 250, 0.24);
+ color: #bfdbfe;
+ }
+
+ .badge.status.error {
+ background-color: rgba(248, 113, 113, 0.22);
+ color: #fecaca;
+ }
+}
+
+.primary,
+.secondary {
+ border-radius: 999px;
+ border: none;
+ cursor: pointer;
+ font-weight: 600;
+ padding: 0.55rem 1.2rem;
+ transition: transform 0.15s ease, box-shadow 0.15s ease, opacity 0.15s ease;
+}
+
+.primary {
+ background: linear-gradient(135deg, #2563eb, #4f46e5);
+ color: #fff;
+ box-shadow: 0 10px 24px rgba(37, 99, 235, 0.35);
+}
+
+.primary:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+ box-shadow: none;
+}
+
+.primary:not(:disabled):active {
+ transform: translateY(1px);
+}
+
+.secondary {
+ background-color: transparent;
+ border: 1px solid rgba(15, 23, 42, 0.2);
+ color: inherit;
+}
+
+.secondary:disabled {
+ opacity: 0.55;
+ cursor: not-allowed;
+}
+
+@media (prefers-color-scheme: dark) {
+ .secondary {
+ border-color: rgba(148, 163, 184, 0.35);
+ }
+}
+
+.chat-panel {
+ display: flex;
+ flex-direction: column;
+ gap: 1rem;
+ background-color: rgba(255, 255, 255, 0.92);
+ border: 1px solid rgba(15, 23, 42, 0.08);
+ border-radius: 16px;
+ padding: 1.5rem;
+ box-shadow: 0 24px 48px rgba(15, 23, 42, 0.12);
+}
+
+@media (prefers-color-scheme: dark) {
+ .chat-panel {
+ background-color: rgba(15, 23, 42, 0.65);
+ border-color: rgba(148, 163, 184, 0.2);
+ box-shadow: 0 24px 48px rgba(0, 0, 0, 0.4);
+ }
+}
+
+.alert {
+ padding: 0.75rem 1rem;
+ border-radius: 12px;
+ background-color: rgba(239, 68, 68, 0.14);
+ color: #991b1b;
+ border: 1px solid rgba(239, 68, 68, 0.2);
+}
+
+@media (prefers-color-scheme: dark) {
+ .alert {
+ background-color: rgba(239, 68, 68, 0.22);
+ color: #fecaca;
+ }
+}
+
+.chat-log {
+ display: flex;
+ flex-direction: column;
+ gap: 1rem;
+ padding: 1rem;
+ border-radius: 12px;
+ border: 1px solid rgba(15, 23, 42, 0.08);
+ max-height: 420px;
+ overflow-y: auto;
+ background: rgba(248, 250, 252, 0.85);
+}
+
+@media (prefers-color-scheme: dark) {
+ .chat-log {
+ background: rgba(30, 41, 59, 0.62);
+ border-color: rgba(148, 163, 184, 0.16);
+ }
+}
+
+.empty {
+ margin: 0;
+ color: rgba(15, 23, 42, 0.6);
+}
+
+@media (prefers-color-scheme: dark) {
+ .empty {
+ color: rgba(226, 232, 240, 0.7);
+ }
+}
+
+.message {
+ padding: 1rem;
+ border-radius: 12px;
+ background-color: rgba(255, 255, 255, 0.95);
+ border: 1px solid rgba(15, 23, 42, 0.06);
+ display: flex;
+ flex-direction: column;
+ gap: 0.75rem;
+}
+
+.message.user {
+ border-left: 4px solid rgba(37, 99, 235, 0.6);
+}
+
+.message.assistant {
+ border-left: 4px solid rgba(16, 185, 129, 0.55);
+}
+
+@media (prefers-color-scheme: dark) {
+ .message {
+ background-color: rgba(15, 23, 42, 0.74);
+ border-color: rgba(148, 163, 184, 0.12);
+ }
+
+ .message.user {
+ border-left-color: rgba(96, 165, 250, 0.6);
+ }
+
+ .message.assistant {
+ border-left-color: rgba(45, 212, 191, 0.6);
+ }
+}
+
+.message-header {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ gap: 0.5rem;
+}
+
+.role-label {
+ font-weight: 600;
+ letter-spacing: 0.01em;
+}
+
+.message-text {
+ margin: 0;
+ white-space: pre-wrap;
+ line-height: 1.55;
+}
+
+.tool-list {
+ display: flex;
+ flex-direction: column;
+ gap: 0.75rem;
+}
+
+.tool-card {
+ border-radius: 10px;
+ border: 1px solid rgba(37, 99, 235, 0.18);
+ padding: 0.75rem 0.85rem;
+ background: rgba(37, 99, 235, 0.07);
+ display: flex;
+ flex-direction: column;
+ gap: 0.5rem;
+}
+
+.tool-card-header {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ gap: 0.5rem;
+}
+
+.tool-name {
+ font-weight: 600;
+}
+
+.tool-block {
+ display: flex;
+ flex-direction: column;
+ gap: 0.35rem;
+}
+
+.tool-block pre {
+ margin: 0;
+ padding: 0.6rem;
+ border-radius: 8px;
+ background: rgba(15, 23, 42, 0.08);
+ border: 1px solid rgba(15, 23, 42, 0.1);
+ font-size: 0.8rem;
+ line-height: 1.4;
+ max-height: 200px;
+ overflow: auto;
+ white-space: pre-wrap;
+}
+
+@media (prefers-color-scheme: dark) {
+ .tool-card {
+ background: rgba(96, 165, 250, 0.18);
+ border-color: rgba(96, 165, 250, 0.24);
+ }
+
+ .tool-block pre {
+ background: rgba(15, 23, 42, 0.78);
+ border-color: rgba(148, 163, 184, 0.2);
+ }
+}
+
+.hint {
+ margin: 0;
+ font-size: 0.8rem;
+ color: rgba(15, 23, 42, 0.6);
+}
+
+@media (prefers-color-scheme: dark) {
+ .hint {
+ color: rgba(226, 232, 240, 0.7);
+ }
+}
+
+.chat-form {
+ display: flex;
+ flex-direction: column;
+ gap: 0.75rem;
+}
+
+.actions {
+ display: flex;
+ gap: 0.75rem;
+ justify-content: flex-end;
+ flex-wrap: wrap;
+}
diff --git a/examples/next-chat/app/layout.tsx b/examples/next-chat/app/layout.tsx
new file mode 100644
index 0000000..3c4e069
--- /dev/null
+++ b/examples/next-chat/app/layout.tsx
@@ -0,0 +1,17 @@
+import type { Metadata } from 'next';
+import type { ReactNode } from 'react';
+import './globals.css';
+
+export const metadata: Metadata = {
+ title: 'OpenRouter Chat Playground',
+ description:
+ 'A minimal Next.js chat app that demonstrates streaming OpenRouter responses, model selection, and tool use.',
+};
+
+export default function RootLayout({ children }: { children: ReactNode }) {
+  return (
+    <html lang="en">
+      <body>{children}</body>
+    </html>
+  );
+}
diff --git a/examples/next-chat/app/page.tsx b/examples/next-chat/app/page.tsx
new file mode 100644
index 0000000..11cf687
--- /dev/null
+++ b/examples/next-chat/app/page.tsx
@@ -0,0 +1,566 @@
+'use client';
+
+import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
+import type { FormEvent } from 'react';
+import type { ModelMessage } from 'ai';
+
+import type { ToolMode } from '../lib/models';
+import { DEFAULT_MODEL_ID, DEFAULT_TOOL_MODE, MODEL_OPTIONS } from '../lib/models';
+
+type ToolStatus = 'collecting' | 'running' | 'complete' | 'error';
+
+interface ToolCall {
+ id: string;
+ name: string;
+ status: ToolStatus;
+ inputText?: string;
+ resultText?: string;
+ errorText?: string;
+ providerExecuted?: boolean;
+}
+
+interface ConversationEntry {
+ id: string;
+ role: 'user' | 'assistant';
+ text: string;
+ tools: ToolCall[];
+ pending: boolean;
+}
+
+const TOOL_STATUS_LABEL: Record<ToolStatus, string> = {
+ collecting: 'Collecting input',
+ running: 'Running',
+ complete: 'Completed',
+ error: 'Error',
+};
+
+const TOOL_MODE_OPTIONS: Array<{ value: ToolMode; label: string }> = [
+ { value: 'auto', label: 'Automatic tool calling' },
+ { value: 'disabled', label: 'Disable tools' },
+];
+
+function createMessageId(counterRef: { current: number }, prefix: string) {
+ counterRef.current += 1;
+ return `${prefix}-${Date.now()}-${counterRef.current}`;
+}
+
+function mapConversationToModelMessages(history: ConversationEntry[]): ModelMessage[] {
+ return history.map((entry) =>
+ entry.role === 'user'
+ ? ({ role: 'user', content: entry.text } as ModelMessage)
+ : ({ role: 'assistant', content: entry.text } as ModelMessage),
+ );
+}
+
+function formatData(value: unknown): string {
+ if (value === undefined || value === null) {
+ return '—';
+ }
+
+ if (typeof value === 'string') {
+ return value;
+ }
+
+ try {
+ return JSON.stringify(value, null, 2);
+ } catch {
+ return String(value);
+ }
+}
+
+export default function ChatPage() {
+ const [conversation, setConversation] = useState<ConversationEntry[]>([]);
+ const [input, setInput] = useState('');
+ const [modelId, setModelId] = useState(DEFAULT_MODEL_ID);
+ const [toolMode, setToolMode] = useState<ToolMode>(DEFAULT_TOOL_MODE);
+ const [isStreaming, setIsStreaming] = useState(false);
+ const [error, setError] = useState<string | null>(null);
+
+ const abortControllerRef = useRef<AbortController | null>(null);
+ const messageCounterRef = useRef(0);
+ const currentAssistantIdRef = useRef<string | null>(null);
+
+ const selectedModel = useMemo(
+ () => MODEL_OPTIONS.find((option) => option.id === modelId) ?? MODEL_OPTIONS[0],
+ [modelId],
+ );
+ const toolsSupported = selectedModel?.supportsTools ?? false;
+
+ useEffect(() => {
+ if (!toolsSupported && toolMode !== 'disabled') {
+ setToolMode('disabled');
+ }
+ }, [toolMode, toolsSupported]);
+
+ const appendEntry = useCallback((entry: ConversationEntry) => {
+ setConversation((prev) => [...prev, entry]);
+ }, []);
+
+ const updateEntryById = useCallback(
+ (id: string, updater: (entry: ConversationEntry) => ConversationEntry) => {
+ setConversation((prev) => {
+ const index = prev.findIndex((item) => item.id === id);
+ if (index === -1) {
+ return prev;
+ }
+
+ const updated = updater(prev[index]);
+ if (updated === prev[index]) {
+ return prev;
+ }
+
+ const next = [...prev];
+ next[index] = updated;
+ return next;
+ });
+ },
+ [],
+ );
+
+ const handleStop = useCallback(() => {
+ abortControllerRef.current?.abort();
+ }, []);
+
+ const handleClear = useCallback(() => {
+ abortControllerRef.current?.abort();
+ abortControllerRef.current = null;
+ currentAssistantIdRef.current = null;
+ setConversation([]);
+ setInput('');
+ setError(null);
+ setIsStreaming(false);
+ }, []);
+
+ const streamResponse = useCallback(
+ async (history: ConversationEntry[]) => {
+ setIsStreaming(true);
+ const controller = new AbortController();
+ abortControllerRef.current = controller;
+
+ const payload = JSON.stringify({
+ messages: mapConversationToModelMessages(history),
+ modelId,
+ toolMode,
+ });
+
+ const toolBuffers = new Map<string, string>();
+
+ const ensureAssistantMessage = (messageId?: string) => {
+ if (currentAssistantIdRef.current) {
+ return currentAssistantIdRef.current;
+ }
+
+ const newId = messageId ?? createMessageId(messageCounterRef, 'assistant');
+ currentAssistantIdRef.current = newId;
+ appendEntry({
+ id: newId,
+ role: 'assistant',
+ text: '',
+ tools: [],
+ pending: true,
+ });
+ return newId;
+ };
+
+ const updateToolCall = (
+ assistantId: string,
+ toolId: string,
+ updater: (tool: ToolCall) => ToolCall,
+ ) => {
+ updateEntryById(assistantId, (entry) => ({
+ ...entry,
+ tools: entry.tools.map((tool) => (tool.id === toolId ? updater(tool) : tool)),
+ }));
+ };
+
+ const parseEvent = (eventText: string) => {
+ const dataLines = eventText
+ .split('\n')
+ .filter((line) => line.startsWith('data:'))
+ .map((line) => line.slice(5).trim());
+
+ if (dataLines.length === 0) {
+ return null;
+ }
+
+ const payloadText = dataLines.join('');
+ if (!payloadText) {
+ return null;
+ }
+
+ try {
+ return JSON.parse(payloadText) as Record<string, unknown>;
+ } catch {
+ return null;
+ }
+ };
+
+ const finalizeAssistant = (fallback?: string) => {
+ const assistantId = currentAssistantIdRef.current;
+ if (!assistantId) {
+ return;
+ }
+
+ updateEntryById(assistantId, (entry) => ({
+ ...entry,
+ text: entry.text || fallback || entry.text,
+ pending: false,
+ }));
+ };
+
+ const processChunk = (chunk: Record<string, unknown>) => {
+ const type = typeof chunk.type === 'string' ? chunk.type : null;
+ if (!type) {
+ return;
+ }
+
+ if (type === 'start') {
+ const messageId = typeof chunk.messageId === 'string' ? chunk.messageId : undefined;
+ ensureAssistantMessage(messageId);
+ return;
+ }
+
+ const assistantId = ensureAssistantMessage();
+
+ switch (type) {
+ case 'text-delta': {
+ if (typeof chunk.delta === 'string') {
+ updateEntryById(assistantId, (entry) => ({
+ ...entry,
+ text: entry.text + chunk.delta,
+ }));
+ }
+ break;
+ }
+ case 'tool-input-start': {
+ if (typeof chunk.toolCallId === 'string' && typeof chunk.toolName === 'string') {
+ toolBuffers.set(chunk.toolCallId, '');
+ const status: ToolStatus = chunk.providerExecuted === true ? 'running' : 'collecting';
+ updateEntryById(assistantId, (entry) => ({
+ ...entry,
+ tools: [
+ ...entry.tools,
+ {
+ id: chunk.toolCallId as string,
+ name: chunk.toolName as string,
+ status,
+ providerExecuted: chunk.providerExecuted === true,
+ },
+ ],
+ }));
+ }
+ break;
+ }
+ case 'tool-input-delta': {
+ if (typeof chunk.toolCallId === 'string' && typeof chunk.inputTextDelta === 'string') {
+ const nextValue = (toolBuffers.get(chunk.toolCallId) ?? '') + chunk.inputTextDelta;
+ toolBuffers.set(chunk.toolCallId, nextValue);
+ updateToolCall(assistantId, chunk.toolCallId, (tool) => ({
+ ...tool,
+ inputText: nextValue,
+ status: tool.status === 'collecting' ? 'collecting' : tool.status,
+ }));
+ }
+ break;
+ }
+ case 'tool-input-available': {
+ if (typeof chunk.toolCallId === 'string') {
+ const formatted =
+ 'input' in chunk ? formatData((chunk as { input?: unknown }).input) : undefined;
+ const preview = toolBuffers.get(chunk.toolCallId);
+ toolBuffers.delete(chunk.toolCallId);
+ updateToolCall(assistantId, chunk.toolCallId, (tool) => ({
+ ...tool,
+ inputText: formatted ?? preview ?? tool.inputText,
+ providerExecuted: tool.providerExecuted || chunk.providerExecuted === true,
+ status: 'running',
+ }));
+ }
+ break;
+ }
+ case 'tool-output-available': {
+ if (typeof chunk.toolCallId === 'string') {
+ const formatted =
+ 'output' in chunk ? formatData((chunk as { output?: unknown }).output) : undefined;
+ updateToolCall(assistantId, chunk.toolCallId, (tool) => ({
+ ...tool,
+ resultText: formatted ?? tool.resultText,
+ status: 'complete',
+ }));
+ }
+ break;
+ }
+ case 'tool-output-error': {
+ if (typeof chunk.toolCallId === 'string') {
+ const errorText =
+ typeof chunk.errorText === 'string' ? chunk.errorText : 'Tool error';
+ updateToolCall(assistantId, chunk.toolCallId, (tool) => ({
+ ...tool,
+ errorText,
+ status: 'error',
+ }));
+ }
+ break;
+ }
+ case 'finish': {
+ updateEntryById(assistantId, (entry) => ({
+ ...entry,
+ pending: false,
+ }));
+ break;
+ }
+ case 'abort': {
+ finalizeAssistant('Response aborted.');
+ break;
+ }
+ case 'error': {
+ if (typeof chunk.errorText === 'string') {
+ setError(chunk.errorText);
+ }
+ finalizeAssistant('The model returned an error.');
+ break;
+ }
+ default:
+ break;
+ }
+ };
+
+ try {
+ const response = await fetch('/api/chat', {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: payload,
+ signal: controller.signal,
+ });
+
+ if (!response.ok || !response.body) {
+ const message = await response.text();
+ throw new Error(message || 'Unable to reach the chat endpoint.');
+ }
+
+ const reader = response.body.getReader();
+ const decoder = new TextDecoder();
+ let buffer = '';
+
+ while (true) {
+ const { value, done } = await reader.read();
+ buffer += decoder.decode(value ?? new Uint8Array(), { stream: !done });
+
+ let boundary = buffer.indexOf('\n\n');
+ while (boundary !== -1) {
+ const eventText = buffer.slice(0, boundary);
+ buffer = buffer.slice(boundary + 2);
+ const chunk = parseEvent(eventText);
+ if (chunk) {
+ processChunk(chunk);
+ }
+ boundary = buffer.indexOf('\n\n');
+ }
+
+ if (done) {
+ break;
+ }
+ }
+
+ finalizeAssistant();
+ } catch (error) {
+ if (controller.signal.aborted) {
+ finalizeAssistant('Generation cancelled.');
+ return;
+ }
+
+ const message =
+ error instanceof Error ? error.message : 'Unexpected error while streaming response.';
+ setError(message);
+ finalizeAssistant('The response ended unexpectedly.');
+ } finally {
+ setIsStreaming(false);
+ abortControllerRef.current = null;
+ currentAssistantIdRef.current = null;
+ }
+ },
+ [appendEntry, modelId, toolMode, updateEntryById],
+ );
+
+ const handleSubmit = useCallback(
+ (event: FormEvent<HTMLFormElement>) => {
+ event.preventDefault();
+ if (isStreaming) {
+ return;
+ }
+
+ const trimmed = input.trim();
+ if (!trimmed) {
+ return;
+ }
+
+ const userEntry: ConversationEntry = {
+ id: createMessageId(messageCounterRef, 'user'),
+ role: 'user',
+ text: trimmed,
+ tools: [],
+ pending: false,
+ };
+
+ const nextConversation = [...conversation, userEntry];
+ setConversation(nextConversation);
+ setInput('');
+ setError(null);
+ void streamResponse(nextConversation);
+ },
+ [conversation, input, isStreaming, streamResponse],
+ );
+
+  return (
+    <main>
+      <div className="container">
+        <header>
+          <h1>OpenRouter Chat Playground</h1>
+          <p className="subtitle">
+            Streaming OpenRouter responses with model selection and tool use.
+          </p>
+        </header>
+        <section className="controls">
+          <div className="control">
+            <label htmlFor="model-select">Model</label>
+            <select
+              id="model-select"
+              value={modelId}
+              onChange={(event) => setModelId(event.target.value)}
+              disabled={isStreaming}
+            >
+              {MODEL_OPTIONS.map((option) => (
+                <option key={option.id} value={option.id}>
+                  {option.label}
+                </option>
+              ))}
+            </select>
+            {selectedModel ? <p className="hint">{selectedModel.description}</p> : null}
+          </div>
+          <div className="control">
+            <label htmlFor="tool-mode-select">Tool mode</label>
+            <select
+              id="tool-mode-select"
+              value={toolMode}
+              onChange={(event) => setToolMode(event.target.value as ToolMode)}
+              disabled={isStreaming || !toolsSupported}
+            >
+              {TOOL_MODE_OPTIONS.map((option) => (
+                <option key={option.value} value={option.value}>
+                  {option.label}
+                </option>
+              ))}
+            </select>
+            {!toolsSupported ? <p className="hint">Tools are disabled for this model.</p> : null}
+          </div>
+          <div className="control">
+            <span className="label">Status</span>
+            <span className={isStreaming ? 'badge' : 'badge idle'}>
+              {isStreaming ? 'Streaming response…' : 'Ready'}
+            </span>
+          </div>
+        </section>
+        <section className="chat-panel">
+          <div className="message-header">
+            <span className="label">Conversation</span>
+            <div className="actions">
+              <button type="button" className="secondary" onClick={handleStop} disabled={!isStreaming}>
+                Stop
+              </button>
+              <button type="button" className="secondary" onClick={handleClear}>
+                Clear
+              </button>
+            </div>
+          </div>
+          {error ? <div className="alert">{error}</div> : null}
+          <div className="chat-log">
+            {conversation.length === 0 ? (
+              <p className="empty">
+                Start by asking a question. The assistant streams its reply and displays each tool call.
+              </p>
+            ) : (
+              conversation.map((entry) => (
+                <div key={entry.id} className={`message ${entry.role}`}>
+                  <div className="message-header">
+                    <span className="role-label">{entry.role === 'user' ? 'You' : 'Assistant'}</span>
+                    {entry.pending ? <span className="badge">Streaming…</span> : null}
+                  </div>
+                  {entry.text ? <p className="message-text">{entry.text}</p> : null}
+                  {entry.tools.length > 0 ? (
+                    <div className="tool-list">
+                      {entry.tools.map((tool) => (
+                        <div key={tool.id} className="tool-card">
+                          <div className="tool-card-header">
+                            <span className="tool-name">{tool.name}</span>
+                            <span className={`badge status ${tool.status}`}>
+                              {TOOL_STATUS_LABEL[tool.status]}
+                            </span>
+                          </div>
+                          {tool.providerExecuted ? <p className="hint">Executed by provider</p> : null}
+                          {tool.inputText ? (
+                            <div className="tool-block">
+                              <span className="label">Input</span>
+                              <pre>{tool.inputText}</pre>
+                            </div>
+                          ) : null}
+                          {tool.resultText ? (
+                            <div className="tool-block">
+                              <span className="label">Result</span>
+                              <pre>{tool.resultText}</pre>
+                            </div>
+                          ) : null}
+                          {tool.errorText ? (
+                            <div className="tool-block">
+                              <span className="label">Error</span>
+                              <pre>{tool.errorText}</pre>
+                            </div>
+                          ) : null}
+                        </div>
+                      ))}
+                    </div>
+                  ) : null}
+                </div>
+              ))
+            )}
+          </div>
+          <form className="chat-form" onSubmit={handleSubmit}>
+            <textarea
+              value={input}
+              onChange={(event) => setInput(event.target.value)}
+              placeholder="Ask the assistant anything…"
+              disabled={isStreaming}
+            />
+            <div className="actions">
+              <button type="submit" className="primary" disabled={isStreaming || !input.trim()}>
+                Send
+              </button>
+            </div>
+          </form>
+        </section>
+      </div>
+    </main>
+  );
+}
diff --git a/examples/next-chat/lib/models.ts b/examples/next-chat/lib/models.ts
new file mode 100644
index 0000000..4494930
--- /dev/null
+++ b/examples/next-chat/lib/models.ts
@@ -0,0 +1,41 @@
+export interface ModelOption {
+ id: string;
+ label: string;
+ description: string;
+ supportsTools: boolean;
+}
+
+export type ToolMode = 'auto' | 'disabled';
+
+export const MODEL_OPTIONS: ModelOption[] = [
+ {
+ id: 'openai/gpt-4.1-mini',
+ label: 'OpenAI GPT-4.1 Mini',
+ description:
+ 'Fast and capable general-purpose model with strong support for streaming tool calls.',
+ supportsTools: true,
+ },
+ {
+ id: 'anthropic/claude-3.7-sonnet',
+ label: 'Anthropic Claude 3.7 Sonnet',
+ description:
+ 'Reasoning-focused assistant that can plan multi-step solutions and execute structured tools.',
+ supportsTools: true,
+ },
+ {
+ id: 'meta-llama/llama-3.1-70b-instruct',
+ label: 'Llama 3.1 70B Instruct',
+ description:
+ 'Great open-weight model for narrative tasks. Tools are disabled by default for this model.',
+ supportsTools: false,
+ },
+];
+
+export const DEFAULT_MODEL_ID = MODEL_OPTIONS[0]?.id ?? 'openai/gpt-4.1-mini';
+
+export const DEFAULT_TOOL_MODE: ToolMode = 'auto';
+
+export const DEFAULT_SYSTEM_PROMPT =
+ 'You are an expert assistant running on OpenRouter. Provide concise, actionable answers, '
+ + 'call the available tools when they make the response more helpful, and always explain how '
+ + 'you used any tool results.';
diff --git a/examples/next-chat/lib/tools.ts b/examples/next-chat/lib/tools.ts
new file mode 100644
index 0000000..ab3d737
--- /dev/null
+++ b/examples/next-chat/lib/tools.ts
@@ -0,0 +1,82 @@
+import { tool } from 'ai';
+import { z } from 'zod';
+
+function roundTo(value: number, decimals: number) {
+ const factor = 10 ** decimals;
+ return Math.round(value * factor) / factor;
+}
+
+export const getCurrentWeather = tool({
+ description:
+ 'Look up an approximate weather report for a location. Useful for travel planning or casual questions.',
+ inputSchema: z.object({
+ location: z
+ .string({ description: 'City, region, or coordinates describing the location to inspect.' })
+ .min(2),
+ unit: z
+ .enum(['celsius', 'fahrenheit'], {
+ description: 'Unit to use when reporting the temperature.',
+ })
+ .default('celsius'),
+ }),
+ execute: async ({ location, unit }) => {
+ const fakeTemperatureCelsius = 18 + Math.random() * 10;
+ const temperatureCelsius = roundTo(fakeTemperatureCelsius, 1);
+ const temperatureFahrenheit = roundTo((temperatureCelsius * 9) / 5 + 32, 1);
+
+ return {
+ location,
+ unit,
+ report: `Skies are mostly clear over ${location}. A gentle breeze keeps the humidity comfortable.`,
+ temperature: unit === 'celsius' ? temperatureCelsius : temperatureFahrenheit,
+ feelsLike: unit === 'celsius'
+ ? roundTo(temperatureCelsius - 1.1, 1)
+ : roundTo(temperatureFahrenheit - 1.8, 1),
+ humidity: roundTo(52 + Math.random() * 8, 1),
+ windKph: roundTo(8 + Math.random() * 6, 1),
+ source: 'open-meteorology.example',
+ };
+ },
+});
+
+export const getCurrentTime = tool({
+ description:
+ 'Return the current local time for a requested IANA timezone or city description. '
+ + 'Helpful for scheduling and calendar coordination tasks.',
+ inputSchema: z.object({
+ timezone: z
+ .string({ description: 'An IANA timezone such as "Europe/Paris" or "America/New_York".' })
+ .default('UTC'),
+ locale: z
+ .string({ description: 'BCP47 locale string used when formatting the timestamp.' })
+ .default('en-US'),
+ }),
+ execute: async ({ timezone, locale }) => {
+ const now = new Date();
+ let formatted: string;
+ try {
+ formatted = now.toLocaleString(locale, { timeZone: timezone, hour12: false });
+ } catch (_error) {
+ formatted = now.toLocaleString('en-US', { timeZone: 'UTC', hour12: false });
+ return {
+ timezone,
+ locale,
+ iso: now.toISOString(),
+ formatted,
+ note: `Unable to format for timezone "${timezone}". Falling back to UTC.`,
+ };
+ }
+
+ return {
+ timezone,
+ locale,
+ iso: now.toISOString(),
+ formatted,
+ };
+ },
+});
+
+export const BASIC_TOOLS = {
+ getCurrentWeather,
+ getCurrentTime,
+};
diff --git a/examples/next-chat/next-env.d.ts b/examples/next-chat/next-env.d.ts
new file mode 100644
index 0000000..4f11a03
--- /dev/null
+++ b/examples/next-chat/next-env.d.ts
@@ -0,0 +1,5 @@
+///
+///
+
+// NOTE: This file should not be edited
+// see https://nextjs.org/docs/basic-features/typescript for more information.
diff --git a/examples/next-chat/next.config.mjs b/examples/next-chat/next.config.mjs
new file mode 100644
index 0000000..d5456a1
--- /dev/null
+++ b/examples/next-chat/next.config.mjs
@@ -0,0 +1,6 @@
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+ reactStrictMode: true,
+};
+
+export default nextConfig;
diff --git a/examples/next-chat/package.json b/examples/next-chat/package.json
new file mode 100644
index 0000000..9af84ae
--- /dev/null
+++ b/examples/next-chat/package.json
@@ -0,0 +1,26 @@
+{
+ "name": "@openrouter/examples-next-chat",
+ "private": true,
+ "version": "0.1.0",
+ "type": "module",
+ "scripts": {
+ "dev": "next dev",
+ "build": "next build",
+ "start": "next start",
+ "lint": "next lint"
+ },
+ "dependencies": {
+ "@openrouter/ai-sdk-provider": "workspace:*",
+ "ai": "5.0.5",
+ "next": "14.2.15",
+ "react": "18.3.1",
+ "react-dom": "18.3.1",
+ "zod": "3.25.76"
+ },
+ "devDependencies": {
+ "@types/node": "20.17.24",
+ "@types/react": "18.3.5",
+ "@types/react-dom": "18.3.0",
+ "typescript": "5.9.2"
+ }
+}
diff --git a/examples/next-chat/tsconfig.json b/examples/next-chat/tsconfig.json
new file mode 100644
index 0000000..81bf283
--- /dev/null
+++ b/examples/next-chat/tsconfig.json
@@ -0,0 +1,19 @@
+{
+ "compilerOptions": {
+ "target": "ES2020",
+ "lib": ["dom", "dom.iterable", "esnext"],
+ "allowJs": false,
+ "skipLibCheck": true,
+ "strict": true,
+ "noEmit": true,
+ "esModuleInterop": true,
+ "module": "esnext",
+ "moduleResolution": "bundler",
+ "resolveJsonModule": true,
+ "isolatedModules": true,
+ "jsx": "preserve",
+ "types": ["node"]
+ },
+ "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx"],
+ "exclude": ["node_modules"]
+}
diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml
index bd8ded7..3759f2c 100644
--- a/pnpm-workspace.yaml
+++ b/pnpm-workspace.yaml
@@ -1,5 +1,6 @@
packages:
- '.'
+ - 'examples/*'
onlyBuiltDependencies:
- '@biomejs/biome'
From 29138cde4cc7e088046f3696e71a0f25a7f655ae Mon Sep 17 00:00:00 2001
From: "google-labs-jules[bot]"
<161369871+google-labs-jules[bot]@users.noreply.github.com>
Date: Wed, 24 Sep 2025 01:24:54 +0000
Subject: [PATCH 2/2] Update documentation and add JSDoc comments
---
README.md | 42 ++++--
e2e/tools.ts | 9 ++
examples/next-chat/lib/models.ts | 30 ++++
examples/next-chat/lib/tools.ts | 9 ++
.../convert-to-openrouter-chat-messages.ts | 11 +-
src/chat/file-url-utils.ts | 21 +++
src/chat/get-tool-choice.ts | 7 +
src/chat/index.ts | 22 +++
src/chat/is-url.ts | 8 +
src/chat/schemas.ts | 14 +-
...convert-to-openrouter-completion-prompt.ts | 12 ++
src/completion/index.ts | 22 +++
src/completion/schemas.ts | 7 +-
src/facade.ts | 14 ++
src/provider.ts | 78 +++++++---
src/schemas/error-response.ts | 11 ++
src/schemas/image.ts | 7 +
src/schemas/reasoning-details.ts | 33 +++++
src/types/index.ts | 63 +++++++-
.../openrouter-chat-completions-input.ts | 139 +++++++++++++++++-
src/types/openrouter-chat-settings.ts | 112 ++++++++------
src/types/openrouter-completion-settings.ts | 50 ++++---
src/utils/map-finish-reason.ts | 6 +
23 files changed, 616 insertions(+), 111 deletions(-)
diff --git a/README.md b/README.md
index 63dfcba..40c58ec 100644
--- a/README.md
+++ b/README.md
@@ -2,31 +2,30 @@
The [OpenRouter](https://openrouter.ai/) provider for the [Vercel AI SDK](https://sdk.vercel.ai/docs) gives access to over 300 large language models on the OpenRouter chat and completion APIs.
-## Setup for AI SDK v5
+## Overview
-```bash
-# For pnpm
-pnpm add @openrouter/ai-sdk-provider
+This provider connects the Vercel AI SDK to the OpenRouter API, so you can use OpenRouter's extensive model catalog through the AI SDK's standard chat and completion interfaces.
-# For npm
-npm install @openrouter/ai-sdk-provider
+## Features
-# For yarn
-yarn add @openrouter/ai-sdk-provider
-```
+- **Access to over 300 models**: Use any of the models available on OpenRouter, including the latest open-source and proprietary models.
+- **Chat and completion APIs**: Use both the chat and completion APIs, with support for streaming and non-streaming responses.
+- **Tool support**: Use tools with supported models to build powerful applications.
+- **Usage accounting**: Track your token usage and costs with OpenRouter's usage accounting feature.
+- **Anthropic prompt caching**: Leverage Anthropic's prompt caching for faster and cheaper responses.
+- **Provider routing**: Control how your requests are routed to different providers.
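+
+As a quick taste (setup instructions follow below), streaming a completion takes a few
+lines; the model ID here is just one of the many available:
+
+```ts
+import { openrouter } from '@openrouter/ai-sdk-provider';
+import { streamText } from 'ai';
+
+const result = streamText({
+  model: openrouter('openai/gpt-4.1-mini'),
+  prompt: 'Explain tool calling in one paragraph.',
+});
+
+// Print the response as it streams in.
+for await (const textPart of result.textStream) {
+  process.stdout.write(textPart);
+}
+```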
-## (LEGACY) Setup for AI SDK v4
+## Setup
```bash
# For pnpm
-pnpm add @openrouter/ai-sdk-provider@ai-sdk-v4
+pnpm add @openrouter/ai-sdk-provider
# For npm
-npm install @openrouter/ai-sdk-provider@ai-sdk-v4
+npm install @openrouter/ai-sdk-provider
# For yarn
-yarn add @openrouter/ai-sdk-provider@ai-sdk-v4
-
+yarn add @openrouter/ai-sdk-provider
```
## Provider Instance
@@ -37,6 +36,17 @@ You can import the default provider instance `openrouter` from `@openrouter/ai-s
import { openrouter } from '@openrouter/ai-sdk-provider';
```
+You can also create your own provider instance with custom settings:
+
+```ts
+import { createOpenRouter } from '@openrouter/ai-sdk-provider';
+
+const openrouter = createOpenRouter({
+ apiKey: 'YOUR_API_KEY',
+ baseURL: 'https://my-proxy.com/api/v1',
+});
+```
+
## Example
```ts
@@ -197,3 +207,7 @@ if (result.providerMetadata?.openrouter?.usage) {
);
}
```
+
+## API Reference
+
+The full API reference is available in the [generated documentation]().
diff --git a/e2e/tools.ts b/e2e/tools.ts
index c672e9f..2310cf6 100644
--- a/e2e/tools.ts
+++ b/e2e/tools.ts
@@ -9,6 +9,9 @@ const openrouter = createOpenRouter({
baseUrl: `${process.env.OPENROUTER_API_BASE}/api/v1`,
});
+/**
+ * A tool for sending an SMS message.
+ */
export const sendSMSTool = tool({
description: 'Send an SMS to any phone number',
inputSchema: z.object({
@@ -24,6 +27,9 @@ export const sendSMSTool = tool({
},
});
+/**
+ * A tool for reading an SMS message.
+ */
export const readSMSTool = tool({
description: 'Read the nth SMS from a phone number',
inputSchema: z.object({
@@ -39,6 +45,9 @@ export const readSMSTool = tool({
},
});
+/**
+ * A tool for executing a command in the terminal.
+ */
export const executeCommandInTerminalTool = tool({
description: 'Execute a command in the terminal',
inputSchema: z.object({
diff --git a/examples/next-chat/lib/models.ts b/examples/next-chat/lib/models.ts
index 4494930..ab244e0 100644
--- a/examples/next-chat/lib/models.ts
+++ b/examples/next-chat/lib/models.ts
@@ -1,12 +1,33 @@
+/**
+ * Represents a model option in the UI.
+ */
export interface ModelOption {
+ /**
+ * The ID of the model.
+ */
id: string;
+ /**
+ * The label for the model.
+ */
label: string;
+ /**
+ * A description of the model.
+ */
description: string;
+ /**
+ * Whether the model supports tools.
+ */
supportsTools: boolean;
}
+/**
+ * The mode for using tools.
+ */
export type ToolMode = 'auto' | 'disabled';
+/**
+ * The available model options.
+ */
export const MODEL_OPTIONS: ModelOption[] = [
{
id: 'openai/gpt-4.1-mini',
@@ -31,10 +52,19 @@ export const MODEL_OPTIONS: ModelOption[] = [
},
];
+/**
+ * The default model ID.
+ */
export const DEFAULT_MODEL_ID = MODEL_OPTIONS[0]?.id ?? 'openai/gpt-4.1-mini';
+/**
+ * The default tool mode.
+ */
export const DEFAULT_TOOL_MODE: ToolMode = 'auto';
+/**
+ * The default system prompt.
+ */
export const DEFAULT_SYSTEM_PROMPT =
'You are an expert assistant running on OpenRouter. Provide concise, actionable answers, '
+ 'call the available tools when they make the response more helpful, and always explain how '
diff --git a/examples/next-chat/lib/tools.ts b/examples/next-chat/lib/tools.ts
index ab3d737..98a8d34 100644
--- a/examples/next-chat/lib/tools.ts
+++ b/examples/next-chat/lib/tools.ts
@@ -6,6 +6,9 @@ function roundTo(value: number, decimals: number) {
return Math.round(value * factor) / factor;
}
+/**
+ * A tool for getting the current weather.
+ */
export const getCurrentWeather = tool({
description:
'Look up an approximate weather report for a location. Useful for travel planning or casual questions.',
@@ -39,6 +42,9 @@ export const getCurrentWeather = tool({
},
});
+/**
+ * A tool for getting the current time.
+ */
export const getCurrentTime = tool({
description:
'Return the current local time for a requested IANA timezone or city description. '
@@ -76,6 +82,9 @@ export const getCurrentTime = tool({
},
});
+/**
+ * A set of basic tools that can be used by the assistant.
+ */
export const BASIC_TOOLS = {
getCurrentWeather,
getCurrentTime,
diff --git a/src/chat/convert-to-openrouter-chat-messages.ts b/src/chat/convert-to-openrouter-chat-messages.ts
index 3f033f1..18fe50e 100644
--- a/src/chat/convert-to-openrouter-chat-messages.ts
+++ b/src/chat/convert-to-openrouter-chat-messages.ts
@@ -15,7 +15,10 @@ import { ReasoningDetailType } from '@/src/schemas/reasoning-details';
import { getFileUrl } from './file-url-utils';
import { isUrl } from './is-url';
-// Type for OpenRouter Cache Control following Anthropic's pattern
+/**
+ * Type for OpenRouter Cache Control, following Anthropic's pattern.
+ * It is used to control caching behavior for requests.
+ */
export type OpenRouterCacheControl = { type: 'ephemeral' };
function getCacheControl(
@@ -31,6 +34,12 @@ function getCacheControl(
anthropic?.cache_control) as OpenRouterCacheControl | undefined;
}
+/**
+ * Converts a prompt from the AI SDK V2 format to the OpenRouter Chat Completions format.
+ *
+ * @param {LanguageModelV2Prompt} prompt - The prompt to convert.
+ * @returns {OpenRouterChatCompletionsInput} The converted prompt in OpenRouter format.
+ */
export function convertToOpenRouterChatMessages(
prompt: LanguageModelV2Prompt,
): OpenRouterChatCompletionsInput {
diff --git a/src/chat/file-url-utils.ts b/src/chat/file-url-utils.ts
index 700b246..e990fed 100644
--- a/src/chat/file-url-utils.ts
+++ b/src/chat/file-url-utils.ts
@@ -3,6 +3,14 @@ import type { LanguageModelV2FilePart } from '@ai-sdk/provider';
import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils';
import { isUrl } from './is-url';
+/**
+ * Gets a URL for a file part, converting it to a data URL if necessary.
+ *
+ * @param {object} options - The options for getting the file URL.
+ * @param {LanguageModelV2FilePart} options.part - The file part to get the URL for.
+ * @param {string} options.defaultMediaType - The default media type to use if the part doesn't have one.
+ * @returns {string} The URL for the file part.
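+ *
+ * @example
+ * // Raw bytes become a base64 data URL (the shape of `part` is illustrative):
+ * // getFileUrl({
+ * //   part: { type: 'file', data: bytes, mediaType: 'image/png' },
+ * //   defaultMediaType: 'application/octet-stream',
+ * // }) // → 'data:image/png;base64,...'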
+ */
export function getFileUrl({
part,
defaultMediaType,
@@ -31,11 +39,24 @@ export function getFileUrl({
: `data:${part.mediaType ?? defaultMediaType};base64,${stringUrl}`;
}
+/**
+ * Gets the media type from a data URL.
+ *
+ * @param {string} dataUrl - The data URL to get the media type from.
+ * @param {string} defaultMediaType - The default media type to use if it can't be determined from the URL.
+ * @returns {string} The media type of the data URL.
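+ *
+ * @example
+ * getMediaType('data:image/png;base64,iVBORw0...', 'application/octet-stream');
+ * // → 'image/png'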
+ */
export function getMediaType(dataUrl: string, defaultMediaType: string): string {
const match = dataUrl.match(/^data:([^;]+)/);
return match ? match[1] ?? defaultMediaType : defaultMediaType;
}
+/**
+ * Gets the base64 content from a data URL.
+ *
+ * @param {string} dataUrl - The data URL to get the base64 content from.
+ * @returns {string} The base64 content of the data URL.
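+ *
+ * @example
+ * getBase64FromDataUrl('data:image/png;base64,iVBORw0...');
+ * // → 'iVBORw0...' (the payload after the base64 marker)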
+ */
export function getBase64FromDataUrl(dataUrl: string): string {
const match = dataUrl.match(/^data:[^;]*;base64,(.+)$/);
return match ? match[1]! : dataUrl;
diff --git a/src/chat/get-tool-choice.ts b/src/chat/get-tool-choice.ts
index f7370a7..13a10c3 100644
--- a/src/chat/get-tool-choice.ts
+++ b/src/chat/get-tool-choice.ts
@@ -16,6 +16,13 @@ const ChatCompletionToolChoiceSchema = z.union([
type ChatCompletionToolChoice = z.infer<typeof ChatCompletionToolChoiceSchema>;
+/**
+ * Gets the chat completion tool choice from the language model tool choice.
+ *
+ * @param {LanguageModelV2ToolChoice} toolChoice - The language model tool choice to convert.
+ * @returns {ChatCompletionToolChoice} The chat completion tool choice.
+ * @throws {Error} If the tool choice type is invalid.
+ */
export function getChatCompletionToolChoice(
toolChoice: LanguageModelV2ToolChoice,
): ChatCompletionToolChoice {
diff --git a/src/chat/index.ts b/src/chat/index.ts
index f4d4cec..8c9e6ed 100644
--- a/src/chat/index.ts
+++ b/src/chat/index.ts
@@ -47,6 +47,13 @@ type OpenRouterChatConfig = {
extraBody?: Record<string, unknown>;
};
+/**
+ * A concrete implementation of a language model that interacts with the OpenRouter API.
+ * This class is responsible for making API calls to OpenRouter for chat completions.
+ * It handles both streaming and non-streaming responses.
+ *
+ * @implements {LanguageModelV2}
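+ *
+ * @example
+ * // Typically obtained through the provider factory rather than constructed directly:
+ * // const openrouter = createOpenRouter({ apiKey: process.env.OPENROUTER_API_KEY });
+ * // const model = openrouter('openai/gpt-4.1-mini');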
+ */
export class OpenRouterChatLanguageModel implements LanguageModelV2 {
readonly specificationVersion = 'v2' as const;
readonly provider = 'openrouter';
@@ -185,6 +192,14 @@ export class OpenRouterChatLanguageModel implements LanguageModelV2 {
return baseArgs;
}
+ /**
+ * Performs a non-streaming generation task.
+ * It sends a request to the OpenRouter API and returns the result as a promise.
+ *
+ * @param {LanguageModelV2CallOptions} options - The options for the language model call.
+ * @returns {Promise