diff --git a/package.json b/package.json
index 3e1a495..0eee1f9 100644
--- a/package.json
+++ b/package.json
@@ -1,19 +1,19 @@
 {
-  "name": "copilot-api",
-  "version": "0.3.1",
+  "name": "@nghyane/copilot-api",
+  "version": "1.0.1-beta.1",
   "description": "A wrapper around GitHub Copilot API to make it OpenAI compatible, making it usable for other tools.",
   "keywords": [
     "proxy",
     "github-copilot",
     "openai-compatible"
   ],
-  "homepage": "https://github.com/ericc-ch/copilot-api",
-  "bugs": "https://github.com/ericc-ch/copilot-api/issues",
+  "homepage": "https://github.com/nghyane/copilot-api",
+  "bugs": "https://github.com/nghyane/copilot-api/issues",
   "repository": {
     "type": "git",
-    "url": "git+https://github.com/ericc-ch/copilot-api.git"
+    "url": "git+https://github.com/nghyane/copilot-api.git"
   },
-  "author": "Erick Christian ",
+  "author": "Nghyane ",
   "type": "module",
   "bin": {
     "copilot-api": "./dist/main.js"
diff --git a/src/auth.ts b/src/auth.ts
index 85b5a13..0843ecd 100644
--- a/src/auth.ts
+++ b/src/auth.ts
@@ -3,7 +3,8 @@
 import { defineCommand } from "citty"
 import consola from "consola"
 
-import { PATHS, ensurePaths } from "./lib/paths"
+import { ensurePaths } from "./lib/paths"
+import { PATHS } from "./lib/paths"
 import { setupGitHubToken } from "./lib/token"
 
 interface RunAuthOptions {
@@ -13,7 +14,6 @@ interface RunAuthOptions {
 export async function runAuth(options: RunAuthOptions): Promise<void> {
   if (options.verbose) {
     consola.level = 5
-    consola.info("Verbose logging enabled")
   }
 
   await ensurePaths()
diff --git a/src/lib/api-config.ts b/src/lib/api-config.ts
index 8075145..13e1305 100644
--- a/src/lib/api-config.ts
+++ b/src/lib/api-config.ts
@@ -15,7 +15,7 @@ const API_VERSION = "2025-04-01"
 export const copilotBaseUrl = (state: State) =>
   `https://api.${state.accountType}.githubcopilot.com`
 
-export const copilotHeaders = (state: State, vision: boolean = false) => {
+export const copilotHeaders = (state: State, vision = false) => {
   const headers: Record<string, string> = {
     Authorization: `Bearer ${state.copilotToken}`,
     "content-type": standardHeaders()["content-type"],
diff --git a/src/lib/format-detector.ts b/src/lib/format-detector.ts
new file mode 100644
index 0000000..dfdf542
--- /dev/null
+++ b/src/lib/format-detector.ts
@@ -0,0 +1,43 @@
+/**
+ * Simple format detection for Anthropic vs OpenAI requests
+ * Only used to detect the incoming format - no response conversion
+ */
+
+export interface FormatDetectionResult {
+  isAnthropic: boolean
+  originalFormat: 'anthropic' | 'openai'
+}
+
+/**
+ * Detect whether a request is in Anthropic format,
+ * based on Anthropic-specific fields and structures
+ */
+export function detectFormat(payload: any): FormatDetectionResult {
+  const isAnthropic = !!(
+    // Anthropic system format (array instead of string)
+    (Array.isArray(payload.system)) ||
+
+    // Anthropic metadata field
+    (payload.metadata) ||
+
+    // Anthropic message content structures
+    (payload.messages?.some((msg: any) =>
+      msg.content?.some?.((part: any) =>
+        part.cache_control ||
+        part.type === 'tool_use' ||
+        part.type === 'tool_result'
+      )
+    )) ||
+
+    // Anthropic tool format (input_schema instead of function)
+    (payload.tools?.some((tool: any) => tool.input_schema && !tool.function)) ||
+
+    // Anthropic tool_choice format (object with type)
+    (typeof payload.tool_choice === 'object' && payload.tool_choice?.type)
+  )
+
+  return {
+    isAnthropic,
+    originalFormat: isAnthropic ? 'anthropic' : 'openai'
+  }
+}
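
// ---------------------------------------------------------------------------
// Editor's sketch (not part of the diff): how detectFormat classifies an
// incoming payload. The payload below is a hypothetical Anthropic-style
// request; any one of the markers checked above (array `system`, `metadata`,
// cache_control/tool_use/tool_result parts, `input_schema` tools, object
// `tool_choice`) is enough to flip the result.
import { detectFormat } from "~/lib/format-detector"

const anthropicStylePayload = {
  model: "claude-sonnet-4",
  system: [{ type: "text", text: "You are a helpful assistant." }],
  messages: [{ role: "user", content: "Hello" }],
}

const result = detectFormat(anthropicStylePayload)
// result -> { isAnthropic: true, originalFormat: 'anthropic' }
// ---------------------------------------------------------------------------
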
diff --git a/src/lib/forward-error.ts b/src/lib/forward-error.ts
index c0a1e02..a1887c6 100644
--- a/src/lib/forward-error.ts
+++ b/src/lib/forward-error.ts
@@ -9,7 +9,14 @@ export async function forwardError(c: Context, error: unknown) {
   consola.error("Error occurred:", error)
 
   if (error instanceof HTTPError) {
-    const errorText = await error.response.text()
+    let errorText: string
+    try {
+      errorText = await error.response.text()
+    } catch {
+      // If body is already used, fall back to the error message
+      errorText = error.message
+    }
+
     return c.json(
       {
         error: {
diff --git a/src/lib/http-error.ts b/src/lib/http-error.ts
index 352d3c6..c13d76e 100644
--- a/src/lib/http-error.ts
+++ b/src/lib/http-error.ts
@@ -4,5 +4,7 @@ export class HTTPError extends Error {
   constructor(message: string, response: Response) {
     super(message)
     this.response = response
+
+    console.error(message, response)
   }
 }
diff --git a/src/lib/logger.ts b/src/lib/logger.ts
new file mode 100644
index 0000000..d17aedc
--- /dev/null
+++ b/src/lib/logger.ts
@@ -0,0 +1,9 @@
+import consola from "consola"
+
+// Simple logger wrapper around consola for basic logging needs
+export const globalLogger = {
+  debug: (message: string, data?: any) => consola.debug(message, data),
+  info: (message: string, data?: any) => consola.info(message, data),
+  warn: (message: string, data?: any) => consola.warn(message, data),
+  error: (message: string, data?: any) => consola.error(message, data),
+}
diff --git a/src/lib/model-utils.ts b/src/lib/model-utils.ts
new file mode 100644
index 0000000..0015c67
--- /dev/null
+++ b/src/lib/model-utils.ts
@@ -0,0 +1,39 @@
+import { state } from "./state"
+
+/**
+ * Check if the model vendor is Anthropic (Claude models)
+ */
+export const isAnthropicVendor = (modelName: string): boolean => {
+  if (!state.models?.data) return false
+
+  const model = state.models.data.find((m) => m.id === modelName)
+  return model?.vendor === "Anthropic"
+}
+
+/**
+ * Get model information by name
+ */
+export const getModelInfo = (modelName: string) => {
+  if (!state.models?.data) return null
+
+  return state.models.data.find((m) => m.id === modelName)
+}
+
+/**
+ * Check if model supports vision
+ * Note: Vision support is not explicitly defined in the API response,
+ * so we check based on model name patterns
+ */
+export const supportsVision = (modelName: string): boolean => {
+  // For now, assume vision support based on model name patterns
+  // This can be updated when the API provides explicit vision support info
+  return modelName.includes("gpt-4") || modelName.includes("claude")
+}
+
+/**
+ * Check if model supports tool calls
+ */
+export const supportsToolCalls = (modelName: string): boolean => {
+  const model = getModelInfo(modelName)
+  return model?.capabilities?.supports?.tool_calls === true
+}
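
// ---------------------------------------------------------------------------
// Editor's sketch (not part of the diff): the model-utils helpers all read
// from the cached state.models list, so cacheModels() must have run first;
// the model id below is illustrative.
import { isAnthropicVendor, supportsToolCalls, supportsVision } from "~/lib/model-utils"

const id = "claude-sonnet-4"
const fromAnthropic = isAnthropicVendor(id) // vendor === "Anthropic" in cached /models data
const toolsOk = supportsToolCalls(id) // reads capabilities.supports.tool_calls
const visionOk = supportsVision(id) // heuristic: name contains "gpt-4" or "claude"
// ---------------------------------------------------------------------------
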
diff --git a/src/lib/models.ts b/src/lib/models.ts
index d6a3516..9803ce6 100644
--- a/src/lib/models.ts
+++ b/src/lib/models.ts
@@ -1,14 +1,40 @@
-import consola from "consola"
-
 import { getModels } from "~/services/copilot/get-models"
 
 import { state } from "./state"
 
+/**
+ * Transform disguised model names back to real Claude model names
+ *
+ * STRATEGY: Cursor sees "gpt-4-claude-sonnet-4" and sends OpenAI format,
+ * but we need to map it back to the real Claude model for the GitHub Copilot API.
+ */
+export function transformModelName(modelName: string): string {
+  // Handle disguised Claude models - map back to real Claude models
+  if (modelName === "gpt-4.1") {
+    return "claude-sonnet-4"
+  }
+
+  if (modelName.startsWith("gpt-4.1-")) {
+    return "gpt-4.1"
+  }
+
+
+  return modelName
+}
+
+/**
+ * Transform a model name from internal format to client-facing format
+ * Example: "claude-sonnet-4" -> "claude-4-sonnet"
+ */
+export function reverseTransformModelName(modelName: string): string {
+  if (modelName === "claude-sonnet-4") {
+    return "claude-4-sonnet"
+  }
+
+  return modelName
+}
+
 export async function cacheModels(): Promise<void> {
   const models = await getModels()
   state.models = models
-
-  consola.info(
-    `Available models: \n${models.data.map((model) => `- ${model.id}`).join("\n")}`,
-  )
 }
diff --git a/src/lib/streaming-utils.ts b/src/lib/streaming-utils.ts
new file mode 100644
index 0000000..1f2afbd
--- /dev/null
+++ b/src/lib/streaming-utils.ts
@@ -0,0 +1,15 @@
+import { events } from "fetch-event-stream"
+
+/**
+ * Create streaming response - simple pass-through for all models
+ * No format conversion needed since all requests are OpenAI format
+ */
+export async function* createStreamingResponse(
+  response: Response,
+): AsyncIterable<any> {
+  const eventStream = events(response)
+
+  for await (const event of eventStream) {
+    yield event
+  }
+}
diff --git a/src/lib/token.ts b/src/lib/token.ts
index aa66967..86a0b8a 100644
--- a/src/lib/token.ts
+++ b/src/lib/token.ts
@@ -1,5 +1,5 @@
-import consola from "consola"
 import fs from "node:fs/promises"
+import consola from "consola"
 
 import { PATHS } from "~/lib/paths"
 import { getCopilotToken } from "~/services/github/get-copilot-token"
@@ -22,7 +22,6 @@ export const setupCopilotToken = async () => {
   const refreshInterval = (refresh_in - 60) * 1000
 
   setInterval(async () => {
-    consola.start("Refreshing Copilot token")
    try {
      const { token } = await getCopilotToken()
      state.copilotToken = token
@@ -50,13 +49,10 @@ export async function setupGitHubToken(
     return
   }
 
-  consola.info("Not logged in, getting new access token")
+  consola.info("Getting new GitHub access token")
   const response = await getDeviceCode()
 
-  consola.debug("Device code response:", response)
-  consola.info(
-    `Please enter the code "${response.user_code}" in ${response.verification_uri}`,
-  )
+  consola.info(`Please enter the code "${response.user_code}" in ${response.verification_uri}`)
 
   const token = await pollAccessToken(response)
   await writeGithubToken(token)
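
// ---------------------------------------------------------------------------
// Editor's sketch (not part of the diff): expected behavior of the two
// transforms as written above. Note they are not inverses: transformModelName
// maps client-facing ids to what the Copilot API expects, while
// reverseTransformModelName only renames "claude-sonnet-4" for display.
// "gpt-4.1-mini" is a hypothetical id used to show the prefix rule.
import { transformModelName, reverseTransformModelName } from "~/lib/models"

transformModelName("gpt-4.1") // -> "claude-sonnet-4" (disguised Claude)
transformModelName("gpt-4.1-mini") // -> "gpt-4.1" (any "gpt-4.1-" prefix collapses)
transformModelName("gpt-4o") // -> "gpt-4o" (pass-through)
reverseTransformModelName("claude-sonnet-4") // -> "claude-4-sonnet"
// ---------------------------------------------------------------------------
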
diff --git a/src/lib/tokenizer.ts b/src/lib/tokenizer.ts
index 98797c6..0c1298b 100644
--- a/src/lib/tokenizer.ts
+++ b/src/lib/tokenizer.ts
@@ -1,12 +1,96 @@
 import { countTokens } from "gpt-tokenizer/model/gpt-4o"
 
-import type { Message } from "~/services/copilot/create-chat-completions"
+import type { MessageRole } from "~/services/copilot/create-chat-completions"
 
-export const getTokenCount = (messages: Array<Message>) => {
-  const input = messages.filter(
-    (m) => m.role !== "assistant" && typeof m.content === "string",
-  )
-  const output = messages.filter((m) => m.role === "assistant")
+// Convert Message to gpt-tokenizer compatible format
+interface ChatMessage {
+  role: "user" | "assistant" | "system"
+  content: string
+}
+
+// Generic message type for tokenizer
+interface TokenizerMessage {
+  role: MessageRole
+  content: string | Array<any> | null
+  tool_calls?: Array<any>
+  tool_call_id?: string
+  name?: string
+  [key: string]: any
+}
+
+const convertToTokenizerFormat = (
+  message: TokenizerMessage,
+): ChatMessage | null => {
+  // Handle tool role messages - convert to assistant for token counting
+  const role = message.role === "tool" ? "assistant" : message.role
+
+  // Handle string content
+  if (typeof message.content === "string") {
+    return {
+      role: role,
+      content: message.content,
+    }
+  }
+
+  // Handle null content (can happen with tool calls)
+  if (message.content === null) {
+    // If there are tool calls, convert them to text for token counting
+    if (message.tool_calls && message.tool_calls.length > 0) {
+      const toolCallsText = message.tool_calls
+        .map((toolCall: any) => {
+          return `Function call: ${toolCall.function?.name}(${toolCall.function?.arguments})`
+        })
+        .join(" ")
+
+      return {
+        role: role,
+        content: toolCallsText,
+      }
+    }
+
+    // If it's a tool response, use the tool_call_id and name for context
+    if (message.role === "tool" && message.name) {
+      return {
+        role: "assistant",
+        content: `Tool response from ${message.name}`,
+      }
+    }
+
+    return null
+  }
+
+  // Handle ContentPart array - extract text content
+  const textContent = message.content
+    .map((part: any) => {
+      if (part.type === "input_text" && part.text) {
+        return part.text
+      }
+      // For image parts we can't count tokens meaningfully,
+      // so we skip them rather than provide a placeholder.
+      return ""
+    })
+    .filter(Boolean)
+    .join(" ")
+
+  // Only return a message if we have actual text content
+  if (textContent.trim()) {
+    return {
+      role: role,
+      content: textContent,
+    }
+  }
+
+  return null
+}
+
+export const getTokenCount = (messages: Array<TokenizerMessage>) => {
+  // Convert messages to tokenizer-compatible format
+  const convertedMessages = messages
+    .map(convertToTokenizerFormat)
+    .filter((m): m is ChatMessage => m !== null)
+
+  const input = convertedMessages.filter((m) => m.role !== "assistant")
+  const output = convertedMessages.filter((m) => m.role === "assistant")
 
   const inputTokens = countTokens(input)
   const outputTokens = countTokens(output)
diff --git a/src/lib/vscode-version.ts b/src/lib/vscode-version.ts
index 5b33011..6ae9aa0 100644
--- a/src/lib/vscode-version.ts
+++ b/src/lib/vscode-version.ts
@@ -1,5 +1,3 @@
-import consola from "consola"
-
 import { getVSCodeVersion } from "~/services/get-vscode-version"
 
 import { state } from "./state"
@@ -7,6 +5,4 @@
 export const cacheVSCodeVersion = async () => {
   const response = await getVSCodeVersion()
   state.vsCodeVersion = response
-
-  consola.info(`Using VSCode version: ${response}`)
 }
diff --git a/src/main.ts b/src/main.ts
index b028364..b4c4cf7 100644
--- a/src/main.ts
+++ b/src/main.ts
@@ -26,15 +26,12 @@ interface RunServerOptions {
 export async function runServer(options: RunServerOptions): Promise<void> {
   if (options.verbose) {
     consola.level = 5
-    consola.info("Verbose logging enabled")
   }
 
   if (options.business) {
     state.accountType = "business"
-    consola.info("Using business plan GitHub account")
   } else if (options.enterprise) {
     state.accountType = "enterprise"
-    consola.info("Using enterprise plan GitHub account")
   }
 
   state.manualApprove = options.manual
@@ -46,7 +43,6 @@ export async function runServer(options: RunServerOptions): Promise<void> {
 
   if (options.githubToken) {
     state.githubToken = options.githubToken
-    consola.info("Using provided GitHub token")
   } else {
     await setupGitHubToken()
   }
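
// ---------------------------------------------------------------------------
// Editor's sketch (not part of the diff): the reworked tokenizer now accepts
// tool messages and null content. An assistant message whose content is null
// but which carries tool_calls is flattened to "Function call: name(args)"
// text before counting. The messages below are illustrative.
import { getTokenCount } from "~/lib/tokenizer"

const counts = getTokenCount([
  { role: "user", content: "What is the weather in Hanoi?" },
  {
    role: "assistant",
    content: null,
    tool_calls: [
      { function: { name: "get_weather", arguments: '{"city":"Hanoi"}' } },
    ],
  },
])
// counts holds the input/output token totals computed via countTokens
// ---------------------------------------------------------------------------
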
from "hono" -import consola from "consola" import { streamSSE, type SSEMessage } from "hono/streaming" - import { awaitApproval } from "~/lib/approval" import { isNullish } from "~/lib/is-nullish" +import { transformModelName } from "~/lib/models" import { checkRateLimit } from "~/lib/rate-limit" import { state } from "~/lib/state" -import { getTokenCount } from "~/lib/tokenizer" -import { - createChatCompletions, - type ChatCompletionResponse, - type ChatCompletionsPayload, -} from "~/services/copilot/create-chat-completions" +import { createChatCompletions } from "~/services/copilot/create-chat-completions" export async function handleCompletion(c: Context) { await checkRateLimit(state) - let payload = await c.req.json() + const originalPayload = await c.req.json() + + // No format conversion - pass through all requests as OpenAI format + let payload = originalPayload + + // Transform model name if needed + if (payload.model) { + payload.model = transformModelName(payload.model) + } + + // All requests are passed through as OpenAI format + + + + + - consola.info("Current token count:", getTokenCount(payload.messages)) if (state.manualApprove) await awaitApproval() - if (isNullish(payload.max_tokens)) { + if (isNullish(payload.max_tokens) && payload.model) { + // Transform model name to internal format for lookup + const internalModelName = transformModelName(payload.model) const selectedModel = state.models?.data.find( - (model) => model.id === payload.model, + (model) => model.id === internalModelName, ) - payload = { - ...payload, - max_tokens: selectedModel?.capabilities.limits.max_output_tokens, + if (selectedModel?.capabilities?.limits?.max_output_tokens) { + payload.max_tokens = selectedModel.capabilities.limits.max_output_tokens } } @@ -41,12 +50,48 @@ export async function handleCompletion(c: Context) { } return streamSSE(c, async (stream) => { - for await (const chunk of response) { - await stream.writeSSE(chunk as SSEMessage) + try { + for await (const chunk of response as AsyncIterable) { + // Check if connection is still alive + if (stream.closed) { + break + } + + await stream.writeSSE(chunk as SSEMessage) + } + } catch (error) { + // Send error event to client if connection is still open + if (!stream.closed) { + try { + await stream.writeSSE({ + event: "error", + data: JSON.stringify({ error: "Stream interrupted" }), + }) + } catch { + // Ignore write errors to closed streams + } + } + } finally { + // Ensure stream is properly closed + if (!stream.closed) { + try { + await stream.writeSSE({ event: "done", data: "[DONE]" }) + } catch { + // Ignore close errors + } + } } }) } const isNonStreaming = ( response: Awaited>, -): response is ChatCompletionResponse => Object.hasOwn(response, "choices") +): response is Record => { + return ( + response + && typeof response === "object" + && (Object.hasOwn(response, "choices") + || Object.hasOwn(response, "content")) // Support both OpenAI and Anthropic formats + && !response[Symbol.asyncIterator] // Not an async iterable + ) +} diff --git a/src/routes/models/route.ts b/src/routes/models/route.ts index 8e282a3..0f2172a 100644 --- a/src/routes/models/route.ts +++ b/src/routes/models/route.ts @@ -1,6 +1,7 @@ import { Hono } from "hono" import { forwardError } from "~/lib/forward-error" +import { reverseTransformModelName } from "~/lib/models" import { getModels } from "~/services/copilot/get-models" export const modelRoutes = new Hono() @@ -8,7 +9,43 @@ export const modelRoutes = new Hono() modelRoutes.get("/", async (c) => { try { 
diff --git a/src/routes/models/route.ts b/src/routes/models/route.ts
index 8e282a3..0f2172a 100644
--- a/src/routes/models/route.ts
+++ b/src/routes/models/route.ts
@@ -1,6 +1,7 @@
 import { Hono } from "hono"
 
 import { forwardError } from "~/lib/forward-error"
+import { reverseTransformModelName } from "~/lib/models"
 import { getModels } from "~/services/copilot/get-models"
 
 export const modelRoutes = new Hono()
@@ -8,7 +9,43 @@ export const modelRoutes = new Hono()
 modelRoutes.get("/", async (c) => {
   try {
     const models = await getModels()
-    return c.json(models)
+
+    // Transform model names and disguise Claude models as GPT models
+    // This prevents Cursor from detecting Claude models and sending Anthropic format
+    const transformedModels = {
+      ...models,
+      data: models.data.map((model) => {
+        const transformedModel = { ...model }
+
+        // Disguise Claude models as GPT models to force OpenAI format
+        if (model.id.includes('claude-sonnet-4')) {
+          transformedModel.id = 'gpt-4-claude-sonnet-4'
+          transformedModel.name = 'GPT-4 (Claude Sonnet 4)'
+          transformedModel.vendor = 'OpenAI'
+        } else if (model.id.includes('claude-sonnet-3.7')) {
+          transformedModel.id = 'gpt-4-claude-sonnet-37'
+          transformedModel.name = 'GPT-4 (Claude Sonnet 3.7)'
+          transformedModel.vendor = 'OpenAI'
+        } else if (model.id.includes('claude-sonnet-3.5')) {
+          transformedModel.id = 'gpt-35-turbo-claude-sonnet-35'
+          transformedModel.name = 'GPT-3.5 Turbo (Claude Sonnet 3.5)'
+          transformedModel.vendor = 'OpenAI'
+        } else if (model.id.includes('claude')) {
+          // Handle other Claude models
+          const claudeId = model.id.replace(/[^a-z0-9]/gi, '')
+          transformedModel.id = `gpt-4-${claudeId}`
+          transformedModel.name = `GPT-4 (${model.name || model.id})`
+          transformedModel.vendor = 'OpenAI'
+        } else {
+          // Non-Claude models pass through with normal transformation
+          transformedModel.id = reverseTransformModelName(model.id)
+        }
+
+        return transformedModel
+      }),
+    }
+
+    return c.json(transformedModels)
   } catch (error) {
     return await forwardError(c, error)
   }
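
// ---------------------------------------------------------------------------
// Editor's sketch (not part of the diff): the disguise mapping above,
// extracted as a standalone function for illustration. "claude-3-haiku" is a
// hypothetical id showing the catch-all branch. (transformModelName in
// src/lib/models.ts currently maps only "gpt-4.1" back to "claude-sonnet-4".)
const disguise = (id: string): string => {
  if (id.includes("claude-sonnet-4")) return "gpt-4-claude-sonnet-4"
  if (id.includes("claude-sonnet-3.7")) return "gpt-4-claude-sonnet-37"
  if (id.includes("claude-sonnet-3.5")) return "gpt-35-turbo-claude-sonnet-35"
  if (id.includes("claude")) return `gpt-4-${id.replace(/[^a-z0-9]/gi, "")}`
  return id
}

disguise("claude-sonnet-4") // -> "gpt-4-claude-sonnet-4"
disguise("claude-3-haiku") // -> "gpt-4-claude3haiku"
// ---------------------------------------------------------------------------
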
part.type === "image", + ), + ) -// Streaming types + const response = await fetch(`${copilotBaseUrl(state)}/chat/completions`, { + method: "POST", + headers: copilotHeaders(state, visionEnable), + body: JSON.stringify(processedPayload), + }) -export interface ChatCompletionChunk { - choices: [Choice] - created: number - object: "chat.completion.chunk" - id: string - model: string -} + if (!response.ok) { + const errorText = await response.text() -interface Delta { - content?: string - role?: string -} -interface Choice { - index: number - delta: Delta - finish_reason: "stop" | null - logprobs: null -} -// Non-streaming types + throw new HTTPError( + `Failed to create chat completions: ${errorText}`, + response, + ) + } -export interface ChatCompletionResponse { - id: string - object: string - created: number - model: string - choices: [ChoiceNonStreaming] -} + if (processedPayload.stream) { + return createStreamingResponse(response) + } -interface ChoiceNonStreaming { - index: number - message: Message - logprobs: null - finish_reason: "stop" + const responseData = (await response.json()) as any + return responseData + } catch (error) { + throw error + } } -// Payload types +// Flexible types for maximum compatibility +export type MessageRole = "user" | "assistant" | "system" | "tool" export interface ChatCompletionsPayload { - messages: Array + messages: Array model: string - temperature?: number - top_p?: number - max_tokens?: number - stop?: Array - n?: number - stream?: boolean -} - -export interface Message { - role: "user" | "assistant" | "system" - content: string | Array -} - -// https://platform.openai.com/docs/api-reference - -export interface ContentPart { - type: "input_image" | "input_text" | "image_url" - text?: string - image_url?: string + [key: string]: any // Allow any additional fields } -// https://platform.openai.com/docs/guides/images-vision#giving-a-model-images-as-input -// Note: copilot use "image_url", but openai use "input_image" diff --git a/src/services/copilot/create-embeddings.ts b/src/services/copilot/create-embeddings.ts index 7b43a19..07571c6 100644 --- a/src/services/copilot/create-embeddings.ts +++ b/src/services/copilot/create-embeddings.ts @@ -13,7 +13,11 @@ export const createEmbeddings = async (payload: EmbeddingRequest) => { if (!response.ok) throw new HTTPError("Failed to create embeddings", response) - return (await response.json()) as EmbeddingResponse + // return (await response.json()) as EmbeddingResponse + + const json = await response.json() + + return json as EmbeddingResponse } export interface EmbeddingRequest { diff --git a/src/services/github/poll-access-token.ts b/src/services/github/poll-access-token.ts index 938ff70..674f5e6 100644 --- a/src/services/github/poll-access-token.ts +++ b/src/services/github/poll-access-token.ts @@ -15,7 +15,6 @@ export async function pollAccessToken( // Interval is in seconds, we need to multiply by 1000 to get milliseconds // I'm also adding another second, just to be safe const sleepDuration = (deviceCode.interval + 1) * 1000 - consola.debug(`Polling access token with interval of ${sleepDuration}ms`) while (true) { const response = await fetch( @@ -39,7 +38,6 @@ export async function pollAccessToken( } const json = await response.json() - consola.debug("Polling access token response:", json) const { access_token } = json as AccessTokenResponse diff --git a/tsconfig.json b/tsconfig.json index bfff5e6..be86ccb 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -16,6 +16,8 @@ "noUnusedParameters": true, 
"noFallthroughCasesInSwitch": true, "noUncheckedSideEffectImports": true, + "noImplicitAny": true, + "baseUrl": ".", "paths": {