Skip to content

Commit 3ee7288

Browse files
authored
🤖 refactor: migrate IPC layer to ORPC for type-safe RPC (#763)
## Summary

Replaces the custom IPC layer with [oRPC](https://orpc.unnoq.com/) for type-safe RPC between browser/renderer and backend processes.

## Key Changes

### Architecture

- **New ORPC router** (`src/node/orpc/router.ts`) - Central router defining all RPC endpoints with Zod schemas
- **Schema definitions** (`src/common/orpc/schemas.ts`) - Shared Zod schemas for request/response validation
- **ServiceContainer** (`src/node/services/serviceContainer.ts`) - Dependency injection container for all backend services
- **React integration** (`src/browser/orpc/react.tsx`) - `ORPCProvider` and `useORPC()` hook for frontend

### Transport

- **Desktop (Electron)**: MessagePort-based RPC via `@orpc/server/message-port`
- **Server mode**: HTTP + WebSocket via `@orpc/server/node` and `@orpc/server/ws`
- Auth middleware with timing-safe token comparison for server mode

### Subscriptions

Streaming endpoints (chat events, metadata updates, terminal output) use async generators:

```typescript
// Router handler:
async function* ({ context, input }) {
  const unsubscribe = service.onEvent(push);
  try {
    while (!ended) {
      yield queue.shift() ?? (await nextEvent());
    }
  } finally {
    unsubscribe();
  }
}

// Client
for await (const event of client.workspace.onChat({ workspaceId })) {
  handleEvent(event);
}
```

### Removed

- `src/browser/api.ts` (old HTTP/WS client)
- `src/node/services/ipcMain.ts` (old IPC handler registration)
- `src/desktop/preload.ts` IPC method definitions (now just MessagePort forwarding)
- `tests/ipcMain/` directory (migrated to `tests/integration/`)

## Testing

- All existing tests migrated to use ORPC test client
- New `StreamCollector` utility for testing async generator subscriptions
- Server endpoint tests for HTTP and WebSocket transports

## Migration Notes

- Frontend components now use `useORPC()` hook instead of `window.api`
- Stores receive client via `setClient()` during app initialization
- Type safety is enforced at compile time via Zod schema inference

---

_Generated with [mux](https://github.com/coder/mux)_

Signed-off-by: Thomas Kosiewski <[email protected]>
1 parent 7d6be92 commit 3ee7288

File tree

224 files changed

+13587
-13626
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

224 files changed

+13587
-13626
lines changed

.github/actions/setup-mux/action.yml

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,4 +35,3 @@ runs:
3535
if: steps.cache-node-modules.outputs.cache-hit != 'true'
3636
shell: bash
3737
run: bun install --frozen-lockfile
38-

.github/workflows/release.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ on:
66
workflow_dispatch:
77
inputs:
88
tag:
9-
description: 'Tag to release (e.g., v1.2.3). If provided, will checkout and release this tag regardless of current branch.'
9+
description: "Tag to release (e.g., v1.2.3). If provided, will checkout and release this tag regardless of current branch."
1010
required: false
1111
type: string
1212

.github/workflows/terminal-bench.yml

Lines changed: 18 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -4,34 +4,34 @@ on:
44
workflow_call:
55
inputs:
66
model_name:
7-
description: 'Model to use (e.g., anthropic:claude-sonnet-4-5)'
7+
description: "Model to use (e.g., anthropic:claude-sonnet-4-5)"
88
required: false
99
type: string
1010
thinking_level:
11-
description: 'Thinking level (off, low, medium, high)'
11+
description: "Thinking level (off, low, medium, high)"
1212
required: false
1313
type: string
1414
dataset:
15-
description: 'Terminal-Bench dataset to use'
15+
description: "Terminal-Bench dataset to use"
1616
required: false
1717
type: string
18-
default: 'terminal-bench-core==0.1.1'
18+
default: "terminal-bench-core==0.1.1"
1919
concurrency:
20-
description: 'Number of concurrent tasks (--n-concurrent)'
20+
description: "Number of concurrent tasks (--n-concurrent)"
2121
required: false
2222
type: string
23-
default: '4'
23+
default: "4"
2424
livestream:
25-
description: 'Enable livestream mode (verbose output to console)'
25+
description: "Enable livestream mode (verbose output to console)"
2626
required: false
2727
type: boolean
2828
default: false
2929
sample_size:
30-
description: 'Number of random tasks to run (empty = all tasks)'
30+
description: "Number of random tasks to run (empty = all tasks)"
3131
required: false
3232
type: string
3333
extra_args:
34-
description: 'Additional arguments to pass to terminal-bench'
34+
description: "Additional arguments to pass to terminal-bench"
3535
required: false
3636
type: string
3737
secrets:
@@ -42,34 +42,34 @@ on:
4242
workflow_dispatch:
4343
inputs:
4444
dataset:
45-
description: 'Terminal-Bench dataset to use'
45+
description: "Terminal-Bench dataset to use"
4646
required: false
47-
default: 'terminal-bench-core==0.1.1'
47+
default: "terminal-bench-core==0.1.1"
4848
type: string
4949
concurrency:
50-
description: 'Number of concurrent tasks (--n-concurrent)'
50+
description: "Number of concurrent tasks (--n-concurrent)"
5151
required: false
52-
default: '4'
52+
default: "4"
5353
type: string
5454
livestream:
55-
description: 'Enable livestream mode (verbose output to console)'
55+
description: "Enable livestream mode (verbose output to console)"
5656
required: false
5757
default: false
5858
type: boolean
5959
sample_size:
60-
description: 'Number of random tasks to run (empty = all tasks)'
60+
description: "Number of random tasks to run (empty = all tasks)"
6161
required: false
6262
type: string
6363
model_name:
64-
description: 'Model to use (e.g., anthropic:claude-sonnet-4-5, openai:gpt-5.1-codex)'
64+
description: "Model to use (e.g., anthropic:claude-sonnet-4-5, openai:gpt-5.1-codex)"
6565
required: false
6666
type: string
6767
thinking_level:
68-
description: 'Thinking level (off, low, medium, high)'
68+
description: "Thinking level (off, low, medium, high)"
6969
required: false
7070
type: string
7171
extra_args:
72-
description: 'Additional arguments to pass to terminal-bench'
72+
description: "Additional arguments to pass to terminal-bench"
7373
required: false
7474
type: string
7575

@@ -147,4 +147,3 @@ jobs:
147147
benchmark.log
148148
if-no-files-found: warn
149149
retention-days: 30
150-

.storybook/mocks/orpc.ts

Lines changed: 217 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,217 @@
1+
/**
2+
* Mock ORPC client factory for Storybook stories.
3+
*
4+
* Creates a client that matches the AppRouter interface with configurable mock data.
5+
*/
6+
import type { ORPCClient } from "@/browser/orpc/react";
7+
import type { FrontendWorkspaceMetadata } from "@/common/types/workspace";
8+
import type { ProjectConfig } from "@/node/config";
9+
import type { WorkspaceChatMessage } from "@/common/orpc/types";
10+
import type { ChatStats } from "@/common/types/chatStats";
11+
import { DEFAULT_RUNTIME_CONFIG } from "@/common/constants/workspace";
12+
13+
export interface MockORPCClientOptions {
14+
projects?: Map<string, ProjectConfig>;
15+
workspaces?: FrontendWorkspaceMetadata[];
16+
/** Per-workspace chat callback. Return messages to emit, or use the callback for streaming. */
17+
onChat?: (workspaceId: string, emit: (msg: WorkspaceChatMessage) => void) => (() => void) | void;
18+
/** Mock for executeBash per workspace */
19+
executeBash?: (
20+
workspaceId: string,
21+
script: string
22+
) => Promise<{ success: true; output: string; exitCode: number; wall_duration_ms: number }>;
23+
}
24+
25+
/**
26+
* Creates a mock ORPC client for Storybook.
27+
*
28+
* Usage:
29+
* ```tsx
30+
* const client = createMockORPCClient({
31+
* projects: new Map([...]),
32+
* workspaces: [...],
33+
* onChat: (wsId, emit) => {
34+
* emit({ type: "caught-up" });
35+
* // optionally return cleanup function
36+
* },
37+
* });
38+
*
39+
* return <AppLoader client={client} />;
40+
* ```
41+
*/
42+
export function createMockORPCClient(options: MockORPCClientOptions = {}): ORPCClient {
43+
const { projects = new Map(), workspaces = [], onChat, executeBash } = options;
44+
45+
const workspaceMap = new Map(workspaces.map((w) => [w.id, w]));
46+
47+
const mockStats: ChatStats = {
48+
consumers: [],
49+
totalTokens: 0,
50+
model: "mock-model",
51+
tokenizerName: "mock-tokenizer",
52+
usageHistory: [],
53+
};
54+
55+
// Cast to ORPCClient - TypeScript can't fully validate the proxy structure
56+
return {
57+
tokenizer: {
58+
countTokens: async () => 0,
59+
countTokensBatch: async (_input: { model: string; texts: string[] }) =>
60+
_input.texts.map(() => 0),
61+
calculateStats: async () => mockStats,
62+
},
63+
server: {
64+
getLaunchProject: async () => null,
65+
},
66+
providers: {
67+
list: async () => [],
68+
getConfig: async () => ({}),
69+
setProviderConfig: async () => ({ success: true, data: undefined }),
70+
setModels: async () => ({ success: true, data: undefined }),
71+
},
72+
general: {
73+
listDirectory: async () => ({ entries: [], hasMore: false }),
74+
ping: async (input: string) => `Pong: ${input}`,
75+
tick: async function* () {
76+
// No-op generator
77+
},
78+
},
79+
projects: {
80+
list: async () => Array.from(projects.entries()),
81+
create: async () => ({
82+
success: true,
83+
data: { projectConfig: { workspaces: [] }, normalizedPath: "/mock/project" },
84+
}),
85+
pickDirectory: async () => null,
86+
listBranches: async () => ({
87+
branches: ["main", "develop"],
88+
recommendedTrunk: "main",
89+
}),
90+
remove: async () => ({ success: true, data: undefined }),
91+
secrets: {
92+
get: async () => [],
93+
update: async () => ({ success: true, data: undefined }),
94+
},
95+
},
96+
workspace: {
97+
list: async () => workspaces,
98+
create: async (input: { projectPath: string; branchName: string }) => ({
99+
success: true,
100+
metadata: {
101+
id: Math.random().toString(36).substring(2, 12),
102+
name: input.branchName,
103+
projectPath: input.projectPath,
104+
projectName: input.projectPath.split("/").pop() ?? "project",
105+
namedWorkspacePath: `/mock/workspace/${input.branchName}`,
106+
runtimeConfig: DEFAULT_RUNTIME_CONFIG,
107+
},
108+
}),
109+
remove: async () => ({ success: true }),
110+
rename: async (input: { workspaceId: string }) => ({
111+
success: true,
112+
data: { newWorkspaceId: input.workspaceId },
113+
}),
114+
fork: async () => ({ success: false, error: "Not implemented in mock" }),
115+
sendMessage: async () => ({ success: true, data: undefined }),
116+
resumeStream: async () => ({ success: true, data: undefined }),
117+
interruptStream: async () => ({ success: true, data: undefined }),
118+
clearQueue: async () => ({ success: true, data: undefined }),
119+
truncateHistory: async () => ({ success: true, data: undefined }),
120+
replaceChatHistory: async () => ({ success: true, data: undefined }),
121+
getInfo: async (input: { workspaceId: string }) =>
122+
workspaceMap.get(input.workspaceId) ?? null,
123+
executeBash: async (input: { workspaceId: string; script: string }) => {
124+
if (executeBash) {
125+
const result = await executeBash(input.workspaceId, input.script);
126+
return { success: true, data: result };
127+
}
128+
return {
129+
success: true,
130+
data: { success: true, output: "", exitCode: 0, wall_duration_ms: 0 },
131+
};
132+
},
133+
onChat: async function* (input: { workspaceId: string }) {
134+
if (!onChat) {
135+
yield { type: "caught-up" } as WorkspaceChatMessage;
136+
return;
137+
}
138+
139+
// Create a queue-based async iterator
140+
const queue: WorkspaceChatMessage[] = [];
141+
let resolveNext: ((msg: WorkspaceChatMessage) => void) | null = null;
142+
let ended = false;
143+
144+
const emit = (msg: WorkspaceChatMessage) => {
145+
if (ended) return;
146+
if (resolveNext) {
147+
const resolve = resolveNext;
148+
resolveNext = null;
149+
resolve(msg);
150+
} else {
151+
queue.push(msg);
152+
}
153+
};
154+
155+
// Call the user's onChat handler
156+
const cleanup = onChat(input.workspaceId, emit);
157+
158+
try {
159+
while (!ended) {
160+
if (queue.length > 0) {
161+
yield queue.shift()!;
162+
} else {
163+
const msg = await new Promise<WorkspaceChatMessage>((resolve) => {
164+
resolveNext = resolve;
165+
});
166+
yield msg;
167+
}
168+
}
169+
} finally {
170+
ended = true;
171+
cleanup?.();
172+
}
173+
},
174+
onMetadata: async function* () {
175+
// Empty generator - no metadata updates in mock
176+
await new Promise(() => {}); // Never resolves, keeps stream open
177+
},
178+
activity: {
179+
list: async () => ({}),
180+
subscribe: async function* () {
181+
await new Promise(() => {}); // Never resolves
182+
},
183+
},
184+
},
185+
window: {
186+
setTitle: async () => undefined,
187+
},
188+
terminal: {
189+
create: async () => ({
190+
sessionId: "mock-session",
191+
workspaceId: "mock-workspace",
192+
cols: 80,
193+
rows: 24,
194+
}),
195+
close: async () => undefined,
196+
resize: async () => undefined,
197+
sendInput: () => undefined,
198+
onOutput: async function* () {
199+
await new Promise(() => {});
200+
},
201+
onExit: async function* () {
202+
await new Promise(() => {});
203+
},
204+
openWindow: async () => undefined,
205+
closeWindow: async () => undefined,
206+
openNative: async () => undefined,
207+
},
208+
update: {
209+
check: async () => undefined,
210+
download: async () => undefined,
211+
install: () => undefined,
212+
onStatus: async function* () {
213+
await new Promise(() => {});
214+
},
215+
},
216+
} as unknown as ORPCClient;
217+
}

.storybook/preview.tsx

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
1-
import React from "react";
1+
import React, { useMemo } from "react";
22
import type { Preview } from "@storybook/react-vite";
33
import { ThemeProvider, type ThemeMode } from "../src/browser/contexts/ThemeContext";
4+
import { ORPCProvider } from "../src/browser/orpc/react";
5+
import { createMockORPCClient } from "./mocks/orpc";
46
import "../src/browser/styles/globals.css";
57

68
const preview: Preview = {
@@ -22,6 +24,16 @@ const preview: Preview = {
2224
theme: "dark",
2325
},
2426
decorators: [
27+
// Global ORPC provider - ensures useORPC works in all stories
28+
(Story) => {
29+
const client = useMemo(() => createMockORPCClient(), []);
30+
return (
31+
<ORPCProvider client={client}>
32+
<Story />
33+
</ORPCProvider>
34+
);
35+
},
36+
// Theme provider
2537
(Story, context) => {
2638
// Default to dark if mode not set (e.g., Chromatic headless browser defaults to light)
2739
const mode = (context.globals.theme as ThemeMode | undefined) ?? "dark";

babel.config.js

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
module.exports = {
2+
presets: [
3+
[
4+
"@babel/preset-env",
5+
{
6+
targets: {
7+
node: "current",
8+
},
9+
modules: "commonjs",
10+
},
11+
],
12+
[
13+
"@babel/preset-typescript",
14+
{
15+
allowDeclareFields: true,
16+
},
17+
],
18+
],
19+
};

0 commit comments

Comments
 (0)