Skip to content

Commit e3ed388

Browse files
authored
samples: added openai sample (#3207)
1 parent 3dee9b7 commit e3ed388

File tree

6 files changed

+325
-0
lines changed

6 files changed

+325
-0
lines changed

samples/js-openai/.idx/dev.nix

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
## Default Nix Environment for TypeScript + OpenAI Examples
## Requires the sample to be started with `npm run genkit:dev`
## (fixed: previous comment said "npx run" and referenced the Gemini samples)

# To learn more about how to use Nix to configure your environment
# see: https://developers.google.com/idx/guides/customize-idx-env
{ pkgs, ... }: {
  # Which nixpkgs channel to use.
  channel = "stable-24.05"; # or "unstable"
  # Use https://search.nixos.org/packages to find packages
  packages = [
    pkgs.nodejs_20
    pkgs.util-linux
  ];
  # Sets environment variables in the workspace
  env = {
    OPENAI_API_KEY = "";
  };
  idx = {
    # Search for the extensions you want on https://open-vsx.org/ and use "publisher.id"
    extensions = [
    ];

    # Workspace lifecycle hooks
    workspace = {
      # Runs when a workspace is first created
      onCreate = {
        npm-install = "npm ci --no-audit --prefer-offline --no-progress --timing";
        default.openFiles = [ "README.md" "src/index.ts" ];
      };
      # Runs when the workspace is (re)started.
      # Prompts for an OpenAI API key when none is configured, then starts the dev server.
      onStart = {
        run-server = "if [ -z \"\${OPENAI_API_KEY}\" ]; then \
        echo 'No OpenAI API key detected, enter your OpenAI API key:' && \
        read -s OPENAI_API_KEY && \
        echo 'You can also set the key in .idx/dev.nix to automatically add to your workspace'
        export OPENAI_API_KEY; \
        fi && \
        npm run genkit:dev";
      };
    };
  };
}

samples/js-openai/README.md

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
# OpenAI Samples

Examples of how to use OpenAI models and their custom features.

To run these examples: `npm start`

Then navigate to http://localhost:4000/flows and run the sample flows.

samples/js-openai/package.json

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
{
2+
"name": "openai",
3+
"version": "1.0.0",
4+
"description": "Sample Genkit flows demonstrating OpenAI models and features",
5+
"main": "lib/index.js",
6+
"scripts": {
7+
"test": "echo \"Error: no test specified\" && exit 1",
8+
"start": "npm run genkit:dev",
9+
"genkit:dev": "genkit start -- npx tsx --watch src/index.ts",
10+
"build": "tsc",
11+
"build:watch": "tsc --watch"
12+
},
13+
"keywords": [],
14+
"author": "",
15+
"license": "ISC",
16+
"dependencies": {
17+
"@genkit-ai/compat-oai": "1.15.1",
18+
"genkit": "^1.15.1",
19+
"node-fetch": "3.3.2",
20+
"wav": "^1.0.2"
21+
},
22+
"devDependencies": {
23+
"@types/wav": "^1.0.4",
24+
"genkit-cli": "^1.15.1",
25+
"tsx": "^4.20.3",
26+
"typescript": "^5.5.4"
27+
}
28+
}

samples/js-openai/photo.jpg

295 KB
Loading

samples/js-openai/src/index.ts

Lines changed: 234 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,234 @@
1+
/**
2+
* Copyright 2024 Google LLC
3+
*
4+
* Licensed under the Apache License, Version 2.0 (the "License");
5+
* you may not use this file except in compliance with the License.
6+
* You may obtain a copy of the License at
7+
*
8+
* http://www.apache.org/licenses/LICENSE-2.0
9+
*
10+
* Unless required by applicable law or agreed to in writing, software
11+
* distributed under the License is distributed on an "AS IS" BASIS,
12+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
* See the License for the specific language governing permissions and
14+
* limitations under the License.
15+
*/
16+
17+
import openAI from '@genkit-ai/compat-oai/openai';
18+
import * as fs from 'fs';
19+
import { genkit, z } from 'genkit';
20+
import wav from 'wav';
21+
22+
const ai = genkit({
23+
plugins: [
24+
// Provide the key via the OPENAI_API_KEY environment variable
25+
openAI(),
26+
],
27+
});
28+
29+
ai.defineFlow('basic-hi', async () => {
30+
const { text } = await ai.generate({
31+
model: openAI.model('o4-mini'),
32+
prompt: 'You are a helpful AI assistant named Walt, say hello',
33+
});
34+
35+
return text;
36+
});
37+
38+
// Multimodal input
39+
ai.defineFlow('multimodal-input', async () => {
40+
const photoBase64 = fs.readFileSync('photo.jpg', { encoding: 'base64' });
41+
42+
const { text } = await ai.generate({
43+
model: openAI.model('gpt-4o'),
44+
prompt: [
45+
{ text: 'describe this photo' },
46+
{
47+
media: {
48+
contentType: 'image/jpeg',
49+
url: `data:image/jpeg;base64,${photoBase64}`,
50+
},
51+
},
52+
],
53+
});
54+
55+
return text;
56+
});
57+
58+
// Streaming
59+
ai.defineFlow('streaming', async (_, { sendChunk }) => {
60+
const { stream } = ai.generateStream({
61+
model: openAI.model('gpt-4o'),
62+
prompt: 'Write a poem about AI.',
63+
});
64+
65+
let poem = '';
66+
for await (const chunk of stream) {
67+
poem += chunk.text;
68+
sendChunk(chunk.text);
69+
}
70+
71+
return poem;
72+
});
73+
74+
// Web search
75+
ai.defineFlow('web-search', async () => {
76+
const response = await ai.generate({
77+
model: openAI.model('gpt-4o-search-preview'),
78+
prompt: 'Who is Albert Einstein?',
79+
config: {
80+
web_search_options: {},
81+
},
82+
});
83+
84+
return {
85+
text: response.text,
86+
annotations: (response.raw as any)?.choices?.[0].message.annotations,
87+
};
88+
});
89+
90+
const getWeather = ai.defineTool(
91+
{
92+
name: 'getWeather',
93+
inputSchema: z.object({
94+
location: z
95+
.string()
96+
.describe(
97+
'Location for which to get the weather, ex: San-Francisco, CA'
98+
),
99+
}),
100+
description: 'can be used to calculate gablorken value',
101+
},
102+
async (input) => {
103+
// pretend we call an actual API
104+
return {
105+
location: input.location,
106+
temperature_celcius: 21.5,
107+
conditions: 'cloudy',
108+
};
109+
}
110+
);
111+
112+
// Tool calling
113+
ai.defineFlow(
114+
{
115+
name: 'tool-calling',
116+
inputSchema: z.string().default('Paris, France'),
117+
outputSchema: z.string(),
118+
streamSchema: z.any(),
119+
},
120+
async (location, { sendChunk }) => {
121+
const { response, stream } = ai.generateStream({
122+
model: openAI.model('gpt-4o'),
123+
config: {
124+
temperature: 1,
125+
},
126+
tools: [getWeather],
127+
prompt: `tell what's the weather in ${location} (in Fahrenheit)`,
128+
});
129+
130+
for await (const chunk of stream) {
131+
sendChunk(chunk);
132+
}
133+
134+
return (await response).text;
135+
}
136+
);
137+
138+
// Shape of the RPG character produced by the 'structured-output' flow below.
// NOTE(review): 'WIZZARD' spelling is part of the flow's output contract
// (output schema enum value) — confirm with consumers before renaming.
const RpgCharacterSchema = z.object({
  name: z.string().describe('name of the character'),
  backstory: z.string().describe("character's backstory, about a paragraph"),
  weapons: z.array(z.string()),
  class: z.enum(['RANGER', 'WIZZARD', 'TANK', 'HEALER', 'ENGINEER']),
});
144+
145+
// A simple example of structured output.
146+
ai.defineFlow(
147+
{
148+
name: 'structured-output',
149+
inputSchema: z.string().default('Glorb'),
150+
outputSchema: RpgCharacterSchema,
151+
},
152+
async (name, { sendChunk }) => {
153+
const { response, stream } = ai.generateStream({
154+
model: openAI.model('gpt-4o'),
155+
config: {
156+
temperature: 1, // we want creativity
157+
},
158+
output: { schema: RpgCharacterSchema },
159+
prompt: `Generate an RPC character called ${name}`,
160+
});
161+
162+
for await (const chunk of stream) {
163+
sendChunk(chunk.output);
164+
}
165+
166+
return (await response).output!;
167+
}
168+
);
169+
170+
// Image generation.
171+
ai.defineFlow('dall-e-image-generation', async (_, { sendChunk }) => {
172+
const { media } = await ai.generate({
173+
model: openAI.model('dall-e-3'),
174+
prompt: `generate an image of a banana riding bicycle`,
175+
});
176+
177+
return media;
178+
});
179+
180+
// TTS sample
181+
ai.defineFlow(
182+
{
183+
name: 'tts',
184+
inputSchema: z.string().default('Genkit is an amazing Gen AI library'),
185+
outputSchema: z.object({ media: z.string() }),
186+
},
187+
async (query) => {
188+
const { media } = await ai.generate({
189+
model: openAI.model('gpt-4o-mini-tts'),
190+
config: {
191+
voice: 'sage',
192+
},
193+
prompt: query,
194+
});
195+
if (!media) {
196+
throw new Error('no media returned');
197+
}
198+
const audioBuffer = Buffer.from(
199+
media.url.substring(media.url.indexOf(',') + 1),
200+
'base64'
201+
);
202+
return {
203+
media: 'data:audio/wav;base64,' + (await toWav(audioBuffer)),
204+
};
205+
}
206+
);
207+
208+
async function toWav(
209+
pcmData: Buffer,
210+
channels = 1,
211+
rate = 24000,
212+
sampleWidth = 2
213+
): Promise<string> {
214+
return new Promise((resolve, reject) => {
215+
// This code depends on `wav` npm library.
216+
const writer = new wav.Writer({
217+
channels,
218+
sampleRate: rate,
219+
bitDepth: sampleWidth * 8,
220+
});
221+
222+
let bufs = [] as any[];
223+
writer.on('error', reject);
224+
writer.on('data', function (d) {
225+
bufs.push(d);
226+
});
227+
writer.on('end', function () {
228+
resolve(Buffer.concat(bufs).toString('base64'));
229+
});
230+
231+
writer.write(pcmData);
232+
writer.end();
233+
});
234+
}

samples/js-openai/tsconfig.json

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
{
2+
"compileOnSave": true,
3+
"include": ["src"],
4+
"compilerOptions": {
5+
"module": "commonjs",
6+
"noImplicitReturns": true,
7+
"outDir": "lib",
8+
"sourceMap": true,
9+
"strict": true,
10+
"target": "es2017",
11+
"skipLibCheck": true,
12+
"esModuleInterop": true
13+
}
14+
}

0 commit comments

Comments
 (0)