From b225799ddca8a27330c155fda2d116803f7a59a0 Mon Sep 17 00:00:00 2001
From: Adekunle
Date: Mon, 7 Jul 2025 20:20:25 +0100
Subject: [PATCH] docs: add vision example using image URL

---
 README.md | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)

diff --git a/README.md b/README.md
index 9864a4829..888c9cc8e 100644
--- a/README.md
+++ b/README.md
@@ -69,6 +69,36 @@ const completion = await client.chat.completions.create({
 console.log(completion.choices[0].message.content);
 ```
 
+## Vision
+
+Use the Responses API to generate text from combined image and text input.
+This is useful for describing and analyzing visual content.
+
+```ts
+const response = await client.responses.create({
+  model: 'gpt-4o',
+  input: [
+    {
+      role: 'user',
+      content: [
+        {
+          type: 'input_text',
+          text: 'What is in this image?',
+        },
+        {
+          type: 'input_image',
+          detail: 'auto', // can be 'low', 'high', or 'auto'
+          // Replace with a real, publicly accessible image URL
+          image_url: 'https://example.com/image.jpg',
+        },
+      ],
+    },
+  ],
+});
+
+console.log(response.output_text);
+```
+
 ## Streaming responses
 
 We provide support for streaming responses using Server Sent Events (SSE).
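The snippet in this patch points `image_url` at a remote URL. The same `input_image` content part should also accept a base64-encoded data URL, which is useful for local files. A minimal sketch of that variant, assuming a local `image.jpg`, an `OPENAI_API_KEY` in the environment, and the same model choice (file name and MIME type are illustrative):

```ts
import fs from 'node:fs';
import OpenAI from 'openai';

const client = new OpenAI(); // reads OPENAI_API_KEY from the environment

// Read a local image (assumed to exist) and encode it as base64
const imageBase64 = fs.readFileSync('image.jpg', 'base64');

const response = await client.responses.create({
  model: 'gpt-4o',
  input: [
    {
      role: 'user',
      content: [
        { type: 'input_text', text: 'What is in this image?' },
        {
          type: 'input_image',
          detail: 'auto',
          // Pass the image inline as a data URL instead of a remote link
          image_url: `data:image/jpeg;base64,${imageBase64}`,
        },
      ],
    },
  ],
});

console.log(response.output_text);
```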