diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index c304b1c10..c959e3340 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "5.10.1"
+ ".": "5.10.2"
}
diff --git a/.stats.yml b/.stats.yml
index 2b9160cf6..2dc4f680a 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-670ea0d2cc44f52a87dd3cadea45632953283e0636ba30788fdbdb22a232ccac.yml
-openapi_spec_hash: d8b7d38911fead545adf3e4297956410
-config_hash: 5525bda35e48ea6387c6175c4d1651fa
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-b2a451656ca64d30d174391ebfd94806b4de3ab76dc55b92843cfb7f1a54ecb6.yml
+openapi_spec_hash: 27d9691b400f28c17ef063a1374048b0
+config_hash: e822d0c9082c8b312264403949243179
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b888a4c87..5aa93696b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,19 @@
# Changelog
+## 5.10.2 (2025-07-22)
+
+Full Changelog: [v5.10.1...v5.10.2](https://github.com/openai/openai-node/compare/v5.10.1...v5.10.2)
+
+### Chores
+
+* **api:** event shapes more accurate ([78f4e1d](https://github.com/openai/openai-node/commit/78f4e1d8e7400001a7bc6a05dc9a6e52a2047523))
+* **internal:** version bump ([ea885ca](https://github.com/openai/openai-node/commit/ea885cac5c4231597141e91bd454e540830deb95))
+
+
+### Documentation
+
+* fix typos in helpers and realtime ([#1592](https://github.com/openai/openai-node/issues/1592)) ([17733b7](https://github.com/openai/openai-node/commit/17733b7e4a19754c9ca2ec815cf7d246b1dc138d))
+
## 5.10.1 (2025-07-16)
Full Changelog: [v5.10.0...v5.10.1](https://github.com/openai/openai-node/compare/v5.10.0...v5.10.1)
diff --git a/api.md b/api.md
index ef66d4e6c..21d0fdd79 100644
--- a/api.md
+++ b/api.md
@@ -710,8 +710,6 @@ Types:
- ResponseOutputTextAnnotationAddedEvent
- ResponsePrompt
- ResponseQueuedEvent
-- ResponseReasoningDeltaEvent
-- ResponseReasoningDoneEvent
- ResponseReasoningItem
- ResponseReasoningSummaryDeltaEvent
- ResponseReasoningSummaryDoneEvent
diff --git a/helpers.md b/helpers.md
index 8b25fe0a5..0ed2abb4c 100644
--- a/helpers.md
+++ b/helpers.md
@@ -7,7 +7,7 @@ provides richer integrations with TS specific types & returns a `ParsedChatCompl
## Auto-parsing response content with Zod schemas
-You can pass zod schemas wrapped with `zodResponseFormat()` to the `.parse()` method and the SDK will automatically conver the model
+You can pass zod schemas wrapped with `zodResponseFormat()` to the `.parse()` method and the SDK will automatically convert the model
into a JSON schema, send it to the API and parse the response content back using the given zod schema.
```ts
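// A minimal sketch of the auto-parsing flow described above. The schema and
// prompt are illustrative, not part of this diff; zodResponseFormat() turns
// the Zod schema into a JSON schema for the request, and .parse() validates
// the response content back against it.
import OpenAI from 'openai';
import { zodResponseFormat } from 'openai/helpers/zod';
import { z } from 'zod';

const Step = z.object({ explanation: z.string(), output: z.string() });
const MathResponse = z.object({ steps: z.array(Step), final_answer: z.string() });

const client = new OpenAI();
const completion = await client.chat.completions.parse({
  model: 'gpt-4o-2024-08-06',
  messages: [{ role: 'user', content: 'solve 8x + 31 = 2' }],
  response_format: zodResponseFormat(MathResponse, 'math_response'),
});
// .parsed is typed from the Zod schema (or null if nothing could be parsed).
console.log(completion.choices[0].message.parsed);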
diff --git a/jsr.json b/jsr.json
index 1a944f82d..a3145afb0 100644
--- a/jsr.json
+++ b/jsr.json
@@ -1,6 +1,6 @@
{
"name": "@openai/openai",
- "version": "5.10.1",
+ "version": "5.10.2",
"exports": {
".": "./index.ts",
"./helpers/zod": "./helpers/zod.ts",
diff --git a/package.json b/package.json
index 878fb3856..a94445a86 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "5.10.1",
+ "version": "5.10.2",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
diff --git a/realtime.md b/realtime.md
index df55f1a09..9842ad453 100644
--- a/realtime.md
+++ b/realtime.md
@@ -74,7 +74,7 @@ A full example can be found [here](https://github.com/openai/openai-node/blob/ma
When an error is encountered, either on the client side or returned from the server through the [`error` event](https://platform.openai.com/docs/guides/realtime-model-capabilities#error-handling), the `error` event listener will be fired. However, if you haven't registered an `error` event listener then an `unhandled Promise rejection` error will be thrown.
-It is **highly recommended** that you register an `error` event listener and handle errors approriately as typically the underlying connection is still usable.
+It is **highly recommended** that you register an `error` event listener and handle errors appropriately as typically the underlying connection is still usable.
```ts
const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' });
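// A sketch of the recommended pattern: registering an `error` listener avoids
// the unhandled Promise rejection described above, and in most cases the
// underlying connection remains usable afterwards.
rt.on('error', (err) => {
  // Handler body is illustrative: log and keep the session alive.
  console.error('realtime error', err);
});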
diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts
index 6bb092863..f533a558b 100644
--- a/src/resources/audio/speech.ts
+++ b/src/resources/audio/speech.ts
@@ -51,19 +51,7 @@ export interface SpeechCreateParams {
* `verse`. Previews of the voices are available in the
* [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
*/
- voice:
- | (string & {})
- | 'alloy'
- | 'ash'
- | 'ballad'
- | 'coral'
- | 'echo'
- | 'fable'
- | 'onyx'
- | 'nova'
- | 'sage'
- | 'shimmer'
- | 'verse';
+ voice: (string & {}) | 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse';
/**
* Control the voice of your generated audio with additional instructions. Does not
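Worth noting about the union shape kept here: the `(string & {})` arm is a TypeScript idiom that preserves editor autocomplete for the listed literal voices while still accepting any other string, so new server-side voices do not require an SDK update. A minimal usage sketch (model name and input are illustrative):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

// 'coral' autocompletes from the literal arms; an unlisted voice string would
// also type-check thanks to the (string & {}) arm.
const speech = await client.audio.speech.create({
  model: 'gpt-4o-mini-tts',
  voice: 'coral',
  input: 'Hello from the text-to-speech API.',
});
```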
diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts
index 5e70bd2d3..14e0ad8bf 100644
--- a/src/resources/beta/realtime/realtime.ts
+++ b/src/resources/beta/realtime/realtime.ts
@@ -1130,22 +1130,9 @@ export interface RealtimeResponse {
/**
* The voice the model used to respond. Current voice options are `alloy`, `ash`,
- * `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
- * `verse`.
- */
- voice?:
- | (string & {})
- | 'alloy'
- | 'ash'
- | 'ballad'
- | 'coral'
- | 'echo'
- | 'fable'
- | 'onyx'
- | 'nova'
- | 'sage'
- | 'shimmer'
- | 'verse';
+ * `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.
+ */
+ voice?: (string & {}) | 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse';
}
/**
@@ -1832,22 +1819,9 @@ export namespace ResponseCreateEvent {
/**
* The voice the model uses to respond. Voice cannot be changed during the session
* once the model has responded with audio at least once. Current voice options are
- * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
- * `shimmer`, and `verse`.
- */
- voice?:
- | (string & {})
- | 'alloy'
- | 'ash'
- | 'ballad'
- | 'coral'
- | 'echo'
- | 'fable'
- | 'onyx'
- | 'nova'
- | 'sage'
- | 'shimmer'
- | 'verse';
+ * `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.
+ */
+ voice?: (string & {}) | 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse';
}
export namespace Response {
@@ -2325,22 +2299,9 @@ export namespace SessionUpdateEvent {
/**
* The voice the model uses to respond. Voice cannot be changed during the session
* once the model has responded with audio at least once. Current voice options are
- * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
- * `shimmer`, and `verse`.
- */
- voice?:
- | (string & {})
- | 'alloy'
- | 'ash'
- | 'ballad'
- | 'coral'
- | 'echo'
- | 'fable'
- | 'onyx'
- | 'nova'
- | 'sage'
- | 'shimmer'
- | 'verse';
+ * `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.
+ */
+ voice?: (string & {}) | 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse';
}
export namespace Session {
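Because the voice is locked once the model has answered with audio, clients typically choose it in a `session.update` sent right after connecting. A hedged sketch with the `OpenAIRealtimeWS` client from realtime.md (the voice choice is illustrative):

```ts
import { OpenAIRealtimeWS } from 'openai/beta/realtime/ws';

const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' });

rt.socket.on('open', () => {
  // Must be sent before the model responds with audio for the first time.
  rt.send({ type: 'session.update', session: { voice: 'verse' } });
});
```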
diff --git a/src/resources/beta/realtime/sessions.ts b/src/resources/beta/realtime/sessions.ts
index b3a4eda8d..093a06962 100644
--- a/src/resources/beta/realtime/sessions.ts
+++ b/src/resources/beta/realtime/sessions.ts
@@ -162,22 +162,9 @@ export interface Session {
/**
* The voice the model uses to respond. Voice cannot be changed during the session
* once the model has responded with audio at least once. Current voice options are
- * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
- * `shimmer`, and `verse`.
- */
- voice?:
- | (string & {})
- | 'alloy'
- | 'ash'
- | 'ballad'
- | 'coral'
- | 'echo'
- | 'fable'
- | 'onyx'
- | 'nova'
- | 'sage'
- | 'shimmer'
- | 'verse';
+ * `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.
+ */
+ voice?: (string & {}) | 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse';
}
export namespace Session {
@@ -436,21 +423,9 @@ export interface SessionCreateResponse {
/**
* The voice the model uses to respond. Voice cannot be changed during the session
* once the model has responded with audio at least once. Current voice options are
- * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`.
- */
- voice?:
- | (string & {})
- | 'alloy'
- | 'ash'
- | 'ballad'
- | 'coral'
- | 'echo'
- | 'fable'
- | 'onyx'
- | 'nova'
- | 'sage'
- | 'shimmer'
- | 'verse';
+ * `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.
+ */
+ voice?: (string & {}) | 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse';
}
export namespace SessionCreateResponse {
@@ -694,22 +669,9 @@ export interface SessionCreateParams {
/**
* The voice the model uses to respond. Voice cannot be changed during the session
* once the model has responded with audio at least once. Current voice options are
- * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
- * `shimmer`, and `verse`.
- */
- voice?:
- | (string & {})
- | 'alloy'
- | 'ash'
- | 'ballad'
- | 'coral'
- | 'echo'
- | 'fable'
- | 'onyx'
- | 'nova'
- | 'sage'
- | 'shimmer'
- | 'verse';
+ * `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.
+ */
+ voice?: (string & {}) | 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse';
}
export namespace SessionCreateParams {
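A hedged sketch of minting an ephemeral realtime session with these params via the SDK's `beta.realtime.sessions.create` wrapper (model and voice are illustrative):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const session = await client.beta.realtime.sessions.create({
  model: 'gpt-4o-realtime-preview-2024-12-17',
  voice: 'verse',
});
// The client_secret is the ephemeral key a browser client would connect with.
console.log(session.client_secret);
```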
diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts
index 887f0be5d..88d4249b4 100644
--- a/src/resources/chat/completions/completions.ts
+++ b/src/resources/chat/completions/completions.ts
@@ -278,7 +278,7 @@ export interface ChatCompletion {
* - If set to 'auto', then the request will be processed with the service tier
* configured in the Project settings. Unless otherwise configured, the Project
* will use 'default'.
- * - If set to 'default', then the requset will be processed with the standard
+ * - If set to 'default', then the request will be processed with the standard
* pricing and performance for the selected model.
* - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
* 'priority', then the request will be processed with the corresponding service
@@ -474,19 +474,7 @@ export interface ChatCompletionAudioParam {
* The voice the model uses to respond. Supported voices are `alloy`, `ash`,
* `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`.
*/
- voice:
- | (string & {})
- | 'alloy'
- | 'ash'
- | 'ballad'
- | 'coral'
- | 'echo'
- | 'fable'
- | 'onyx'
- | 'nova'
- | 'sage'
- | 'shimmer'
- | 'verse';
+ voice: (string & {}) | 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse';
}
/**
@@ -529,7 +517,7 @@ export interface ChatCompletionChunk {
* - If set to 'auto', then the request will be processed with the service tier
* configured in the Project settings. Unless otherwise configured, the Project
* will use 'default'.
- * - If set to 'default', then the requset will be processed with the standard
+ * - If set to 'default', then the request will be processed with the standard
* pricing and performance for the selected model.
* - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
* 'priority', then the request will be processed with the corresponding service
@@ -1451,7 +1439,7 @@ export interface ChatCompletionCreateParamsBase {
* - If set to 'auto', then the request will be processed with the service tier
* configured in the Project settings. Unless otherwise configured, the Project
* will use 'default'.
- * - If set to 'default', then the requset will be processed with the standard
+ * - If set to 'default', then the request will be processed with the standard
* pricing and performance for the selected model.
* - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
* 'priority', then the request will be processed with the corresponding service
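A hedged sketch of selecting a tier per the corrected doc above ('flex' and 'priority' assume the corresponding account access; model and prompt are illustrative):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const completion = await client.chat.completions.create({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Say hello.' }],
  // 'auto' defers to Project settings; 'flex' or 'priority' pick those tiers.
  service_tier: 'default',
});
// When set on the request, the response reports the tier actually used.
console.log(completion.service_tier);
```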
diff --git a/src/resources/images.ts b/src/resources/images.ts
index 8433ec22a..8f1dad624 100644
--- a/src/resources/images.ts
+++ b/src/resources/images.ts
@@ -446,7 +446,7 @@ export namespace ImagesResponse {
input_tokens_details: Usage.InputTokensDetails;
/**
- * The number of image tokens in the output image.
+ * The number of output tokens generated by the model.
*/
output_tokens: number;
@@ -589,6 +589,9 @@ export interface ImageEditParamsBase {
* The number of partial images to generate. This parameter is used for streaming
* responses that return partial images. Value must be between 0 and 3. When set to
* 0, the response will be a single image sent in one streaming event.
+ *
+ * Note that the final image may be sent before the full number of partial images
+ * are generated if the full image is generated more quickly.
*/
partial_images?: number | null;
@@ -709,6 +712,9 @@ export interface ImageGenerateParamsBase {
* The number of partial images to generate. This parameter is used for streaming
* responses that return partial images. Value must be between 0 and 3. When set to
* 0, the response will be a single image sent in one streaming event.
+ *
+ * Note that the final image may be sent before the full number of partial images
+ * are generated if the full image is generated more quickly.
*/
partial_images?: number | null;
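A sketch of the streaming behavior this note describes (prompt illustrative; event names per the image streaming API). With `partial_images` set, partial frames arrive as `image_generation.partial_image` events, and the final image can land before all requested partials if generation finishes quickly:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const stream = await client.images.generate({
  model: 'gpt-image-1',
  prompt: 'A watercolor lighthouse at dusk',
  stream: true,
  partial_images: 2,
});

for await (const event of stream) {
  if (event.type === 'image_generation.partial_image') {
    console.log(`partial image #${event.partial_image_index}`);
  } else if (event.type === 'image_generation.completed') {
    console.log('final image received');
  }
}
```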
diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts
index 6d6f6ef1c..ba1447432 100644
--- a/src/resources/responses/responses.ts
+++ b/src/resources/responses/responses.ts
@@ -491,16 +491,15 @@ export interface Response {
* Specifies the latency tier to use for processing the request. This parameter is
* relevant for customers subscribed to the scale tier service:
*
- * - If set to 'auto', and the Project is Scale tier enabled, the system will
- * utilize scale tier credits until they are exhausted.
- * - If set to 'auto', and the Project is not Scale tier enabled, the request will
- * be processed using the default service tier with a lower uptime SLA and no
- * latency guarantee.
- * - If set to 'default', the request will be processed using the default service
- * tier with a lower uptime SLA and no latency guarantee.
- * - If set to 'flex', the request will be processed with the Flex Processing
- * service tier.
- * [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ * - If set to 'auto', then the request will be processed with the service tier
+ * configured in the Project settings. Unless otherwise configured, the Project
+ * will use 'default'.
+ * - If set to 'default', then the request will be processed with the standard
+ * pricing and performance for the selected model.
+ * - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ * 'priority', then the request will be processed with the corresponding service
+ * tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ * Priority processing.
* - When not set, the default behavior is 'auto'.
*
* When this parameter is set, the response body will include the `service_tier`
@@ -795,7 +794,8 @@ export interface ResponseCodeInterpreterToolCall {
outputs: Array<ResponseCodeInterpreterToolCall.LogsOutput | ResponseCodeInterpreterToolCall.ImageOutput> | null;
/**
- * The status of the code interpreter tool call.
+ * The status of the code interpreter tool call. Valid values are `in_progress`,
+ * `completed`, `incomplete`, `interpreting`, and `failed`.
*/
status: 'in_progress' | 'completed' | 'incomplete' | 'interpreting' | 'failed';
@@ -2849,9 +2849,10 @@ export namespace ResponseItem {
*/
export interface ResponseMcpCallArgumentsDeltaEvent {
/**
- * The partial update to the arguments for the MCP tool call.
+ * A JSON string containing the partial update to the arguments for the MCP tool
+ * call.
*/
- delta: unknown;
+ delta: string;
/**
* The unique identifier of the MCP tool call item being processed.
@@ -2879,9 +2880,9 @@ export interface ResponseMcpCallArgumentsDeltaEvent {
*/
export interface ResponseMcpCallArgumentsDoneEvent {
/**
- * The finalized arguments for the MCP tool call.
+ * A JSON string containing the finalized arguments for the MCP tool call.
*/
- arguments: unknown;
+ arguments: string;
/**
* The unique identifier of the MCP tool call item being processed.
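Since `delta` and `arguments` are now raw JSON strings rather than parsed values, a consumer should concatenate deltas and parse once at the done event. A sketch using only the types from this file (event wiring elided):

```ts
import type {
  ResponseMcpCallArgumentsDeltaEvent,
  ResponseMcpCallArgumentsDoneEvent,
} from 'openai/resources/responses/responses';

const buffers = new Map<string, string>();

function onArgumentsDelta(event: ResponseMcpCallArgumentsDeltaEvent) {
  // Fragments are not valid JSON on their own; accumulate per item_id.
  buffers.set(event.item_id, (buffers.get(event.item_id) ?? '') + event.delta);
}

function onArgumentsDone(event: ResponseMcpCallArgumentsDoneEvent) {
  // The done event carries the complete JSON string, so parsing is safe here.
  const args: unknown = JSON.parse(event.arguments);
  console.log('MCP call', event.item_id, args);
  buffers.delete(event.item_id);
}
```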
@@ -2908,6 +2909,16 @@ export interface ResponseMcpCallArgumentsDoneEvent {
* Emitted when an MCP tool call has completed successfully.
*/
export interface ResponseMcpCallCompletedEvent {
+ /**
+ * The ID of the MCP tool call item that completed.
+ */
+ item_id: string;
+
+ /**
+ * The index of the output item that completed.
+ */
+ output_index: number;
+
/**
* The sequence number of this event.
*/
@@ -2923,6 +2934,16 @@ export interface ResponseMcpCallCompletedEvent {
* Emitted when an MCP tool call has failed.
*/
export interface ResponseMcpCallFailedEvent {
+ /**
+ * The ID of the MCP tool call item that failed.
+ */
+ item_id: string;
+
+ /**
+ * The index of the output item that failed.
+ */
+ output_index: number;
+
/**
* The sequence number of this event.
*/
@@ -2963,6 +2984,16 @@ export interface ResponseMcpCallInProgressEvent {
* Emitted when the list of available MCP tools has been successfully retrieved.
*/
export interface ResponseMcpListToolsCompletedEvent {
+ /**
+ * The ID of the MCP tool call item that produced this output.
+ */
+ item_id: string;
+
+ /**
+ * The index of the output item that was processed.
+ */
+ output_index: number;
+
/**
* The sequence number of this event.
*/
@@ -2978,6 +3009,16 @@ export interface ResponseMcpListToolsCompletedEvent {
* Emitted when the attempt to list available MCP tools has failed.
*/
export interface ResponseMcpListToolsFailedEvent {
+ /**
+ * The ID of the MCP tool call item that failed.
+ */
+ item_id: string;
+
+ /**
+ * The index of the output item that failed.
+ */
+ output_index: number;
+
/**
* The sequence number of this event.
*/
@@ -2994,6 +3035,16 @@ export interface ResponseMcpListToolsFailedEvent {
* MCP tools.
*/
export interface ResponseMcpListToolsInProgressEvent {
+ /**
+ * The ID of the MCP tool call item that is being processed.
+ */
+ item_id: string;
+
+ /**
+ * The index of the output item that is being processed.
+ */
+ output_index: number;
+
/**
* The sequence number of this event.
*/
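With `item_id` and `output_index` added across these lifecycle events, consumers can correlate completion and failure back to a specific output item. A sketch using only fields shown in this diff (event wiring elided):

```ts
import type {
  ResponseMcpCallCompletedEvent,
  ResponseMcpCallFailedEvent,
} from 'openai/resources/responses/responses';

function onMcpCallCompleted(event: ResponseMcpCallCompletedEvent) {
  console.log(`MCP call ${event.item_id} (output #${event.output_index}) completed`);
}

function onMcpCallFailed(event: ResponseMcpCallFailedEvent) {
  console.warn(`MCP call ${event.item_id} (output #${event.output_index}) failed`);
}
```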
@@ -3607,76 +3658,6 @@ export interface ResponseQueuedEvent {
type: 'response.queued';
}
-/**
- * Emitted when there is a delta (partial update) to the reasoning content.
- */
-export interface ResponseReasoningDeltaEvent {
- /**
- * The index of the reasoning content part within the output item.
- */
- content_index: number;
-
- /**
- * The partial update to the reasoning content.
- */
- delta: unknown;
-
- /**
- * The unique identifier of the item for which reasoning is being updated.
- */
- item_id: string;
-
- /**
- * The index of the output item in the response's output array.
- */
- output_index: number;
-
- /**
- * The sequence number of this event.
- */
- sequence_number: number;
-
- /**
- * The type of the event. Always 'response.reasoning.delta'.
- */
- type: 'response.reasoning.delta';
-}
-
-/**
- * Emitted when the reasoning content is finalized for an item.
- */
-export interface ResponseReasoningDoneEvent {
- /**
- * The index of the reasoning content part within the output item.
- */
- content_index: number;
-
- /**
- * The unique identifier of the item for which reasoning is finalized.
- */
- item_id: string;
-
- /**
- * The index of the output item in the response's output array.
- */
- output_index: number;
-
- /**
- * The sequence number of this event.
- */
- sequence_number: number;
-
- /**
- * The finalized reasoning text.
- */
- text: string;
-
- /**
- * The type of the event. Always 'response.reasoning.done'.
- */
- type: 'response.reasoning.done';
-}
-
/**
* A description of the chain of thought used by a reasoning model while generating
* a response. Be sure to include these items in your `input` to the Responses API
@@ -4100,8 +4081,6 @@ export type ResponseStreamEvent =
| ResponseMcpListToolsInProgressEvent
| ResponseOutputTextAnnotationAddedEvent
| ResponseQueuedEvent
- | ResponseReasoningDeltaEvent
- | ResponseReasoningDoneEvent
| ResponseReasoningSummaryDeltaEvent
| ResponseReasoningSummaryDoneEvent;
@@ -4150,6 +4129,11 @@ export interface ResponseTextDeltaEvent {
*/
item_id: string;
+ /**
+ * The log probabilities of the tokens in the delta.
+ */
+ logprobs: Array<ResponseTextDeltaEvent.Logprob>;
+
/**
* The index of the output item that the text delta was added to.
*/
@@ -4166,6 +4150,44 @@ export interface ResponseTextDeltaEvent {
type: 'response.output_text.delta';
}
+export namespace ResponseTextDeltaEvent {
+ /**
+ * A logprob is the logarithmic probability that the model assigns to producing a
+ * particular token at a given position in the sequence. Less-negative (higher)
+ * logprob values indicate greater model confidence in that token choice.
+ */
+ export interface Logprob {
+ /**
+ * A possible text token.
+ */
+ token: string;
+
+ /**
+ * The log probability of this token.
+ */
+ logprob: number;
+
+ /**
+ * The log probabilities of the top 20 most likely tokens.
+ */
+ top_logprobs?: Array<Logprob.TopLogprob>;
+ }
+
+ export namespace Logprob {
+ export interface TopLogprob {
+ /**
+ * A possible text token.
+ */
+ token?: string;
+
+ /**
+ * The log probability of this token.
+ */
+ logprob?: number;
+ }
+ }
+}
+
/**
* Emitted when text content is finalized.
*/
@@ -4180,6 +4202,11 @@ export interface ResponseTextDoneEvent {
*/
item_id: string;
+ /**
+ * The log probabilities of the tokens in the finalized text.
+ */
+ logprobs: Array<ResponseTextDoneEvent.Logprob>;
+
/**
* The index of the output item that the text content is finalized.
*/
@@ -4201,6 +4228,44 @@ export interface ResponseTextDoneEvent {
type: 'response.output_text.done';
}
+export namespace ResponseTextDoneEvent {
+ /**
+ * A logprob is the logarithmic probability that the model assigns to producing a
+ * particular token at a given position in the sequence. Less-negative (higher)
+ * logprob values indicate greater model confidence in that token choice.
+ */
+ export interface Logprob {
+ /**
+ * A possible text token.
+ */
+ token: string;
+
+ /**
+ * The log probability of this token.
+ */
+ logprob: number;
+
+ /**
+ * The log probabilities of the top 20 most likely tokens.
+ */
+ top_logprobs?: Array<Logprob.TopLogprob>;
+ }
+
+ export namespace Logprob {
+ export interface TopLogprob {
+ /**
+ * A possible text token.
+ */
+ token?: string;
+
+ /**
+ * The log probability of this token.
+ */
+ logprob?: number;
+ }
+ }
+}
+
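A sketch of consuming the new `logprobs` arrays on the delta and done events (stream wiring elided; `top_logprobs` is optional and may be absent):

```ts
import type {
  ResponseTextDeltaEvent,
  ResponseTextDoneEvent,
} from 'openai/resources/responses/responses';

function onTextDelta(event: ResponseTextDeltaEvent) {
  for (const lp of event.logprobs) {
    // Less-negative values indicate higher confidence in the token choice.
    console.log(`${JSON.stringify(lp.token)} -> ${lp.logprob.toFixed(3)}`);
  }
}

function onTextDone(event: ResponseTextDoneEvent) {
  // Summing token logprobs approximates the log-likelihood of the final text.
  const total = event.logprobs.reduce((sum, lp) => sum + lp.logprob, 0);
  console.log('finalized text log-likelihood ~', total.toFixed(3));
}
```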
/**
* Represents token usage details including input tokens, output tokens, a
* breakdown of output tokens, and the total tokens used.
@@ -4783,16 +4848,15 @@ export interface ResponseCreateParamsBase {
* Specifies the latency tier to use for processing the request. This parameter is
* relevant for customers subscribed to the scale tier service:
*
- * - If set to 'auto', and the Project is Scale tier enabled, the system will
- * utilize scale tier credits until they are exhausted.
- * - If set to 'auto', and the Project is not Scale tier enabled, the request will
- * be processed using the default service tier with a lower uptime SLA and no
- * latency guarantee.
- * - If set to 'default', the request will be processed using the default service
- * tier with a lower uptime SLA and no latency guarantee.
- * - If set to 'flex', the request will be processed with the Flex Processing
- * service tier.
- * [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ * - If set to 'auto', then the request will be processed with the service tier
+ * configured in the Project settings. Unless otherwise configured, the Project
+ * will use 'default'.
+ * - If set to 'default', then the request will be processed with the standard
+ * pricing and performance for the selected model.
+ * - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ * 'priority', then the request will be processed with the corresponding service
+ * tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ * Priority processing.
* - When not set, the default behavior is 'auto'.
*
* When this parameter is set, the response body will include the `service_tier`
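The same tier selection on the Responses API, as a hedged sketch (model and input illustrative):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const response = await client.responses.create({
  model: 'gpt-4o',
  input: 'Write a haiku about latency.',
  // Omitting service_tier behaves like 'auto': Project settings decide.
  service_tier: 'auto',
});
console.log(response.service_tier);
```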
@@ -5045,8 +5109,6 @@ export declare namespace Responses {
type ResponseOutputTextAnnotationAddedEvent as ResponseOutputTextAnnotationAddedEvent,
type ResponsePrompt as ResponsePrompt,
type ResponseQueuedEvent as ResponseQueuedEvent,
- type ResponseReasoningDeltaEvent as ResponseReasoningDeltaEvent,
- type ResponseReasoningDoneEvent as ResponseReasoningDoneEvent,
type ResponseReasoningItem as ResponseReasoningItem,
type ResponseReasoningSummaryDeltaEvent as ResponseReasoningSummaryDeltaEvent,
type ResponseReasoningSummaryDoneEvent as ResponseReasoningSummaryDoneEvent,
diff --git a/src/resources/shared.ts b/src/resources/shared.ts
index 1f4f73305..5e2a02524 100644
--- a/src/resources/shared.ts
+++ b/src/resources/shared.ts
@@ -155,7 +155,7 @@ export interface FunctionDefinition {
* set to true, the model will follow the exact schema defined in the `parameters`
* field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn
* more about Structured Outputs in the
- * [function calling guide](docs/guides/function-calling).
+ * [function calling guide](https://platform.openai.com/docs/guides/function-calling).
*/
strict?: boolean | null;
}
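A short sketch of a `FunctionDefinition` using `strict`, with a schema in the supported subset (name and parameters are illustrative):

```ts
import type { FunctionDefinition } from 'openai/resources/shared';

const getWeather: FunctionDefinition = {
  name: 'get_weather',
  description: 'Look up the current weather for a city.',
  parameters: {
    type: 'object',
    properties: { city: { type: 'string' } },
    required: ['city'],
    // Structured Outputs requires additionalProperties: false when strict is true.
    additionalProperties: false,
  },
  strict: true,
};
```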
diff --git a/src/version.ts b/src/version.ts
index 7bd4f740f..8d53f632a 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '5.10.1'; // x-release-please-version
+export const VERSION = '5.10.2'; // x-release-please-version