diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 7b33636f46..9cdfd7b049 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.97.0" + ".": "1.97.1" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index 2b9160cf6e..2dc4f680a9 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-670ea0d2cc44f52a87dd3cadea45632953283e0636ba30788fdbdb22a232ccac.yml -openapi_spec_hash: d8b7d38911fead545adf3e4297956410 -config_hash: 5525bda35e48ea6387c6175c4d1651fa +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-b2a451656ca64d30d174391ebfd94806b4de3ab76dc55b92843cfb7f1a54ecb6.yml +openapi_spec_hash: 27d9691b400f28c17ef063a1374048b0 +config_hash: e822d0c9082c8b312264403949243179 diff --git a/CHANGELOG.md b/CHANGELOG.md index 2e603f06be..0c8d06cbb6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 1.97.1 (2025-07-22) + +Full Changelog: [v1.97.0...v1.97.1](https://github.com/openai/openai-python/compare/v1.97.0...v1.97.1) + +### Bug Fixes + +* **parsing:** ignore empty metadata ([58c359f](https://github.com/openai/openai-python/commit/58c359ff67fd6103268e4405600fd58844b6f27b)) +* **parsing:** parse extra field types ([d524b7e](https://github.com/openai/openai-python/commit/d524b7e201418ccc9b5c2206da06d1be011808e5)) + + +### Chores + +* **api:** event shapes more accurate ([f3a9a92](https://github.com/openai/openai-python/commit/f3a9a9229280ecb7e0b2779dd44290df6d9824ef)) + ## 1.97.0 (2025-07-16) Full Changelog: [v1.96.1...v1.97.0](https://github.com/openai/openai-python/compare/v1.96.1...v1.97.0) diff --git a/api.md b/api.md index b3a2245cdd..0280b886d1 100644 --- a/api.md +++ b/api.md @@ -791,8 +791,6 @@ from openai.types.responses import ( ResponseOutputTextAnnotationAddedEvent, ResponsePrompt, ResponseQueuedEvent, - ResponseReasoningDeltaEvent, - ResponseReasoningDoneEvent, ResponseReasoningItem, ResponseReasoningSummaryDeltaEvent, ResponseReasoningSummaryDoneEvent, diff --git a/pyproject.toml b/pyproject.toml index 533379d52a..af1366b34e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.97.0" +version = "1.97.1" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_models.py b/src/openai/_models.py index f347a81dac..d84d51d913 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -233,14 +233,18 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride] else: fields_values[name] = field_get_default(field) + extra_field_type = _get_extra_fields_type(__cls) + _extra = {} for key, value in values.items(): if key not in model_fields: + parsed = construct_type(value=value, type_=extra_field_type) if extra_field_type is not None else value + if PYDANTIC_V2: - _extra[key] = value + _extra[key] = parsed else: _fields_set.add(key) - fields_values[key] = value + fields_values[key] = parsed object.__setattr__(m, "__dict__", fields_values) @@ -395,6 +399,23 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object: return construct_type(value=value, type_=type_, metadata=getattr(field, "metadata", None)) +def _get_extra_fields_type(cls: type[pydantic.BaseModel]) -> type | None: + if not PYDANTIC_V2: + # TODO + return None + + schema = 
cls.__pydantic_core_schema__ + if schema["type"] == "model": + fields = schema["schema"] + if fields["type"] == "model-fields": + extras = fields.get("extras_schema") + if extras and "cls" in extras: + # mypy can't narrow the type + return extras["cls"] # type: ignore[no-any-return] + + return None + + def is_basemodel(type_: type) -> bool: """Returns whether or not the given type is either a `BaseModel` or a union of `BaseModel`""" if is_union(type_): @@ -464,7 +485,7 @@ def construct_type(*, value: object, type_: object, metadata: Optional[List[Any] type_ = type_.__value__ # type: ignore[unreachable] # unwrap `Annotated[T, ...]` -> `T` - if metadata is not None: + if metadata is not None and len(metadata) > 0: meta: tuple[Any, ...] = tuple(metadata) elif is_annotated_type(type_): meta = get_args(type_)[1:] diff --git a/src/openai/_version.py b/src/openai/_version.py index 8e5ed5fa86..9073c643cc 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.97.0" # x-release-please-version +__version__ = "1.97.1" # x-release-please-version diff --git a/src/openai/lib/streaming/responses/_events.py b/src/openai/lib/streaming/responses/_events.py index 6e547815e2..4c8a588944 100644 --- a/src/openai/lib/streaming/responses/_events.py +++ b/src/openai/lib/streaming/responses/_events.py @@ -21,9 +21,7 @@ ResponseRefusalDoneEvent, ResponseRefusalDeltaEvent, ResponseMcpCallFailedEvent, - ResponseReasoningDoneEvent, ResponseOutputItemDoneEvent, - ResponseReasoningDeltaEvent, ResponseContentPartDoneEvent, ResponseOutputItemAddedEvent, ResponseContentPartAddedEvent, @@ -139,10 +137,8 @@ class ResponseCompletedEvent(RawResponseCompletedEvent, GenericModel, Generic[Te ResponseMcpListToolsInProgressEvent, ResponseOutputTextAnnotationAddedEvent, ResponseQueuedEvent, - ResponseReasoningDeltaEvent, ResponseReasoningSummaryDeltaEvent, ResponseReasoningSummaryDoneEvent, - ResponseReasoningDoneEvent, ], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/lib/streaming/responses/_responses.py b/src/openai/lib/streaming/responses/_responses.py index 2c2fec5469..d45664de45 100644 --- a/src/openai/lib/streaming/responses/_responses.py +++ b/src/openai/lib/streaming/responses/_responses.py @@ -264,6 +264,7 @@ def handle_event(self, event: RawResponseStreamEvent) -> List[ResponseStreamEven item_id=event.item_id, output_index=event.output_index, sequence_number=event.sequence_number, + logprobs=event.logprobs, type="response.output_text.delta", snapshot=content.text, ) @@ -282,6 +283,7 @@ def handle_event(self, event: RawResponseStreamEvent) -> List[ResponseStreamEven item_id=event.item_id, output_index=event.output_index, sequence_number=event.sequence_number, + logprobs=event.logprobs, type="response.output_text.done", text=event.text, parsed=parse_text(event.text, text_format=self._text_format), diff --git a/src/openai/resources/audio/speech.py b/src/openai/resources/audio/speech.py index fe776baae8..6251cfed4e 100644 --- a/src/openai/resources/audio/speech.py +++ b/src/openai/resources/audio/speech.py @@ -50,9 +50,7 @@ def create( *, input: str, model: Union[str, SpeechModel], - voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ], + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]], instructions: str | NotGiven = NOT_GIVEN, 
response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN, speed: float | NotGiven = NOT_GIVEN, @@ -146,9 +144,7 @@ async def create( *, input: str, model: Union[str, SpeechModel], - voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ], + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]], instructions: str | NotGiven = NOT_GIVEN, response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN, speed: float | NotGiven = NOT_GIVEN, diff --git a/src/openai/resources/beta/realtime/sessions.py b/src/openai/resources/beta/realtime/sessions.py index 77f1ec9059..e639c0ba43 100644 --- a/src/openai/resources/beta/realtime/sessions.py +++ b/src/openai/resources/beta/realtime/sessions.py @@ -66,9 +66,7 @@ def create( tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN, tracing: session_create_params.Tracing | NotGiven = NOT_GIVEN, turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN, - voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -163,8 +161,7 @@ def create( voice: The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are - `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, - `shimmer`, and `verse`. + `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`. extra_headers: Send extra headers @@ -251,9 +248,7 @@ async def create( tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN, tracing: session_create_params.Tracing | NotGiven = NOT_GIVEN, turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN, - voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -348,8 +343,7 @@ async def create( voice: The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are - `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, - `shimmer`, and `verse`. + `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`. 
extra_headers: Send extra headers diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py index 5806296773..739aa662d4 100644 --- a/src/openai/resources/chat/completions/completions.py +++ b/src/openai/resources/chat/completions/completions.py @@ -417,7 +417,7 @@ def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -697,7 +697,7 @@ def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -968,7 +968,7 @@ def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -1784,7 +1784,7 @@ async def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -2064,7 +2064,7 @@ async def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -2335,7 +2335,7 @@ async def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. 
- - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service diff --git a/src/openai/resources/images.py b/src/openai/resources/images.py index 77b7a1b24e..c8eda8a76f 100644 --- a/src/openai/resources/images.py +++ b/src/openai/resources/images.py @@ -196,6 +196,9 @@ def edit( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. `high`, `medium` and `low` are only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. Defaults to `auto`. @@ -310,6 +313,9 @@ def edit( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. `high`, `medium` and `low` are only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. Defaults to `auto`. @@ -420,6 +426,9 @@ def edit( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. `high`, `medium` and `low` are only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. Defaults to `auto`. @@ -579,6 +588,9 @@ def generate( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. - `auto` (default value) will automatically select the best quality for the @@ -690,6 +702,9 @@ def generate( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. - `auto` (default value) will automatically select the best quality for the @@ -797,6 +812,9 @@ def generate( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. - `auto` (default value) will automatically select the best quality for the @@ -1066,6 +1084,9 @@ async def edit( responses that return partial images. 
Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. `high`, `medium` and `low` are only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. Defaults to `auto`. @@ -1180,6 +1201,9 @@ async def edit( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. `high`, `medium` and `low` are only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. Defaults to `auto`. @@ -1290,6 +1314,9 @@ async def edit( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. `high`, `medium` and `low` are only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. Defaults to `auto`. @@ -1449,6 +1476,9 @@ async def generate( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. - `auto` (default value) will automatically select the best quality for the @@ -1560,6 +1590,9 @@ async def generate( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. - `auto` (default value) will automatically select the best quality for the @@ -1667,6 +1700,9 @@ async def generate( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. - `auto` (default value) will automatically select the best quality for the diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index ce132bdb05..fe99aa851d 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -198,7 +198,7 @@ def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. 
- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -414,7 +414,7 @@ def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -623,7 +623,7 @@ def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -1463,7 +1463,7 @@ async def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -1679,7 +1679,7 @@ async def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -1888,7 +1888,7 @@ async def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service diff --git a/src/openai/types/audio/speech_create_params.py b/src/openai/types/audio/speech_create_params.py index 4ee4a3c4e4..feeb68c68b 100644 --- a/src/openai/types/audio/speech_create_params.py +++ b/src/openai/types/audio/speech_create_params.py @@ -20,11 +20,7 @@ class SpeechCreateParams(TypedDict, total=False): `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. 
""" - voice: Required[ - Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] - ] + voice: Required[Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]] """The voice to use when generating the audio. Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, diff --git a/src/openai/types/beta/realtime/realtime_response.py b/src/openai/types/beta/realtime/realtime_response.py index 28e03c8717..ccc97c5d22 100644 --- a/src/openai/types/beta/realtime/realtime_response.py +++ b/src/openai/types/beta/realtime/realtime_response.py @@ -80,13 +80,8 @@ class RealtimeResponse(BaseModel): will become the input for later turns. """ - voice: Union[ - str, - Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], - None, - ] = None + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None """ The voice the model used to respond. Current voice options are `alloy`, `ash`, - `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and - `verse`. + `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/src/openai/types/beta/realtime/response_create_event.py b/src/openai/types/beta/realtime/response_create_event.py index 3b8a6de8df..7219cedbf3 100644 --- a/src/openai/types/beta/realtime/response_create_event.py +++ b/src/openai/types/beta/realtime/response_create_event.py @@ -101,16 +101,12 @@ class Response(BaseModel): tools: Optional[List[ResponseTool]] = None """Tools (functions) available to the model.""" - voice: Union[ - str, - Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], - None, - ] = None + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/src/openai/types/beta/realtime/response_create_event_param.py b/src/openai/types/beta/realtime/response_create_event_param.py index c569d507a0..b4d54bba92 100644 --- a/src/openai/types/beta/realtime/response_create_event_param.py +++ b/src/openai/types/beta/realtime/response_create_event_param.py @@ -102,14 +102,12 @@ class Response(TypedDict, total=False): tools: Iterable[ResponseTool] """Tools (functions) available to the model.""" - voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. 
""" diff --git a/src/openai/types/beta/realtime/session.py b/src/openai/types/beta/realtime/session.py index 606fd83851..f84b3ee4a0 100644 --- a/src/openai/types/beta/realtime/session.py +++ b/src/openai/types/beta/realtime/session.py @@ -268,14 +268,10 @@ class Session(BaseModel): natural conversations, but may have a higher latency. """ - voice: Union[ - str, - Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], - None, - ] = None + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/src/openai/types/beta/realtime/session_create_params.py b/src/openai/types/beta/realtime/session_create_params.py index e04985d2b6..6be09d8bae 100644 --- a/src/openai/types/beta/realtime/session_create_params.py +++ b/src/openai/types/beta/realtime/session_create_params.py @@ -145,14 +145,12 @@ class SessionCreateParams(TypedDict, total=False): natural conversations, but may have a higher latency. """ - voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/src/openai/types/beta/realtime/session_create_response.py b/src/openai/types/beta/realtime/session_create_response.py index 15d5c1742b..471da03691 100644 --- a/src/openai/types/beta/realtime/session_create_response.py +++ b/src/openai/types/beta/realtime/session_create_response.py @@ -187,14 +187,10 @@ class SessionCreateResponse(BaseModel): speech. """ - voice: Union[ - str, - Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], - None, - ] = None + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo` `sage`, `shimmer` and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/src/openai/types/beta/realtime/session_update_event.py b/src/openai/types/beta/realtime/session_update_event.py index 789b9cd1e5..5b4185dbf6 100644 --- a/src/openai/types/beta/realtime/session_update_event.py +++ b/src/openai/types/beta/realtime/session_update_event.py @@ -290,16 +290,12 @@ class Session(BaseModel): natural conversations, but may have a higher latency. """ - voice: Union[ - str, - Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], - None, - ] = None + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None """The voice the model uses to respond. 
Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/src/openai/types/beta/realtime/session_update_event_param.py b/src/openai/types/beta/realtime/session_update_event_param.py index 2dfa2c26f3..3063449bfd 100644 --- a/src/openai/types/beta/realtime/session_update_event_param.py +++ b/src/openai/types/beta/realtime/session_update_event_param.py @@ -288,14 +288,12 @@ class Session(TypedDict, total=False): natural conversations, but may have a higher latency. """ - voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/src/openai/types/chat/chat_completion.py b/src/openai/types/chat/chat_completion.py index afc23e3f3d..42463f7ec8 100644 --- a/src/openai/types/chat/chat_completion.py +++ b/src/openai/types/chat/chat_completion.py @@ -65,7 +65,7 @@ class ChatCompletion(BaseModel): - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service diff --git a/src/openai/types/chat/chat_completion_audio_param.py b/src/openai/types/chat/chat_completion_audio_param.py index 25caada177..dc68159c1e 100644 --- a/src/openai/types/chat/chat_completion_audio_param.py +++ b/src/openai/types/chat/chat_completion_audio_param.py @@ -15,11 +15,7 @@ class ChatCompletionAudioParam(TypedDict, total=False): Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`. """ - voice: Required[ - Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] - ] + voice: Required[Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]] """The voice the model uses to respond. Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `nova`, diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py index da6e315830..082bb6cc19 100644 --- a/src/openai/types/chat/chat_completion_chunk.py +++ b/src/openai/types/chat/chat_completion_chunk.py @@ -134,7 +134,7 @@ class ChatCompletionChunk(BaseModel): - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. 
- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 44ea853041..191793c18f 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -214,7 +214,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service diff --git a/src/openai/types/image_edit_params.py b/src/openai/types/image_edit_params.py index d839e2fcbe..c0481012e4 100644 --- a/src/openai/types/image_edit_params.py +++ b/src/openai/types/image_edit_params.py @@ -85,6 +85,9 @@ class ImageEditParamsBase(TypedDict, total=False): This parameter is used for streaming responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. """ quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] diff --git a/src/openai/types/image_generate_params.py b/src/openai/types/image_generate_params.py index bd9f34b28e..e9e9292cc2 100644 --- a/src/openai/types/image_generate_params.py +++ b/src/openai/types/image_generate_params.py @@ -68,6 +68,9 @@ class ImageGenerateParamsBase(TypedDict, total=False): This parameter is used for streaming responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. 
""" quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] diff --git a/src/openai/types/images_response.py b/src/openai/types/images_response.py index 2a8ca728ab..89cc71df24 100644 --- a/src/openai/types/images_response.py +++ b/src/openai/types/images_response.py @@ -25,7 +25,7 @@ class Usage(BaseModel): """The input tokens detailed information for the image generation.""" output_tokens: int - """The number of image tokens in the output image.""" + """The number of output tokens generated by the model.""" total_tokens: int """The total number of tokens (images and text) used for the image generation.""" diff --git a/src/openai/types/responses/__init__.py b/src/openai/types/responses/__init__.py index 4316e47730..b563035e78 100644 --- a/src/openai/types/responses/__init__.py +++ b/src/openai/types/responses/__init__.py @@ -81,11 +81,9 @@ from .response_refusal_delta_event import ResponseRefusalDeltaEvent as ResponseRefusalDeltaEvent from .response_output_message_param import ResponseOutputMessageParam as ResponseOutputMessageParam from .response_output_refusal_param import ResponseOutputRefusalParam as ResponseOutputRefusalParam -from .response_reasoning_done_event import ResponseReasoningDoneEvent as ResponseReasoningDoneEvent from .response_reasoning_item_param import ResponseReasoningItemParam as ResponseReasoningItemParam from .response_file_search_tool_call import ResponseFileSearchToolCall as ResponseFileSearchToolCall from .response_mcp_call_failed_event import ResponseMcpCallFailedEvent as ResponseMcpCallFailedEvent -from .response_reasoning_delta_event import ResponseReasoningDeltaEvent as ResponseReasoningDeltaEvent from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent from .response_function_tool_call_item import ResponseFunctionToolCallItem as ResponseFunctionToolCallItem diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py index db85d87f4e..2af85d03fb 100644 --- a/src/openai/types/responses/response.py +++ b/src/openai/types/responses/response.py @@ -176,7 +176,7 @@ class Response(BaseModel): - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service diff --git a/src/openai/types/responses/response_code_interpreter_tool_call.py b/src/openai/types/responses/response_code_interpreter_tool_call.py index 7e4dc9f984..257937118b 100644 --- a/src/openai/types/responses/response_code_interpreter_tool_call.py +++ b/src/openai/types/responses/response_code_interpreter_tool_call.py @@ -45,7 +45,11 @@ class ResponseCodeInterpreterToolCall(BaseModel): """ status: Literal["in_progress", "completed", "incomplete", "interpreting", "failed"] - """The status of the code interpreter tool call.""" + """The status of the code interpreter tool call. + + Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and + `failed`. 
+ """ type: Literal["code_interpreter_call"] """The type of the code interpreter tool call. Always `code_interpreter_call`.""" diff --git a/src/openai/types/responses/response_code_interpreter_tool_call_param.py b/src/openai/types/responses/response_code_interpreter_tool_call_param.py index 69e01f99ed..435091001f 100644 --- a/src/openai/types/responses/response_code_interpreter_tool_call_param.py +++ b/src/openai/types/responses/response_code_interpreter_tool_call_param.py @@ -44,7 +44,11 @@ class ResponseCodeInterpreterToolCallParam(TypedDict, total=False): """ status: Required[Literal["in_progress", "completed", "incomplete", "interpreting", "failed"]] - """The status of the code interpreter tool call.""" + """The status of the code interpreter tool call. + + Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and + `failed`. + """ type: Required[Literal["code_interpreter_call"]] """The type of the code interpreter tool call. Always `code_interpreter_call`.""" diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py index 0187e1fda8..08feefd081 100644 --- a/src/openai/types/responses/response_create_params.py +++ b/src/openai/types/responses/response_create_params.py @@ -136,7 +136,7 @@ class ResponseCreateParamsBase(TypedDict, total=False): - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service diff --git a/src/openai/types/responses/response_mcp_call_arguments_delta_event.py b/src/openai/types/responses/response_mcp_call_arguments_delta_event.py index 8481506dc3..54eff38373 100644 --- a/src/openai/types/responses/response_mcp_call_arguments_delta_event.py +++ b/src/openai/types/responses/response_mcp_call_arguments_delta_event.py @@ -8,8 +8,11 @@ class ResponseMcpCallArgumentsDeltaEvent(BaseModel): - delta: object - """The partial update to the arguments for the MCP tool call.""" + delta: str + """ + A JSON string containing the partial update to the arguments for the MCP tool + call. 
+ """ item_id: str """The unique identifier of the MCP tool call item being processed.""" diff --git a/src/openai/types/responses/response_mcp_call_arguments_done_event.py b/src/openai/types/responses/response_mcp_call_arguments_done_event.py index 4be09d4862..59ce9bc944 100644 --- a/src/openai/types/responses/response_mcp_call_arguments_done_event.py +++ b/src/openai/types/responses/response_mcp_call_arguments_done_event.py @@ -8,8 +8,8 @@ class ResponseMcpCallArgumentsDoneEvent(BaseModel): - arguments: object - """The finalized arguments for the MCP tool call.""" + arguments: str + """A JSON string containing the finalized arguments for the MCP tool call.""" item_id: str """The unique identifier of the MCP tool call item being processed.""" diff --git a/src/openai/types/responses/response_mcp_call_completed_event.py b/src/openai/types/responses/response_mcp_call_completed_event.py index 009fbc3c60..2fee5dff81 100644 --- a/src/openai/types/responses/response_mcp_call_completed_event.py +++ b/src/openai/types/responses/response_mcp_call_completed_event.py @@ -8,6 +8,12 @@ class ResponseMcpCallCompletedEvent(BaseModel): + item_id: str + """The ID of the MCP tool call item that completed.""" + + output_index: int + """The index of the output item that completed.""" + sequence_number: int """The sequence number of this event.""" diff --git a/src/openai/types/responses/response_mcp_call_failed_event.py b/src/openai/types/responses/response_mcp_call_failed_event.py index e6edc6ded5..ca41ab7159 100644 --- a/src/openai/types/responses/response_mcp_call_failed_event.py +++ b/src/openai/types/responses/response_mcp_call_failed_event.py @@ -8,6 +8,12 @@ class ResponseMcpCallFailedEvent(BaseModel): + item_id: str + """The ID of the MCP tool call item that failed.""" + + output_index: int + """The index of the output item that failed.""" + sequence_number: int """The sequence number of this event.""" diff --git a/src/openai/types/responses/response_mcp_list_tools_completed_event.py b/src/openai/types/responses/response_mcp_list_tools_completed_event.py index 6290c3cf9f..c60ad88ee5 100644 --- a/src/openai/types/responses/response_mcp_list_tools_completed_event.py +++ b/src/openai/types/responses/response_mcp_list_tools_completed_event.py @@ -8,6 +8,12 @@ class ResponseMcpListToolsCompletedEvent(BaseModel): + item_id: str + """The ID of the MCP tool call item that produced this output.""" + + output_index: int + """The index of the output item that was processed.""" + sequence_number: int """The sequence number of this event.""" diff --git a/src/openai/types/responses/response_mcp_list_tools_failed_event.py b/src/openai/types/responses/response_mcp_list_tools_failed_event.py index 1f6e325b36..0c966c447a 100644 --- a/src/openai/types/responses/response_mcp_list_tools_failed_event.py +++ b/src/openai/types/responses/response_mcp_list_tools_failed_event.py @@ -8,6 +8,12 @@ class ResponseMcpListToolsFailedEvent(BaseModel): + item_id: str + """The ID of the MCP tool call item that failed.""" + + output_index: int + """The index of the output item that failed.""" + sequence_number: int """The sequence number of this event.""" diff --git a/src/openai/types/responses/response_mcp_list_tools_in_progress_event.py b/src/openai/types/responses/response_mcp_list_tools_in_progress_event.py index 236e5fe6e7..f451db1ed5 100644 --- a/src/openai/types/responses/response_mcp_list_tools_in_progress_event.py +++ b/src/openai/types/responses/response_mcp_list_tools_in_progress_event.py @@ -8,6 +8,12 @@ class 
ResponseMcpListToolsInProgressEvent(BaseModel): + item_id: str + """The ID of the MCP tool call item that is being processed.""" + + output_index: int + """The index of the output item that is being processed.""" + sequence_number: int """The sequence number of this event.""" diff --git a/src/openai/types/responses/response_reasoning_delta_event.py b/src/openai/types/responses/response_reasoning_delta_event.py deleted file mode 100644 index f37d3d370c..0000000000 --- a/src/openai/types/responses/response_reasoning_delta_event.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["ResponseReasoningDeltaEvent"] - - -class ResponseReasoningDeltaEvent(BaseModel): - content_index: int - """The index of the reasoning content part within the output item.""" - - delta: object - """The partial update to the reasoning content.""" - - item_id: str - """The unique identifier of the item for which reasoning is being updated.""" - - output_index: int - """The index of the output item in the response's output array.""" - - sequence_number: int - """The sequence number of this event.""" - - type: Literal["response.reasoning.delta"] - """The type of the event. Always 'response.reasoning.delta'.""" diff --git a/src/openai/types/responses/response_reasoning_done_event.py b/src/openai/types/responses/response_reasoning_done_event.py deleted file mode 100644 index 9f8b127d7e..0000000000 --- a/src/openai/types/responses/response_reasoning_done_event.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["ResponseReasoningDoneEvent"] - - -class ResponseReasoningDoneEvent(BaseModel): - content_index: int - """The index of the reasoning content part within the output item.""" - - item_id: str - """The unique identifier of the item for which reasoning is finalized.""" - - output_index: int - """The index of the output item in the response's output array.""" - - sequence_number: int - """The sequence number of this event.""" - - text: str - """The finalized reasoning text.""" - - type: Literal["response.reasoning.done"] - """The type of the event. 
Always 'response.reasoning.done'.""" diff --git a/src/openai/types/responses/response_stream_event.py b/src/openai/types/responses/response_stream_event.py index 24a83f1aa2..98e1d6c34d 100644 --- a/src/openai/types/responses/response_stream_event.py +++ b/src/openai/types/responses/response_stream_event.py @@ -17,9 +17,7 @@ from .response_in_progress_event import ResponseInProgressEvent from .response_refusal_done_event import ResponseRefusalDoneEvent from .response_refusal_delta_event import ResponseRefusalDeltaEvent -from .response_reasoning_done_event import ResponseReasoningDoneEvent from .response_mcp_call_failed_event import ResponseMcpCallFailedEvent -from .response_reasoning_delta_event import ResponseReasoningDeltaEvent from .response_output_item_done_event import ResponseOutputItemDoneEvent from .response_content_part_done_event import ResponseContentPartDoneEvent from .response_output_item_added_event import ResponseOutputItemAddedEvent @@ -111,8 +109,6 @@ ResponseMcpListToolsInProgressEvent, ResponseOutputTextAnnotationAddedEvent, ResponseQueuedEvent, - ResponseReasoningDeltaEvent, - ResponseReasoningDoneEvent, ResponseReasoningSummaryDeltaEvent, ResponseReasoningSummaryDoneEvent, ], diff --git a/src/openai/types/responses/response_text_delta_event.py b/src/openai/types/responses/response_text_delta_event.py index 7e4aec7024..b5379b7ac3 100644 --- a/src/openai/types/responses/response_text_delta_event.py +++ b/src/openai/types/responses/response_text_delta_event.py @@ -1,10 +1,30 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from typing import List, Optional from typing_extensions import Literal from ..._models import BaseModel -__all__ = ["ResponseTextDeltaEvent"] +__all__ = ["ResponseTextDeltaEvent", "Logprob", "LogprobTopLogprob"] + + +class LogprobTopLogprob(BaseModel): + token: Optional[str] = None + """A possible text token.""" + + logprob: Optional[float] = None + """The log probability of this token.""" + + +class Logprob(BaseModel): + token: str + """A possible text token.""" + + logprob: float + """The log probability of this token.""" + + top_logprobs: Optional[List[LogprobTopLogprob]] = None + """The log probability of the top 20 most likely tokens.""" class ResponseTextDeltaEvent(BaseModel): @@ -17,6 +37,9 @@ class ResponseTextDeltaEvent(BaseModel): item_id: str """The ID of the output item that the text delta was added to.""" + logprobs: List[Logprob] + """The log probabilities of the tokens in the delta.""" + output_index: int """The index of the output item that the text delta was added to.""" diff --git a/src/openai/types/responses/response_text_done_event.py b/src/openai/types/responses/response_text_done_event.py index 0d5ed4dd19..d9776a1844 100644 --- a/src/openai/types/responses/response_text_done_event.py +++ b/src/openai/types/responses/response_text_done_event.py @@ -1,10 +1,30 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+from typing import List, Optional from typing_extensions import Literal from ..._models import BaseModel -__all__ = ["ResponseTextDoneEvent"] +__all__ = ["ResponseTextDoneEvent", "Logprob", "LogprobTopLogprob"] + + +class LogprobTopLogprob(BaseModel): + token: Optional[str] = None + """A possible text token.""" + + logprob: Optional[float] = None + """The log probability of this token.""" + + +class Logprob(BaseModel): + token: str + """A possible text token.""" + + logprob: float + """The log probability of this token.""" + + top_logprobs: Optional[List[LogprobTopLogprob]] = None + """The log probability of the top 20 most likely tokens.""" class ResponseTextDoneEvent(BaseModel): @@ -14,6 +34,9 @@ class ResponseTextDoneEvent(BaseModel): item_id: str """The ID of the output item that the text content is finalized.""" + logprobs: List[Logprob] + """The log probabilities of the tokens in the delta.""" + output_index: int """The index of the output item that the text content is finalized.""" diff --git a/src/openai/types/shared/function_definition.py b/src/openai/types/shared/function_definition.py index 06baa23170..33ebb9ad3e 100644 --- a/src/openai/types/shared/function_definition.py +++ b/src/openai/types/shared/function_definition.py @@ -39,5 +39,5 @@ class FunctionDefinition(BaseModel): If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the - [function calling guide](docs/guides/function-calling). + [function calling guide](https://platform.openai.com/docs/guides/function-calling). """ diff --git a/src/openai/types/shared_params/function_definition.py b/src/openai/types/shared_params/function_definition.py index d45ec13f1e..b3fdaf86ff 100644 --- a/src/openai/types/shared_params/function_definition.py +++ b/src/openai/types/shared_params/function_definition.py @@ -41,5 +41,5 @@ class FunctionDefinition(TypedDict, total=False): If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the - [function calling guide](docs/guides/function-calling). + [function calling guide](https://platform.openai.com/docs/guides/function-calling). """ diff --git a/tests/test_models.py b/tests/test_models.py index 7262f45006..54a3a32048 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -1,5 +1,5 @@ import json -from typing import Any, Dict, List, Union, Optional, cast +from typing import TYPE_CHECKING, Any, Dict, List, Union, Optional, cast from datetime import datetime, timezone from typing_extensions import Literal, Annotated, TypeAliasType @@ -934,3 +934,30 @@ class Type2(BaseModel): ) assert isinstance(model, Type1) assert isinstance(model.value, InnerType2) + + +@pytest.mark.skipif(not PYDANTIC_V2, reason="this is only supported in pydantic v2 for now") +def test_extra_properties() -> None: + class Item(BaseModel): + prop: int + + class Model(BaseModel): + __pydantic_extra__: Dict[str, Item] = Field(init=False) # pyright: ignore[reportIncompatibleVariableOverride] + + other: str + + if TYPE_CHECKING: + + def __getattr__(self, attr: str) -> Item: ... + + model = construct_type( + type_=Model, + value={ + "a": {"prop": 1}, + "other": "foo", + }, + ) + assert isinstance(model, Model) + assert model.a.prop == 1 + assert isinstance(model.a, Item) + assert model.other == "foo"
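
The "parsing: parse extra field types" fix above makes `construct()` honor a typed `__pydantic_extra__` declaration: unknown keys in an API payload are now run through `construct_type` with the declared extra-field type instead of being stored as raw dicts. A minimal sketch of the resulting behavior, assuming Pydantic v2 (for v1 the new `_get_extra_fields_type` helper returns `None` and values pass through unchanged); the `Item` and `Model` names mirror `test_extra_properties` above and are illustrative, not part of the SDK's public surface:

    # Sketch, assuming Pydantic v2; `BaseModel` and `construct_type` come from
    # the internal `openai._models` module shown in this diff.
    from typing import Dict

    from pydantic import Field

    from openai._models import BaseModel, construct_type


    class Item(BaseModel):
        prop: int


    class Model(BaseModel):
        # Typed extras: unknown payload keys should be parsed as `Item`.
        __pydantic_extra__: Dict[str, Item] = Field(init=False)  # pyright: ignore[reportIncompatibleVariableOverride]

        other: str


    model = construct_type(type_=Model, value={"a": {"prop": 1}, "other": "foo"})
    assert isinstance(model, Model)
    assert model.other == "foo"
    # Before this fix, `model.a` was left as the raw dict {"prop": 1}; it is
    # now constructed into the declared extra type.
    assert isinstance(model.a, Item)
    assert model.a.prop == 1

The companion "parsing: ignore empty metadata" fix is the `len(metadata) > 0` guard in `construct_type` above: an `Annotated` alias whose metadata list resolved to `[]` no longer short-circuits the unwrapping logic and instead falls back to inspecting the type itself.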