From a5d9448b4b930856309d308603a4b0c6cf698f7d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 11 Aug 2025 12:55:22 +0000 Subject: [PATCH 1/4] codegen metadata --- .stats.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.stats.yml b/.stats.yml index 1ac09c3c..83ceec2e 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6a1bfd4738fff02ef5becc3fdb2bf0cd6c026f2c924d4147a2a515474477dd9a.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-9cadfad609f94f20ebf74fdc06a80302f1a324dc69700a309a8056aabca82fd2.yml openapi_spec_hash: 3eb8d86c06f0bb5e1190983e5acfc9ba -config_hash: a67c5e195a59855fe8a5db0dc61a3e7f +config_hash: 68337b532875626269c304372a669f67 From 8a401c9eecbe4936de487447be09757859001009 Mon Sep 17 00:00:00 2001 From: Mehmet Yildirim Date: Wed, 13 Aug 2025 12:55:10 +0300 Subject: [PATCH 2/4] docs: give https its missing "h" in Azure OpenAI REST API link (#480) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2e7dabec..cbfdce4c 100644 --- a/README.md +++ b/README.md @@ -911,7 +911,7 @@ func main() { const azureOpenAIEndpoint = "https://.openai.azure.com" // The latest API versions, including previews, can be found here: - // ttps://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versionng + // https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning const azureOpenAIAPIVersion = "2024-06-01" tokenCredential, err := azidentity.NewDefaultAzureCredential(nil) From 323154ccec2facf80d9ada76ed3c35553cb8896d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 15 Aug 2025 19:11:31 +0000 Subject: [PATCH 3/4] feat(api): add new text
parameters, expiration options --- .stats.yml | 6 +- aliases.go | 4 +- api.md | 2 - batch.go | 28 +++++ batch_test.go | 3 + betathread.go | 4 +- betathreadrun.go | 8 +- chatcompletion.go | 59 +++++++--- chatcompletion_test.go | 3 + file.go | 29 ++++- file_test.go | 5 +- responses/aliases.go | 4 +- responses/response.go | 210 ++++++++++++++--------------------- responses/response_test.go | 4 +- shared/constant/constants.go | 3 + shared/shared.go | 4 +- upload.go | 27 +++++ upload_test.go | 5 +- webhooks/aliases.go | 4 +- 19 files changed, 246 insertions(+), 166 deletions(-) diff --git a/.stats.yml b/.stats.yml index 83ceec2e..64ac5cb4 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-9cadfad609f94f20ebf74fdc06a80302f1a324dc69700a309a8056aabca82fd2.yml -openapi_spec_hash: 3eb8d86c06f0bb5e1190983e5acfc9ba -config_hash: 68337b532875626269c304372a669f67 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-24be531010b354303d741fc9247c1f84f75978f9f7de68aca92cb4f240a04722.yml +openapi_spec_hash: 3e46f439f6a863beadc71577eb4efa15 +config_hash: ed87b9139ac595a04a2162d754df2fed diff --git a/aliases.go b/aliases.go index 92e8eea9..267f2862 100644 --- a/aliases.go +++ b/aliases.go @@ -340,7 +340,7 @@ type FunctionParameters = shared.FunctionParameters // This is an alias to an internal type. type Metadata = shared.Metadata -// **o-series models only** +// **gpt-5 and o-series models only** // // Configuration options for // [reasoning models](https://platform.openai.com/docs/guides/reasoning). 
@@ -382,7 +382,7 @@ const ReasoningSummaryConcise = shared.ReasoningSummaryConcise // Equals "detailed" const ReasoningSummaryDetailed = shared.ReasoningSummaryDetailed -// **o-series models only** +// **gpt-5 and o-series models only** // // Configuration options for // [reasoning models](https://platform.openai.com/docs/guides/reasoning). diff --git a/api.md b/api.md index 2621d923..cbb9800c 100644 --- a/api.md +++ b/api.md @@ -648,7 +648,6 @@ Params Types: - responses.ResponseOutputTextParam - responses.ResponsePromptParam - responses.ResponseReasoningItemParam -- responses.ResponseTextConfigParam - responses.ToolUnionParam - responses.ToolChoiceAllowedParam - responses.ToolChoiceCustomParam @@ -744,7 +743,6 @@ Response Types: - responses.ResponseRefusalDoneEvent - responses.ResponseStatus - responses.ResponseStreamEventUnion -- responses.ResponseTextConfig - responses.ResponseTextDeltaEvent - responses.ResponseTextDoneEvent - responses.ResponseUsage diff --git a/batch.go b/batch.go index d775d4af..a8e796a9 100644 --- a/batch.go +++ b/batch.go @@ -290,6 +290,9 @@ type BatchNewParams struct { // Keys are strings with a maximum length of 64 characters. Values are strings with // a maximum length of 512 characters. Metadata shared.Metadata `json:"metadata,omitzero"` + // The expiration policy for the output and/or error file that are generated for a + // batch. + OutputExpiresAfter BatchNewParamsOutputExpiresAfter `json:"output_expires_after,omitzero"` paramObj } @@ -322,6 +325,31 @@ const ( BatchNewParamsEndpointV1Completions BatchNewParamsEndpoint = "/v1/completions" ) +// The expiration policy for the output and/or error file that are generated for a +// batch. +// +// The properties Anchor, Seconds are required. +type BatchNewParamsOutputExpiresAfter struct { + // The number of seconds after the anchor time that the file will expire. Must be + // between 3600 (1 hour) and 2592000 (30 days). 
+ Seconds int64 `json:"seconds,required"` + // Anchor timestamp after which the expiration policy applies. Supported anchors: + // `created_at`. Note that the anchor is the file creation time, not the time the + // batch is created. + // + // This field can be elided, and will marshal its zero value as "created_at". + Anchor constant.CreatedAt `json:"anchor,required"` + paramObj +} + +func (r BatchNewParamsOutputExpiresAfter) MarshalJSON() (data []byte, err error) { + type shadow BatchNewParamsOutputExpiresAfter + return param.MarshalObject(r, (*shadow)(&r)) +} +func (r *BatchNewParamsOutputExpiresAfter) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + type BatchListParams struct { // A cursor for use in pagination. `after` is an object ID that defines your place // in the list. For instance, if you make a list request and receive 100 objects, diff --git a/batch_test.go b/batch_test.go index eff0d30d..cd41bba3 100644 --- a/batch_test.go +++ b/batch_test.go @@ -33,6 +33,9 @@ func TestBatchNewWithOptionalParams(t *testing.T) { Metadata: shared.Metadata{ "foo": "string", }, + OutputExpiresAfter: openai.BatchNewParamsOutputExpiresAfter{ + Seconds: 3600, + }, }) if err != nil { var apierr *openai.Error diff --git a/betathread.go b/betathread.go index bb53983b..5f2527f9 100644 --- a/betathread.go +++ b/betathread.go @@ -1065,7 +1065,7 @@ type BetaThreadNewAndRunParams struct { // modifying the behavior on a per-run basis. Tools []AssistantToolUnionParam `json:"tools,omitzero"` // Controls for how a thread will be truncated prior to the run. Use this to - // control the intial context window of the run. + // control the initial context window of the run. TruncationStrategy BetaThreadNewAndRunParamsTruncationStrategy `json:"truncation_strategy,omitzero"` // Specifies the format that the model must output. 
Compatible with // [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -1532,7 +1532,7 @@ func (r *BetaThreadNewAndRunParamsToolResourcesFileSearch) UnmarshalJSON(data [] } // Controls for how a thread will be truncated prior to the run. Use this to -// control the intial context window of the run. +// control the initial context window of the run. // // The property Type is required. type BetaThreadNewAndRunParamsTruncationStrategy struct { diff --git a/betathreadrun.go b/betathreadrun.go index 7b95e07b..e35ba1d4 100644 --- a/betathreadrun.go +++ b/betathreadrun.go @@ -364,7 +364,7 @@ type Run struct { // this run. Tools []AssistantToolUnion `json:"tools,required"` // Controls for how a thread will be truncated prior to the run. Use this to - // control the intial context window of the run. + // control the initial context window of the run. TruncationStrategy RunTruncationStrategy `json:"truncation_strategy,required"` // Usage statistics related to the run. This value will be `null` if the run is not // in a terminal state (i.e. `in_progress`, `queued`, etc.). @@ -499,7 +499,7 @@ func (r *RunRequiredActionSubmitToolOutputs) UnmarshalJSON(data []byte) error { } // Controls for how a thread will be truncated prior to the run. Use this to -// control the intial context window of the run. +// control the initial context window of the run. type RunTruncationStrategy struct { // The truncation strategy to use for the thread. The default is `auto`. If set to // `last_messages`, the thread will be truncated to the n most recent messages in @@ -633,7 +633,7 @@ type BetaThreadRunNewParams struct { // modifying the behavior on a per-run basis. Tools []AssistantToolUnionParam `json:"tools,omitzero"` // Controls for how a thread will be truncated prior to the run. Use this to - // control the intial context window of the run. + // control the initial context window of the run. 
TruncationStrategy BetaThreadRunNewParamsTruncationStrategy `json:"truncation_strategy,omitzero"` // A list of additional fields to include in the response. Currently the only // supported value is `step_details.tool_calls[*].file_search.results[*].content` @@ -837,7 +837,7 @@ func (r *BetaThreadRunNewParamsAdditionalMessageAttachmentToolFileSearch) Unmars } // Controls for how a thread will be truncated prior to the run. Use this to -// control the intial context window of the run. +// control the initial context window of the run. // // The property Type is required. type BetaThreadRunNewParamsTruncationStrategy struct { diff --git a/chatcompletion.go b/chatcompletion.go index 6f344b54..bdf0011d 100644 --- a/chatcompletion.go +++ b/chatcompletion.go @@ -183,9 +183,8 @@ type ChatCompletion struct { // - If set to 'default', then the request will be processed with the standard // pricing and performance for the selected model. // - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - // 'priority', then the request will be processed with the corresponding service - // tier. [Contact sales](https://openai.com/contact-sales) to learn more about - // Priority processing. + // '[priority](https://openai.com/api-priority-processing/)', then the request + // will be processed with the corresponding service tier. // - When not set, the default behavior is 'auto'. // // When the `service_tier` parameter is set, the response body will include the @@ -199,6 +198,8 @@ type ChatCompletion struct { // // Can be used in conjunction with the `seed` request parameter to understand when // backend changes have been made that might impact determinism. + // + // Deprecated: deprecated SystemFingerprint string `json:"system_fingerprint"` // Usage statistics for the completion request. 
Usage CompletionUsage `json:"usage"` @@ -285,9 +286,8 @@ func (r *ChatCompletionChoiceLogprobs) UnmarshalJSON(data []byte) error { // - If set to 'default', then the request will be processed with the standard // pricing and performance for the selected model. // - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or -// 'priority', then the request will be processed with the corresponding service -// tier. [Contact sales](https://openai.com/contact-sales) to learn more about -// Priority processing. +// '[priority](https://openai.com/api-priority-processing/)', then the request +// will be processed with the corresponding service tier. // - When not set, the default behavior is 'auto'. // // When the `service_tier` parameter is set, the response body will include the @@ -598,9 +598,8 @@ type ChatCompletionChunk struct { // - If set to 'default', then the request will be processed with the standard // pricing and performance for the selected model. // - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - // 'priority', then the request will be processed with the corresponding service - // tier. [Contact sales](https://openai.com/contact-sales) to learn more about - // Priority processing. + // '[priority](https://openai.com/api-priority-processing/)', then the request + // will be processed with the corresponding service tier. // - When not set, the default behavior is 'auto'. // // When the `service_tier` parameter is set, the response body will include the @@ -613,6 +612,8 @@ type ChatCompletionChunk struct { // This fingerprint represents the backend configuration that the model runs with. // Can be used in conjunction with the `seed` request parameter to understand when // backend changes have been made that might impact determinism. 
+ // + // Deprecated: deprecated SystemFingerprint string `json:"system_fingerprint"` // An optional field that will only be present when you set // `stream_options: {"include_usage": true}` in your request. When present, it @@ -815,9 +816,8 @@ func (r *ChatCompletionChunkChoiceLogprobs) UnmarshalJSON(data []byte) error { // - If set to 'default', then the request will be processed with the standard // pricing and performance for the selected model. // - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or -// 'priority', then the request will be processed with the corresponding service -// tier. [Contact sales](https://openai.com/contact-sales) to learn more about -// Priority processing. +// '[priority](https://openai.com/api-priority-processing/)', then the request +// will be processed with the corresponding service tier. // - When not set, the default behavior is 'auto'. // // When the `service_tier` parameter is set, the response body will include the @@ -3034,9 +3034,8 @@ type ChatCompletionNewParams struct { // - If set to 'default', then the request will be processed with the standard // pricing and performance for the selected model. // - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - // 'priority', then the request will be processed with the corresponding service - // tier. [Contact sales](https://openai.com/contact-sales) to learn more about - // Priority processing. + // '[priority](https://openai.com/api-priority-processing/)', then the request + // will be processed with the corresponding service tier. // - When not set, the default behavior is 'auto'. // // When the `service_tier` parameter is set, the response body will include the @@ -3092,6 +3091,7 @@ type ChatCompletionNewParams struct { // ensures the message the model generates is valid JSON. Using `json_schema` is // preferred for models that support it. 
ResponseFormat ChatCompletionNewParamsResponseFormatUnion `json:"response_format,omitzero"` + Text ChatCompletionNewParamsText `json:"text,omitzero"` // Controls which (if any) tool is called by the model. `none` means the model will // not call any tool and instead generates a message. `auto` means the model can // pick between generating a message or calling one or more tools. `required` means @@ -3242,9 +3242,8 @@ func (u ChatCompletionNewParamsResponseFormatUnion) GetType() *string { // - If set to 'default', then the request will be processed with the standard // pricing and performance for the selected model. // - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or -// 'priority', then the request will be processed with the corresponding service -// tier. [Contact sales](https://openai.com/contact-sales) to learn more about -// Priority processing. +// '[priority](https://openai.com/api-priority-processing/)', then the request +// will be processed with the corresponding service tier. // - When not set, the default behavior is 'auto'. // // When the `service_tier` parameter is set, the response body will include the @@ -3286,6 +3285,30 @@ func (u *ChatCompletionNewParamsStopUnion) asAny() any { return nil } +type ChatCompletionNewParamsText struct { + // Constrains the verbosity of the model's response. Lower values will result in + // more concise responses, while higher values will result in more verbose + // responses. Currently supported values are `low`, `medium`, and `high`. + // + // Any of "low", "medium", "high". 
+ Verbosity string `json:"verbosity,omitzero"` + paramObj +} + +func (r ChatCompletionNewParamsText) MarshalJSON() (data []byte, err error) { + type shadow ChatCompletionNewParamsText + return param.MarshalObject(r, (*shadow)(&r)) +} +func (r *ChatCompletionNewParamsText) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +func init() { + apijson.RegisterFieldValidator[ChatCompletionNewParamsText]( + "verbosity", "low", "medium", "high", + ) +} + // Constrains the verbosity of the model's response. Lower values will result in // more concise responses, while higher values will result in more verbose // responses. Currently supported values are `low`, `medium`, and `high`. diff --git a/chatcompletion_test.go b/chatcompletion_test.go index 9bec83c7..1cdf586f 100644 --- a/chatcompletion_test.go +++ b/chatcompletion_test.go @@ -86,6 +86,9 @@ func TestChatCompletionNewWithOptionalParams(t *testing.T) { IncludeUsage: openai.Bool(true), }, Temperature: openai.Float(1), + Text: openai.ChatCompletionNewParamsText{ + Verbosity: "low", + }, ToolChoice: openai.ChatCompletionToolChoiceOptionUnionParam{ OfAuto: openai.String("none"), }, diff --git a/file.go b/file.go index 7cb3a722..b69842fc 100644 --- a/file.go +++ b/file.go @@ -44,7 +44,7 @@ func NewFileService(opts ...option.RequestOption) (r FileService) { // Upload a file that can be used across various endpoints. Individual files can be // up to 512 MB, and the size of all files uploaded by one organization can be up -// to 100 GB. +// to 1 TB. // // The Assistants API supports files up to 2 million tokens and of specific file // types. See the @@ -256,6 +256,9 @@ type FileNewParams struct { // // Any of "assistants", "batch", "fine-tune", "vision", "user_data", "evals". Purpose FilePurpose `json:"purpose,omitzero,required"` + // The expiration policy for a file. By default, files with `purpose=batch` expire + // after 30 days and all other files are persisted until they are manually deleted. 
+ ExpiresAfter FileNewParamsExpiresAfter `json:"expires_after,omitzero"` paramObj } @@ -277,6 +280,30 @@ func (r FileNewParams) MarshalMultipart() (data []byte, contentType string, err return buf.Bytes(), writer.FormDataContentType(), nil } +// The expiration policy for a file. By default, files with `purpose=batch` expire +// after 30 days and all other files are persisted until they are manually deleted. +// +// The properties Anchor, Seconds are required. +type FileNewParamsExpiresAfter struct { + // The number of seconds after the anchor time that the file will expire. Must be + // between 3600 (1 hour) and 2592000 (30 days). + Seconds int64 `json:"seconds,required"` + // Anchor timestamp after which the expiration policy applies. Supported anchors: + // `created_at`. + // + // This field can be elided, and will marshal its zero value as "created_at". + Anchor constant.CreatedAt `json:"anchor,required"` + paramObj +} + +func (r FileNewParamsExpiresAfter) MarshalJSON() (data []byte, err error) { + type shadow FileNewParamsExpiresAfter + return param.MarshalObject(r, (*shadow)(&r)) +} +func (r *FileNewParamsExpiresAfter) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + type FileListParams struct { // A cursor for use in pagination. `after` is an object ID that defines your place // in the list. 
For instance, if you make a list request and receive 100 objects, diff --git a/file_test.go b/file_test.go index 5dea73d4..7c41412e 100644 --- a/file_test.go +++ b/file_test.go @@ -17,7 +17,7 @@ import ( "github.com/openai/openai-go/v2/option" ) -func TestFileNew(t *testing.T) { +func TestFileNewWithOptionalParams(t *testing.T) { baseURL := "http://localhost:4010" if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { baseURL = envURL @@ -32,6 +32,9 @@ func TestFileNew(t *testing.T) { _, err := client.Files.New(context.TODO(), openai.FileNewParams{ File: io.Reader(bytes.NewBuffer([]byte("some file contents"))), Purpose: openai.FilePurposeAssistants, + ExpiresAfter: openai.FileNewParamsExpiresAfter{ + Seconds: 3600, + }, }) if err != nil { var apierr *openai.Error diff --git a/responses/aliases.go b/responses/aliases.go index 10a9231a..9ff17cbe 100644 --- a/responses/aliases.go +++ b/responses/aliases.go @@ -340,7 +340,7 @@ type FunctionParameters = shared.FunctionParameters // This is an alias to an internal type. type Metadata = shared.Metadata -// **o-series models only** +// **gpt-5 and o-series models only** // // Configuration options for // [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -382,7 +382,7 @@ const ReasoningSummaryConcise = shared.ReasoningSummaryConcise // Equals "detailed" const ReasoningSummaryDetailed = shared.ReasoningSummaryDetailed -// **o-series models only** +// **gpt-5 and o-series models only** // // Configuration options for // [reasoning models](https://platform.openai.com/docs/guides/reasoning). diff --git a/responses/response.go b/responses/response.go index 80a7d0b0..c7a23f8c 100644 --- a/responses/response.go +++ b/responses/response.go @@ -825,7 +825,7 @@ type Response struct { // hit rates. Replaces the `user` field. // [Learn more](https://platform.openai.com/docs/guides/prompt-caching). 
PromptCacheKey string `json:"prompt_cache_key"` - // **o-series models only** + // **gpt-5 and o-series models only** // // Configuration options for // [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -844,9 +844,8 @@ type Response struct { // - If set to 'default', then the request will be processed with the standard // pricing and performance for the selected model. // - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - // 'priority', then the request will be processed with the corresponding service - // tier. [Contact sales](https://openai.com/contact-sales) to learn more about - // Priority processing. + // '[priority](https://openai.com/api-priority-processing/)', then the request + // will be processed with the corresponding service tier. // - When not set, the default behavior is 'auto'. // // When the `service_tier` parameter is set, the response body will include the @@ -862,12 +861,7 @@ type Response struct { // Any of "completed", "failed", "in_progress", "cancelled", "queued", // "incomplete". Status ResponseStatus `json:"status"` - // Configuration options for a text response from the model. Can be plain text or - // structured JSON data. Learn more: - // - // - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - // - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - Text ResponseTextConfig `json:"text"` + Text ResponseText `json:"text"` // An integer between 0 and 20 specifying the number of most likely tokens to // return at each token position, each with an associated log probability. TopLogprobs int64 `json:"top_logprobs,nullable"` @@ -1079,9 +1073,8 @@ func (r *ResponseToolChoiceUnion) UnmarshalJSON(data []byte) error { // - If set to 'default', then the request will be processed with the standard // pricing and performance for the selected model. 
// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or -// 'priority', then the request will be processed with the corresponding service -// tier. [Contact sales](https://openai.com/contact-sales) to learn more about -// Priority processing. +// '[priority](https://openai.com/api-priority-processing/)', then the request +// will be processed with the corresponding service tier. // - When not set, the default behavior is 'auto'. // // When the `service_tier` parameter is set, the response body will include the @@ -1098,6 +1091,42 @@ const ( ResponseServiceTierPriority ResponseServiceTier = "priority" ) +type ResponseText struct { + // An object specifying the format that the model must output. + // + // Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + // ensures the model will match your supplied JSON schema. Learn more in the + // [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + // + // The default format is `{ "type": "text" }` with no additional options. + // + // **Not recommended for gpt-4o and newer models:** + // + // Setting to `{ "type": "json_object" }` enables the older JSON mode, which + // ensures the message the model generates is valid JSON. Using `json_schema` is + // preferred for models that support it. + Format ResponseFormatTextConfigUnion `json:"format"` + // Constrains the verbosity of the model's response. Lower values will result in + // more concise responses, while higher values will result in more verbose + // responses. Currently supported values are `low`, `medium`, and `high`. + // + // Any of "low", "medium", "high". + Verbosity string `json:"verbosity,nullable"` + // JSON contains metadata for fields, check presence with [respjson.Field.Valid]. 
+ JSON struct { + Format respjson.Field + Verbosity respjson.Field + ExtraFields map[string]respjson.Field + raw string + } `json:"-"` +} + +// Returns the unmodified JSON received from the API +func (r ResponseText) RawJSON() string { return r.JSON.raw } +func (r *ResponseText) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + // The truncation strategy to use for the model response. // // - `auto`: If the context of this response and previous ones exceeds the model's @@ -11136,104 +11165,6 @@ func (r *ResponseStreamEventUnionLogprobs) UnmarshalJSON(data []byte) error { return apijson.UnmarshalRoot(data, r) } -// Configuration options for a text response from the model. Can be plain text or -// structured JSON data. Learn more: -// -// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) -// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) -type ResponseTextConfig struct { - // An object specifying the format that the model must output. - // - // Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - // ensures the model will match your supplied JSON schema. Learn more in the - // [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - // - // The default format is `{ "type": "text" }` with no additional options. - // - // **Not recommended for gpt-4o and newer models:** - // - // Setting to `{ "type": "json_object" }` enables the older JSON mode, which - // ensures the message the model generates is valid JSON. Using `json_schema` is - // preferred for models that support it. - Format ResponseFormatTextConfigUnion `json:"format"` - // Constrains the verbosity of the model's response. Lower values will result in - // more concise responses, while higher values will result in more verbose - // responses. Currently supported values are `low`, `medium`, and `high`. - // - // Any of "low", "medium", "high". 
- Verbosity ResponseTextConfigVerbosity `json:"verbosity,nullable"` - // JSON contains metadata for fields, check presence with [respjson.Field.Valid]. - JSON struct { - Format respjson.Field - Verbosity respjson.Field - ExtraFields map[string]respjson.Field - raw string - } `json:"-"` -} - -// Returns the unmodified JSON received from the API -func (r ResponseTextConfig) RawJSON() string { return r.JSON.raw } -func (r *ResponseTextConfig) UnmarshalJSON(data []byte) error { - return apijson.UnmarshalRoot(data, r) -} - -// ToParam converts this ResponseTextConfig to a ResponseTextConfigParam. -// -// Warning: the fields of the param type will not be present. ToParam should only -// be used at the last possible moment before sending a request. Test for this with -// ResponseTextConfigParam.Overrides() -func (r ResponseTextConfig) ToParam() ResponseTextConfigParam { - return param.Override[ResponseTextConfigParam](json.RawMessage(r.RawJSON())) -} - -// Constrains the verbosity of the model's response. Lower values will result in -// more concise responses, while higher values will result in more verbose -// responses. Currently supported values are `low`, `medium`, and `high`. -type ResponseTextConfigVerbosity string - -const ( - ResponseTextConfigVerbosityLow ResponseTextConfigVerbosity = "low" - ResponseTextConfigVerbosityMedium ResponseTextConfigVerbosity = "medium" - ResponseTextConfigVerbosityHigh ResponseTextConfigVerbosity = "high" -) - -// Configuration options for a text response from the model. Can be plain text or -// structured JSON data. Learn more: -// -// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) -// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) -type ResponseTextConfigParam struct { - // Constrains the verbosity of the model's response. Lower values will result in - // more concise responses, while higher values will result in more verbose - // responses. 
Currently supported values are `low`, `medium`, and `high`. - // - // Any of "low", "medium", "high". - Verbosity ResponseTextConfigVerbosity `json:"verbosity,omitzero"` - // An object specifying the format that the model must output. - // - // Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - // ensures the model will match your supplied JSON schema. Learn more in the - // [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - // - // The default format is `{ "type": "text" }` with no additional options. - // - // **Not recommended for gpt-4o and newer models:** - // - // Setting to `{ "type": "json_object" }` enables the older JSON mode, which - // ensures the message the model generates is valid JSON. Using `json_schema` is - // preferred for models that support it. - Format ResponseFormatTextConfigUnionParam `json:"format,omitzero"` - paramObj -} - -func (r ResponseTextConfigParam) MarshalJSON() (data []byte, err error) { - type shadow ResponseTextConfigParam - return param.MarshalObject(r, (*shadow)(&r)) -} -func (r *ResponseTextConfigParam) UnmarshalJSON(data []byte) error { - return apijson.UnmarshalRoot(data, r) -} - // Emitted when there is an additional text delta. type ResponseTextDeltaEvent struct { // The index of the content part that the text delta was added to. @@ -13467,9 +13398,8 @@ type ResponseNewParams struct { // - If set to 'default', then the request will be processed with the standard // pricing and performance for the selected model. // - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - // 'priority', then the request will be processed with the corresponding service - // tier. [Contact sales](https://openai.com/contact-sales) to learn more about - // Priority processing. + // '[priority](https://openai.com/api-priority-processing/)', then the request + // will be processed with the corresponding service tier. 
// - When not set, the default behavior is 'auto'. // // When the `service_tier` parameter is set, the response body will include the @@ -13507,17 +13437,12 @@ type ResponseNewParams struct { // [model guide](https://platform.openai.com/docs/models) to browse and compare // available models. Model shared.ResponsesModel `json:"model,omitzero"` - // **o-series models only** + // **gpt-5 and o-series models only** // // Configuration options for // [reasoning models](https://platform.openai.com/docs/guides/reasoning). Reasoning shared.ReasoningParam `json:"reasoning,omitzero"` - // Configuration options for a text response from the model. Can be plain text or - // structured JSON data. Learn more: - // - // - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - // - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - Text ResponseTextConfigParam `json:"text,omitzero"` + Text ResponseNewParamsText `json:"text,omitzero"` // How the model should select which tool (or tools) to use when generating a // response. See the `tools` parameter to see how to specify which tools the model // can call. @@ -13583,9 +13508,8 @@ func (u *ResponseNewParamsInputUnion) asAny() any { // - If set to 'default', then the request will be processed with the standard // pricing and performance for the selected model. // - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or -// 'priority', then the request will be processed with the corresponding service -// tier. [Contact sales](https://openai.com/contact-sales) to learn more about -// Priority processing. +// '[priority](https://openai.com/api-priority-processing/)', then the request +// will be processed with the corresponding service tier. // - When not set, the default behavior is 'auto'. 
// // When the `service_tier` parameter is set, the response body will include the @@ -13622,6 +13546,44 @@ func (r *ResponseNewParamsStreamOptions) UnmarshalJSON(data []byte) error { return apijson.UnmarshalRoot(data, r) } +type ResponseNewParamsText struct { + // Constrains the verbosity of the model's response. Lower values will result in + // more concise responses, while higher values will result in more verbose + // responses. Currently supported values are `low`, `medium`, and `high`. + // + // Any of "low", "medium", "high". + Verbosity string `json:"verbosity,omitzero"` + // An object specifying the format that the model must output. + // + // Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + // ensures the model will match your supplied JSON schema. Learn more in the + // [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + // + // The default format is `{ "type": "text" }` with no additional options. + // + // **Not recommended for gpt-4o and newer models:** + // + // Setting to `{ "type": "json_object" }` enables the older JSON mode, which + // ensures the message the model generates is valid JSON. Using `json_schema` is + // preferred for models that support it. + Format ResponseFormatTextConfigUnionParam `json:"format,omitzero"` + paramObj +} + +func (r ResponseNewParamsText) MarshalJSON() (data []byte, err error) { + type shadow ResponseNewParamsText + return param.MarshalObject(r, (*shadow)(&r)) +} +func (r *ResponseNewParamsText) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +func init() { + apijson.RegisterFieldValidator[ResponseNewParamsText]( + "verbosity", "low", "medium", "high", + ) +} + // Only one field can be non-zero. // // Use [param.IsOmitted] to confirm if a field is set. 
diff --git a/responses/response_test.go b/responses/response_test.go index 7b73951f..eccee7f1 100644 --- a/responses/response_test.go +++ b/responses/response_test.go @@ -64,11 +64,11 @@ func TestResponseNewWithOptionalParams(t *testing.T) { IncludeObfuscation: openai.Bool(true), }, Temperature: openai.Float(1), - Text: responses.ResponseTextConfigParam{ + Text: responses.ResponseNewParamsText{ Format: responses.ResponseFormatTextConfigUnionParam{ OfText: &shared.ResponseFormatTextParam{}, }, - Verbosity: responses.ResponseTextConfigVerbosityLow, + Verbosity: "low", }, ToolChoice: responses.ResponseNewParamsToolChoiceUnion{ OfToolChoiceMode: openai.Opt(responses.ToolChoiceOptionsNone), diff --git a/shared/constant/constants.go b/shared/constant/constants.go index d8997258..fdc26c32 100644 --- a/shared/constant/constants.go +++ b/shared/constant/constants.go @@ -41,6 +41,7 @@ type ComputerUsePreview string // Always "computer_use_prev type ContainerFileCitation string // Always "container_file_citation" type ContainerFile string // Always "container.file" type Content string // Always "content" +type CreatedAt string // Always "created_at" type Custom string // Always "custom" type CustomToolCall string // Always "custom_tool_call" type CustomToolCallOutput string // Always "custom_tool_call_output" @@ -250,6 +251,7 @@ func (c ComputerUsePreview) Default() ComputerUsePreview { return "compu func (c ContainerFileCitation) Default() ContainerFileCitation { return "container_file_citation" } func (c ContainerFile) Default() ContainerFile { return "container.file" } func (c Content) Default() Content { return "content" } +func (c CreatedAt) Default() CreatedAt { return "created_at" } func (c Custom) Default() Custom { return "custom" } func (c CustomToolCall) Default() CustomToolCall { return "custom_tool_call" } func (c CustomToolCallOutput) Default() CustomToolCallOutput { return "custom_tool_call_output" } @@ -555,6 +557,7 @@ func (c ComputerUsePreview) MarshalJSON() 
([]byte, error) { r func (c ContainerFileCitation) MarshalJSON() ([]byte, error) { return marshalString(c) } func (c ContainerFile) MarshalJSON() ([]byte, error) { return marshalString(c) } func (c Content) MarshalJSON() ([]byte, error) { return marshalString(c) } +func (c CreatedAt) MarshalJSON() ([]byte, error) { return marshalString(c) } func (c Custom) MarshalJSON() ([]byte, error) { return marshalString(c) } func (c CustomToolCall) MarshalJSON() ([]byte, error) { return marshalString(c) } func (c CustomToolCallOutput) MarshalJSON() ([]byte, error) { return marshalString(c) } diff --git a/shared/shared.go b/shared/shared.go index a84aed61..14edbb97 100644 --- a/shared/shared.go +++ b/shared/shared.go @@ -663,7 +663,7 @@ type FunctionParameters map[string]any type Metadata map[string]string -// **o-series models only** +// **gpt-5 and o-series models only** // // Configuration options for // [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -741,7 +741,7 @@ const ( ReasoningSummaryDetailed ReasoningSummary = "detailed" ) -// **o-series models only** +// **gpt-5 and o-series models only** // // Configuration options for // [reasoning models](https://platform.openai.com/docs/guides/reasoning). diff --git a/upload.go b/upload.go index e10d6ed8..e7b93945 100644 --- a/upload.go +++ b/upload.go @@ -172,6 +172,9 @@ type UploadNewParams struct { // // Any of "assistants", "batch", "fine-tune", "vision", "user_data", "evals". Purpose FilePurpose `json:"purpose,omitzero,required"` + // The expiration policy for a file. By default, files with `purpose=batch` expire + // after 30 days and all other files are persisted until they are manually deleted. + ExpiresAfter UploadNewParamsExpiresAfter `json:"expires_after,omitzero"` paramObj } @@ -183,6 +186,30 @@ func (r *UploadNewParams) UnmarshalJSON(data []byte) error { return apijson.UnmarshalRoot(data, r) } +// The expiration policy for a file. 
By default, files with `purpose=batch` expire +// after 30 days and all other files are persisted until they are manually deleted. +// +// The properties Anchor, Seconds are required. +type UploadNewParamsExpiresAfter struct { + // The number of seconds after the anchor time that the file will expire. Must be + // between 3600 (1 hour) and 2592000 (30 days). + Seconds int64 `json:"seconds,required"` + // Anchor timestamp after which the expiration policy applies. Supported anchors: + // `created_at`. + // + // This field can be elided, and will marshal its zero value as "created_at". + Anchor constant.CreatedAt `json:"anchor,required"` + paramObj +} + +func (r UploadNewParamsExpiresAfter) MarshalJSON() (data []byte, err error) { + type shadow UploadNewParamsExpiresAfter + return param.MarshalObject(r, (*shadow)(&r)) +} +func (r *UploadNewParamsExpiresAfter) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + type UploadCompleteParams struct { // The ordered list of Part IDs. PartIDs []string `json:"part_ids,omitzero,required"` diff --git a/upload_test.go b/upload_test.go index 05eb0343..8c439edd 100644 --- a/upload_test.go +++ b/upload_test.go @@ -13,7 +13,7 @@ import ( "github.com/openai/openai-go/v2/option" ) -func TestUploadNew(t *testing.T) { +func TestUploadNewWithOptionalParams(t *testing.T) { baseURL := "http://localhost:4010" if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { baseURL = envURL @@ -30,6 +30,9 @@ func TestUploadNew(t *testing.T) { Filename: "filename", MimeType: "mime_type", Purpose: openai.FilePurposeAssistants, + ExpiresAfter: openai.UploadNewParamsExpiresAfter{ + Seconds: 3600, + }, }) if err != nil { var apierr *openai.Error diff --git a/webhooks/aliases.go b/webhooks/aliases.go index 9040bcc2..94e7af64 100644 --- a/webhooks/aliases.go +++ b/webhooks/aliases.go @@ -340,7 +340,7 @@ type FunctionParameters = shared.FunctionParameters // This is an alias to an internal type. 
type Metadata = shared.Metadata -// **o-series models only** +// **gpt-5 and o-series models only** // // Configuration options for // [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -382,7 +382,7 @@ const ReasoningSummaryConcise = shared.ReasoningSummaryConcise // Equals "detailed" const ReasoningSummaryDetailed = shared.ReasoningSummaryDetailed -// **o-series models only** +// **gpt-5 and o-series models only** // // Configuration options for // [reasoning models](https://platform.openai.com/docs/guides/reasoning). From ed424d56fcb76897eabba38eedd4f384ec177bf6 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 18 Aug 2025 05:09:27 +0000 Subject: [PATCH 4/4] release: 2.1.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ README.md | 2 +- internal/version.go | 2 +- 4 files changed, 16 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 4c4f7f20..656a2ef1 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.0.2" + ".": "2.1.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 85da9587..4aa0af94 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 2.1.0 (2025-08-18) + +Full Changelog: [v2.0.2...v2.1.0](https://github.com/openai/openai-go/compare/v2.0.2...v2.1.0) + +### Features + +* **api:** add new text parameters, expiration options ([323154c](https://github.com/openai/openai-go/commit/323154ccec2facf80d9ada76ed3c35553cb8896d)) + + +### Documentation + +* give https its missing "h" in Azure OpenAI REST API link ([#480](https://github.com/openai/openai-go/issues/480)) ([8a401c9](https://github.com/openai/openai-go/commit/8a401c9eecbe4936de487447be09757859001009)) + ## 2.0.2 (2025-08-09) Full Changelog: [v2.0.1...v2.0.2](https://github.com/openai/openai-go/compare/v2.0.1...v2.0.2) diff 
--git a/README.md b/README.md index cbfdce4c..07370d9c 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ Or to pin the version: ```sh -go get -u 'github.com/openai/openai-go@v2.0.2' +go get -u 'github.com/openai/openai-go@v2.1.0' ``` diff --git a/internal/version.go b/internal/version.go index 43f4f7da..436f8326 100644 --- a/internal/version.go +++ b/internal/version.go @@ -2,4 +2,4 @@ package internal -const PackageVersion = "2.0.2" // x-release-please-version +const PackageVersion = "2.1.0" // x-release-please-version