diff --git a/.stats.yml b/.stats.yml
index 60347bb..0dfc787 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 106
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-c371abef4463f174f8d35ef3da4697fae5eb221db615f9c305319196472f313b.yml
-openapi_spec_hash: d9bb62faf229c2c2875c732715e9cfd1
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-83f6df45d805a86b006e19d7e3e2decdfaba03af9b2a7bb8a5080d8801ee0838.yml
+openapi_spec_hash: a08671c120ecd7142115b61b5728e537
 config_hash: e67fd054e95c1e82f78f4b834e96bb65
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8b96c65..f038668 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,14 @@
 
 ## 0.2.18-alpha.1 (2025-08-12)
 
+Full Changelog: [v0.2.18-alpha.1...v0.2.18-alpha.1](https://github.com/llamastack/llama-stack-client-python/compare/v0.2.18-alpha.1...v0.2.18-alpha.1)
+
+### Features
+
+* **api:** update via SDK Studio ([db99707](https://github.com/llamastack/llama-stack-client-python/commit/db9970745de255a3718edb6aee8360b55f58592e))
+
+## 0.2.18-alpha.1 (2025-08-12)
+
 Full Changelog: [v0.2.17...v0.2.18-alpha.1](https://github.com/llamastack/llama-stack-client-python/compare/v0.2.17...v0.2.18-alpha.1)
 
 ### Features
diff --git a/src/llama_stack_client/resources/responses/responses.py b/src/llama_stack_client/resources/responses/responses.py
index 375834e..b73be85 100644
--- a/src/llama_stack_client/resources/responses/responses.py
+++ b/src/llama_stack_client/resources/responses/responses.py
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import Union, Iterable
+from typing import List, Union, Iterable
 from typing_extensions import Literal, overload
 
 import httpx
@@ -66,6 +66,7 @@ def create(
         *,
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
+        include: List[str] | NotGiven = NOT_GIVEN,
         instructions: str | NotGiven = NOT_GIVEN,
         max_infer_iters: int | NotGiven = NOT_GIVEN,
         previous_response_id: str | NotGiven = NOT_GIVEN,
@@ -89,6 +90,8 @@ def create(
 
           model: The underlying LLM used for completions.
 
+          include: (Optional) Additional fields to include in the response.
+
           previous_response_id: (Optional) if specified, the new response will be a
              continuation of the previous response. This can be used to easily fork-off new
              responses from existing responses.
@@ -112,6 +115,7 @@ def create(
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
         stream: Literal[True],
+        include: List[str] | NotGiven = NOT_GIVEN,
         instructions: str | NotGiven = NOT_GIVEN,
         max_infer_iters: int | NotGiven = NOT_GIVEN,
         previous_response_id: str | NotGiven = NOT_GIVEN,
@@ -134,6 +138,8 @@ def create(
 
           model: The underlying LLM used for completions.
 
+          include: (Optional) Additional fields to include in the response.
+
           previous_response_id: (Optional) if specified, the new response will be a
              continuation of the previous response. This can be used to easily fork-off new
              responses from existing responses.
@@ -157,6 +163,7 @@ def create(
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
         stream: bool,
+        include: List[str] | NotGiven = NOT_GIVEN,
         instructions: str | NotGiven = NOT_GIVEN,
         max_infer_iters: int | NotGiven = NOT_GIVEN,
         previous_response_id: str | NotGiven = NOT_GIVEN,
@@ -179,6 +186,8 @@ def create(
 
           model: The underlying LLM used for completions.
 
+          include: (Optional) Additional fields to include in the response.
+
           previous_response_id: (Optional) if specified, the new response will be a
              continuation of the previous response. This can be used to easily fork-off new
              responses from existing responses.
@@ -201,6 +210,7 @@ def create(
         *,
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
+        include: List[str] | NotGiven = NOT_GIVEN,
         instructions: str | NotGiven = NOT_GIVEN,
         max_infer_iters: int | NotGiven = NOT_GIVEN,
         previous_response_id: str | NotGiven = NOT_GIVEN,
@@ -222,6 +232,7 @@ def create(
                 {
                     "input": input,
                     "model": model,
+                    "include": include,
                     "instructions": instructions,
                     "max_infer_iters": max_infer_iters,
                     "previous_response_id": previous_response_id,
@@ -362,6 +373,7 @@ async def create(
         *,
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
+        include: List[str] | NotGiven = NOT_GIVEN,
         instructions: str | NotGiven = NOT_GIVEN,
         max_infer_iters: int | NotGiven = NOT_GIVEN,
         previous_response_id: str | NotGiven = NOT_GIVEN,
@@ -385,6 +397,8 @@ async def create(
 
           model: The underlying LLM used for completions.
 
+          include: (Optional) Additional fields to include in the response.
+
           previous_response_id: (Optional) if specified, the new response will be a
              continuation of the previous response. This can be used to easily fork-off new
              responses from existing responses.
@@ -408,6 +422,7 @@ async def create(
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
         stream: Literal[True],
+        include: List[str] | NotGiven = NOT_GIVEN,
         instructions: str | NotGiven = NOT_GIVEN,
         max_infer_iters: int | NotGiven = NOT_GIVEN,
         previous_response_id: str | NotGiven = NOT_GIVEN,
@@ -430,6 +445,8 @@ async def create(
 
           model: The underlying LLM used for completions.
 
+          include: (Optional) Additional fields to include in the response.
+
           previous_response_id: (Optional) if specified, the new response will be a
              continuation of the previous response. This can be used to easily fork-off new
              responses from existing responses.
@@ -453,6 +470,7 @@ async def create(
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
         stream: bool,
+        include: List[str] | NotGiven = NOT_GIVEN,
         instructions: str | NotGiven = NOT_GIVEN,
         max_infer_iters: int | NotGiven = NOT_GIVEN,
         previous_response_id: str | NotGiven = NOT_GIVEN,
@@ -475,6 +493,8 @@ async def create(
 
           model: The underlying LLM used for completions.
 
+          include: (Optional) Additional fields to include in the response.
+
           previous_response_id: (Optional) if specified, the new response will be a
              continuation of the previous response. This can be used to easily fork-off new
              responses from existing responses.
@@ -497,6 +517,7 @@ async def create(
         *,
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
+        include: List[str] | NotGiven = NOT_GIVEN,
         instructions: str | NotGiven = NOT_GIVEN,
         max_infer_iters: int | NotGiven = NOT_GIVEN,
         previous_response_id: str | NotGiven = NOT_GIVEN,
@@ -518,6 +539,7 @@ async def create(
                 {
                     "input": input,
                     "model": model,
+                    "include": include,
                     "instructions": instructions,
                     "max_infer_iters": max_infer_iters,
                     "previous_response_id": previous_response_id,
diff --git a/src/llama_stack_client/types/create_response.py b/src/llama_stack_client/types/create_response.py
index fbb519f..b0eaf3e 100644
--- a/src/llama_stack_client/types/create_response.py
+++ b/src/llama_stack_client/types/create_response.py
@@ -22,7 +22,13 @@ class Result(BaseModel):
     """
 
     category_scores: Optional[Dict[str, float]] = None
-    """A list of the categories along with their scores as predicted by model."""
+    """A list of the categories along with their scores as predicted by model.
+
+    Required set of categories that need to be in response - violence -
+    violence/graphic - harassment - harassment/threatening - hate -
+    hate/threatening - illicit - illicit/violent - sexual - sexual/minors -
+    self-harm - self-harm/intent - self-harm/instructions
+    """
 
     user_message: Optional[str] = None
 
diff --git a/src/llama_stack_client/types/response_create_params.py b/src/llama_stack_client/types/response_create_params.py
index 7485771..dba9fce 100644
--- a/src/llama_stack_client/types/response_create_params.py
+++ b/src/llama_stack_client/types/response_create_params.py
@@ -46,6 +46,9 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     model: Required[str]
     """The underlying LLM used for completions."""
 
+    include: List[str]
+    """(Optional) Additional fields to include in the response."""
+
     instructions: str
 
     max_infer_iters: int
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
index 5604dde..44366d6 100644
--- a/tests/api_resources/test_responses.py
+++ b/tests/api_resources/test_responses.py
@@ -31,6 +31,7 @@ def test_method_create_with_all_params_overload_1(self, client: LlamaStackClient
         response = client.responses.create(
             input="string",
             model="model",
+            include=["string"],
             instructions="instructions",
             max_infer_iters=0,
             previous_response_id="previous_response_id",
@@ -96,6 +97,7 @@ def test_method_create_with_all_params_overload_2(self, client: LlamaStackClient
             input="string",
             model="model",
             stream=True,
+            include=["string"],
             instructions="instructions",
             max_infer_iters=0,
             previous_response_id="previous_response_id",
@@ -238,6 +240,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
         response = await async_client.responses.create(
             input="string",
             model="model",
+            include=["string"],
             instructions="instructions",
             max_infer_iters=0,
             previous_response_id="previous_response_id",
@@ -303,6 +306,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
             input="string",
             model="model",
             stream=True,
+            include=["string"],
             instructions="instructions",
             max_infer_iters=0,
             previous_response_id="previous_response_id",
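
Note: the net effect of this change is a new optional `include` parameter, typed `List[str]`, on `client.responses.create()` and its async counterpart. A minimal usage sketch follows; the base URL, model id, and the example `include` value are illustrative assumptions, not values defined by this diff (the spec documents the field only as "(Optional) Additional fields to include in the response").

# Illustrative sketch of the new parameter. The server URL, model id, and
# include value are placeholder assumptions, not values from this change.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local Llama Stack server

response = client.responses.create(
    input="Write a one-line summary of the Llama Stack project.",
    model="meta-llama/Llama-3.1-8B-Instruct",  # hypothetical model id
    include=["output_item.annotations"],  # hypothetical field name; valid values depend on the server
)
print(response)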