This repository was archived by the owner on Aug 14, 2025. It is now read-only.

release: 0.2.18-alpha.1 #21

Merged
2 commits merged on Aug 12, 2025
4 changes: 2 additions & 2 deletions .stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 106
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-c371abef4463f174f8d35ef3da4697fae5eb221db615f9c305319196472f313b.yml
-openapi_spec_hash: d9bb62faf229c2c2875c732715e9cfd1
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-83f6df45d805a86b006e19d7e3e2decdfaba03af9b2a7bb8a5080d8801ee0838.yml
+openapi_spec_hash: a08671c120ecd7142115b61b5728e537
 config_hash: e67fd054e95c1e82f78f4b834e96bb65
8 changes: 8 additions & 0 deletions CHANGELOG.md
@@ -2,6 +2,14 @@

+## 0.2.18-alpha.1 (2025-08-12)
+
+Full Changelog: [v0.2.18-alpha.1...v0.2.18-alpha.1](https://github.com/llamastack/llama-stack-client-python/compare/v0.2.18-alpha.1...v0.2.18-alpha.1)
+
+### Features
+
+* **api:** update via SDK Studio ([db99707](https://github.com/llamastack/llama-stack-client-python/commit/db9970745de255a3718edb6aee8360b55f58592e))
+
 ## 0.2.18-alpha.1 (2025-08-12)

 Full Changelog: [v0.2.17...v0.2.18-alpha.1](https://github.com/llamastack/llama-stack-client-python/compare/v0.2.17...v0.2.18-alpha.1)

 ### Features
24 changes: 23 additions & 1 deletion src/llama_stack_client/resources/responses/responses.py
@@ -2,7 +2,7 @@

 from __future__ import annotations

-from typing import Union, Iterable
+from typing import List, Union, Iterable
 from typing_extensions import Literal, overload

 import httpx
@@ -66,6 +66,7 @@ def create(
         *,
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
+        include: List[str] | NotGiven = NOT_GIVEN,
         instructions: str | NotGiven = NOT_GIVEN,
         max_infer_iters: int | NotGiven = NOT_GIVEN,
         previous_response_id: str | NotGiven = NOT_GIVEN,
@@ -89,6 +90,8 @@ def create(

           model: The underlying LLM used for completions.

+          include: (Optional) Additional fields to include in the response.
+
           previous_response_id: (Optional) if specified, the new response will be a continuation of the previous
               response. This can be used to easily fork-off new responses from existing
               responses.
@@ -112,6 +115,7 @@ def create(
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
         stream: Literal[True],
+        include: List[str] | NotGiven = NOT_GIVEN,
         instructions: str | NotGiven = NOT_GIVEN,
         max_infer_iters: int | NotGiven = NOT_GIVEN,
         previous_response_id: str | NotGiven = NOT_GIVEN,
@@ -134,6 +138,8 @@ def create(

           model: The underlying LLM used for completions.

+          include: (Optional) Additional fields to include in the response.
+
           previous_response_id: (Optional) if specified, the new response will be a continuation of the previous
               response. This can be used to easily fork-off new responses from existing
               responses.
@@ -157,6 +163,7 @@ def create(
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
         stream: bool,
+        include: List[str] | NotGiven = NOT_GIVEN,
         instructions: str | NotGiven = NOT_GIVEN,
         max_infer_iters: int | NotGiven = NOT_GIVEN,
         previous_response_id: str | NotGiven = NOT_GIVEN,
@@ -179,6 +186,8 @@ def create(

           model: The underlying LLM used for completions.

+          include: (Optional) Additional fields to include in the response.
+
           previous_response_id: (Optional) if specified, the new response will be a continuation of the previous
               response. This can be used to easily fork-off new responses from existing
               responses.
@@ -201,6 +210,7 @@ def create(
         *,
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
+        include: List[str] | NotGiven = NOT_GIVEN,
         instructions: str | NotGiven = NOT_GIVEN,
         max_infer_iters: int | NotGiven = NOT_GIVEN,
         previous_response_id: str | NotGiven = NOT_GIVEN,
@@ -222,6 +232,7 @@ def create(
                 {
                     "input": input,
                     "model": model,
+                    "include": include,
                     "instructions": instructions,
                     "max_infer_iters": max_infer_iters,
                     "previous_response_id": previous_response_id,
@@ -362,6 +373,7 @@ async def create(
         *,
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
+        include: List[str] | NotGiven = NOT_GIVEN,
         instructions: str | NotGiven = NOT_GIVEN,
         max_infer_iters: int | NotGiven = NOT_GIVEN,
         previous_response_id: str | NotGiven = NOT_GIVEN,
@@ -385,6 +397,8 @@ async def create(

           model: The underlying LLM used for completions.

+          include: (Optional) Additional fields to include in the response.
+
           previous_response_id: (Optional) if specified, the new response will be a continuation of the previous
               response. This can be used to easily fork-off new responses from existing
               responses.
@@ -408,6 +422,7 @@ async def create(
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
         stream: Literal[True],
+        include: List[str] | NotGiven = NOT_GIVEN,
         instructions: str | NotGiven = NOT_GIVEN,
         max_infer_iters: int | NotGiven = NOT_GIVEN,
         previous_response_id: str | NotGiven = NOT_GIVEN,
@@ -430,6 +445,8 @@ async def create(

           model: The underlying LLM used for completions.

+          include: (Optional) Additional fields to include in the response.
+
           previous_response_id: (Optional) if specified, the new response will be a continuation of the previous
               response. This can be used to easily fork-off new responses from existing
               responses.
@@ -453,6 +470,7 @@ async def create(
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
         stream: bool,
+        include: List[str] | NotGiven = NOT_GIVEN,
         instructions: str | NotGiven = NOT_GIVEN,
         max_infer_iters: int | NotGiven = NOT_GIVEN,
         previous_response_id: str | NotGiven = NOT_GIVEN,
@@ -475,6 +493,8 @@ async def create(

           model: The underlying LLM used for completions.

+          include: (Optional) Additional fields to include in the response.
+
           previous_response_id: (Optional) if specified, the new response will be a continuation of the previous
               response. This can be used to easily fork-off new responses from existing
               responses.
@@ -497,6 +517,7 @@ async def create(
         *,
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
+        include: List[str] | NotGiven = NOT_GIVEN,
         instructions: str | NotGiven = NOT_GIVEN,
         max_infer_iters: int | NotGiven = NOT_GIVEN,
         previous_response_id: str | NotGiven = NOT_GIVEN,
@@ -518,6 +539,7 @@ async def create(
                 {
                     "input": input,
                     "model": model,
+                    "include": include,
                     "instructions": instructions,
                     "max_infer_iters": max_infer_iters,
                     "previous_response_id": previous_response_id,
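Taken together, these overloads add one optional `include` parameter to both the sync and async `responses.create`. Below is a minimal usage sketch; the base URL, model id, and `include` value are illustrative assumptions (the diff does not enumerate the accepted field names), not part of this PR.

from llama_stack_client import LlamaStackClient

# Assumes a Llama Stack server running locally; adjust base_url for your deployment.
client = LlamaStackClient(base_url="http://localhost:8321")

# `include` is the new optional parameter: extra fields to include in the response.
response = client.responses.create(
    model="meta-llama/Llama-3.3-70B-Instruct",  # placeholder model id
    input="Write a haiku about release automation.",
    include=["message.output_text.logprobs"],  # illustrative field name
)
print(response)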
8 changes: 7 additions & 1 deletion src/llama_stack_client/types/create_response.py
@@ -22,7 +22,13 @@ class Result(BaseModel):
     """

     category_scores: Optional[Dict[str, float]] = None
-    """A list of the categories along with their scores as predicted by model."""
+    """A list of the categories along with their scores as predicted by model.
+
+    Required set of categories that need to be in response - violence -
+    violence/graphic - harassment - harassment/threatening - hate -
+    hate/threatening - illicit - illicit/violent - sexual - sexual/minors -
+    self-harm - self-harm/intent - self-harm/instructions
+    """

     user_message: Optional[str] = None

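The expanded docstring pins down the moderation categories expected in `category_scores`. As a hedged illustration of consuming that field (the 0.5 threshold is an arbitrary example, not an API guarantee, and the import path is inferred from this file's location):

from typing import List

from llama_stack_client.types.create_response import Result  # path assumed from this diff

def flagged_categories(result: Result, threshold: float = 0.5) -> List[str]:
    # Collect the category names whose model-predicted score exceeds the threshold.
    if result.category_scores is None:
        return []
    return [name for name, score in result.category_scores.items() if score > threshold]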
3 changes: 3 additions & 0 deletions src/llama_stack_client/types/response_create_params.py
@@ -46,6 +46,9 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     model: Required[str]
     """The underlying LLM used for completions."""

+    include: List[str]
+    """(Optional) Additional fields to include in the response."""
+
     instructions: str

     max_infer_iters: int
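Because `include` lives on `ResponseCreateParamsBase`, it is shared by the streaming overloads (`stream=True`) shown earlier. A sketch of the streaming form, under the same illustrative assumptions as above:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

# With stream=True, create() returns an iterable of response stream events.
stream = client.responses.create(
    model="meta-llama/Llama-3.3-70B-Instruct",  # placeholder model id
    input="Stream a short limerick.",
    stream=True,
    include=["message.output_text.logprobs"],  # illustrative field name
)
for event in stream:
    print(event)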
4 changes: 4 additions & 0 deletions tests/api_resources/test_responses.py
@@ -31,6 +31,7 @@ def test_method_create_with_all_params_overload_1(self, client: LlamaStackClient
         response = client.responses.create(
             input="string",
             model="model",
+            include=["string"],
             instructions="instructions",
             max_infer_iters=0,
             previous_response_id="previous_response_id",
@@ -96,6 +97,7 @@ def test_method_create_with_all_params_overload_2(self, client: LlamaStackClient
             input="string",
             model="model",
             stream=True,
+            include=["string"],
             instructions="instructions",
             max_infer_iters=0,
             previous_response_id="previous_response_id",
@@ -238,6 +240,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
         response = await async_client.responses.create(
             input="string",
             model="model",
+            include=["string"],
             instructions="instructions",
             max_infer_iters=0,
             previous_response_id="previous_response_id",
@@ -303,6 +306,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
             input="string",
             model="model",
             stream=True,
+            include=["string"],
             instructions="instructions",
             max_infer_iters=0,
             previous_response_id="previous_response_id",