diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 7d8140ed..dd3f6e2b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -7,6 +7,10 @@ on:
       - 'integrated/**'
       - 'stl-preview-head/**'
       - 'stl-preview-base/**'
+  pull_request:
+    branches-ignore:
+      - 'stl-preview-head/**'
+      - 'stl-preview-base/**'
 
 jobs:
   lint:
@@ -30,6 +34,30 @@ jobs:
       - name: Run lints
         run: ./scripts/lint
 
+  upload:
+    if: github.repository == 'stainless-sdks/llama-stack-client-python'
+    timeout-minutes: 10
+    name: upload
+    permissions:
+      contents: read
+      id-token: write
+    runs-on: depot-ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Get GitHub OIDC Token
+        id: github-oidc
+        uses: actions/github-script@v6
+        with:
+          script: core.setOutput('github_token', await core.getIDToken());
+
+      - name: Upload tarball
+        env:
+          URL: https://pkg.stainless.com/s
+          AUTH: ${{ steps.github-oidc.outputs.github_token }}
+          SHA: ${{ github.sha }}
+        run: ./scripts/utils/upload-artifact.sh
+
   test:
     timeout-minutes: 10
     name: test
diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml
new file mode 100644
index 00000000..e62e7dd5
--- /dev/null
+++ b/.github/workflows/publish-pypi.yml
@@ -0,0 +1,31 @@
+# This workflow is triggered when a GitHub release is created.
+# It can also be run manually to re-publish to PyPI in case it failed for some reason.
+# You can run this workflow by navigating to https://www.github.com/llamastack/llama-stack-client-python/actions/workflows/publish-pypi.yml
+name: Publish PyPI
+on:
+  workflow_dispatch:
+
+  release:
+    types: [published]
+
+jobs:
+  publish:
+    name: publish
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install Rye
+        run: |
+          curl -sSf https://rye.astral.sh/get | bash
+          echo "$HOME/.rye/shims" >> $GITHUB_PATH
+        env:
+          RYE_VERSION: '0.44.0'
+          RYE_INSTALL_OPTION: '--yes'
+
+      - name: Publish to PyPI
+        run: |
+          bash ./bin/publish-pypi
+        env:
+          PYPI_TOKEN: ${{ secrets.LLAMA_STACK_CLIENT_PYPI_TOKEN || secrets.PYPI_TOKEN }}
diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml
new file mode 100644
index 00000000..1ac94793
--- /dev/null
+++ b/.github/workflows/release-doctor.yml
@@ -0,0 +1,21 @@
+name: Release Doctor
+on:
+  pull_request:
+    branches:
+      - main
+  workflow_dispatch:
+
+jobs:
+  release_doctor:
+    name: release doctor
+    runs-on: ubuntu-latest
+    if: github.repository == 'llamastack/llama-stack-client-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next')
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Check release environment
+        run: |
+          bash ./bin/check-release-environment
+        env:
+          PYPI_TOKEN: ${{ secrets.LLAMA_STACK_CLIENT_PYPI_TOKEN || secrets.PYPI_TOKEN }}
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
new file mode 100644
index 00000000..ba6c3483
--- /dev/null
+++ b/.release-please-manifest.json
@@ -0,0 +1,3 @@
+{
+  ".": "0.1.0-alpha.1"
+}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index fa112235..32b2676c 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 91
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-0e756984d87c3fd1eb96d486947b3bc2086d5afcf299e8119b6b89bbd86dbe75.yml
 openapi_spec_hash: 7c519a25bb9a094d4b4bda17bb20dd88
-config_hash: b83ca660b1609a8903f32e3d54b4ff00
+config_hash: 
d1f21dfdbf5d9925eecf56b6c1fab755 diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..ee3351d8 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,42 @@ +# Changelog + +## 0.1.0-alpha.1 (2025-06-27) + +Full Changelog: [v0.0.1-alpha.0...v0.1.0-alpha.1](https://github.com/llamastack/llama-stack-client-python/compare/v0.0.1-alpha.0...v0.1.0-alpha.1) + +### Features + +* **client:** add follow_redirects request option ([a77a9ee](https://github.com/llamastack/llama-stack-client-python/commit/a77a9eed9038782ba6b93ce0d3147ee4a6b8a3b7)) +* **client:** add support for aiohttp ([d78982b](https://github.com/llamastack/llama-stack-client-python/commit/d78982b197c5e0a0fb67afcb44e9644fd8d931be)) + + +### Bug Fixes + +* **ci:** release-doctor — report correct token name ([6f3a4e2](https://github.com/llamastack/llama-stack-client-python/commit/6f3a4e24d8b357d7dc01adb0d9f736989fa9517d)) +* **client:** correctly parse binary response | stream ([85d6bbd](https://github.com/llamastack/llama-stack-client-python/commit/85d6bbd97efac7509cbff0bb2d461a80d09b5e61)) +* **package:** support direct resource imports ([a862d55](https://github.com/llamastack/llama-stack-client-python/commit/a862d551553aac41573306ce39480e1eb16ea3d3)) +* **tests:** fix: tests which call HTTP endpoints directly with the example parameters ([347a4bf](https://github.com/llamastack/llama-stack-client-python/commit/347a4bffa920f5727a4c02eba18bd207001698b5)) + + +### Chores + +* change publish docs url ([fdd7a07](https://github.com/llamastack/llama-stack-client-python/commit/fdd7a075564ac206e91b2d06bf130c4de9473838)) +* **ci:** enable for pull requests ([c9b6347](https://github.com/llamastack/llama-stack-client-python/commit/c9b6347f084acb1566b8e8283cf0bcfde7f6562c)) +* **ci:** fix installation instructions ([40d9854](https://github.com/llamastack/llama-stack-client-python/commit/40d9854bd2630a471f1ca93d249e4d44b73fa864)) +* **ci:** upload sdks to package manager ([2d2282b](https://github.com/llamastack/llama-stack-client-python/commit/2d2282bb49d58daef1f32fa0f1e5a356abf8df0d)) +* **docs:** grammar improvements ([6f57b13](https://github.com/llamastack/llama-stack-client-python/commit/6f57b1363367de7ed5035fd1d6ba1a071eee67ba)) +* **docs:** remove reference to rye shell ([bcf315a](https://github.com/llamastack/llama-stack-client-python/commit/bcf315ae00c458f89dfa3684bcc7abdb732b6c5f)) +* **docs:** remove unnecessary param examples ([60ec829](https://github.com/llamastack/llama-stack-client-python/commit/60ec829e809156217cf2f911b3cac6b23a06baad)) +* **internal:** avoid errors for isinstance checks on proxies ([758a188](https://github.com/llamastack/llama-stack-client-python/commit/758a188dbfaa284a13b70816689c99917a05d16c)) +* **internal:** codegen related update ([ab9f05c](https://github.com/llamastack/llama-stack-client-python/commit/ab9f05cc1da5b21afceacdf9c8eb54b6e59eed01)) +* **internal:** update conftest.py ([218e172](https://github.com/llamastack/llama-stack-client-python/commit/218e172c16014dad41a7c189c5620077955d6bdf)) +* **readme:** update badges ([9b63e1b](https://github.com/llamastack/llama-stack-client-python/commit/9b63e1b7dbbbd7556d046a2a4224a8385bbea24c)) +* **tests:** add tests for httpx client instantiation & proxies ([b27b11b](https://github.com/llamastack/llama-stack-client-python/commit/b27b11bbe0a9c5778b757733c11828d9603307ea)) +* **tests:** run tests in parallel ([1287a3c](https://github.com/llamastack/llama-stack-client-python/commit/1287a3c11f668d916c8c7af534a48523e2e69140)) +* **tests:** skip some failing 
tests on the latest python versions ([73b5705](https://github.com/llamastack/llama-stack-client-python/commit/73b57051c48d2ec42b844a288ffc9b5e3bbe6f2b)) +* update SDK settings ([e54ba91](https://github.com/llamastack/llama-stack-client-python/commit/e54ba9163792ab80362a189acb825bcd00e5384b)) + + +### Documentation + +* **client:** fix httpx.Timeout documentation reference ([497f2a1](https://github.com/llamastack/llama-stack-client-python/commit/497f2a198140f73525a880497bf1c51b5749c1f3)) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8e5d5903..e4d657d0 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -17,8 +17,7 @@ $ rye sync --all-features You can then run scripts using `rye run python script.py` or by activating the virtual environment: ```sh -$ rye shell -# or manually activate - https://docs.python.org/3/library/venv.html#how-venvs-work +# Activate the virtual environment - https://docs.python.org/3/library/venv.html#how-venvs-work $ source .venv/bin/activate # now you can omit the `rye run` prefix @@ -63,7 +62,7 @@ If you’d like to use the repository from source, you can either install from g To install via git: ```sh -$ pip install git+ssh://git@github.com/stainless-sdks/llama-stack-client-python.git +$ pip install git+ssh://git@github.com/llamastack/llama-stack-client-python.git ``` Alternatively, you can build from source and install the wheel file: @@ -121,7 +120,7 @@ the changes aren't made through the automated pipeline, you may want to make rel ### Publish with a GitHub workflow -You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/stainless-sdks/llama-stack-client-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. +You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/llamastack/llama-stack-client-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. ### Publish manually diff --git a/README.md b/README.md index 745c0719..70a15cc6 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Llama Stack Client Python API library -[![PyPI version](https://img.shields.io/pypi/v/llama_stack_client.svg)](https://pypi.org/project/llama_stack_client/) +[![PyPI version]()](https://pypi.org/project/llama_stack_client/) The Llama Stack Client Python library provides convenient access to the Llama Stack Client REST API from any Python 3.8+ application. The library includes type definitions for all request params and response fields, @@ -15,12 +15,12 @@ The full API of this library can be found in [api.md](api.md). ## Installation ```sh -# install from this staging repo -pip install git+ssh://git@github.com/stainless-sdks/llama-stack-client-python.git +# install from the production repo +pip install git+ssh://git@github.com/llamastack/llama-stack-client-python.git ``` > [!NOTE] -> Once this package is [published to PyPI](https://app.stainless.com/docs/guides/publish), this will become: `pip install --pre llama_stack_client` +> Once this package is [published to PyPI](https://www.stainless.com/docs/guides/publish), this will become: `pip install --pre llama_stack_client` ## Usage @@ -71,6 +71,42 @@ asyncio.run(main()) Functionality between the synchronous and asynchronous clients is otherwise identical. +### With aiohttp + +By default, the async client uses `httpx` for HTTP requests. 
However, for improved concurrency performance you may also use `aiohttp` as the HTTP backend. + +You can enable this by installing `aiohttp`: + +```sh +# install from the production repo +pip install 'llama_stack_client[aiohttp] @ git+ssh://git@github.com/llamastack/llama-stack-client-python.git' +``` + +Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`: + +```python +import os +import asyncio +from llama_stack_client import DefaultAioHttpClient +from llama_stack_client import AsyncLlamaStackClient + + +async def main() -> None: + async with AsyncLlamaStackClient( + api_key=os.environ.get( + "LLAMA_STACK_CLIENT_API_KEY" + ), # This is the default and can be omitted + http_client=DefaultAioHttpClient(), + ) as client: + await client.datasetio.append_rows( + dataset_id="REPLACE_ME", + rows=[{"foo": True}], + ) + + +asyncio.run(main()) +``` + ## Using types Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typing.html#typing.TypedDict). Responses are [Pydantic models](https://docs.pydantic.dev) which also provide helper methods for things like: @@ -99,7 +135,7 @@ response = client.inference.batch_chat_completion( ] ], model_id="model_id", - logprobs={"top_k": 0}, + logprobs={}, ) print(response.logprobs) ``` @@ -175,7 +211,7 @@ client.with_options(max_retries=5).datasetio.append_rows( ### Timeouts By default requests time out after 1 minute. You can configure this with a `timeout` option, -which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/#fine-tuning-the-configuration) object: +which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object: ```python from llama_stack_client import LlamaStackClient @@ -248,9 +284,9 @@ datasetio = response.parse() # get the object that `datasetio.append_rows()` wo print(datasetio) ``` -These methods return an [`APIResponse`](https://github.com/stainless-sdks/llama-stack-client-python/tree/main/src/llama_stack_client/_response.py) object. +These methods return an [`APIResponse`](https://github.com/llamastack/llama-stack-client-python/tree/main/src/llama_stack_client/_response.py) object. -The async client returns an [`AsyncAPIResponse`](https://github.com/stainless-sdks/llama-stack-client-python/tree/main/src/llama_stack_client/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. +The async client returns an [`AsyncAPIResponse`](https://github.com/llamastack/llama-stack-client-python/tree/main/src/llama_stack_client/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. #### `.with_streaming_response` @@ -357,7 +393,7 @@ This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) con We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. -We are keen for your feedback; please open an [issue](https://www.github.com/stainless-sdks/llama-stack-client-python/issues) with questions, bugs, or suggestions. +We are keen for your feedback; please open an [issue](https://www.github.com/llamastack/llama-stack-client-python/issues) with questions, bugs, or suggestions. ### Determining the installed version diff --git a/SECURITY.md b/SECURITY.md index 5bbcc8f1..52ce19c1 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -16,7 +16,7 @@ before making any information public. 
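Editor's note on the README hunk above: it ends at the "### Determining the installed version" heading, whose body lies outside the diff's context lines. A minimal sketch of that check, assuming the package re-exports `__version__` from `src/llama_stack_client/_version.py` (bumped to `0.1.0-alpha.1` later in this patch), as Stainless-generated clients conventionally do:

```python
# Sketch, not part of the patch: reading the installed SDK version.
# Assumes `__version__` is re-exported at package level from _version.py.
import llama_stack_client

print(llama_stack_client.__version__)  # e.g. "0.1.0-alpha.1" for this release
```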
## Reporting Non-SDK Related Security Issues If you encounter security issues that are not directly related to SDKs but pertain to the services -or products provided by Llama Stack Client please follow the respective company's security reporting guidelines. +or products provided by Llama Stack Client, please follow the respective company's security reporting guidelines. --- diff --git a/bin/check-release-environment b/bin/check-release-environment new file mode 100644 index 00000000..b845b0f4 --- /dev/null +++ b/bin/check-release-environment @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +errors=() + +if [ -z "${PYPI_TOKEN}" ]; then + errors+=("The PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.") +fi + +lenErrors=${#errors[@]} + +if [[ lenErrors -gt 0 ]]; then + echo -e "Found the following errors in the release environment:\n" + + for error in "${errors[@]}"; do + echo -e "- $error\n" + done + + exit 1 +fi + +echo "The environment is ready to push releases!" diff --git a/pyproject.toml b/pyproject.toml index 34dbaa46..dc25fd75 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "llama_stack_client" -version = "0.0.1-alpha.0" +version = "0.1.0-alpha.1" description = "The official Python library for the llama-stack-client API" dynamic = ["readme"] license = "Apache-2.0" @@ -34,9 +34,11 @@ classifiers = [ ] [project.urls] -Homepage = "https://github.com/stainless-sdks/llama-stack-client-python" -Repository = "https://github.com/stainless-sdks/llama-stack-client-python" +Homepage = "https://github.com/llamastack/llama-stack-client-python" +Repository = "https://github.com/llamastack/llama-stack-client-python" +[project.optional-dependencies] +aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.6"] [tool.rye] managed = true @@ -54,6 +56,7 @@ dev-dependencies = [ "importlib-metadata>=6.7.0", "rich>=13.7.1", "nest_asyncio==1.6.0", + "pytest-xdist>=3.6.1", ] [tool.rye.scripts] @@ -121,11 +124,11 @@ path = "README.md" [[tool.hatch.metadata.hooks.fancy-pypi-readme.substitutions]] # replace relative links with absolute links pattern = '\[(.+?)\]\(((?!https?://)\S+?)\)' -replacement = '[\1](https://github.com/stainless-sdks/llama-stack-client-python/tree/main/\g<2>)' +replacement = '[\1](https://github.com/llamastack/llama-stack-client-python/tree/main/\g<2>)' [tool.pytest.ini_options] testpaths = ["tests"] -addopts = "--tb=short" +addopts = "--tb=short -n auto" xfail_strict = true asyncio_mode = "auto" asyncio_default_fixture_loop_scope = "session" diff --git a/release-please-config.json b/release-please-config.json new file mode 100644 index 00000000..3602e961 --- /dev/null +++ b/release-please-config.json @@ -0,0 +1,66 @@ +{ + "packages": { + ".": {} + }, + "$schema": "https://raw.githubusercontent.com/stainless-api/release-please/main/schemas/config.json", + "include-v-in-tag": true, + "include-component-in-tag": false, + "versioning": "prerelease", + "prerelease": true, + "bump-minor-pre-major": true, + "bump-patch-for-minor-pre-major": false, + "pull-request-header": "Automated Release PR", + "pull-request-title-pattern": "release: ${version}", + "changelog-sections": [ + { + "type": "feat", + "section": "Features" + }, + { + "type": "fix", + "section": "Bug Fixes" + }, + { + "type": "perf", + "section": "Performance Improvements" + }, + { + "type": "revert", + "section": "Reverts" + }, + { + "type": "chore", + "section": "Chores" + }, + { + "type": "docs", + "section": "Documentation" + }, + { + "type": "style", 
+ "section": "Styles" + }, + { + "type": "refactor", + "section": "Refactors" + }, + { + "type": "test", + "section": "Tests", + "hidden": true + }, + { + "type": "build", + "section": "Build System" + }, + { + "type": "ci", + "section": "Continuous Integration", + "hidden": true + } + ], + "release-type": "python", + "extra-files": [ + "src/llama_stack_client/_version.py" + ] +} \ No newline at end of file diff --git a/requirements-dev.lock b/requirements-dev.lock index 8c2e8e4d..869ac57f 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -10,6 +10,13 @@ # universal: false -e file:. +aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.12.8 + # via httpx-aiohttp + # via llama-stack-client +aiosignal==1.3.2 + # via aiohttp annotated-types==0.6.0 # via pydantic anyio==4.4.0 @@ -17,6 +24,10 @@ anyio==4.4.0 # via llama-stack-client argcomplete==3.1.2 # via nox +async-timeout==5.0.1 + # via aiohttp +attrs==25.3.0 + # via aiohttp certifi==2023.7.22 # via httpcore # via httpx @@ -30,18 +41,27 @@ distro==1.8.0 exceptiongroup==1.2.2 # via anyio # via pytest +execnet==2.1.1 + # via pytest-xdist filelock==3.12.4 # via virtualenv +frozenlist==1.6.2 + # via aiohttp + # via aiosignal h11==0.14.0 # via httpcore httpcore==1.0.2 # via httpx httpx==0.28.1 + # via httpx-aiohttp # via llama-stack-client # via respx +httpx-aiohttp==0.1.6 + # via llama-stack-client idna==3.4 # via anyio # via httpx + # via yarl importlib-metadata==7.0.0 iniconfig==2.0.0 # via pytest @@ -49,6 +69,9 @@ markdown-it-py==3.0.0 # via rich mdurl==0.1.2 # via markdown-it-py +multidict==6.4.4 + # via aiohttp + # via yarl mypy==1.14.1 mypy-extensions==1.0.0 # via mypy @@ -63,6 +86,9 @@ platformdirs==3.11.0 # via virtualenv pluggy==1.5.0 # via pytest +propcache==0.3.1 + # via aiohttp + # via yarl pydantic==2.10.3 # via llama-stack-client pydantic-core==2.27.1 @@ -72,7 +98,9 @@ pygments==2.18.0 pyright==1.1.399 pytest==8.3.3 # via pytest-asyncio + # via pytest-xdist pytest-asyncio==0.24.0 +pytest-xdist==3.7.0 python-dateutil==2.8.2 # via time-machine pytz==2023.3.post1 @@ -94,11 +122,14 @@ tomli==2.0.2 typing-extensions==4.12.2 # via anyio # via llama-stack-client + # via multidict # via mypy # via pydantic # via pydantic-core # via pyright virtualenv==20.24.5 # via nox +yarl==1.20.0 + # via aiohttp zipp==3.17.0 # via importlib-metadata diff --git a/requirements.lock b/requirements.lock index d2711460..ac621298 100644 --- a/requirements.lock +++ b/requirements.lock @@ -10,11 +10,22 @@ # universal: false -e file:. 
+aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.12.8 + # via httpx-aiohttp + # via llama-stack-client +aiosignal==1.3.2 + # via aiohttp annotated-types==0.6.0 # via pydantic anyio==4.4.0 # via httpx # via llama-stack-client +async-timeout==5.0.1 + # via aiohttp +attrs==25.3.0 + # via aiohttp certifi==2023.7.22 # via httpcore # via httpx @@ -22,15 +33,28 @@ distro==1.8.0 # via llama-stack-client exceptiongroup==1.2.2 # via anyio +frozenlist==1.6.2 + # via aiohttp + # via aiosignal h11==0.14.0 # via httpcore httpcore==1.0.2 # via httpx httpx==0.28.1 + # via httpx-aiohttp + # via llama-stack-client +httpx-aiohttp==0.1.6 # via llama-stack-client idna==3.4 # via anyio # via httpx + # via yarl +multidict==6.4.4 + # via aiohttp + # via yarl +propcache==0.3.1 + # via aiohttp + # via yarl pydantic==2.10.3 # via llama-stack-client pydantic-core==2.27.1 @@ -41,5 +65,8 @@ sniffio==1.3.0 typing-extensions==4.12.2 # via anyio # via llama-stack-client + # via multidict # via pydantic # via pydantic-core +yarl==1.20.0 + # via aiohttp diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh new file mode 100755 index 00000000..f6c7413b --- /dev/null +++ b/scripts/utils/upload-artifact.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -exuo pipefail + +RESPONSE=$(curl -X POST "$URL" \ + -H "Authorization: Bearer $AUTH" \ + -H "Content-Type: application/json") + +SIGNED_URL=$(echo "$RESPONSE" | jq -r '.url') + +if [[ "$SIGNED_URL" == "null" ]]; then + echo -e "\033[31mFailed to get signed URL.\033[0m" + exit 1 +fi + +UPLOAD_RESPONSE=$(tar -cz . | curl -v -X PUT \ + -H "Content-Type: application/gzip" \ + --data-binary @- "$SIGNED_URL" 2>&1) + +if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then + echo -e "\033[32mUploaded build to Stainless storage.\033[0m" + echo -e "\033[32mInstallation: pip install --pre 'https://pkg.stainless.com/s/llama-stack-client-python/$SHA'\033[0m" +else + echo -e "\033[31mFailed to upload artifact.\033[0m" + exit 1 +fi diff --git a/src/llama_stack_client/__init__.py b/src/llama_stack_client/__init__.py index 30e6e9cb..3c952a95 100644 --- a/src/llama_stack_client/__init__.py +++ b/src/llama_stack_client/__init__.py @@ -1,5 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +import typing as _t + from . 
import types from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes from ._utils import file_from_path @@ -34,7 +36,7 @@ UnprocessableEntityError, APIResponseValidationError, ) -from ._base_client import DefaultHttpxClient, DefaultAsyncHttpxClient +from ._base_client import DefaultHttpxClient, DefaultAioHttpClient, DefaultAsyncHttpxClient from ._utils._logs import setup_logging as _setup_logging __all__ = [ @@ -76,8 +78,12 @@ "DEFAULT_CONNECTION_LIMITS", "DefaultHttpxClient", "DefaultAsyncHttpxClient", + "DefaultAioHttpClient", ] +if not _t.TYPE_CHECKING: + from ._utils._resources_proxy import resources as resources + _setup_logging() # Update the __module__ attribute for exported symbols so that diff --git a/src/llama_stack_client/_base_client.py b/src/llama_stack_client/_base_client.py index a0c4ea5b..a0bbc468 100644 --- a/src/llama_stack_client/_base_client.py +++ b/src/llama_stack_client/_base_client.py @@ -960,6 +960,9 @@ def request( if self.custom_auth is not None: kwargs["auth"] = self.custom_auth + if options.follow_redirects is not None: + kwargs["follow_redirects"] = options.follow_redirects + log.debug("Sending HTTP Request: %s %s", request.method, request.url) response = None @@ -1068,7 +1071,14 @@ def _process_response( ) -> ResponseT: origin = get_origin(cast_to) or cast_to - if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse): + if ( + inspect.isclass(origin) + and issubclass(origin, BaseAPIResponse) + # we only want to actually return the custom BaseAPIResponse class if we're + # returning the raw response, or if we're not streaming SSE, as if we're streaming + # SSE then `cast_to` doesn't actively reflect the type we need to parse into + and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER))) + ): if not issubclass(origin, APIResponse): raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}") @@ -1279,6 +1289,24 @@ def __init__(self, **kwargs: Any) -> None: super().__init__(**kwargs) +try: + import httpx_aiohttp +except ImportError: + + class _DefaultAioHttpClient(httpx.AsyncClient): + def __init__(self, **_kwargs: Any) -> None: + raise RuntimeError("To use the aiohttp client you must have installed the package with the `aiohttp` extra") +else: + + class _DefaultAioHttpClient(httpx_aiohttp.HttpxAiohttpClient): # type: ignore + def __init__(self, **kwargs: Any) -> None: + kwargs.setdefault("timeout", DEFAULT_TIMEOUT) + kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS) + kwargs.setdefault("follow_redirects", True) + + super().__init__(**kwargs) + + if TYPE_CHECKING: DefaultAsyncHttpxClient = httpx.AsyncClient """An alias to `httpx.AsyncClient` that provides the same defaults that this SDK @@ -1287,8 +1315,12 @@ def __init__(self, **kwargs: Any) -> None: This is useful because overriding the `http_client` with your own instance of `httpx.AsyncClient` will result in httpx's defaults being used, not ours. 
""" + + DefaultAioHttpClient = httpx.AsyncClient + """An alias to `httpx.AsyncClient` that changes the default HTTP transport to `aiohttp`.""" else: DefaultAsyncHttpxClient = _DefaultAsyncHttpxClient + DefaultAioHttpClient = _DefaultAioHttpClient class AsyncHttpxClientWrapper(DefaultAsyncHttpxClient): @@ -1460,6 +1492,9 @@ async def request( if self.custom_auth is not None: kwargs["auth"] = self.custom_auth + if options.follow_redirects is not None: + kwargs["follow_redirects"] = options.follow_redirects + log.debug("Sending HTTP Request: %s %s", request.method, request.url) response = None @@ -1568,7 +1603,14 @@ async def _process_response( ) -> ResponseT: origin = get_origin(cast_to) or cast_to - if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse): + if ( + inspect.isclass(origin) + and issubclass(origin, BaseAPIResponse) + # we only want to actually return the custom BaseAPIResponse class if we're + # returning the raw response, or if we're not streaming SSE, as if we're streaming + # SSE then `cast_to` doesn't actively reflect the type we need to parse into + and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER))) + ): if not issubclass(origin, AsyncAPIResponse): raise TypeError(f"API Response types must subclass {AsyncAPIResponse}; Received {origin}") diff --git a/src/llama_stack_client/_models.py b/src/llama_stack_client/_models.py index 798956f1..4f214980 100644 --- a/src/llama_stack_client/_models.py +++ b/src/llama_stack_client/_models.py @@ -737,6 +737,7 @@ class FinalRequestOptionsInput(TypedDict, total=False): idempotency_key: str json_data: Body extra_json: AnyMapping + follow_redirects: bool @final @@ -750,6 +751,7 @@ class FinalRequestOptions(pydantic.BaseModel): files: Union[HttpxRequestFiles, None] = None idempotency_key: Union[str, None] = None post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven() + follow_redirects: Union[bool, None] = None # It should be noted that we cannot use `json` here as that would override # a BaseModel method in an incompatible fashion. 
diff --git a/src/llama_stack_client/_types.py b/src/llama_stack_client/_types.py index ed5b188d..63631322 100644 --- a/src/llama_stack_client/_types.py +++ b/src/llama_stack_client/_types.py @@ -100,6 +100,7 @@ class RequestOptions(TypedDict, total=False): params: Query extra_json: AnyMapping idempotency_key: str + follow_redirects: bool # Sentinel class used until PEP 0661 is accepted @@ -215,3 +216,4 @@ class _GenericAlias(Protocol): class HttpxSendArgs(TypedDict, total=False): auth: httpx.Auth + follow_redirects: bool diff --git a/src/llama_stack_client/_utils/_proxy.py b/src/llama_stack_client/_utils/_proxy.py index ffd883e9..0f239a33 100644 --- a/src/llama_stack_client/_utils/_proxy.py +++ b/src/llama_stack_client/_utils/_proxy.py @@ -46,7 +46,10 @@ def __dir__(self) -> Iterable[str]: @property # type: ignore @override def __class__(self) -> type: # pyright: ignore - proxied = self.__get_proxied__() + try: + proxied = self.__get_proxied__() + except Exception: + return type(self) if issubclass(type(proxied), LazyProxy): return type(proxied) return proxied.__class__ diff --git a/src/llama_stack_client/_utils/_resources_proxy.py b/src/llama_stack_client/_utils/_resources_proxy.py new file mode 100644 index 00000000..bf0a876a --- /dev/null +++ b/src/llama_stack_client/_utils/_resources_proxy.py @@ -0,0 +1,24 @@ +from __future__ import annotations + +from typing import Any +from typing_extensions import override + +from ._proxy import LazyProxy + + +class ResourcesProxy(LazyProxy[Any]): + """A proxy for the `llama_stack_client.resources` module. + + This is used so that we can lazily import `llama_stack_client.resources` only when + needed *and* so that users can just import `llama_stack_client` and reference `llama_stack_client.resources` + """ + + @override + def __load__(self) -> Any: + import importlib + + mod = importlib.import_module("llama_stack_client.resources") + return mod + + +resources = ResourcesProxy().__as_proxied__() diff --git a/src/llama_stack_client/_version.py b/src/llama_stack_client/_version.py index d4078242..3cd34bab 100644 --- a/src/llama_stack_client/_version.py +++ b/src/llama_stack_client/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "llama_stack_client" -__version__ = "0.0.1-alpha.0" +__version__ = "0.1.0-alpha.1" # x-release-please-version diff --git a/src/llama_stack_client/resources/agents/agents.py b/src/llama_stack_client/resources/agents/agents.py index ccc28bd5..cae1d400 100644 --- a/src/llama_stack_client/resources/agents/agents.py +++ b/src/llama_stack_client/resources/agents/agents.py @@ -44,7 +44,7 @@ def with_raw_response(self) -> AgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AgentsResourceWithRawResponse(self) @@ -53,7 +53,7 @@ def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AgentsResourceWithStreamingResponse(self) @@ -222,7 +222,7 @@ def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncAgentsResourceWithRawResponse(self) @@ -231,7 +231,7 @@ def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncAgentsResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/agents/session/session.py b/src/llama_stack_client/resources/agents/session/session.py index a88ccc2d..5298db2a 100644 --- a/src/llama_stack_client/resources/agents/session/session.py +++ b/src/llama_stack_client/resources/agents/session/session.py @@ -43,7 +43,7 @@ def with_raw_response(self) -> SessionResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return SessionResourceWithRawResponse(self) @@ -52,7 +52,7 @@ def with_streaming_response(self) -> SessionResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return SessionResourceWithStreamingResponse(self) @@ -185,7 +185,7 @@ def with_raw_response(self) -> AsyncSessionResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncSessionResourceWithRawResponse(self) @@ -194,7 +194,7 @@ def with_streaming_response(self) -> AsyncSessionResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncSessionResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/agents/session/turn/step.py b/src/llama_stack_client/resources/agents/session/turn/step.py index 18ba0cfd..cfbe35d9 100644 --- a/src/llama_stack_client/resources/agents/session/turn/step.py +++ b/src/llama_stack_client/resources/agents/session/turn/step.py @@ -26,7 +26,7 @@ def with_raw_response(self) -> StepResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return StepResourceWithRawResponse(self) @@ -35,7 +35,7 @@ def with_streaming_response(self) -> StepResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return StepResourceWithStreamingResponse(self) @@ -89,7 +89,7 @@ def with_raw_response(self) -> AsyncStepResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncStepResourceWithRawResponse(self) @@ -98,7 +98,7 @@ def with_streaming_response(self) -> AsyncStepResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncStepResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/agents/session/turn/turn.py b/src/llama_stack_client/resources/agents/session/turn/turn.py index d2423ba6..7649a060 100644 --- a/src/llama_stack_client/resources/agents/session/turn/turn.py +++ b/src/llama_stack_client/resources/agents/session/turn/turn.py @@ -45,7 +45,7 @@ def with_raw_response(self) -> TurnResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return TurnResourceWithRawResponse(self) @@ -54,7 +54,7 @@ def with_streaming_response(self) -> TurnResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return TurnResourceWithStreamingResponse(self) @@ -229,7 +229,7 @@ def with_raw_response(self) -> AsyncTurnResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncTurnResourceWithRawResponse(self) @@ -238,7 +238,7 @@ def with_streaming_response(self) -> AsyncTurnResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncTurnResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/datasetio.py b/src/llama_stack_client/resources/datasetio.py index 204c53e9..4117dfc4 100644 --- a/src/llama_stack_client/resources/datasetio.py +++ b/src/llama_stack_client/resources/datasetio.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> DatasetioResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return DatasetioResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> DatasetioResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return DatasetioResourceWithStreamingResponse(self) @@ -143,7 +143,7 @@ def with_raw_response(self) -> AsyncDatasetioResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncDatasetioResourceWithRawResponse(self) @@ -152,7 +152,7 @@ def with_streaming_response(self) -> AsyncDatasetioResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncDatasetioResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/datasets.py b/src/llama_stack_client/resources/datasets.py index 53440a24..826d7bb4 100644 --- a/src/llama_stack_client/resources/datasets.py +++ b/src/llama_stack_client/resources/datasets.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> DatasetsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return DatasetsResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> DatasetsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return DatasetsResourceWithStreamingResponse(self) @@ -204,7 +204,7 @@ def with_raw_response(self) -> AsyncDatasetsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncDatasetsResourceWithRawResponse(self) @@ -213,7 +213,7 @@ def with_streaming_response(self) -> AsyncDatasetsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncDatasetsResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/eval/benchmarks/benchmarks.py b/src/llama_stack_client/resources/eval/benchmarks/benchmarks.py index 965137e1..b753f95a 100644 --- a/src/llama_stack_client/resources/eval/benchmarks/benchmarks.py +++ b/src/llama_stack_client/resources/eval/benchmarks/benchmarks.py @@ -45,7 +45,7 @@ def with_raw_response(self) -> BenchmarksResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return BenchmarksResourceWithRawResponse(self) @@ -54,7 +54,7 @@ def with_streaming_response(self) -> BenchmarksResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return BenchmarksResourceWithStreamingResponse(self) @@ -215,7 +215,7 @@ def with_raw_response(self) -> AsyncBenchmarksResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncBenchmarksResourceWithRawResponse(self) @@ -224,7 +224,7 @@ def with_streaming_response(self) -> AsyncBenchmarksResourceWithStreamingRespons """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncBenchmarksResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/eval/benchmarks/jobs.py b/src/llama_stack_client/resources/eval/benchmarks/jobs.py index eb5118e1..24edd80a 100644 --- a/src/llama_stack_client/resources/eval/benchmarks/jobs.py +++ b/src/llama_stack_client/resources/eval/benchmarks/jobs.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> JobsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return JobsResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> JobsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return JobsResourceWithStreamingResponse(self) @@ -197,7 +197,7 @@ def with_raw_response(self) -> AsyncJobsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncJobsResourceWithRawResponse(self) @@ -206,7 +206,7 @@ def with_streaming_response(self) -> AsyncJobsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncJobsResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/eval/eval.py b/src/llama_stack_client/resources/eval/eval.py index d441e624..fb847c17 100644 --- a/src/llama_stack_client/resources/eval/eval.py +++ b/src/llama_stack_client/resources/eval/eval.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> EvalResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return EvalResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> EvalResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return EvalResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncEvalResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncEvalResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncEvalResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncEvalResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/files/files.py b/src/llama_stack_client/resources/files/files.py index 1bda9e18..9ff01adb 100644 --- a/src/llama_stack_client/resources/files/files.py +++ b/src/llama_stack_client/resources/files/files.py @@ -43,7 +43,7 @@ def with_raw_response(self) -> FilesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return FilesResourceWithRawResponse(self) @@ -52,7 +52,7 @@ def with_streaming_response(self) -> FilesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return FilesResourceWithStreamingResponse(self) @@ -260,7 +260,7 @@ def with_raw_response(self) -> AsyncFilesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncFilesResourceWithRawResponse(self) @@ -269,7 +269,7 @@ def with_streaming_response(self) -> AsyncFilesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncFilesResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/files/session.py b/src/llama_stack_client/resources/files/session.py index 0fea6271..632aa5d2 100644 --- a/src/llama_stack_client/resources/files/session.py +++ b/src/llama_stack_client/resources/files/session.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> SessionResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return SessionResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> SessionResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return SessionResourceWithStreamingResponse(self) @@ -122,7 +122,7 @@ def with_raw_response(self) -> AsyncSessionResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncSessionResourceWithRawResponse(self) @@ -131,7 +131,7 @@ def with_streaming_response(self) -> AsyncSessionResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncSessionResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/health.py b/src/llama_stack_client/resources/health.py index 4d40dbc2..1c4cdecd 100644 --- a/src/llama_stack_client/resources/health.py +++ b/src/llama_stack_client/resources/health.py @@ -26,7 +26,7 @@ def with_raw_response(self) -> HealthResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return HealthResourceWithRawResponse(self) @@ -35,7 +35,7 @@ def with_streaming_response(self) -> HealthResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return HealthResourceWithStreamingResponse(self) @@ -65,7 +65,7 @@ def with_raw_response(self) -> AsyncHealthResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncHealthResourceWithRawResponse(self) @@ -74,7 +74,7 @@ def with_streaming_response(self) -> AsyncHealthResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncHealthResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/inference.py b/src/llama_stack_client/resources/inference.py index 932e202f..cd81d83b 100644 --- a/src/llama_stack_client/resources/inference.py +++ b/src/llama_stack_client/resources/inference.py @@ -48,7 +48,7 @@ def with_raw_response(self) -> InferenceResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return InferenceResourceWithRawResponse(self) @@ -57,7 +57,7 @@ def with_streaming_response(self) -> InferenceResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return InferenceResourceWithStreamingResponse(self) @@ -380,7 +380,7 @@ def with_raw_response(self) -> AsyncInferenceResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncInferenceResourceWithRawResponse(self) @@ -389,7 +389,7 @@ def with_streaming_response(self) -> AsyncInferenceResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncInferenceResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/inspect.py b/src/llama_stack_client/resources/inspect.py index 10a83b06..72f7b374 100644 --- a/src/llama_stack_client/resources/inspect.py +++ b/src/llama_stack_client/resources/inspect.py @@ -26,7 +26,7 @@ def with_raw_response(self) -> InspectResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return InspectResourceWithRawResponse(self) @@ -35,7 +35,7 @@ def with_streaming_response(self) -> InspectResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return InspectResourceWithStreamingResponse(self) @@ -65,7 +65,7 @@ def with_raw_response(self) -> AsyncInspectResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncInspectResourceWithRawResponse(self) @@ -74,7 +74,7 @@ def with_streaming_response(self) -> AsyncInspectResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncInspectResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/models.py b/src/llama_stack_client/resources/models.py index 55ddb52a..86862a7b 100644 --- a/src/llama_stack_client/resources/models.py +++ b/src/llama_stack_client/resources/models.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> ModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return ModelsResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return ModelsResourceWithStreamingResponse(self) @@ -177,7 +177,7 @@ def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncModelsResourceWithRawResponse(self) @@ -186,7 +186,7 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncModelsResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/openai/openai.py b/src/llama_stack_client/resources/openai/openai.py index f794e84c..e1900239 100644 --- a/src/llama_stack_client/resources/openai/openai.py +++ b/src/llama_stack_client/resources/openai/openai.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> OpenAIResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return OpenAIResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return OpenAIResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncOpenAIResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncOpenAIResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/openai/v1/chat.py b/src/llama_stack_client/resources/openai/v1/chat.py index c94cfd53..00332b34 100644 --- a/src/llama_stack_client/resources/openai/v1/chat.py +++ b/src/llama_stack_client/resources/openai/v1/chat.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> ChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return ChatResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> ChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return ChatResourceWithStreamingResponse(self) @@ -188,7 +188,7 @@ def with_raw_response(self) -> AsyncChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncChatResourceWithRawResponse(self) @@ -197,7 +197,7 @@ def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncChatResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/openai/v1/responses.py b/src/llama_stack_client/resources/openai/v1/responses.py index a51a0154..052e1ed1 100644 --- a/src/llama_stack_client/resources/openai/v1/responses.py +++ b/src/llama_stack_client/resources/openai/v1/responses.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> ResponsesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return ResponsesResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> ResponsesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return ResponsesResourceWithStreamingResponse(self) @@ -141,7 +141,7 @@ def with_raw_response(self) -> AsyncResponsesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncResponsesResourceWithRawResponse(self) @@ -150,7 +150,7 @@ def with_streaming_response(self) -> AsyncResponsesResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncResponsesResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/openai/v1/v1.py b/src/llama_stack_client/resources/openai/v1/v1.py index cabf4c9d..30af7bad 100644 --- a/src/llama_stack_client/resources/openai/v1/v1.py +++ b/src/llama_stack_client/resources/openai/v1/v1.py @@ -55,7 +55,7 @@ def with_raw_response(self) -> V1ResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return V1ResourceWithRawResponse(self) @@ -64,7 +64,7 @@ def with_streaming_response(self) -> V1ResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return V1ResourceWithStreamingResponse(self) @@ -211,7 +211,7 @@ def with_raw_response(self) -> AsyncV1ResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncV1ResourceWithRawResponse(self) @@ -220,7 +220,7 @@ def with_streaming_response(self) -> AsyncV1ResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncV1ResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/post_training/job.py b/src/llama_stack_client/resources/post_training/job.py index 945d02f5..76ff806e 100644 --- a/src/llama_stack_client/resources/post_training/job.py +++ b/src/llama_stack_client/resources/post_training/job.py @@ -29,7 +29,7 @@ def with_raw_response(self) -> JobResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return JobResourceWithRawResponse(self) @@ -38,7 +38,7 @@ def with_streaming_response(self) -> JobResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return JobResourceWithStreamingResponse(self) @@ -147,7 +147,7 @@ def with_raw_response(self) -> AsyncJobResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncJobResourceWithRawResponse(self) @@ -156,7 +156,7 @@ def with_streaming_response(self) -> AsyncJobResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncJobResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/post_training/post_training.py b/src/llama_stack_client/resources/post_training/post_training.py index 91612810..1a43a68c 100644 --- a/src/llama_stack_client/resources/post_training/post_training.py +++ b/src/llama_stack_client/resources/post_training/post_training.py @@ -47,7 +47,7 @@ def with_raw_response(self) -> PostTrainingResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return PostTrainingResourceWithRawResponse(self) @@ -56,7 +56,7 @@ def with_streaming_response(self) -> PostTrainingResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return PostTrainingResourceWithStreamingResponse(self) @@ -182,7 +182,7 @@ def with_raw_response(self) -> AsyncPostTrainingResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncPostTrainingResourceWithRawResponse(self) @@ -191,7 +191,7 @@ def with_streaming_response(self) -> AsyncPostTrainingResourceWithStreamingRespo """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncPostTrainingResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/providers.py b/src/llama_stack_client/resources/providers.py index 5936d6b0..96feebcb 100644 --- a/src/llama_stack_client/resources/providers.py +++ b/src/llama_stack_client/resources/providers.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> ProvidersResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return ProvidersResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return ProvidersResourceWithStreamingResponse(self) @@ -97,7 +97,7 @@ def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncProvidersResourceWithRawResponse(self) @@ -106,7 +106,7 @@ def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncProvidersResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/safety.py b/src/llama_stack_client/resources/safety.py index 77df2f6d..c6ed1ad9 100644 --- a/src/llama_stack_client/resources/safety.py +++ b/src/llama_stack_client/resources/safety.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> SafetyResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return SafetyResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> SafetyResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return SafetyResourceWithStreamingResponse(self) @@ -91,7 +91,7 @@ def with_raw_response(self) -> AsyncSafetyResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncSafetyResourceWithRawResponse(self) @@ -100,7 +100,7 @@ def with_streaming_response(self) -> AsyncSafetyResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncSafetyResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/scoring.py b/src/llama_stack_client/resources/scoring.py index 2e7fea47..ee345778 100644 --- a/src/llama_stack_client/resources/scoring.py +++ b/src/llama_stack_client/resources/scoring.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> ScoringResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return ScoringResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> ScoringResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return ScoringResourceWithStreamingResponse(self) @@ -135,7 +135,7 @@ def with_raw_response(self) -> AsyncScoringResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncScoringResourceWithRawResponse(self) @@ -144,7 +144,7 @@ def with_streaming_response(self) -> AsyncScoringResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncScoringResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/scoring_functions.py b/src/llama_stack_client/resources/scoring_functions.py index 3acacab1..e6e910b0 100644 --- a/src/llama_stack_client/resources/scoring_functions.py +++ b/src/llama_stack_client/resources/scoring_functions.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> ScoringFunctionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return ScoringFunctionsResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> ScoringFunctionsResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return ScoringFunctionsResourceWithStreamingResponse(self) @@ -147,7 +147,7 @@ def with_raw_response(self) -> AsyncScoringFunctionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncScoringFunctionsResourceWithRawResponse(self) @@ -156,7 +156,7 @@ def with_streaming_response(self) -> AsyncScoringFunctionsResourceWithStreamingR """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncScoringFunctionsResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/shields.py b/src/llama_stack_client/resources/shields.py index 5c116691..2f75a8a9 100644 --- a/src/llama_stack_client/resources/shields.py +++ b/src/llama_stack_client/resources/shields.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> ShieldsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return ShieldsResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> ShieldsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return ShieldsResourceWithStreamingResponse(self) @@ -142,7 +142,7 @@ def with_raw_response(self) -> AsyncShieldsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncShieldsResourceWithRawResponse(self) @@ -151,7 +151,7 @@ def with_streaming_response(self) -> AsyncShieldsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncShieldsResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/synthetic_data_generation.py b/src/llama_stack_client/resources/synthetic_data_generation.py index 07109275..b05627f9 100644 --- a/src/llama_stack_client/resources/synthetic_data_generation.py +++ b/src/llama_stack_client/resources/synthetic_data_generation.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> SyntheticDataGenerationResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return SyntheticDataGenerationResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> SyntheticDataGenerationResourceWithStreamin """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return SyntheticDataGenerationResourceWithStreamingResponse(self) @@ -94,7 +94,7 @@ def with_raw_response(self) -> AsyncSyntheticDataGenerationResourceWithRawRespon This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncSyntheticDataGenerationResourceWithRawResponse(self) @@ -103,7 +103,7 @@ def with_streaming_response(self) -> AsyncSyntheticDataGenerationResourceWithStr """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncSyntheticDataGenerationResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/telemetry/spans.py b/src/llama_stack_client/resources/telemetry/spans.py index 5b84f666..9f6bb681 100644 --- a/src/llama_stack_client/resources/telemetry/spans.py +++ b/src/llama_stack_client/resources/telemetry/spans.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> SpansResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return SpansResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> SpansResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return SpansResourceWithStreamingResponse(self) @@ -174,7 +174,7 @@ def with_raw_response(self) -> AsyncSpansResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncSpansResourceWithRawResponse(self) @@ -183,7 +183,7 @@ def with_streaming_response(self) -> AsyncSpansResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncSpansResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/telemetry/telemetry.py b/src/llama_stack_client/resources/telemetry/telemetry.py index 1dc60da3..018b6b96 100644 --- a/src/llama_stack_client/resources/telemetry/telemetry.py +++ b/src/llama_stack_client/resources/telemetry/telemetry.py @@ -51,7 +51,7 @@ def with_raw_response(self) -> TelemetryResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return TelemetryResourceWithRawResponse(self) @@ -60,7 +60,7 @@ def with_streaming_response(self) -> TelemetryResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return TelemetryResourceWithStreamingResponse(self) @@ -118,7 +118,7 @@ def with_raw_response(self) -> AsyncTelemetryResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncTelemetryResourceWithRawResponse(self) @@ -127,7 +127,7 @@ def with_streaming_response(self) -> AsyncTelemetryResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncTelemetryResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/telemetry/traces.py b/src/llama_stack_client/resources/telemetry/traces.py index 17c26ba3..af5d7885 100644 --- a/src/llama_stack_client/resources/telemetry/traces.py +++ b/src/llama_stack_client/resources/telemetry/traces.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> TracesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return TracesResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> TracesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return TracesResourceWithStreamingResponse(self) @@ -160,7 +160,7 @@ def with_raw_response(self) -> AsyncTracesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncTracesResourceWithRawResponse(self) @@ -169,7 +169,7 @@ def with_streaming_response(self) -> AsyncTracesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncTracesResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/tool_runtime/rag_tool.py b/src/llama_stack_client/resources/tool_runtime/rag_tool.py index 347972ac..b9f9a0f0 100644 --- a/src/llama_stack_client/resources/tool_runtime/rag_tool.py +++ b/src/llama_stack_client/resources/tool_runtime/rag_tool.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> RagToolResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return RagToolResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> RagToolResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return RagToolResourceWithStreamingResponse(self) @@ -137,7 +137,7 @@ def with_raw_response(self) -> AsyncRagToolResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncRagToolResourceWithRawResponse(self) @@ -146,7 +146,7 @@ def with_streaming_response(self) -> AsyncRagToolResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncRagToolResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/tool_runtime/tool_runtime.py b/src/llama_stack_client/resources/tool_runtime/tool_runtime.py index 9d5afe82..92af76ef 100644 --- a/src/llama_stack_client/resources/tool_runtime/tool_runtime.py +++ b/src/llama_stack_client/resources/tool_runtime/tool_runtime.py @@ -44,7 +44,7 @@ def with_raw_response(self) -> ToolRuntimeResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return ToolRuntimeResourceWithRawResponse(self) @@ -53,7 +53,7 @@ def with_streaming_response(self) -> ToolRuntimeResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return ToolRuntimeResourceWithStreamingResponse(self) @@ -148,7 +148,7 @@ def with_raw_response(self) -> AsyncToolRuntimeResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncToolRuntimeResourceWithRawResponse(self) @@ -157,7 +157,7 @@ def with_streaming_response(self) -> AsyncToolRuntimeResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncToolRuntimeResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/toolgroups.py b/src/llama_stack_client/resources/toolgroups.py index 4fb15858..a34c5f14 100644 --- a/src/llama_stack_client/resources/toolgroups.py +++ b/src/llama_stack_client/resources/toolgroups.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> ToolgroupsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return ToolgroupsResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> ToolgroupsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return ToolgroupsResourceWithStreamingResponse(self) @@ -181,7 +181,7 @@ def with_raw_response(self) -> AsyncToolgroupsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncToolgroupsResourceWithRawResponse(self) @@ -190,7 +190,7 @@ def with_streaming_response(self) -> AsyncToolgroupsResourceWithStreamingRespons """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncToolgroupsResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/tools.py b/src/llama_stack_client/resources/tools.py index 9c161451..9cc02e12 100644 --- a/src/llama_stack_client/resources/tools.py +++ b/src/llama_stack_client/resources/tools.py @@ -29,7 +29,7 @@ def with_raw_response(self) -> ToolsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return ToolsResourceWithRawResponse(self) @@ -38,7 +38,7 @@ def with_streaming_response(self) -> ToolsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return ToolsResourceWithStreamingResponse(self) @@ -116,7 +116,7 @@ def with_raw_response(self) -> AsyncToolsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncToolsResourceWithRawResponse(self) @@ -125,7 +125,7 @@ def with_streaming_response(self) -> AsyncToolsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncToolsResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/vector_dbs.py b/src/llama_stack_client/resources/vector_dbs.py index 008102a4..dfc683a9 100644 --- a/src/llama_stack_client/resources/vector_dbs.py +++ b/src/llama_stack_client/resources/vector_dbs.py @@ -29,7 +29,7 @@ def with_raw_response(self) -> VectorDBsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return VectorDBsResourceWithRawResponse(self) @@ -38,7 +38,7 @@ def with_streaming_response(self) -> VectorDBsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return VectorDBsResourceWithStreamingResponse(self) @@ -174,7 +174,7 @@ def with_raw_response(self) -> AsyncVectorDBsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncVectorDBsResourceWithRawResponse(self) @@ -183,7 +183,7 @@ def with_streaming_response(self) -> AsyncVectorDBsResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncVectorDBsResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/vector_io.py b/src/llama_stack_client/resources/vector_io.py index cb3f5768..1bf62bc3 100644 --- a/src/llama_stack_client/resources/vector_io.py +++ b/src/llama_stack_client/resources/vector_io.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> VectorIoResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return VectorIoResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> VectorIoResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return VectorIoResourceWithStreamingResponse(self) @@ -133,7 +133,7 @@ def with_raw_response(self) -> AsyncVectorIoResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncVectorIoResourceWithRawResponse(self) @@ -142,7 +142,7 @@ def with_streaming_response(self) -> AsyncVectorIoResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncVectorIoResourceWithStreamingResponse(self) diff --git a/src/llama_stack_client/resources/version.py b/src/llama_stack_client/resources/version.py index 70234826..391b5ed2 100644 --- a/src/llama_stack_client/resources/version.py +++ b/src/llama_stack_client/resources/version.py @@ -26,7 +26,7 @@ def with_raw_response(self) -> VersionResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return VersionResourceWithRawResponse(self) @@ -35,7 +35,7 @@ def with_streaming_response(self) -> VersionResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return VersionResourceWithStreamingResponse(self) @@ -65,7 +65,7 @@ def with_raw_response(self) -> AsyncVersionResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers """ return AsyncVersionResourceWithRawResponse(self) @@ -74,7 +74,7 @@ def with_streaming_response(self) -> AsyncVersionResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/llama-stack-client-python#with_streaming_response + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response """ return AsyncVersionResourceWithStreamingResponse(self) diff --git a/tests/api_resources/agents/session/test_turn.py b/tests/api_resources/agents/session/test_turn.py index 6c89efc6..4f56eee8 100644 --- a/tests/api_resources/agents/session/test_turn.py +++ b/tests/api_resources/agents/session/test_turn.py @@ -319,7 +319,9 @@ def test_path_params_resume(self, client: LlamaStackClient) -> None: class TestAsyncTurn: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/agents/session/turn/test_step.py b/tests/api_resources/agents/session/turn/test_step.py index 7ef9e200..b847f4c3 100644 --- a/tests/api_resources/agents/session/turn/test_step.py +++ b/tests/api_resources/agents/session/turn/test_step.py @@ -97,7 +97,9 @@ def test_path_params_retrieve(self, client: LlamaStackClient) -> None: class TestAsyncStep: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/agents/test_session.py b/tests/api_resources/agents/test_session.py index 7efd0b41..79b6673e 100644 --- a/tests/api_resources/agents/test_session.py +++ b/tests/api_resources/agents/test_session.py @@ -180,7 +180,9 @@ def test_path_params_delete(self, client: LlamaStackClient) -> None: class TestAsyncSession: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/eval/benchmarks/test_jobs.py b/tests/api_resources/eval/benchmarks/test_jobs.py index 984cd704..41c7302e 100644 --- a/tests/api_resources/eval/benchmarks/test_jobs.py +++ b/tests/api_resources/eval/benchmarks/test_jobs.py @@ -312,7 +312,9 @@ def test_path_params_run(self, client: LlamaStackClient) -> None: class TestAsyncJobs: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/eval/test_benchmarks.py b/tests/api_resources/eval/test_benchmarks.py index 6cefaf46..338c31ad 100644 --- a/tests/api_resources/eval/test_benchmarks.py +++ b/tests/api_resources/eval/test_benchmarks.py @@ -292,7 +292,9 @@ def test_path_params_evaluate(self, client: LlamaStackClient) -> None: class TestAsyncBenchmarks: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) 
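The `with_raw_response` and `with_streaming_response` properties that these docstring changes keep referencing wrap any resource method on the client. A minimal sketch of how a caller might use them, assuming a reachable Llama Stack server and using `models.list()` purely as an example endpoint (the `base_url` value is illustrative):

```python
from llama_stack_client import LlamaStackClient

# Assumes a locally running Llama Stack server; adjust base_url as needed.
client = LlamaStackClient(base_url="http://localhost:8321")

# .with_raw_response returns the HTTP response object; .parse() yields the usual typed result.
raw = client.models.with_raw_response.list()
print(raw.headers.get("content-type"))
models = raw.parse()

# .with_streaming_response defers reading the body until it is iterated or parsed.
with client.models.with_streaming_response.list() as response:
    print(response.headers.get("content-type"))
    for line in response.iter_lines():
        print(line)
```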
@pytest.mark.skip() @parametrize diff --git a/tests/api_resources/files/test_session.py b/tests/api_resources/files/test_session.py index 3951efff..618e7e2a 100644 --- a/tests/api_resources/files/test_session.py +++ b/tests/api_resources/files/test_session.py @@ -107,7 +107,9 @@ def test_path_params_upload_content(self, client: LlamaStackClient) -> None: class TestAsyncSession: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/openai/test_v1.py b/tests/api_resources/openai/test_v1.py index 3df65f6e..2b7b722e 100644 --- a/tests/api_resources/openai/test_v1.py +++ b/tests/api_resources/openai/test_v1.py @@ -113,7 +113,9 @@ def test_streaming_response_list_models(self, client: LlamaStackClient) -> None: class TestAsyncV1: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/openai/v1/test_chat.py b/tests/api_resources/openai/v1/test_chat.py index b3cb940a..3baa1df8 100644 --- a/tests/api_resources/openai/v1/test_chat.py +++ b/tests/api_resources/openai/v1/test_chat.py @@ -107,7 +107,9 @@ def test_streaming_response_generate_completion(self, client: LlamaStackClient) class TestAsyncChat: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/openai/v1/test_responses.py b/tests/api_resources/openai/v1/test_responses.py index 5ae4d001..72a7cd1d 100644 --- a/tests/api_resources/openai/v1/test_responses.py +++ b/tests/api_resources/openai/v1/test_responses.py @@ -117,7 +117,9 @@ def test_path_params_retrieve(self, client: LlamaStackClient) -> None: class TestAsyncResponses: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/post_training/test_job.py b/tests/api_resources/post_training/test_job.py index 4fb2dccb..5b9ea28e 100644 --- a/tests/api_resources/post_training/test_job.py +++ b/tests/api_resources/post_training/test_job.py @@ -124,7 +124,9 @@ def test_streaming_response_retrieve_status(self, client: LlamaStackClient) -> N class TestAsyncJob: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/telemetry/test_spans.py b/tests/api_resources/telemetry/test_spans.py index 84a70ef1..5982a183 100644 --- a/tests/api_resources/telemetry/test_spans.py +++ b/tests/api_resources/telemetry/test_spans.py @@ -220,7 +220,9 @@ def 
test_streaming_response_export(self, client: LlamaStackClient) -> None: class TestAsyncSpans: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/telemetry/test_traces.py b/tests/api_resources/telemetry/test_traces.py index ec95822f..7406a801 100644 --- a/tests/api_resources/telemetry/test_traces.py +++ b/tests/api_resources/telemetry/test_traces.py @@ -158,7 +158,9 @@ def test_path_params_retrieve_trace(self, client: LlamaStackClient) -> None: class TestAsyncTraces: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index a6b1e33e..db65ed56 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -271,7 +271,9 @@ def test_path_params_list_sessions(self, client: LlamaStackClient) -> None: class TestAsyncAgents: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_datasetio.py b/tests/api_resources/test_datasetio.py index 4f38eb3b..9130b380 100644 --- a/tests/api_resources/test_datasetio.py +++ b/tests/api_resources/test_datasetio.py @@ -119,7 +119,9 @@ def test_path_params_iterate_rows(self, client: LlamaStackClient) -> None: class TestAsyncDatasetio: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_datasets.py b/tests/api_resources/test_datasets.py index 668f3e0b..30df5161 100644 --- a/tests/api_resources/test_datasets.py +++ b/tests/api_resources/test_datasets.py @@ -191,7 +191,9 @@ def test_path_params_delete(self, client: LlamaStackClient) -> None: class TestAsyncDatasets: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_files.py b/tests/api_resources/test_files.py index 6ea7652f..e28e3353 100644 --- a/tests/api_resources/test_files.py +++ b/tests/api_resources/test_files.py @@ -247,7 +247,9 @@ def test_path_params_list_in_bucket(self, client: LlamaStackClient) -> None: class TestAsyncFiles: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git 
a/tests/api_resources/test_health.py b/tests/api_resources/test_health.py index 16175743..cd820405 100644 --- a/tests/api_resources/test_health.py +++ b/tests/api_resources/test_health.py @@ -47,7 +47,9 @@ def test_streaming_response_check(self, client: LlamaStackClient) -> None: class TestAsyncHealth: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_inference.py b/tests/api_resources/test_inference.py index 47ea2b95..bcd63c8b 100644 --- a/tests/api_resources/test_inference.py +++ b/tests/api_resources/test_inference.py @@ -394,7 +394,9 @@ def test_streaming_response_embeddings(self, client: LlamaStackClient) -> None: class TestAsyncInference: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_inspect.py b/tests/api_resources/test_inspect.py index e4b7f30f..6e5d8669 100644 --- a/tests/api_resources/test_inspect.py +++ b/tests/api_resources/test_inspect.py @@ -47,7 +47,9 @@ def test_streaming_response_list_routes(self, client: LlamaStackClient) -> None: class TestAsyncInspect: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index 2017423d..d2476ca4 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -177,7 +177,9 @@ def test_path_params_delete(self, client: LlamaStackClient) -> None: class TestAsyncModels: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_post_training.py b/tests/api_resources/test_post_training.py index 0692c60a..c6441dc9 100644 --- a/tests/api_resources/test_post_training.py +++ b/tests/api_resources/test_post_training.py @@ -276,7 +276,9 @@ def test_streaming_response_optimize_preferences(self, client: LlamaStackClient) class TestAsyncPostTraining: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_providers.py b/tests/api_resources/test_providers.py index fb9c92ba..8870e4de 100644 --- a/tests/api_resources/test_providers.py +++ b/tests/api_resources/test_providers.py @@ -89,7 +89,9 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None: class TestAsyncProviders: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, 
ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_safety.py b/tests/api_resources/test_safety.py index 568733d6..49e9edfb 100644 --- a/tests/api_resources/test_safety.py +++ b/tests/api_resources/test_safety.py @@ -74,7 +74,9 @@ def test_streaming_response_run_shield(self, client: LlamaStackClient) -> None: class TestAsyncSafety: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_scoring.py b/tests/api_resources/test_scoring.py index 1e36d526..122dddab 100644 --- a/tests/api_resources/test_scoring.py +++ b/tests/api_resources/test_scoring.py @@ -141,7 +141,9 @@ def test_streaming_response_score_batch(self, client: LlamaStackClient) -> None: class TestAsyncScoring: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_scoring_functions.py b/tests/api_resources/test_scoring_functions.py index 38aa98bf..82628084 100644 --- a/tests/api_resources/test_scoring_functions.py +++ b/tests/api_resources/test_scoring_functions.py @@ -151,7 +151,9 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None: class TestAsyncScoringFunctions: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_shields.py b/tests/api_resources/test_shields.py index b352a607..7b5ae969 100644 --- a/tests/api_resources/test_shields.py +++ b/tests/api_resources/test_shields.py @@ -134,7 +134,9 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None: class TestAsyncShields: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_synthetic_data_generation.py b/tests/api_resources/test_synthetic_data_generation.py index 13374ac1..fc9c0398 100644 --- a/tests/api_resources/test_synthetic_data_generation.py +++ b/tests/api_resources/test_synthetic_data_generation.py @@ -87,7 +87,9 @@ def test_streaming_response_generate(self, client: LlamaStackClient) -> None: class TestAsyncSyntheticDataGeneration: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_telemetry.py b/tests/api_resources/test_telemetry.py 
index 688a7724..3d8abd29 100644 --- a/tests/api_resources/test_telemetry.py +++ b/tests/api_resources/test_telemetry.py @@ -93,7 +93,9 @@ def test_streaming_response_create_event(self, client: LlamaStackClient) -> None class TestAsyncTelemetry: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_tool_runtime.py b/tests/api_resources/test_tool_runtime.py index b66a378f..7bccfafe 100644 --- a/tests/api_resources/test_tool_runtime.py +++ b/tests/api_resources/test_tool_runtime.py @@ -96,7 +96,9 @@ def test_streaming_response_list_tools(self, client: LlamaStackClient) -> None: class TestAsyncToolRuntime: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_toolgroups.py b/tests/api_resources/test_toolgroups.py index ffb342f0..11f9e3ac 100644 --- a/tests/api_resources/test_toolgroups.py +++ b/tests/api_resources/test_toolgroups.py @@ -179,7 +179,9 @@ def test_path_params_unregister(self, client: LlamaStackClient) -> None: class TestAsyncToolgroups: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_tools.py b/tests/api_resources/test_tools.py index ce9e3f10..e6d8408e 100644 --- a/tests/api_resources/test_tools.py +++ b/tests/api_resources/test_tools.py @@ -97,7 +97,9 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None: class TestAsyncTools: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_vector_dbs.py b/tests/api_resources/test_vector_dbs.py index 5b7ddd2c..dfad4a3a 100644 --- a/tests/api_resources/test_vector_dbs.py +++ b/tests/api_resources/test_vector_dbs.py @@ -180,7 +180,9 @@ def test_path_params_delete(self, client: LlamaStackClient) -> None: class TestAsyncVectorDBs: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_vector_io.py b/tests/api_resources/test_vector_io.py index 4c3f0a8b..d06a3664 100644 --- a/tests/api_resources/test_vector_io.py +++ b/tests/api_resources/test_vector_io.py @@ -135,7 +135,9 @@ def test_streaming_response_query(self, client: LlamaStackClient) -> None: class TestAsyncVectorIo: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + 
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_version.py b/tests/api_resources/test_version.py index fcd975e8..3863855f 100644 --- a/tests/api_resources/test_version.py +++ b/tests/api_resources/test_version.py @@ -47,7 +47,9 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None: class TestAsyncVersion: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/tool_runtime/test_rag_tool.py b/tests/api_resources/tool_runtime/test_rag_tool.py index 6c1eda4e..2599886a 100644 --- a/tests/api_resources/tool_runtime/test_rag_tool.py +++ b/tests/api_resources/tool_runtime/test_rag_tool.py @@ -133,7 +133,9 @@ def test_streaming_response_query_context(self, client: LlamaStackClient) -> Non class TestAsyncRagTool: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/conftest.py b/tests/conftest.py index d40bf410..054ff89e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,13 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + from __future__ import annotations import os import logging from typing import TYPE_CHECKING, Iterator, AsyncIterator +import httpx import pytest from pytest_asyncio import is_async_test -from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient +from llama_stack_client import LlamaStackClient, DefaultAioHttpClient, AsyncLlamaStackClient +from llama_stack_client._utils import is_dict if TYPE_CHECKING: from _pytest.fixtures import FixtureRequest # pyright: ignore[reportPrivateImportUsage] @@ -25,6 +29,19 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None: for async_test in pytest_asyncio_tests: async_test.add_marker(session_scope_marker, append=False) + # We skip tests that use both the aiohttp client and respx_mock as respx_mock + # doesn't support custom transports. 
+ for item in items: + if "async_client" not in item.fixturenames or "respx_mock" not in item.fixturenames: + continue + + if not hasattr(item, "callspec"): + continue + + async_client_param = item.callspec.params.get("async_client") + if is_dict(async_client_param) and async_client_param.get("http_client") == "aiohttp": + item.add_marker(pytest.mark.skip(reason="aiohttp client is not compatible with respx_mock")) + base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -43,9 +60,25 @@ def client(request: FixtureRequest) -> Iterator[LlamaStackClient]: @pytest.fixture(scope="session") async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncLlamaStackClient]: - strict = getattr(request, "param", True) - if not isinstance(strict, bool): - raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}") - - async with AsyncLlamaStackClient(base_url=base_url, api_key=api_key, _strict_response_validation=strict) as client: + param = getattr(request, "param", True) + + # defaults + strict = True + http_client: None | httpx.AsyncClient = None + + if isinstance(param, bool): + strict = param + elif is_dict(param): + strict = param.get("strict", True) + assert isinstance(strict, bool) + + http_client_type = param.get("http_client", "httpx") + if http_client_type == "aiohttp": + http_client = DefaultAioHttpClient() + else: + raise TypeError(f"Unexpected fixture parameter type {type(param)}, expected bool or dict") + + async with AsyncLlamaStackClient( + base_url=base_url, api_key=api_key, _strict_response_validation=strict, http_client=http_client + ) as client: yield client diff --git a/tests/test_client.py b/tests/test_client.py index a2ddcb62..c7901e7e 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -23,17 +23,16 @@ from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient, APIResponseValidationError from llama_stack_client._types import Omit -from llama_stack_client._utils import maybe_transform from llama_stack_client._models import BaseModel, FinalRequestOptions -from llama_stack_client._constants import RAW_RESPONSE_HEADER from llama_stack_client._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError from llama_stack_client._base_client import ( DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, + DefaultHttpxClient, + DefaultAsyncHttpxClient, make_request_options, ) -from llama_stack_client.types.datasetio_append_rows_params import DatasetioAppendRowsParams from .utils import update_env @@ -192,6 +191,7 @@ def test_copy_signature(self) -> None: copy_param = copy_signature.parameters.get(name) assert copy_param is not None, f"copy() signature is missing the {name} param" + @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12") def test_copy_build_request(self) -> None: options = FinalRequestOptions(method="get", url="/foo") @@ -732,34 +732,27 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str @mock.patch("llama_stack_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: LlamaStackClient) -> None: respx_mock.post("/v1/datasetio/append-rows/dataset_id").mock( side_effect=httpx.TimeoutException("Test timeout error") ) with pytest.raises(APITimeoutError): - 
self.client.post( - "/v1/datasetio/append-rows/dataset_id", - body=cast(object, maybe_transform(dict(rows=[{"foo": True}]), DatasetioAppendRowsParams)), - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) + client.datasetio.with_streaming_response.append_rows( + dataset_id="dataset_id", rows=[{"foo": True}] + ).__enter__() assert _get_open_connections(self.client) == 0 @mock.patch("llama_stack_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: LlamaStackClient) -> None: respx_mock.post("/v1/datasetio/append-rows/dataset_id").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - self.client.post( - "/v1/datasetio/append-rows/dataset_id", - body=cast(object, maybe_transform(dict(rows=[{"foo": True}]), DatasetioAppendRowsParams)), - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) - + client.datasetio.with_streaming_response.append_rows( + dataset_id="dataset_id", rows=[{"foo": True}] + ).__enter__() assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @@ -843,6 +836,55 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert response.http_request.headers.get("x-stainless-retry-count") == "42" + def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None: + # Test that the proxy environment variables are set correctly + monkeypatch.setenv("HTTPS_PROXY", "https://example.org") + + client = DefaultHttpxClient() + + mounts = tuple(client._mounts.items()) + assert len(mounts) == 1 + assert mounts[0][0].pattern == "https://" + + @pytest.mark.filterwarnings("ignore:.*deprecated.*:DeprecationWarning") + def test_default_client_creation(self) -> None: + # Ensure that the client can be initialized without any exceptions + DefaultHttpxClient( + verify=True, + cert=None, + trust_env=True, + http1=True, + http2=False, + limits=httpx.Limits(max_connections=100, max_keepalive_connections=20), + ) + + @pytest.mark.respx(base_url=base_url) + def test_follow_redirects(self, respx_mock: MockRouter) -> None: + # Test that the default follow_redirects=True allows following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"})) + + response = self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response) + assert response.status_code == 200 + assert response.json() == {"status": "ok"} + + @pytest.mark.respx(base_url=base_url) + def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: + # Test that follow_redirects=False prevents following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + + with pytest.raises(APIStatusError) as exc_info: + self.client.post( + "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response + ) + + assert exc_info.value.response.status_code == 302 + assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected" + class TestAsyncLlamaStackClient: client = AsyncLlamaStackClient(base_url=base_url, api_key=api_key, 
_strict_response_validation=True) @@ -979,6 +1021,7 @@ def test_copy_signature(self) -> None: copy_param = copy_signature.parameters.get(name) assert copy_param is not None, f"copy() signature is missing the {name} param" + @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12") def test_copy_build_request(self) -> None: options = FinalRequestOptions(method="get", url="/foo") @@ -1523,34 +1566,31 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte @mock.patch("llama_stack_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + async def test_retrying_timeout_errors_doesnt_leak( + self, respx_mock: MockRouter, async_client: AsyncLlamaStackClient + ) -> None: respx_mock.post("/v1/datasetio/append-rows/dataset_id").mock( side_effect=httpx.TimeoutException("Test timeout error") ) with pytest.raises(APITimeoutError): - await self.client.post( - "/v1/datasetio/append-rows/dataset_id", - body=cast(object, maybe_transform(dict(rows=[{"foo": True}]), DatasetioAppendRowsParams)), - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) + await async_client.datasetio.with_streaming_response.append_rows( + dataset_id="dataset_id", rows=[{"foo": True}] + ).__aenter__() assert _get_open_connections(self.client) == 0 @mock.patch("llama_stack_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + async def test_retrying_status_errors_doesnt_leak( + self, respx_mock: MockRouter, async_client: AsyncLlamaStackClient + ) -> None: respx_mock.post("/v1/datasetio/append-rows/dataset_id").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - await self.client.post( - "/v1/datasetio/append-rows/dataset_id", - body=cast(object, maybe_transform(dict(rows=[{"foo": True}]), DatasetioAppendRowsParams)), - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) - + await async_client.datasetio.with_streaming_response.append_rows( + dataset_id="dataset_id", rows=[{"foo": True}] + ).__aenter__() assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @@ -1681,3 +1721,52 @@ async def test_main() -> None: raise AssertionError("calling get_platform using asyncify resulted in a hung process") time.sleep(0.1) + + async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None: + # Test that the proxy environment variables are set correctly + monkeypatch.setenv("HTTPS_PROXY", "https://example.org") + + client = DefaultAsyncHttpxClient() + + mounts = tuple(client._mounts.items()) + assert len(mounts) == 1 + assert mounts[0][0].pattern == "https://" + + @pytest.mark.filterwarnings("ignore:.*deprecated.*:DeprecationWarning") + async def test_default_client_creation(self) -> None: + # Ensure that the client can be initialized without any exceptions + DefaultAsyncHttpxClient( + verify=True, + cert=None, + trust_env=True, + http1=True, + http2=False, + limits=httpx.Limits(max_connections=100, max_keepalive_connections=20), + ) + + @pytest.mark.respx(base_url=base_url) + async def test_follow_redirects(self, respx_mock: MockRouter) -> None: + # Test that the default follow_redirects=True 
allows following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"})) + + response = await self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response) + assert response.status_code == 200 + assert response.json() == {"status": "ok"} + + @pytest.mark.respx(base_url=base_url) + async def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: + # Test that follow_redirects=False prevents following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + + with pytest.raises(APIStatusError) as exc_info: + await self.client.post( + "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response + ) + + assert exc_info.value.response.status_code == 302 + assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected" diff --git a/tests/test_utils/test_proxy.py b/tests/test_utils/test_proxy.py index 9cefe4ea..76a29efd 100644 --- a/tests/test_utils/test_proxy.py +++ b/tests/test_utils/test_proxy.py @@ -21,3 +21,14 @@ def test_recursive_proxy() -> None: assert dir(proxy) == [] assert type(proxy).__name__ == "RecursiveLazyProxy" assert type(operator.attrgetter("name.foo.bar.baz")(proxy)).__name__ == "RecursiveLazyProxy" + + +def test_isinstance_does_not_error() -> None: + class AlwaysErrorProxy(LazyProxy[Any]): + @override + def __load__(self) -> Any: + raise RuntimeError("Mocking missing dependency") + + proxy = AlwaysErrorProxy() + assert not isinstance(proxy, dict) + assert isinstance(proxy, LazyProxy)
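
The fixture and test changes above all support the same feature: the async client can now run over aiohttp instead of the default httpx transport, and every generated async test suite gains an "aiohttp" parametrization that exercises it. As a rough, illustrative sketch of what that enables outside the test suite (not part of this diff; the base URL, API key, and the models.list() call are placeholder assumptions, and the SDK's aiohttp extra is assumed to be installed), a caller could wire up the same DefaultAioHttpClient that the updated conftest.py fixture constructs:

    # Illustrative sketch only: run AsyncLlamaStackClient over aiohttp, mirroring the
    # updated async_client fixture when it is parametrized with {"http_client": "aiohttp"}.
    import asyncio

    from llama_stack_client import AsyncLlamaStackClient, DefaultAioHttpClient


    async def main() -> None:
        async with AsyncLlamaStackClient(
            base_url="http://127.0.0.1:4010",  # placeholder; the tests read TEST_API_BASE_URL
            api_key="My API Key",  # placeholder credential
            http_client=DefaultAioHttpClient(),  # swap the default httpx transport for aiohttp
        ) as client:
            models = await client.models.list()  # any endpoint works; models.list() is only an example
            print(models)


    asyncio.run(main())

As the comment added to conftest.py notes, tests that combine this parametrization with respx_mock are skipped, since respx cannot intercept a non-httpx transport.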