Skip to content
This repository was archived by the owner on Aug 14, 2025. It is now read-only.

Commit cd17ce1

Browse files
feat(api): update via SDK Studio
1 parent 246492c commit cd17ce1

File tree

6 files changed

+45
-2
lines changed

6 files changed

+45
-2
lines changed

.stats.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
-configured_endpoints: 105
+configured_endpoints: 106
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-ab9c9bf2527d3b4179e2bc3e6495c64d43c42b2ea8dc1a55d472986e1a1430a0.yml
 openapi_spec_hash: b93c85fb747e3c29134451d2f364ce8b
-config_hash: 0394c2b14022becb0352c36afcdfbafe
+config_hash: b0cd3ed9be70b0310bc685a4014eb0a5

api.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -412,6 +412,7 @@ Types:

 Methods:

+- <code title="post /v1/openai/v1/moderations">client.safety.<a href="./src/resources/safety.ts">openaiModerations</a>({ ...params }) -> OpenAIModerationsResponse</code>
 - <code title="post /v1/safety/run-shield">client.safety.<a href="./src/resources/safety.ts">runShield</a>({ ...params }) -> RunShieldResponse</code>

 # Shields

src/index.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,7 @@ import {
   OpenAIModerationsResponse,
   RunShieldResponse,
   Safety,
+  SafetyOpenAIModerationsParams,
   SafetyRunShieldParams,
 } from './resources/safety';
 import {
@@ -606,6 +607,7 @@ export declare namespace LlamaStackClient {
   Safety as Safety,
   type OpenAIModerationsResponse as OpenAIModerationsResponse,
   type RunShieldResponse as RunShieldResponse,
+  type SafetyOpenAIModerationsParams as SafetyOpenAIModerationsParams,
   type SafetyRunShieldParams as SafetyRunShieldParams,
 };

src/resources/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -110,6 +110,7 @@ export {
   Safety,
   type OpenAIModerationsResponse,
   type RunShieldResponse,
+  type SafetyOpenAIModerationsParams,
   type SafetyRunShieldParams,
 } from './safety';
 export {

src/resources/safety.ts

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,16 @@ import * as Core from '../core';
 import * as Shared from './shared';

 export class Safety extends APIResource {
+  /**
+   * Classifies if text and/or image inputs are potentially harmful.
+   */
+  openaiModerations(
+    body: SafetyOpenAIModerationsParams,
+    options?: Core.RequestOptions,
+  ): Core.APIPromise<OpenAIModerationsResponse> {
+    return this._client.post('/v1/openai/v1/moderations', { body, ...options });
+  }
+
   /**
    * Run a shield.
    */
@@ -63,6 +73,19 @@ export interface RunShieldResponse {
   violation?: Shared.SafetyViolation;
 }

+export interface SafetyOpenAIModerationsParams {
+  /**
+   * Input (or inputs) to classify. Can be a single string, an array of strings, or
+   * an array of multi-modal input objects similar to other models.
+   */
+  input: string | Array<string>;
+
+  /**
+   * The content moderation model you would like to use.
+   */
+  model?: string;
+}
+
 export interface SafetyRunShieldParams {
   /**
    * The messages to run the shield on.
@@ -84,6 +107,7 @@ export declare namespace Safety {
   export {
     type OpenAIModerationsResponse as OpenAIModerationsResponse,
     type RunShieldResponse as RunShieldResponse,
+    type SafetyOpenAIModerationsParams as SafetyOpenAIModerationsParams,
     type SafetyRunShieldParams as SafetyRunShieldParams,
   };
 }

tests/api-resources/safety.test.ts

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,21 @@ import { Response } from 'node-fetch';
 const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });

 describe('resource safety', () => {
+  test('openaiModerations: only required params', async () => {
+    const responsePromise = client.safety.openaiModerations({ input: 'string' });
+    const rawResponse = await responsePromise.asResponse();
+    expect(rawResponse).toBeInstanceOf(Response);
+    const response = await responsePromise;
+    expect(response).not.toBeInstanceOf(Response);
+    const dataAndResponse = await responsePromise.withResponse();
+    expect(dataAndResponse.data).toBe(response);
+    expect(dataAndResponse.response).toBe(rawResponse);
+  });
+
+  test('openaiModerations: required and optional params', async () => {
+    const response = await client.safety.openaiModerations({ input: 'string', model: 'model' });
+  });
+
   test('runShield: only required params', async () => {
     const responsePromise = client.safety.runShield({
       messages: [{ content: 'string', role: 'user' }],

0 commit comments

Comments
 (0)