@@ -5,6 +5,16 @@ import * as Core from '../core';
 import * as Shared from './shared';
 
 export class Safety extends APIResource {
+  /**
+   * Classifies if text and/or image inputs are potentially harmful.
+   */
+  openaiModerations(
+    body: SafetyOpenAIModerationsParams,
+    options?: Core.RequestOptions,
+  ): Core.APIPromise<OpenAIModerationsResponse> {
+    return this._client.post('/v1/openai/v1/moderations', { body, ...options });
+  }
+
   /**
    * Run a shield.
    */
@@ -63,6 +73,19 @@ export interface RunShieldResponse {
   violation?: Shared.SafetyViolation;
 }
 
+export interface SafetyOpenAIModerationsParams {
+  /**
+   * Input (or inputs) to classify. Can be a single string, an array of strings, or
+   * an array of multi-modal input objects similar to other models.
+   */
+  input: string | Array<string>;
+
+  /**
+   * The content moderation model you would like to use.
+   */
+  model?: string;
+}
+
 export interface SafetyRunShieldParams {
   /**
    * The messages to run the shield on.
@@ -84,6 +107,7 @@ export declare namespace Safety {
   export {
     type OpenAIModerationsResponse as OpenAIModerationsResponse,
     type RunShieldResponse as RunShieldResponse,
+    type SafetyOpenAIModerationsParams as SafetyOpenAIModerationsParams,
    type SafetyRunShieldParams as SafetyRunShieldParams,
   };
 }
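For context beyond the diff, a minimal usage sketch of the new method. The client class name (`LlamaStackClient`), its default export, and the `client.safety` accessor are assumptions based on common generated-SDK layout; only `openaiModerations` and its parameter shape come from the diff above.

// Usage sketch; client name and import path are assumptions, not part of this PR.
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient(); // constructor options omitted

async function main() {
  // `input` accepts a single string or an array of strings; `model` is optional.
  const moderation = await client.safety.openaiModerations({
    input: ['I want to return my order.', 'some text to classify'],
    model: 'example-moderation-model', // hypothetical model id
  });
  console.log(moderation);
}

main();

Since the method returns a `Core.APIPromise<OpenAIModerationsResponse>`, it can be awaited directly like the existing `runShield` resource method.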