Skip to content

Commit 3cfeca7

Browse files
Auto-generated API code (#3033)
1 parent fe4464b commit 3cfeca7

File tree

4 files changed

+223
-7
lines changed

4 files changed

+223
-7
lines changed

docs/reference/api-reference.md

Lines changed: 28 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7849,6 +7849,31 @@ These settings are specific to the `cohere` service.
78497849
These settings are specific to the task type you specified.
78507850
- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
78517851

7852+
## client.inference.putContextualai [_inference.put_contextualai]
7853+
Create a Contextual AI inference endpoint.
7854+
7855+
Create an inference endpoint to perform an inference task with the `contextualai` service.
7856+
7857+
To review the available `rerank` models, refer to <https://docs.contextual.ai/api-reference/rerank/rerank#body-model>.
7858+
7859+
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-contextualai)
7860+
7861+
```ts
7862+
client.inference.putContextualai({ task_type, contextualai_inference_id, service, service_settings })
7863+
```
7864+
7865+
### Arguments [_arguments_inference.put_contextualai]
7866+
7867+
#### Request (object) [_request_inference.put_contextualai]
7868+
- **`task_type` (Enum("rerank"))**: The type of the inference task that the model will perform.
7869+
- **`contextualai_inference_id` (string)**: The unique identifier of the inference endpoint.
7870+
- **`service` (Enum("contextualai"))**: The type of service supported for the specified task type. In this case, `contextualai`.
7871+
- **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `contextualai` service.
7872+
- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object.
7873+
- **`task_settings` (Optional, { instruction, return_documents, top_k })**: Settings to configure the inference task.
7874+
These settings are specific to the task type you specified.
7875+
- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
7876+
78527877
## client.inference.putCustom [_inference.put_custom]
78537878
Create a custom inference endpoint.
78547879

@@ -12347,7 +12372,9 @@ client.security.getSettings({ ... })
1234712372
If no response is received before the timeout expires, the request fails and returns an error.
1234812373

1234912374
## client.security.getStats [_security.get_stats]
12350-
Get security statistics for all nodes
12375+
Get security stats.
12376+
12377+
Gather security usage statistics from all node(s) within the cluster.
1235112378

1235212379
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-stats)
1235312380

src/api/api/inference.ts

Lines changed: 82 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -223,6 +223,21 @@ export default class Inference {
223223
'timeout'
224224
]
225225
},
226+
'inference.put_contextualai': {
227+
path: [
228+
'task_type',
229+
'contextualai_inference_id'
230+
],
231+
body: [
232+
'chunking_settings',
233+
'service',
234+
'service_settings',
235+
'task_settings'
236+
],
237+
query: [
238+
'timeout'
239+
]
240+
},
226241
'inference.put_custom': {
227242
path: [
228243
'task_type',
@@ -1376,6 +1391,73 @@ export default class Inference {
13761391
return await this.transport.request({ path, method, querystring, body, meta }, options)
13771392
}
13781393

1394+
/**
1395+
* Create a Contextual AI inference endpoint. Create an inference endpoint to perform an inference task with the `contextualai` service. To review the available `rerank` models, refer to <https://docs.contextual.ai/api-reference/rerank/rerank#body-model>.
1396+
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-contextualai | Elasticsearch API documentation}
1397+
*/
1398+
async putContextualai (this: That, params: T.InferencePutContextualaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutContextualaiResponse>
1399+
async putContextualai (this: That, params: T.InferencePutContextualaiRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutContextualaiResponse, unknown>>
1400+
async putContextualai (this: That, params: T.InferencePutContextualaiRequest, options?: TransportRequestOptions): Promise<T.InferencePutContextualaiResponse>
1401+
async putContextualai (this: That, params: T.InferencePutContextualaiRequest, options?: TransportRequestOptions): Promise<any> {
1402+
const {
1403+
path: acceptedPath,
1404+
body: acceptedBody,
1405+
query: acceptedQuery
1406+
} = this[kAcceptedParams]['inference.put_contextualai']
1407+
1408+
const userQuery = params?.querystring
1409+
const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
1410+
1411+
let body: Record<string, any> | string | undefined
1412+
const userBody = params?.body
1413+
if (userBody != null) {
1414+
if (typeof userBody === 'string') {
1415+
body = userBody
1416+
} else {
1417+
body = { ...userBody }
1418+
}
1419+
}
1420+
1421+
for (const key in params) {
1422+
if (acceptedBody.includes(key)) {
1423+
body = body ?? {}
1424+
// @ts-expect-error
1425+
body[key] = params[key]
1426+
} else if (acceptedPath.includes(key)) {
1427+
continue
1428+
} else if (key !== 'body' && key !== 'querystring') {
1429+
if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
1430+
// @ts-expect-error
1431+
querystring[key] = params[key]
1432+
} else {
1433+
body = body ?? {}
1434+
// @ts-expect-error
1435+
body[key] = params[key]
1436+
}
1437+
}
1438+
}
1439+
1440+
const method = 'PUT'
1441+
const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.contextualai_inference_id.toString())}`
1442+
const meta: TransportRequestMetadata = {
1443+
name: 'inference.put_contextualai',
1444+
pathParts: {
1445+
task_type: params.task_type,
1446+
contextualai_inference_id: params.contextualai_inference_id
1447+
},
1448+
acceptedParams: [
1449+
'task_type',
1450+
'contextualai_inference_id',
1451+
'chunking_settings',
1452+
'service',
1453+
'service_settings',
1454+
'task_settings',
1455+
'timeout'
1456+
]
1457+
}
1458+
return await this.transport.request({ path, method, querystring, body, meta }, options)
1459+
}
1460+
13791461
/**
13801462
* Create a custom inference endpoint. The custom service gives more control over how to interact with external inference services that aren't explicitly supported through dedicated integrations. The custom service gives you the ability to define the headers, url, query parameters, request body, and secrets. The custom service supports the template replacement functionality, which enables you to define a template that can be replaced with the value associated with that key. Templates are portions of a string that start with `${` and end with `}`. The parameters `secret_parameters` and `task_settings` are checked for keys for template replacement. Template replacement is supported in the `request`, `headers`, `url`, and `query_parameters`. If the definition (key) is not found for a template, an error message is returned. In case of an endpoint definition like the following: ``` PUT _inference/text_embedding/test-text-embedding { "service": "custom", "service_settings": { "secret_parameters": { "api_key": "<some api key>" }, "url": "...endpoints.huggingface.cloud/v1/embeddings", "headers": { "Authorization": "Bearer ${api_key}", "Content-Type": "application/json" }, "request": "{\"input\": ${input}}", "response": { "json_parser": { "text_embeddings":"$.data[*].embedding[*]" } } } } ``` To replace `${api_key}` the `secret_parameters` and `task_settings` are checked for a key named `api_key`. > info > Templates should not be surrounded by quotes. Pre-defined templates: * `${input}` refers to the array of input strings that comes from the `input` field of the subsequent inference requests. * `${input_type}` refers to the input type translation values. * `${query}` refers to the query field used specifically for reranking tasks. * `${top_n}` refers to the `top_n` field available when performing rerank requests. * `${return_documents}` refers to the `return_documents` field available when performing rerank requests.
13811463
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-custom | Elasticsearch API documentation}

src/api/api/security.ts

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2471,13 +2471,13 @@ export default class Security {
24712471
}
24722472

24732473
/**
2474-
* Get security statistics for all nodes
2474+
* Get security stats. Gather security usage statistics from all node(s) within the cluster.
24752475
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-stats | Elasticsearch API documentation}
24762476
*/
2477-
async getStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
2478-
async getStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
2479-
async getStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
2480-
async getStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
2477+
async getStats (this: That, params?: T.SecurityGetStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetStatsResponse>
2478+
async getStats (this: That, params?: T.SecurityGetStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetStatsResponse, unknown>>
2479+
async getStats (this: That, params?: T.SecurityGetStatsRequest, options?: TransportRequestOptions): Promise<T.SecurityGetStatsResponse>
2480+
async getStats (this: That, params?: T.SecurityGetStatsRequest, options?: TransportRequestOptions): Promise<any> {
24812481
const {
24822482
path: acceptedPath
24832483
} = this[kAcceptedParams]['security.get_stats']
@@ -2500,6 +2500,7 @@ export default class Security {
25002500
if (acceptedPath.includes(key)) {
25012501
continue
25022502
} else if (key !== 'body' && key !== 'querystring') {
2503+
// @ts-expect-error
25032504
querystring[key] = params[key]
25042505
}
25052506
}

src/api/types.ts

Lines changed: 107 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4781,7 +4781,9 @@ export interface TaskFailure {
47814781
export type TaskId = string
47824782

47834783
export interface TextEmbedding {
4784-
model_id: string
4784+
/** Model ID is required for all dense_vector fields but
4785+
* may be inferred for semantic_text fields */
4786+
model_id?: string
47854787
model_text: string
47864788
}
47874789

@@ -8641,6 +8643,7 @@ export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase
86418643

86428644
export interface MappingSemanticTextIndexOptions {
86438645
dense_vector?: MappingDenseVectorIndexOptions
8646+
sparse_vector?: MappingSparseVectorIndexOptions
86448647
}
86458648

86468649
export interface MappingSemanticTextProperty {
@@ -18864,6 +18867,7 @@ export interface IlmPolicy {
1886418867
}
1886518868

1886618869
export interface IlmRolloverAction {
18870+
/** The `max_size` condition has been deprecated in 9.3.0 and `max_primary_shard_size` should be used instead */
1886718871
max_size?: ByteSize
1886818872
max_primary_shard_size?: ByteSize
1886918873
max_age?: Duration
@@ -21999,6 +22003,7 @@ export interface IndicesRolloverRolloverConditions {
2199922003
max_age_millis?: DurationValue<UnitMillis>
2200022004
min_docs?: long
2200122005
max_docs?: long
22006+
/** The `max_size` condition has been deprecated in 9.3.0 and `max_primary_shard_size` should be used instead */
2200222007
max_size?: ByteSize
2200322008
max_size_bytes?: long
2200422009
min_size?: ByteSize
@@ -23043,6 +23048,38 @@ export interface InferenceContentObject {
2304323048
type: string
2304423049
}
2304523050

23051+
export interface InferenceContextualAIServiceSettings {
23052+
/** A valid API key for your Contextual AI account.
23053+
*
23054+
* IMPORTANT: You need to provide the API key only once, during the inference model creation.
23055+
* The get inference endpoint API does not retrieve your API key.
23056+
* After creating the inference model, you cannot change the associated API key.
23057+
* If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */
23058+
api_key: string
23059+
/** The name of the model to use for the inference task.
23060+
* Refer to the Contextual AI documentation for the list of available rerank models. */
23061+
model_id: string
23062+
/** This setting helps to minimize the number of rate limit errors returned from Contextual AI.
23063+
* The `contextualai` service sets a default number of requests allowed per minute depending on the task type.
23064+
* For `rerank`, it is set to `1000`. */
23065+
rate_limit?: InferenceRateLimitSetting
23066+
}
23067+
23068+
export type InferenceContextualAIServiceType = 'contextualai'
23069+
23070+
export interface InferenceContextualAITaskSettings {
23071+
/** Instructions for the reranking model. Refer to <https://docs.contextual.ai/api-reference/rerank/rerank#body-instruction>
23072+
* Only for the `rerank` task type. */
23073+
instruction?: string
23074+
/** Whether to return the source documents in the response.
23075+
* Only for the `rerank` task type. */
23076+
return_documents?: boolean
23077+
/** The number of most relevant documents to return.
23078+
* If not specified, the reranking results of all documents will be returned.
23079+
* Only for the `rerank` task type. */
23080+
top_k?: integer
23081+
}
23082+
2304623083
export interface InferenceCustomRequestParams {
2304723084
/** The body structure of the request. It requires passing in the string-escaped result of the JSON format HTTP request body.
2304823085
* For example:
@@ -23553,6 +23590,13 @@ export interface InferenceInferenceEndpointInfoCohere extends InferenceInference
2355323590
task_type: InferenceTaskTypeCohere
2355423591
}
2355523592

23593+
export interface InferenceInferenceEndpointInfoContextualAi extends InferenceInferenceEndpoint {
23594+
/** The inference Id */
23595+
inference_id: string
23596+
/** The task type */
23597+
task_type: InferenceTaskTypeContextualAI
23598+
}
23599+
2355623600
export interface InferenceInferenceEndpointInfoCustom extends InferenceInferenceEndpoint {
2355723601
/** The inference Id */
2355823602
inference_id: string
@@ -23851,6 +23895,7 @@ export interface InferenceRateLimitSetting {
2385123895
* * `azureopenai` service and task type `text_embedding`: `1440`
2385223896
* * `azureopenai` service and task type `completion`: `120`
2385323897
* * `cohere` service: `10000`
23898+
* * `contextualai` service: `1000`
2385423899
* * `elastic` service and task type `chat_completion`: `240`
2385523900
* * `googleaistudio` service: `360`
2385623901
* * `googlevertexai` service: `30000`
@@ -23959,6 +24004,8 @@ export type InferenceTaskTypeAzureOpenAI = 'text_embedding' | 'completion'
2395924004

2396024005
export type InferenceTaskTypeCohere = 'text_embedding' | 'rerank' | 'completion'
2396124006

24007+
export type InferenceTaskTypeContextualAI = 'rerank'
24008+
2396224009
export type InferenceTaskTypeCustom = 'text_embedding' | 'sparse_embedding' | 'rerank' | 'completion'
2396324010

2396424011
export type InferenceTaskTypeDeepSeek = 'completion' | 'chat_completion'
@@ -24399,6 +24446,30 @@ export interface InferencePutCohereRequest extends RequestBase {
2439924446

2440024447
export type InferencePutCohereResponse = InferenceInferenceEndpointInfoCohere
2440124448

24449+
export interface InferencePutContextualaiRequest extends RequestBase {
24450+
/** The type of the inference task that the model will perform. */
24451+
task_type: InferenceTaskTypeContextualAI
24452+
/** The unique identifier of the inference endpoint. */
24453+
contextualai_inference_id: Id
24454+
/** Specifies the amount of time to wait for the inference endpoint to be created. */
24455+
timeout?: Duration
24456+
/** The chunking configuration object. */
24457+
chunking_settings?: InferenceInferenceChunkingSettings
24458+
/** The type of service supported for the specified task type. In this case, `contextualai`. */
24459+
service: InferenceContextualAIServiceType
24460+
/** Settings used to install the inference model. These settings are specific to the `contextualai` service. */
24461+
service_settings: InferenceContextualAIServiceSettings
24462+
/** Settings to configure the inference task.
24463+
* These settings are specific to the task type you specified. */
24464+
task_settings?: InferenceContextualAITaskSettings
24465+
/** All values in `body` will be added to the request body. */
24466+
body?: string | { [key: string]: any } & { task_type?: never, contextualai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
24467+
/** All values in `querystring` will be added to the request querystring. */
24468+
querystring?: { [key: string]: any } & { task_type?: never, contextualai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
24469+
}
24470+
24471+
export type InferencePutContextualaiResponse = InferenceInferenceEndpointInfoContextualAi
24472+
2440224473
export interface InferencePutCustomRequest extends RequestBase {
2440324474
/** The type of the inference task that the model will perform. */
2440424475
task_type: InferenceCustomTaskType
@@ -33187,6 +33258,11 @@ export interface SecurityManageUserPrivileges {
3318733258
applications: string[]
3318833259
}
3318933260

33261+
export interface SecurityNodeSecurityStats {
33262+
/** Role statistics. */
33263+
roles: SecurityRolesStats
33264+
}
33265+
3319033266
export interface SecurityRealmInfo {
3319133267
name: Name
3319233268
type: string
@@ -33352,6 +33428,11 @@ export interface SecurityRoleTemplateScript {
3335233428
options?: Record<string, string>
3335333429
}
3335433430

33431+
export interface SecurityRolesStats {
33432+
/** Document-level security (DLS) statistics. */
33433+
dls: XpackUsageSecurityRolesDls
33434+
}
33435+
3335533436
export interface SecuritySearchAccess {
3335633437
/** The document fields that the owners of the role have read access to. */
3335733438
field_security?: SecurityFieldSecurity
@@ -34194,6 +34275,18 @@ export interface SecurityGetSettingsResponse {
3419434275
'security-tokens': SecuritySecuritySettings
3419534276
}
3419634277

34278+
export interface SecurityGetStatsRequest extends RequestBase {
34279+
/** All values in `body` will be added to the request body. */
34280+
body?: string | { [key: string]: any }
34281+
/** All values in `querystring` will be added to the request querystring. */
34282+
querystring?: { [key: string]: any }
34283+
}
34284+
34285+
export interface SecurityGetStatsResponse {
34286+
/** A map of node IDs to security statistics for that node. */
34287+
nodes: Record<string, SecurityNodeSecurityStats>
34288+
}
34289+
3419734290
export type SecurityGetTokenAccessTokenGrantType = 'password' | 'client_credentials' | '_kerberos' | 'refresh_token'
3419834291

3419934292
export interface SecurityGetTokenAuthenticatedUser extends SecurityUser {
@@ -39685,9 +39778,22 @@ export interface XpackUsageSecurityRolesDls {
3968539778
}
3968639779

3968739780
export interface XpackUsageSecurityRolesDlsBitSetCache {
39781+
/** Number of entries in the cache. */
3968839782
count: integer
39783+
/** Human-readable amount of memory taken up by the cache. */
3968939784
memory?: ByteSize
39785+
/** Memory taken up by the cache in bytes. */
3969039786
memory_in_bytes: ulong
39787+
/** Total number of cache hits. */
39788+
hits: long
39789+
/** Total number of cache misses. */
39790+
misses: long
39791+
/** Total number of cache evictions. */
39792+
evictions: long
39793+
/** Total combined time spent in cache for hits in milliseconds. */
39794+
hits_time_in_millis: DurationValue<UnitMillis>
39795+
/** Total combined time spent in cache for misses in milliseconds. */
39796+
misses_time_in_millis: DurationValue<UnitMillis>
3969139797
}
3969239798

3969339799
export interface XpackUsageSecurityRolesFile {

0 commit comments

Comments
 (0)