From db9d57416c6f82ea48b82bd13426df26417b54af Mon Sep 17 00:00:00 2001 From: kosabogi <105062005+kosabogi@users.noreply.github.com> Date: Mon, 11 Aug 2025 11:28:27 +0200 Subject: [PATCH 1/3] Adds cat-h values (#5096) * Adds cat-h values * Fixes typo * Clean up CatBase.ts quotation mark usage (cherry picked from commit 68ae2c15382b796ea853a15aae05216e2e36e07e) --- output/typescript/types.ts | 6 +- specification/cat/_types/CatBase.ts | 957 +++++++++++++++++- .../cat/aliases/CatAliasesRequest.ts | 6 +- .../cat/allocation/CatAllocationRequest.ts | 6 +- .../CatComponentTemplatesRequest.ts | 6 +- specification/cat/count/CatCountRequest.ts | 6 +- .../cat/fielddata/CatFielddataRequest.ts | 6 +- specification/cat/ml_jobs/CatJobsRequest.ts | 6 +- 8 files changed, 973 insertions(+), 26 deletions(-) diff --git a/output/typescript/types.ts b/output/typescript/types.ts index 85c2086092..7dbe59fafd 100644 --- a/output/typescript/types.ts +++ b/output/typescript/types.ts @@ -7033,7 +7033,7 @@ export type AutoscalingPutAutoscalingPolicyResponse = AcknowledgedResponseBase export type CatCatAnomalyDetectorColumn = 'assignment_explanation' | 'ae' | 'buckets.count' | 'bc' | 'bucketsCount' | 'buckets.time.exp_avg' | 'btea' | 'bucketsTimeExpAvg' | 'buckets.time.exp_avg_hour' | 'bteah' | 'bucketsTimeExpAvgHour' | 'buckets.time.max' | 'btmax' | 'bucketsTimeMax' | 'buckets.time.min' | 'btmin' | 'bucketsTimeMin' | 'buckets.time.total' | 'btt' | 'bucketsTimeTotal' | 'data.buckets' | 'db' | 'dataBuckets' | 'data.earliest_record' | 'der' | 'dataEarliestRecord' | 'data.empty_buckets' | 'deb' | 'dataEmptyBuckets' | 'data.input_bytes' | 'dib' | 'dataInputBytes' | 'data.input_fields' | 'dif' | 'dataInputFields' | 'data.input_records' | 'dir' | 'dataInputRecords' | 'data.invalid_dates' | 'did' | 'dataInvalidDates' | 'data.last' | 'dl' | 'dataLast' | 'data.last_empty_bucket' | 'dleb' | 'dataLastEmptyBucket' | 'data.last_sparse_bucket' | 'dlsb' | 'dataLastSparseBucket' | 'data.latest_record' | 'dlr' | 'dataLatestRecord' | 'data.missing_fields' | 'dmf' | 'dataMissingFields' | 'data.out_of_order_timestamps' | 'doot' | 'dataOutOfOrderTimestamps' | 'data.processed_fields' | 'dpf' | 'dataProcessedFields' | 'data.processed_records' | 'dpr' | 'dataProcessedRecords' | 'data.sparse_buckets' | 'dsb' | 'dataSparseBuckets' | 'forecasts.memory.avg' | 'fmavg' | 'forecastsMemoryAvg' | 'forecasts.memory.max' | 'fmmax' | 'forecastsMemoryMax' | 'forecasts.memory.min' | 'fmmin' | 'forecastsMemoryMin' | 'forecasts.memory.total' | 'fmt' | 'forecastsMemoryTotal' | 'forecasts.records.avg' | 'fravg' | 'forecastsRecordsAvg' | 'forecasts.records.max' | 'frmax' | 'forecastsRecordsMax' | 'forecasts.records.min' | 'frmin' | 'forecastsRecordsMin' | 'forecasts.records.total' | 'frt' | 'forecastsRecordsTotal' | 'forecasts.time.avg' | 'ftavg' | 'forecastsTimeAvg' | 'forecasts.time.max' | 'ftmax' | 'forecastsTimeMax' | 'forecasts.time.min' | 'ftmin' | 'forecastsTimeMin' | 'forecasts.time.total' | 'ftt' | 'forecastsTimeTotal' | 'forecasts.total' | 'ft' | 'forecastsTotal' | 'id' | 'model.bucket_allocation_failures' | 'mbaf' | 'modelBucketAllocationFailures' | 'model.by_fields' | 'mbf' | 'modelByFields' | 'model.bytes' | 'mb' | 'modelBytes' | 'model.bytes_exceeded' | 'mbe' | 'modelBytesExceeded' | 'model.categorization_status' | 'mcs' | 'modelCategorizationStatus' | 'model.categorized_doc_count' | 'mcdc' | 'modelCategorizedDocCount' | 'model.dead_category_count' | 'mdcc' | 'modelDeadCategoryCount' | 'model.failed_category_count' | 'mdcc' | 
'modelFailedCategoryCount' | 'model.frequent_category_count' | 'mfcc' | 'modelFrequentCategoryCount' | 'model.log_time' | 'mlt' | 'modelLogTime' | 'model.memory_limit' | 'mml' | 'modelMemoryLimit' | 'model.memory_status' | 'mms' | 'modelMemoryStatus' | 'model.over_fields' | 'mof' | 'modelOverFields' | 'model.partition_fields' | 'mpf' | 'modelPartitionFields' | 'model.rare_category_count' | 'mrcc' | 'modelRareCategoryCount' | 'model.timestamp' | 'mt' | 'modelTimestamp' | 'model.total_category_count' | 'mtcc' | 'modelTotalCategoryCount' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'opened_time' | 'ot' | 'state' | 's' -export type CatCatAnonalyDetectorColumns = CatCatAnomalyDetectorColumn | CatCatAnomalyDetectorColumn[] +export type CatCatAnomalyDetectorColumns = CatCatAnomalyDetectorColumn | CatCatAnomalyDetectorColumn[] export type CatCatDatafeedColumn = 'ae' | 'assignment_explanation' | 'bc' | 'buckets.count' | 'bucketsCount' | 'id' | 'na' | 'node.address' | 'nodeAddress' | 'ne' | 'node.ephemeral_id' | 'nodeEphemeralId' | 'ni' | 'node.id' | 'nodeId' | 'nn' | 'node.name' | 'nodeName' | 'sba' | 'search.bucket_avg' | 'searchBucketAvg' | 'sc' | 'search.count' | 'searchCount' | 'seah' | 'search.exp_avg_hour' | 'searchExpAvgHour' | 'st' | 'search.time' | 'searchTime' | 's' | 'state' @@ -7872,8 +7872,8 @@ export interface CatMlJobsRequest extends CatCatRequestBase { job_id?: Id allow_no_match?: boolean bytes?: Bytes - h?: CatCatAnonalyDetectorColumns - s?: CatCatAnonalyDetectorColumns + h?: CatCatAnomalyDetectorColumns + s?: CatCatAnomalyDetectorColumns time?: TimeUnit } diff --git a/specification/cat/_types/CatBase.ts b/specification/cat/_types/CatBase.ts index 8e5c503073..c9488613c3 100644 --- a/specification/cat/_types/CatBase.ts +++ b/specification/cat/_types/CatBase.ts @@ -399,7 +399,7 @@ export enum CatAnomalyDetectorColumn { */ state } -export type CatAnonalyDetectorColumns = +export type CatAnomalyDetectorColumns = | CatAnomalyDetectorColumn | CatAnomalyDetectorColumn[] export enum CatDatafeedColumn { @@ -683,7 +683,7 @@ export enum CatNodeColumn { * The bound HTTP address. * @aliases http */ - 'http_address', + http_address, /** * The identifier for the node. * @aliases nodeId @@ -743,17 +743,17 @@ export enum CatNodeColumn { * The most recent load average. For example: `0.22`. * @aliases l */ - 'load_1m', + load_1m, /** * The load average for the last five minutes. For example: `0.78`. * @aliases l */ - 'load_5m', + load_5m, /** * The load average for the last fifteen minutes. For example: `1.24`. * @aliases l */ - 'load_15m', + load_15m, /** * The number of mappings, including runtime and object fields. * @aliases mtc, mappingsTotalCount @@ -1011,9 +1011,477 @@ export enum CatNodeColumn { version } +/** @non_exhaustive */ +export enum CatRecoveryColumn { + /** + * The name of the index. + * @aliases i, idx + */ + index, + /** + * The name of the shard. + * @aliases s, sh + */ + shard, + /** + * The recovery time elapsed. + * @aliases t, ti + */ + time, + /** + * The type of recovery, from a peer or a snapshot. + */ + type, + /** + * The stage of the recovery. 
Returned values are: `INIT`, `INDEX` (recovery of Lucene files, either reusing local ones or copying new ones), `VERIFY_INDEX` (potentially running a check index), `TRANSLOG` (starting up the engine and replaying the translog), `FINALIZE` (performing the final task after all translog operations are done), and `DONE`. + * @aliases st + */ + stage, + /** + * The host address the index is moving from. + * @aliases shost + */ + source_host, + /** + * The node name the index is moving from. + * @aliases snode + */ + source_node, + /** + * The host address the index is moving to. + * @aliases thost + */ + target_host, + /** + * The node name the index is moving to. + * @aliases tnode + */ + target_node, + /** + * The name of the repository being used. If not relevant, 'n/a'. + * @aliases rep + */ + repository, + /** + * The name of the snapshot being used. If not relevant, 'n/a'. + * @aliases snap + */ + snapshot, + /** + * The total number of files to recover. + * @aliases f + */ + files, + /** + * The number of files currently recovered. + * @aliases fr + */ + files_recovered, + /** + * The percentage of files currently recovered. + * @aliases fp + */ + files_percent, + /** + * The total number of files. + * @aliases tf + */ + files_total, + /** + * The total number of bytes to recover. + * @aliases b + */ + bytes, + /** + * Total number of bytes currently recovered. + * @aliases br + */ + bytes_recovered, + /** + * The percentage of bytes currently recovered. + * @aliases bp + */ + bytes_percent, + /** + * The total number of bytes. + * @aliases tb + */ + bytes_total, + /** + * The total number of translog ops to recover. + * @aliases to + */ + translog_ops, + /** + * The total number of translog ops currently recovered. + * @aliases tor + */ + translog_ops_recovered, + /** + * The percentage of translog ops currently recovered. + * @aliases top + */ + translog_ops_percent, + /** + * The start time of the recovery operation. + * @aliases start + */ + start_time, + /** + * The start time of the recovery operation in epoch milliseconds. + * @aliases start_millis + */ + start_time_millis, + /** + * The end time of the recovery operation. If ongoing, '1970-01-01T00:00:00.000Z'. + * @aliases stop + */ + stop_time, + /** + * The end time of the recovery operation in epoch milliseconds. If ongoing, '0'. + * @aliases stop_millis + */ + stop_time_millis +} + +/** @non_exhaustive */ +export enum CatSegmentsColumn { + /** + * The name of the index. + * @aliases i, idx + */ + index, + /** + * The name of the shard. + * @aliases s, sh + */ + shard, + /** + * The shard type. Returned values are 'primary' or 'replica'. + * @aliases p, pr, primaryOrReplica + */ + prirep, + /** + * IP address of the segment’s shard, such as '127.0.1.1'. + */ + ip, + /** + * The name of the segment, such as '_0'. The segment name is derived from the segment generation and used internally to create file names in the directory of the shard. + */ + segment, + /** + * Generation number, such as '0'. Elasticsearch increments this generation number for each segment written. Elasticsearch then uses this number to derive the segment name. + */ + generation, + /** + * The number of documents as reported by Lucene. This excludes deleted documents and counts any [nested documents](https://www.elastic.co/docs/reference/elasticsearch/mapping-reference/nested) separately from their parents. It also excludes documents which were indexed recently and do not yet belong to a segment. 
+ */ + 'docs.count', + /** + * The number of deleted documents as reported by Lucene, which may be higher or lower than the number of delete operations you have performed. This number excludes deletes that were performed recently and do not yet belong to a segment. Deleted documents are cleaned up by the [automatic merge process](https://www.elastic.co/docs/reference/elasticsearch/index-settings/merge) if it makes sense to do so. Also, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard. + */ + 'docs.deleted', + /** + * The disk space used by the segment, such as '50kb'. + */ + size, + /** + * The bytes of segment data stored in memory for efficient search, such as '1264'. A value of '-1' indicates Elasticsearch was unable to compute this number. + */ + 'size.memory', + /** + * If 'true', the segment is synced to disk. Segments that are synced can survive a hard reboot. If 'false', the data from uncommitted segments is also stored in the transaction log so that Elasticsearch is able to replay changes on the next start. + */ + committed, + /** + * If 'true', the segment is searchable. If 'false', the segment has most likely been written to disk but needs a [refresh](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh) to be searchable. + */ + searchable, + /** + * The version of Lucene used to write the segment. + */ + version, + /** + * If 'true', the segment is stored in a compound file. This means Lucene merged all files from the segment in a single file to save file descriptors. + */ + compound, + /** + * The ID of the node, such as 'k0zy'. + */ + id +} + +/** @non_exhaustive */ +export enum CatSnapshotsColumn { + /** + * The ID of the snapshot, such as 'snap1'. + * @aliases snapshot + */ + id, + /** + * The name of the repository, such as 'repo1'. + * @aliases re, repo + */ + repository, + /** + * State of the snapshot process. Returned values are: 'FAILED': The snapshot process failed. 'INCOMPATIBLE': The snapshot process is incompatible with the current cluster version. 'IN_PROGRESS': The snapshot process started but has not completed. 'PARTIAL': The snapshot process completed with a partial success. 'SUCCESS': The snapshot process completed with a full success. + * @aliases s + */ + status, + /** + * The [unix epoch time](https://en.wikipedia.org/wiki/Unix_time) at which the snapshot process started. + * @aliases ste, startEpoch + */ + start_epoch, + /** + * 'HH:MM:SS' time at which the snapshot process started. + * @aliases sti, startTime + */ + start_time, + /** + * The [unix epoch time](https://en.wikipedia.org/wiki/Unix_time) at which the snapshot process ended. + * @aliases ete, endEpoch + */ + end_epoch, + /** + * 'HH:MM:SS' time at which the snapshot process ended. + * @aliases eti, endTime + */ + end_time, + /** + * The time it took the snapshot process to complete in [time units](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/api-conventions#time-units). + * @aliases dur + */ + duration, + /** + * The number of indices in the snapshot. + * @aliases i + */ + indices, + /** + * The number of successful shards in the snapshot. + * @aliases ss + */ + successful_shards, + /** + * The number of failed shards in the snapshot. + * @aliases fs + */ + failed_shards, + /** + * The total number of shards in the snapshot. + * @aliases ts + */ + total_shards, + /** + * The reason for any snapshot failures. 
+ * @aliases r + */ + reason +} + +/** @non_exhaustive */ +export enum CatAliasesColumn { + /** + * The name of the alias. + * @aliases a + */ + alias, + /** + * The name of the index the alias points to. + * @aliases i, idx + */ + index, + /** + * The filter applied to the alias. + * @aliases f, fi + */ + filter, + /** + * Index routing value for the alias. + * @aliases ri, routingIndex + */ + 'routing.index', + /** + * Search routing value for the alias. + * @aliases rs, routingSearch + */ + 'routing.search', + /** + * Indicates if the index is the write index for the alias. + * @aliases w, isWriteIndex + */ + is_write_index +} + +/** @non_exhaustive */ +export enum CatAllocationColumn { + /** + * The number of shards on the node. + * @aliases s + */ + shards, + /** + * The number of shards scheduled to be moved elsewhere in the cluster. + */ + 'shards.undesired', + /** + * The sum of index write load forecasts. + * @aliases wlf, writeLoadForecast + */ + 'write_load.forecast', + /** + * The sum of shard size forecasts. + * @aliases dif, diskIndicesForecast + */ + 'disk.indices.forecast', + /** + * The disk space used by Elasticsearch indices. + * @aliases di, diskIndices + */ + 'disk.indices', + /** + * The total disk space used on the node. + * @aliases du, diskUsed + */ + 'disk.used', + /** + * The available disk space on the node. + * @aliases da, diskAvail + */ + 'disk.avail', + /** + * The total disk capacity of all volumes on the node. + * @aliases dt, diskTotal + */ + 'disk.total', + /** + * The percentage of disk space used on the node. + * @aliases dp, diskPercent + */ + 'disk.percent', + /** + * The host of the node. + * @aliases h + */ + host, + /** + * The IP address of the node. + */ + ip, + /** + * The name of the node. + * @aliases n + */ + node, + /** + * The roles assigned to the node. + * @aliases r, role, nodeRole + */ + 'node.role' +} + +/** @non_exhaustive */ +export enum CatComponentColumn { + /** + * The name of the component template. + * @aliases n + */ + name, + /** + * The version number of the component template. + * @aliases v + */ + version, + /** + * The number of aliases in the component template. + * @aliases a + */ + alias_count, + /** + * The number of mappings in the component template. + * @aliases m + */ + mapping_count, + /** + * The number of settings in the component template. + * @aliases s + */ + settings_count, + /** + * The number of metadata entries in the component template. + * @aliases me + */ + metadata_count, + /** + * The index templates that include this component template. + * @aliases i + */ + included_in +} + +/** @non_exhaustive */ +export enum CatCountColumn { + /** + * The Unix epoch time in seconds since 1970-01-01 00:00:00. + * @aliases t, time + */ + epoch, + /** + * The current time in HH:MM:SS format. + * @aliases ts, hms, hhmmss + */ + timestamp, + /** + * The document count in the cluster or index. + * @aliases dc, docs.count, docsCount + */ + count +} + +/** @non_exhaustive */ +export enum CatFieldDataColumn { + /** + * The node ID. + */ + id, + /** + * The host name of the node. + * @aliases h + */ + host, + /** + * The IP address of the node. + */ + ip, + /** + * The node name. + * @aliases n + */ + node, + /** + * The field name. + * @aliases f + */ + field, + /** + * The field data usage. 
+ * @aliases s + */ + size +} + export type CatDfaColumns = CatDfaColumn | CatDfaColumn[] export type CatDatafeedColumns = CatDatafeedColumn | CatDatafeedColumn[] export type CatNodeColumns = CatNodeColumn | CatNodeColumn[] +export type CatRecoveryColumns = CatRecoveryColumn | CatRecoveryColumn[] +export type CatSegmentsColumns = CatSegmentsColumn | CatSegmentsColumn[] +export type CatSnapshotsColumns = CatSnapshotsColumn | CatSnapshotsColumn[] +export type CatAliasesColumns = CatAliasesColumn | CatAliasesColumn[] +export type CatAllocationColumns = CatAllocationColumn | CatAllocationColumn[] +export type CatComponentColumns = CatComponentColumn | CatComponentColumn[] +export type CatCountColumns = CatCountColumn | CatCountColumn[] +export type CatFieldDataColumns = CatFieldDataColumn | CatFieldDataColumn[] export enum CatTrainedModelsColumn { /** @@ -1300,3 +1768,482 @@ export enum CatTransformColumn { version } export type CatTransformColumns = CatTransformColumn | CatTransformColumn[] + +/** @non_exhaustive */ +export enum CatShardColumn { + /** + * Size of completion. For example: `0b`. + * @aliases cs, completionSize + */ + 'completion.size', + /** + * Disk space used by the shard’s dataset, which may or may not be the size on + * disk, but includes space used by the shard on object storage. Reported as a size value for example: `5kb`. + */ + 'dataset.size', + /** + * Number of indexed dense vectors. + * @aliases dvc, denseVectorCount + */ + 'dense_vector.value_count', + /** + * Number of documents in shard, for example: `25`. + * @aliases d, dc + */ + docs, + /** + * Fielddata cache evictions, for example: `0`. + * @aliases fe, fielddataEvictions + */ + 'fielddata.evictions', + /** + * Used fielddata cache memory, for example: `0b`. + * @aliases fm, fielddataMemory + */ + 'fielddata.memory_size', + /** + * Number of flushes, for example: `1`. + * @aliases ft, flushTotal + */ + 'flush.total', + /** + * Time spent in flush, for example: `1`. + * @aliases ftt, flushTotalTime + */ + 'flush.total_time', + /** + * Number of current get operations, for example: `0`. + * @aliases gc, getCurrent + */ + 'get.current', + /** + * Time spent in successful gets, for example: `14ms`. + * @aliases geti, getExistsTime + */ + 'get.exists_time', + /** + * Number of successful get operations, for example: `2`. + * @aliases geto, getExistsTotal + */ + 'get.exists_total', + /** + * Time spent in failed gets, for example: `0s`. + * @aliases gmti, getMissingTime + */ + 'get.missing_time', + /** + * Number of failed get operations, for example: `1`. + * @aliases gmto, getMissingTotal + */ + 'get.missing_total', + /** + * Time spent in get, for example: `14ms`. + * @aliases gti, getTime + */ + 'get.time', + /** + * Number of get operations, for example: `2`. + * @aliases gto, getTotal + */ + 'get.total', + /** + * ID of the node, for example: `k0zy`. + */ + id, + /** + * Name of the index. + * @aliases i, idx + */ + index, + /** + * Number of current deletion operations, for example: `0`. + * @aliases idc, indexingDeleteCurrent + */ + 'indexing.delete_current', + /** + * Time spent in deletions, for example: `2ms`. + * @aliases idti, indexingDeleteTime + */ + 'indexing.delete_time', + /** + * Number of deletion operations, for example: `2`. + * @aliases idto, indexingDeleteTotal + */ + 'indexing.delete_total', + /** + * Number of current indexing operations, for example: `0`. 
+ * @aliases iic, indexingIndexCurrent + */ + 'indexing.index_current', + /** + * Number of failed indexing operations due to version conflict, for example: `0`. + * @aliases iifvc, indexingIndexFailedDueToVersionConflict + */ + 'indexing.index_failed_due_to_version_conflict', + /** + * Number of failed indexing operations, for example: `0`. + * @aliases iif, indexingIndexFailed + */ + 'indexing.index_failed', + /** + * Time spent in indexing, for example: `134ms`. + * @aliases iiti, indexingIndexTime + */ + 'indexing.index_time', + /** + * Number of indexing operations, for example: `1`. + * @aliases iito, indexingIndexTotal + */ + 'indexing.index_total', + /** + * IP address of the node, for example: `127.0.1.1`. + */ + ip, + /** + * Number of current merge operations, for example: `0`. + * @aliases mc, mergesCurrent + */ + 'merges.current', + /** + * Number of current merging documents, for example: `0`. + * @aliases mcd, mergesCurrentDocs + */ + 'merges.current_docs', + /** + * Size of current merges, for example: `0b`. + * @aliases mcs, mergesCurrentSize + */ + 'merges.current_size', + /** + * Number of completed merge operations, for example: `0`. + * @aliases mt, mergesTotal + */ + 'merges.total', + /** + * Number of merged documents, for example: `0`. + * @aliases mtd, mergesTotalDocs + */ + 'merges.total_docs', + /** + * Size of current merges, for example: `0b`. + * @aliases mts, mergesTotalSize + */ + 'merges.total_size', + /** + * Time spent merging documents, for example: `0s`. + * @aliases mtt, mergesTotalTime + */ + 'merges.total_time', + /** + * Node name, for example: `I8hydUG`. + * @aliases n + */ + node, + /** + * Shard type. Returned values are `primary` or `replica`. + * @aliases p, pr, primaryOrReplica + */ + prirep, + /** + * Query cache evictions, for example: `0`. + * @aliases qce, queryCacheEvictions + */ + 'query_cache.evictions', + /** + * Used query cache memory, for example: `0b`. + * @aliases qcm, queryCacheMemory + */ + 'query_cache.memory_size', + /** + * Type of recovery source. + * @aliases rs + */ + 'recoverysource.type', + /** + * Time spent in refreshes, for example: `91ms`. + * @aliases rti, refreshTime + */ + 'refresh.time', + /** + * Number of refreshes, for example: `16`. + * @aliases rto, refreshTotal + */ + 'refresh.total', + /** + * Current fetch phase operations, for example: `0`. + * @aliases sfc, searchFetchCurrent + */ + 'search.fetch_current', + /** + * Time spent in fetch phase, for example: `37ms`. + * @aliases sfti, searchFetchTime + */ + 'search.fetch_time', + /** + * Number of fetch operations, for example: `7`. + * @aliases sfto, searchFetchTotal + */ + 'search.fetch_total', + /** + * Open search contexts, for example: `0`. + * @aliases so, searchOpenContexts + */ + 'search.open_contexts', + /** + * Current query phase operations, for example: `0`. + * @aliases sqc, searchQueryCurrent + */ + 'search.query_current', + /** + * Time spent in query phase, for example: `43ms`. + * @aliases sqti, searchQueryTime + */ + 'search.query_time', + /** + * Number of query operations, for example: `9`. + * @aliases sqto, searchQueryTotal + */ + 'search.query_total', + /** + * Open scroll contexts, for example: `2`. + * @aliases scc, searchScrollCurrent + */ + 'search.scroll_current', + /** + * Time scroll contexts held open, for example: `2m`. + * @aliases scti, searchScrollTime + */ + 'search.scroll_time', + /** + * Completed scroll contexts, for example: `1`. 
+ * @aliases scto, searchScrollTotal + */ + 'search.scroll_total', + /** + * Number of segments, for example: `4`. + * @aliases sc, segmentsCount + */ + 'segments.count', + /** + * Memory used by fixed bit sets for nested object field types and type filters for types referred in join fields, for example: `1.0kb`. + * @aliases sfbm, fixedBitsetMemory + */ + 'segments.fixed_bitset_memory', + /** + * Memory used by index writer, for example: `18mb`. + * @aliases siwm, segmentsIndexWriterMemory + */ + 'segments.index_writer_memory', + /** + * Memory used by segments, for example: `1.4kb`. + * @aliases sm, segmentsMemory + */ + 'segments.memory', + /** + * Memory used by version map, for example: `1.0kb`. + * @aliases svmm, segmentsVersionMapMemory + */ + 'segments.version_map_memory', + /** + * Global checkpoint. + * @aliases sqg, globalCheckpoint + */ + 'seq_no.global_checkpoint', + /** + * Local checkpoint. + * @aliases sql, localCheckpoint + */ + 'seq_no.local_checkpoint', + /** + * Maximum sequence number. + * @aliases sqm, maxSeqNo + */ + 'seq_no.max', + /** + * Name of the shard. + * @aliases s, sh + */ + shard, + /** + * Number of indexed [sparse vectors](https://www.elastic.co/docs/reference/elasticsearch/mapping-reference/sparse-vector). + * @aliases svc, sparseVectorCount + */ + 'sparse_vector.value_count', + /** + * State of the shard. Returned values are: + * * `INITIALIZING`: The shard is recovering from a peer shard or gateway. + * * `RELOCATING`: The shard is relocating. + * * `STARTED`: The shard has started. + * * `UNASSIGNED`: The shard is not assigned to any node. + * @aliases st + */ + state, + /** + * Disk space used by the shard, for example: `5kb`. + * @aliases sto + */ + store, + /** + * Number of current suggest operations, for example: `0`. + * @aliases suc, suggestCurrent + */ + 'suggest.current', + /** + * Time spent in suggest, for example: `0`. + * @aliases suti, suggestTime + */ + 'suggest.time', + /** + * Number of suggest operations, for example: `0`. + * @aliases suto, suggestTotal + */ + 'suggest.total', + /** + * Sync ID of the shard. + */ + sync_id, + /** + * Time at which the shard became unassigned in [Coordinated Universal Time (UTC)](https://en.wikipedia.org/wiki/List_of_UTC_offsets). + * @aliases ua + */ + 'unassigned.at', + /** + * Details about why the shard became unassigned. This does not explain why the shard is currently unassigned. To understand why a shard + * is not assigned, use the [Cluster allocation explain](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain) API. + * @aliases ud + */ + 'unassigned.details', + /** + * Time at which the shard was requested to be unassigned in [Coordinated Universal Time (UTC)](https://en.wikipedia.org/wiki/List_of_UTC_offsets). + * @aliases uf + */ + 'unassigned.for', + /** + * Indicates the reason for the last change to the state of this unassigned shard. This does not explain why the shard is currently unassigned. + * To understand why a shard is not assigned, use the [Cluster allocation explain](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain) API. Returned values include: + * + * * `ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the shard. + * * `CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery. + * * `DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling index. 
+ * * `EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed index. + * * `FORCED_EMPTY_PRIMARY`: The shard’s allocation was last modified by forcing an empty primary using the Cluster reroute API. + * * `INDEX_CLOSED`: Unassigned because the index was closed. + * * `INDEX_CREATED`: Unassigned as a result of an API creation of an index. + * * `INDEX_REOPENED`: Unassigned as a result of opening a closed index. + * * `MANUAL_ALLOCATION`: The shard’s allocation was last modified by the Cluster reroute API. + * * `NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index. + * * `NODE_LEFT`: Unassigned as a result of the node hosting it leaving the cluster. + * * `NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was registered as restarting using the Node shutdown API. + * * `PRIMARY_FAILED`: The shard was initializing as a replica, but the primary shard failed before the initialization completed. + * * `REALLOCATED_REPLICA`: A better replica location is identified and causes the existing replica allocation to be cancelled. + * * `REINITIALIZED`: When a shard moves from started back to initializing. + * * `REPLICA_ADDED`: Unassigned as a result of explicit addition of a replica. + * * `REROUTE_CANCELLED`: Unassigned as a result of explicit cancel reroute command. + * @aliases ur + */ + 'unassigned.reason' +} +export type CatShardColumns = CatShardColumn | CatShardColumn[] + +/** @non_exhaustive */ +export enum CatThreadPoolColumn { + /** + * Number of active threads in the current thread pool. + * @aliases a + */ + active, + /** + * Number of tasks completed by the thread pool executor. + * @aliases c + */ + completed, + /** + * Configured core number of active threads allowed in the current thread pool. + * @aliases cr + */ + core, + /** + * Ephemeral node ID. + * @aliases eid + */ + ephemeral_id, + /** + * Hostname for the current node. + * @aliases h + */ + host, + /** + * IP address for the current node. + * @aliases i + */ + ip, + /** + * Configured keep alive time for threads. + * @aliases k + */ + keep_alive, + /** + * Highest number of active threads in the current thread pool. + * @aliases l + */ + largest, + /** + * Configured maximum number of active threads allowed in the current thread pool. + * @aliases mx + */ + max, + /** + * Name of the thread pool, such as `analyze` or `generic`. + */ + name, + /** + * ID of the node, such as `k0zy`. + * @aliases id + */ + node_id, + /** + * Node name, such as `I8hydUG`. + */ + node_name, + /** + * Process ID of the running node. + * @aliases p + */ + pid, + /** + * Number of threads in the current thread pool. + * @aliases psz + */ + pool_size, + /** + * Bound transport port for the current node. + * @aliases po + */ + port, + /** + * Number of tasks in the queue for the current thread pool. + * @aliases q + */ + queue, + /** + * Maximum number of tasks permitted in the queue for the current thread pool. + * @aliases qs + */ + queue_size, + /** + * Number of tasks rejected by the thread pool executor. + * @aliases r + */ + rejected, + /** + * Configured fixed number of active threads allowed in the current thread pool. + * @aliases sz + */ + size, + /** + * Type of thread pool. Returned values are `fixed`, `fixed_auto_queue_size`, `direct`, or `scaling`. 
+ * @aliases t + */ + type +} +export type CatThreadPoolColumns = CatThreadPoolColumn | CatThreadPoolColumn[] diff --git a/specification/cat/aliases/CatAliasesRequest.ts b/specification/cat/aliases/CatAliasesRequest.ts index acf8c877d6..94a2a34803 100644 --- a/specification/cat/aliases/CatAliasesRequest.ts +++ b/specification/cat/aliases/CatAliasesRequest.ts @@ -17,9 +17,9 @@ * under the License. */ -import { CatRequestBase } from '@cat/_types/CatBase' import { ExpandWildcards, Names } from '@_types/common' import { Duration } from '@_types/Time' +import { CatAliasesColumns, CatRequestBase } from '@cat/_types/CatBase' /** * Get aliases. @@ -51,9 +51,9 @@ export interface Request extends CatRequestBase { } query_parameters: { /** - * List of columns to appear in the response. Supports simple wildcards. + * A comma-separated list of columns names to display. It supports simple wildcards. */ - h?: Names + h?: CatAliasesColumns /** * List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` diff --git a/specification/cat/allocation/CatAllocationRequest.ts b/specification/cat/allocation/CatAllocationRequest.ts index fdb909865e..0bec053b71 100644 --- a/specification/cat/allocation/CatAllocationRequest.ts +++ b/specification/cat/allocation/CatAllocationRequest.ts @@ -17,9 +17,9 @@ * under the License. */ -import { CatRequestBase } from '@cat/_types/CatBase' import { Bytes, Names, NodeIds } from '@_types/common' import { Duration } from '@_types/Time' +import { CatAllocationColumns, CatRequestBase } from '@cat/_types/CatBase' /** * Get shard allocation information. @@ -52,9 +52,9 @@ export interface Request extends CatRequestBase { /** The unit used to display byte values. */ bytes?: Bytes /** - * List of columns to appear in the response. Supports simple wildcards. + * A comma-separated list of columns names to display. It supports simple wildcards. */ - h?: Names + h?: CatAllocationColumns /** * List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` diff --git a/specification/cat/component_templates/CatComponentTemplatesRequest.ts b/specification/cat/component_templates/CatComponentTemplatesRequest.ts index b92a92fd37..7f131988b7 100644 --- a/specification/cat/component_templates/CatComponentTemplatesRequest.ts +++ b/specification/cat/component_templates/CatComponentTemplatesRequest.ts @@ -17,9 +17,9 @@ * under the License. */ -import { CatRequestBase } from '@cat/_types/CatBase' import { Names } from '@_types/common' import { Duration } from '@_types/Time' +import { CatComponentColumns, CatRequestBase } from '@cat/_types/CatBase' /** * Get component templates. @@ -55,9 +55,9 @@ export interface Request extends CatRequestBase { } query_parameters: { /** - * List of columns to appear in the response. Supports simple wildcards. + * A comma-separated list of columns names to display. It supports simple wildcards. */ - h?: Names + h?: CatComponentColumns /** * List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` diff --git a/specification/cat/count/CatCountRequest.ts b/specification/cat/count/CatCountRequest.ts index 3cd75d313f..a1b3b5f087 100644 --- a/specification/cat/count/CatCountRequest.ts +++ b/specification/cat/count/CatCountRequest.ts @@ -17,8 +17,8 @@ * under the License. 
*/ -import { CatRequestBase } from '@cat/_types/CatBase' import { Indices, Names } from '@_types/common' +import { CatCountColumns, CatRequestBase } from '@cat/_types/CatBase' /** * Get a document count. @@ -55,9 +55,9 @@ export interface Request extends CatRequestBase { } query_parameters: { /** - * List of columns to appear in the response. Supports simple wildcards. + * A comma-separated list of columns names to display. It supports simple wildcards. */ - h?: Names + h?: CatCountColumns /** * List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` diff --git a/specification/cat/fielddata/CatFielddataRequest.ts b/specification/cat/fielddata/CatFielddataRequest.ts index cd6cd7cf0b..c88438f3a7 100644 --- a/specification/cat/fielddata/CatFielddataRequest.ts +++ b/specification/cat/fielddata/CatFielddataRequest.ts @@ -17,8 +17,8 @@ * under the License. */ -import { CatRequestBase } from '@cat/_types/CatBase' import { Bytes, Fields, Names } from '@_types/common' +import { CatFieldDataColumns, CatRequestBase } from '@cat/_types/CatBase' /** * Get field data cache information. @@ -57,9 +57,9 @@ export interface Request extends CatRequestBase { /** Comma-separated list of fields used to limit returned information. */ fields?: Fields /** - * List of columns to appear in the response. Supports simple wildcards. + * A comma-separated list of columns names to display. It supports simple wildcards. */ - h?: Names + h?: CatFieldDataColumns /** * List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` diff --git a/specification/cat/ml_jobs/CatJobsRequest.ts b/specification/cat/ml_jobs/CatJobsRequest.ts index f5a4f5554b..e9c0eb1344 100644 --- a/specification/cat/ml_jobs/CatJobsRequest.ts +++ b/specification/cat/ml_jobs/CatJobsRequest.ts @@ -17,9 +17,9 @@ * under the License. */ -import { CatAnonalyDetectorColumns, CatRequestBase } from '@cat/_types/CatBase' import { Bytes, Id } from '@_types/common' import { TimeUnit } from '@_types/Time' +import { CatAnomalyDetectorColumns, CatRequestBase } from '@cat/_types/CatBase' /** * Get anomaly detection jobs. @@ -78,9 +78,9 @@ export interface Request extends CatRequestBase { * Comma-separated list of column names to display. * @server_default buckets.count,data.processed_records,forecasts.total,id,model.bytes,model.memory_status,state */ - h?: CatAnonalyDetectorColumns + h?: CatAnomalyDetectorColumns /** Comma-separated list of column names or column aliases used to sort the response. */ - s?: CatAnonalyDetectorColumns + s?: CatAnomalyDetectorColumns /** * The unit used to display time values. 
*/ From 107b500886426823ccf9fa871d60ea9e327eb7ac Mon Sep 17 00:00:00 2001 From: kosabogi Date: Mon, 18 Aug 2025 10:33:22 +0200 Subject: [PATCH 2/3] Trigger CI From f734d4a3ac41371ca92abd15154f2cf313bc9c64 Mon Sep 17 00:00:00 2001 From: kosabogi Date: Mon, 18 Aug 2025 10:36:06 +0200 Subject: [PATCH 3/3] Code style fix --- specification/cat/aliases/CatAliasesRequest.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/specification/cat/aliases/CatAliasesRequest.ts b/specification/cat/aliases/CatAliasesRequest.ts index 548fd3cf91..f68bd1a973 100644 --- a/specification/cat/aliases/CatAliasesRequest.ts +++ b/specification/cat/aliases/CatAliasesRequest.ts @@ -18,7 +18,6 @@ */ import { ExpandWildcards, Names } from '@_types/common' -import { Duration } from '@_types/Time' import { CatAliasesColumns, CatRequestBase } from '@cat/_types/CatBase' /**
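Usage sketch (not part of the patch): the snippet below illustrates how the typed `h` parameter introduced here is intended to surface in a generated client. It assumes the @elastic/elasticsearch JavaScript client and a local, unsecured cluster at http://localhost:9200, purely for illustration; the column names come from the CatAliasesColumn and CatAllocationColumn enums added in CatBase.ts, and because the enums are marked @non_exhaustive, values outside the list should still be accepted at the REST layer.

```ts
// Minimal sketch, assuming the @elastic/elasticsearch client and a local cluster.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

async function run() {
  // `h` now takes the column names (or aliases) defined by CatAliasesColumn
  // instead of an untyped list of names.
  const aliases = await client.cat.aliases({
    h: ['alias', 'index', 'is_write_index'],
    format: 'json'
  })
  console.log(aliases)

  // Same idea for _cat/allocation, using CatAllocationColumn values.
  const allocation = await client.cat.allocation({
    h: ['node', 'shards', 'disk.percent'],
    format: 'json'
  })
  console.log(allocation)
}

run().catch(console.error)
```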