diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index b86f8a5af..b785ce7ba 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -2895,13 +2895,12 @@ client.cat.nodes({ ... }) - **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. - **`full_id` (Optional, boolean | string)**: If `true`, return the full node ID. If `false`, return the shortened node ID. - **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. -- **`h` (Optional, Enum("build" | "completion.size" | "cpu" | "disk.avail" | "disk.total" | "disk.used" | "disk.used_percent" | "fielddata.evictions" | "fielddata.memory_size" | "file_desc.current" | "file_desc.max" | "file_desc.percent" | "flush.total" | "flush.total_time" | "get.current" | "get.exists_time" | "get.exists_total" | "get.missing_time" | "get.missing_total" | "get.time" | "get.total" | "heap.current" | "heap.max" | "heap.percent" | "http_address" | "id" | "indexing.delete_current" | "indexing.delete_time" | "indexing.delete_total" | "indexing.index_current" | "indexing.index_failed" | "indexing.index_failed_due_to_version_conflict" | "indexing.index_time" | "indexing.index_total" | "ip" | "jdk" | "load_1m" | "load_5m" | "load_15m" | "mappings.total_count" | "mappings.total_estimated_overhead_in_bytes" | "master" | "merges.current" | "merges.current_docs" | "merges.current_size" | "merges.total" | "merges.total_docs" | "merges.total_size" | "merges.total_time" | "name" | "node.role" | "pid" | "port" | "query_cache.memory_size" | "query_cache.evictions" | "query_cache.hit_count" | "query_cache.miss_count" | "ram.current" | "ram.max" | "ram.percent" | "refresh.total" | "refresh.time" | "request_cache.memory_size" | "request_cache.evictions" | "request_cache.hit_count" | "request_cache.miss_count" | "script.compilations" | "script.cache_evictions" | "search.fetch_current" | "search.fetch_time" | "search.fetch_total" | "search.open_contexts" | "search.query_current" | "search.query_time" | "search.query_total" | "search.scroll_current" | "search.scroll_time" | "search.scroll_total" | "segments.count" | "segments.fixed_bitset_memory" | "segments.index_writer_memory" | "segments.memory" | "segments.version_map_memory" | "shard_stats.total_count" | "suggest.current" | "suggest.time" | "suggest.total" | "uptime" | "version") | Enum("build" | "completion.size" | "cpu" | "disk.avail" | "disk.total" | "disk.used" | "disk.used_percent" | "fielddata.evictions" | "fielddata.memory_size" | "file_desc.current" | "file_desc.max" | "file_desc.percent" | "flush.total" | "flush.total_time" | "get.current" | "get.exists_time" | "get.exists_total" | "get.missing_time" | "get.missing_total" | "get.time" | "get.total" | "heap.current" | "heap.max" | "heap.percent" | "http_address" | "id" | "indexing.delete_current" | "indexing.delete_time" | "indexing.delete_total" | "indexing.index_current" | "indexing.index_failed" | "indexing.index_failed_due_to_version_conflict" | "indexing.index_time" | "indexing.index_total" | "ip" | "jdk" | "load_1m" | "load_5m" | "load_15m" | "mappings.total_count" | "mappings.total_estimated_overhead_in_bytes" | "master" | "merges.current" | "merges.current_docs" | "merges.current_size" | "merges.total" | "merges.total_docs" | "merges.total_size" | "merges.total_time" | "name" | "node.role" | "pid" | "port" | "query_cache.memory_size" | "query_cache.evictions" | 
"query_cache.hit_count" | "query_cache.miss_count" | "ram.current" | "ram.max" | "ram.percent" | "refresh.total" | "refresh.time" | "request_cache.memory_size" | "request_cache.evictions" | "request_cache.hit_count" | "request_cache.miss_count" | "script.compilations" | "script.cache_evictions" | "search.fetch_current" | "search.fetch_time" | "search.fetch_total" | "search.open_contexts" | "search.query_current" | "search.query_time" | "search.query_total" | "search.scroll_current" | "search.scroll_time" | "search.scroll_total" | "segments.count" | "segments.fixed_bitset_memory" | "segments.index_writer_memory" | "segments.memory" | "segments.version_map_memory" | "shard_stats.total_count" | "suggest.current" | "suggest.time" | "suggest.total" | "uptime" | "version")[])**: A list of columns names to display. -It supports simple wildcards. -- **`s` (Optional, string | string[])**: A list of column names or aliases that determines the sort order. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. -- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. ## client.cat.pendingTasks [_cat.pending_tasks] Get pending task information. @@ -5439,7 +5438,7 @@ If no index is specified or the index does not have a default analyzer, the anal - **`analyzer` (Optional, string)**: The name of the analyzer that should be applied to the provided `text`. This could be a built-in analyzer, or an analyzer that’s been configured in the index. - **`attributes` (Optional, string[])**: Array of token attributes used to filter the output of the `explain` parameter. -- **`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name, unicode_set_filter } | { type, normalize_kana, normalize_kanji }[])**: Array of character filters used to preprocess characters before the tokenizer. +- **`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name } | { type, normalize_kana, normalize_kanji }[])**: Array of character filters used to preprocess characters before the tokenizer. - **`explain` (Optional, boolean)**: If `true`, the response includes token attributes and additional details. - **`field` (Optional, string)**: Field used to derive the analyzer. To use this parameter, you must specify an index. @@ -5817,16 +5816,6 @@ client.indices.deleteDataStream({ name }) - **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. 
Supports a list of values, such as `open,hidden`. -## client.indices.deleteDataStreamOptions [_indices.delete_data_stream_options] -Deletes the data stream options of the selected data streams. - -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) - -```ts -client.indices.deleteDataStreamOptions() -``` - - ## client.indices.deleteIndexTemplate [_indices.delete_index_template] Delete an index template. The provided name may contain multiple template names separated by a comma. If multiple template @@ -6284,16 +6273,6 @@ Supports a list of values, such as `open,hidden`. - **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - **`verbose` (Optional, boolean)**: Whether the maximum timestamp for each data stream should be calculated and returned. -## client.indices.getDataStreamOptions [_indices.get_data_stream_options] -Returns the data stream options of the selected data streams. - -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) - -```ts -client.indices.getDataStreamOptions() -``` - - ## client.indices.getFieldMapping [_indices.get_field_mapping] Get mapping definitions. Retrieves mapping definitions for one or more fields. @@ -6649,16 +6628,6 @@ error. - **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -## client.indices.putDataStreamOptions [_indices.put_data_stream_options] -Updates the data stream options of the selected data streams. - -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) - -```ts -client.indices.putDataStreamOptions() -``` - - ## client.indices.putIndexTemplate [_indices.put_index_template] Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. @@ -7526,17 +7495,6 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ## client.inference.chatCompletionUnified [_inference.chat_completion_unified] Perform chat completion inference -The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. -It only works with the `chat_completion` task type for `openai` and `elastic` inference services. - -IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. -For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. - -NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. -The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. -The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. 
-If you use the `openai` service or the `elastic` service, use the Chat completion inference API. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference) ```ts @@ -7636,6 +7594,11 @@ These settings are specific to the task type you specified and override the task ## client.inference.put [_inference.put] Create an inference endpoint. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. @@ -7659,6 +7622,12 @@ Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud) ```ts @@ -7684,6 +7653,12 @@ Creates an inference endpoint to perform an inference task with the `amazonbedro >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock) ```ts @@ -7706,6 +7681,12 @@ Create an Anthropic inference endpoint. 
Create an inference endpoint to perform an inference task with the `anthropic` service. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic) ```ts @@ -7729,6 +7710,12 @@ Create an Azure AI studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureaistudio) ```ts @@ -7758,6 +7745,12 @@ The list of chat completion models that you can choose from in your Azure OpenAI The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureopenai) ```ts @@ -7781,6 +7774,12 @@ Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
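+
+For example, a minimal sketch of this check with the JavaScript client (the model ID is illustrative, and the check only applies to models that run in your cluster, such as ELSER; `client` is an existing `Client` instance):
+
+```ts
+const stats = await client.ml.getTrainedModelsStats({ model_id: '.elser_model_2' })
+const alloc = stats.trained_model_stats[0]?.deployment_stats?.allocation_status
+if (alloc?.state === 'fully_allocated' &&
+    alloc.allocation_count === alloc.target_allocation_count) {
+  // The deployment is complete; the inference endpoint is ready to use.
+}
+```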
+ [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-cohere) ```ts @@ -7874,6 +7873,12 @@ Create a Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googleaistudio) ```ts @@ -7894,6 +7899,12 @@ Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googlevertexai) ```ts @@ -7930,6 +7941,12 @@ The following models are recommended for the Hugging Face service: * `multilingual-e5-base` * `multilingual-e5-small` +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face) ```ts @@ -7953,6 +7970,12 @@ Create an inference endpoint to perform an inference task with the `jinaai` serv To review the available `rerank` models, refer to . To review the available `text_embedding` models, refer to the . +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
+ [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-jinaai) ```ts @@ -7975,6 +7998,12 @@ Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral) ```ts @@ -7996,6 +8025,12 @@ Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openai) ```ts @@ -8045,6 +8080,12 @@ Create an inference endpoint to perform an inference task with the `watsonxai` s You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx) ```ts diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 394446967..5b65421f5 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -364,7 +364,7 @@ export default class Inference { } /** - * Perform chat completion inference The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. 
IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai` service or the `elastic` service, use the Chat completion inference API. + * Perform chat completion inference * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference | Elasticsearch API documentation} */ async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -643,7 +643,7 @@ export default class Inference { } /** - * Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + * Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put | Elasticsearch API documentation} */ async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -698,7 +698,7 @@ export default class Inference { } /** - * Create an AlibabaCloud AI Search inference endpoint. 
Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. + * Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud | Elasticsearch API documentation} */ async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -756,7 +756,7 @@ export default class Inference { } /** - * Create an Amazon Bedrock inference endpoint. Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. + * Create an Amazon Bedrock inference endpoint. Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock | Elasticsearch API documentation} */ async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -814,7 +814,7 @@ export default class Inference { } /** - * Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. + * Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. 
After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic | Elasticsearch API documentation} */ async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -872,7 +872,7 @@ export default class Inference { } /** - * Create an Azure AI studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service. + * Create an Azure AI studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureaistudio | Elasticsearch API documentation} */ async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -930,7 +930,7 @@ export default class Inference { } /** - * Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. The list of chat completion models that you can choose from in your Azure OpenAI deployment include: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). + * Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. 
The list of chat completion models that you can choose from in your Azure OpenAI deployment includes: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureopenai | Elasticsearch API documentation} */ async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -988,7 +988,7 @@ export default class Inference { } /** - * Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service. + * Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-cohere | Elasticsearch API documentation} */ async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1162,7 +1162,7 @@ export default class Inference { } /** - * Create an Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service. + * Create a Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googleaistudio | Elasticsearch API documentation} */ async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1220,7 +1220,7 @@ export default class Inference { } /** - * Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service. + * Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googlevertexai | Elasticsearch API documentation} */ async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1278,7 +1278,7 @@ export default class Inference { } /** - * Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has been finished. The following models are recommended for the Hugging Face service: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small` + * Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has been finished. The following models are recommended for the Hugging Face service: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small` When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face | Elasticsearch API documentation} */ async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1336,7 +1336,7 @@ export default class Inference { } /** - * Create an JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` models, refer to . To review the available `text_embedding` models, refer to the . + * Create a JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` models, refer to . To review the available `text_embedding` models, refer to the . When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-jinaai | Elasticsearch API documentation} */ async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1394,7 +1394,7 @@ export default class Inference { } /** - * Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service. + * Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral | Elasticsearch API documentation} */ async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1452,7 +1452,7 @@ export default class Inference { } /** - * Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. + * Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. 
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openai | Elasticsearch API documentation} */ async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1568,7 +1568,7 @@ export default class Inference { } /** - * Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. + * Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx | Elasticsearch API documentation} */ async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/types.ts b/src/api/types.ts index 5cc78eb73..aed848fb1 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -7072,7 +7072,6 @@ export interface AnalysisIcuNormalizationCharFilter extends AnalysisCharFilterBa type: 'icu_normalizer' mode?: AnalysisIcuNormalizationMode name?: AnalysisIcuNormalizationType - unicode_set_filter?: string } export type AnalysisIcuNormalizationMode = 'decompose' | 'compose' @@ -10118,10 +10117,6 @@ export type CatCatDfaColumn = 'assignment_explanation' | 'ae' | 'create_time' | export type CatCatDfaColumns = CatCatDfaColumn | CatCatDfaColumn[] -export type CatCatNodeColumn = 'build' | 'b' | 'completion.size' | 'cs' | 'completionSize' | 'cpu' | 'disk.avail' | 'd' | 'disk' | 'diskAvail' | 'disk.total' | 'dt' | 'diskTotal' | 'disk.used' | 'du' | 'diskUsed' | 'disk.used_percent' | 'dup' | 'diskUsedPercent' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'file_desc.current' | 'fdc' | 'fileDescriptorCurrent' | 'file_desc.max' | 'fdm' | 'fileDescriptorMax' | 'file_desc.percent' | 'fdp' | 'fileDescriptorPercent' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'heap.current' | 'hc' | 'heapCurrent' | 'heap.max' | 'hm' | 'heapMax' | 'heap.percent' | 'hp' | 'heapPercent' | 'http_address' | 'http' | 'id' | 'nodeId' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'i' | 'jdk' | 'j' | 'load_1m' | 'l' | 'load_5m' | 'l' | 'load_15m' | 'l' | 'mappings.total_count' | 'mtc' | 'mappingsTotalCount' | 'mappings.total_estimated_overhead_in_bytes' | 'mteo' | 'mappingsTotalEstimatedOverheadInBytes' | 'master' | 'm' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'merges.total' | 'mt' | 'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'name' | 'n' | 'node.role' | 'r' | 'role' | 'nodeRole' | 'pid' | 'p' | 'port' | 'po' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.hit_count' | 'qchc' | 'queryCacheHitCount' | 'query_cache.miss_count' | 'qcmc' | 'queryCacheMissCount' | 'ram.current' | 'rc' | 'ramCurrent' | 'ram.max' | 'rm' | 'ramMax' | 'ram.percent' | 'rp' | 'ramPercent' | 'refresh.total' | 'rto' | 'refreshTotal' | 'refresh.time' | 'rti' | 
'refreshTime' | 'request_cache.memory_size' | 'rcm' | 'requestCacheMemory' | 'request_cache.evictions' | 'rce' | 'requestCacheEvictions' | 'request_cache.hit_count' | 'rchc' | 'requestCacheHitCount' | 'request_cache.miss_count' | 'rcmc' | 'requestCacheMissCount' | 'script.compilations' | 'scrcc' | 'scriptCompilations' | 'script.cache_evictions' | 'scrce' | 'scriptCacheEvictions' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'shard_stats.total_count' | 'sstc' | 'shards' | 'shardStatsTotalCount' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'uptime' | 'u' | 'version' | 'v' | string - -export type CatCatNodeColumns = CatCatNodeColumn | CatCatNodeColumn[] - export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters { } @@ -13178,16 +13173,15 @@ export interface CatNodesRequest extends CatCatRequestBase { full_id?: boolean | string /** If true, the response includes information from segments that are not loaded into memory. */ include_unloaded_segments?: boolean - /** A comma-separated list of columns names to display. - * It supports simple wildcards. */ - h?: CatCatNodeColumns - /** A comma-separated list of column names or aliases that determines the sort order. + /** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names - /** The period to wait for a connection to the master node. */ + /** Period to wait for a connection to the master node. */ master_timeout?: Duration - /** The unit used to display time values. */ + /** Unit used to display time values. */ time?: TimeUnit /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { bytes?: never, full_id?: never, include_unloaded_segments?: never, h?: never, s?: never, master_timeout?: never, time?: never } @@ -22333,9 +22327,7 @@ export interface InferenceRateLimitSetting { } export interface InferenceRequestChatCompletion { - /** A list of objects representing the conversation. - * Requests should generally only add new messages from the user (role `user`). - * The other message roles (`assistant`, `system`, or `tool`) should generally only be copied from the response to a previous completion request, such that the messages array is built up throughout a conversation. */ + /** A list of objects representing the conversation. */ messages: InferenceMessage[] /** The ID of the model to use. 
*/ model?: string @@ -24553,8 +24545,7 @@ export interface MigrationPostFeatureUpgradeRequest extends RequestBase { export interface MigrationPostFeatureUpgradeResponse { accepted: boolean - features?: MigrationPostFeatureUpgradeMigrationFeature[] - reason?: string + features: MigrationPostFeatureUpgradeMigrationFeature[] } export interface MlAdaptiveAllocationsSettings {
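
With `h` and `s` on `cat.nodes` relaxed from the enum-based `CatCatNodeColumns` to plain `Names` above, column selection and sorting can be passed as ordinary strings. A usage sketch with the JavaScript client (connection details are illustrative; the column names and the `:desc` suffix follow the cat nodes documentation earlier in this diff):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Pick a few columns with `h` (simple wildcards are allowed) and
// sort the table by CPU, descending, via the `:desc` suffix.
const nodes = await client.cat.nodes({
  h: ['name', 'cpu', 'heap.percent', 'load_1m'],
  s: 'cpu:desc',
  format: 'json'
})
console.log(nodes)
```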