diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index d5985ca05..e63c1dc77 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -872,7 +872,7 @@ async def count( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns number of documents matching a query. + Count search results. Get the number of documents matching a query. ``_ @@ -2274,7 +2274,26 @@ async def health_report( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the health of the cluster. + Get the cluster health. Get a report with the health status of an Elasticsearch + cluster. The report contains a list of indicators that compose Elasticsearch + functionality. Each indicator has a health status of: green, unknown, yellow + or red. The indicator will provide an explanation and metadata describing the + reason for its current health status. The cluster’s status is controlled by the + worst indicator status. In the event that an indicator’s status is non-green, + a list of impacts may be present in the indicator result which detail the functionalities + that are negatively affected by the health issue. Each impact carries with it + a severity level, an area of the system that is affected, and a simple description + of the impact on the system. Some health indicators can determine the root cause + of a health problem and prescribe a set of steps that can be performed in order + to improve the health of the system. The root cause and remediation steps are + encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause + analysis, an action containing a brief description of the steps to take to fix + the problem, the list of affected resources (if applicable), and a detailed step-by-step + troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators + perform root cause analysis of non-green health statuses. 
This can be computationally + expensive when called frequently. When setting up automated polling of the API + for health status, set verbose to false to disable the more expensive analysis + logic. ``_ @@ -3079,6 +3098,7 @@ async def open_point_in_time( *, index: t.Union[str, t.Sequence[str]], keep_alive: t.Union[str, t.Literal[-1], t.Literal[0]], + allow_partial_search_results: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ @@ -3113,6 +3133,10 @@ async def open_point_in_time( :param index: A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices :param keep_alive: Extends the time to live of the corresponding point in time. + :param allow_partial_search_results: If `false`, creating a point in time request + when a shard is missing or unavailable will throw an exception. If `true`, + the point in time will contain all the shards that are available at the time + of the request. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. 
Supports comma-separated values, such @@ -3135,6 +3159,8 @@ async def open_point_in_time( __body: t.Dict[str, t.Any] = body if body is not None else {} if keep_alive is not None: __query["keep_alive"] = keep_alive + if allow_partial_search_results is not None: + __query["allow_partial_search_results"] = allow_partial_search_results if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: diff --git a/elasticsearch/_async/client/async_search.py b/elasticsearch/_async/client/async_search.py index 0093273ca..8e2bbecf9 100644 --- a/elasticsearch/_async/client/async_search.py +++ b/elasticsearch/_async/client/async_search.py @@ -145,6 +145,7 @@ async def status( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -156,6 +157,9 @@ async def status( ``_ :param id: A unique identifier for the async search. + :param keep_alive: Specifies how long the async search needs to be available. + Ongoing async searches and any saved search results are deleted after this + period. 
""" if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -168,6 +172,8 @@ async def status( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if keep_alive is not None: + __query["keep_alive"] = keep_alive if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -258,7 +264,6 @@ async def submit( ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None, - keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, knn: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] @@ -269,7 +274,6 @@ async def submit( min_score: t.Optional[float] = None, pit: t.Optional[t.Mapping[str, t.Any]] = None, post_filter: t.Optional[t.Mapping[str, t.Any]] = None, - pre_filter_shard_size: t.Optional[int] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, profile: t.Optional[bool] = None, @@ -283,7 +287,6 @@ async def submit( routing: t.Optional[str] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, - scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, search_after: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str, t.Any]] ] = None, @@ -376,9 +379,6 @@ async def submit( :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :param indices_boost: Boosts the _score of documents from specified indices. - :param keep_alive: Specifies how long the async search needs to be available. - Ongoing async searches and any saved search results are deleted after this - period. 
:param keep_on_completion: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. :param knn: Defines the approximate kNN search to run. @@ -394,10 +394,6 @@ async def submit( :param pit: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an in the request path. :param post_filter: - :param pre_filter_shard_size: The default value cannot be changed, which enforces - the execution of a pre-filter roundtrip to retrieve statistics from each - shard so that the ones that surely don’t hold any document matching the query - get skipped. :param preference: Specify the node or shard the operation should be performed on (default: random) :param profile: @@ -406,13 +402,13 @@ async def submit( :param request_cache: Specify if request cache should be used for this request or not, defaults to true :param rescore: - :param rest_total_hits_as_int: + :param rest_total_hits_as_int: Indicates whether hits.total should be rendered + as an integer or an object in the rest search response :param routing: A comma-separated list of specific routing values :param runtime_mappings: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. :param script_fields: Retrieve a script evaluation (based on different fields) for each hit. 
- :param scroll: :param search_after: :param search_type: Search operation type :param seq_no_primary_term: If true, returns sequence number and primary term @@ -509,8 +505,6 @@ async def submit( __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable - if keep_alive is not None: - __query["keep_alive"] = keep_alive if keep_on_completion is not None: __query["keep_on_completion"] = keep_on_completion if lenient is not None: @@ -519,8 +513,6 @@ async def submit( __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if min_compatible_shard_node is not None: __query["min_compatible_shard_node"] = min_compatible_shard_node - if pre_filter_shard_size is not None: - __query["pre_filter_shard_size"] = pre_filter_shard_size if preference is not None: __query["preference"] = preference if pretty is not None: @@ -533,8 +525,6 @@ async def submit( __query["rest_total_hits_as_int"] = rest_total_hits_as_int if routing is not None: __query["routing"] = routing - if scroll is not None: - __query["scroll"] = scroll if search_type is not None: __query["search_type"] = search_type if source_excludes is not None: diff --git a/elasticsearch/_async/client/autoscaling.py b/elasticsearch/_async/client/autoscaling.py index cb2eccabf..7c1b1f01c 100644 --- a/elasticsearch/_async/client/autoscaling.py +++ b/elasticsearch/_async/client/autoscaling.py @@ -33,7 +33,9 @@ async def delete_autoscaling_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Delete an autoscaling policy. 
NOTE: This feature is designed for indirect use @@ -43,6 +45,11 @@ async def delete_autoscaling_policy( ``_ :param name: the name of the autoscaling policy + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -55,8 +62,12 @@ async def delete_autoscaling_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", @@ -74,6 +85,7 @@ async def get_autoscaling_capacity( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -91,6 +103,10 @@ async def get_autoscaling_capacity( use this information to make autoscaling decisions. ``_ + + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. 
""" __path_parts: t.Dict[str, str] = {} __path = "/_autoscaling/capacity" @@ -101,6 +117,8 @@ async def get_autoscaling_capacity( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -121,6 +139,7 @@ async def get_autoscaling_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -131,6 +150,9 @@ async def get_autoscaling_policy( ``_ :param name: the name of the autoscaling policy + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -143,6 +165,8 @@ async def get_autoscaling_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -167,7 +191,9 @@ async def put_autoscaling_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Create or update an autoscaling policy. 
NOTE: This feature is designed for indirect @@ -178,6 +204,11 @@ async def put_autoscaling_policy( :param name: the name of the autoscaling policy :param policy: + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -196,8 +227,12 @@ async def put_autoscaling_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __body = policy if policy is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py index c3f5ec8dc..03c17de2d 100644 --- a/elasticsearch/_async/client/cluster.py +++ b/elasticsearch/_async/client/cluster.py @@ -44,7 +44,13 @@ async def allocation_explain( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides explanations for shard allocations in the cluster. + Explain the shard allocations. Get explanations for shard allocations in the + cluster. For unassigned shards, it provides an explanation for why the shard + is unassigned. For assigned shards, it provides an explanation for why the shard + is remaining on its current node and has not moved or rebalanced to another node. + This API can be very useful when attempting to diagnose why a shard is unassigned + or why a shard continues to remain on its current node when you might expect + otherwise. 
``_ @@ -165,7 +171,8 @@ async def delete_voting_config_exclusions( wait_for_removal: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears cluster voting config exclusions. + Clear cluster voting config exclusions. Remove master-eligible nodes from the + voting configuration exclusion list. ``_ @@ -331,8 +338,8 @@ async def get_settings( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-wide settings. By default, it returns only settings that have - been explicitly defined. + Get cluster-wide settings. By default, it returns only settings that have been + explicitly defined. ``_ @@ -414,14 +421,15 @@ async def health( ] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster health API returns a simple status on the health of the cluster. - You can also use the API to get the health status of only specified data streams - and indices. For data streams, the API retrieves the health status of the stream’s - backing indices. The cluster health status is: green, yellow or red. On the shard - level, a red status indicates that the specific shard is not allocated in the - cluster, yellow means that the primary shard is allocated but replicas are not, - and green means that all shards are allocated. The index level status is controlled - by the worst shard status. The cluster status is controlled by the worst index + Get the cluster health status. You can also use the API to get the health status + of only specified data streams and indices. For data streams, the API retrieves + the health status of the stream’s backing indices. The cluster health status + is: green, yellow or red. On the shard level, a red status indicates that the + specific shard is not allocated in the cluster. Yellow means that the primary + shard is allocated but replicas are not. Green means that all shards are allocated. + The index level status is controlled by the worst shard status. 
One of the main + benefits of the API is the ability to wait until the cluster reaches a certain + high watermark health level. The cluster status is controlled by the worst index status. ``_ @@ -568,14 +576,14 @@ async def pending_tasks( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-level changes (such as create index, update mapping, allocate - or fail shard) that have not yet been executed. NOTE: This API returns a list - of any pending updates to the cluster state. These are distinct from the tasks - reported by the Task Management API which include periodic tasks and tasks initiated - by the user, such as node stats, search queries, or create index requests. However, - if a user-initiated task such as a create index command causes a cluster state - update, the activity of this task might be reported by both task api and pending - cluster tasks API. + Get the pending cluster tasks. Get information about cluster-level changes (such + as create index, update mapping, allocate or fail shard) that have not yet taken + effect. NOTE: This API returns a list of any pending updates to the cluster state. + These are distinct from the tasks reported by the task management API which include + periodic tasks and tasks initiated by the user, such as node stats, search queries, + or create index requests. However, if a user-initiated task such as a create + index command causes a cluster state update, the activity of this task might + be reported by both task api and pending cluster tasks API. ``_ @@ -623,7 +631,33 @@ async def post_voting_config_exclusions( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster voting config exclusions by node ids or node names. + Update voting configuration exclusions. Update the cluster voting config exclusions + by node IDs or node names. 
By default, if there are more than three master-eligible + nodes in the cluster and you remove fewer than half of the master-eligible nodes + in the cluster at once, the voting configuration automatically shrinks. If you + want to shrink the voting configuration to contain fewer than three nodes or + to remove half or more of the master-eligible nodes in the cluster at once, use + this API to remove departing nodes from the voting configuration manually. The + API adds an entry for each specified node to the cluster’s voting configuration + exclusions list. It then waits until the cluster has reconfigured its voting + configuration to exclude the specified nodes. Clusters should have no voting + configuration exclusions in normal operation. Once the excluded nodes have stopped, + clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. + This API waits for the nodes to be fully removed from the cluster before it returns. + If your cluster has voting configuration exclusions for nodes that you no longer + intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` + to clear the voting configuration exclusions without waiting for the nodes to + leave the cluster. A response to `POST /_cluster/voting_config_exclusions` with + an HTTP status code of 200 OK guarantees that the node has been removed from + the voting configuration and will not be reinstated until the voting configuration + exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. + If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response + with an HTTP status code other than 200 OK then the node may not have been removed + from the voting configuration. In that case, you may safely retry the call. NOTE: + Voting exclusions are required only when you remove at least half of the master-eligible + nodes from a cluster in a short time period. 
They are not required when removing + master-ineligible nodes or when removing fewer than half of the master-eligible + nodes. ``_ @@ -787,7 +821,26 @@ async def put_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster settings. + Update the cluster settings. Configure and update dynamic settings on a running + cluster. You can also configure dynamic settings locally on an unstarted or shut + down node in `elasticsearch.yml`. Updates made with this API can be persistent, + which apply across cluster restarts, or transient, which reset after a cluster + restart. You can also reset transient or persistent settings by assigning them + a null value. If you configure the same setting using multiple methods, Elasticsearch + applies the settings in following order of precedence: 1) Transient setting; + 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. + For example, you can apply a transient setting to override a persistent setting + or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting + will not override a defined transient or persistent setting. TIP: In Elastic + Cloud, use the user settings feature to configure all cluster settings. This + method automatically rejects unsafe settings that could break your cluster. If + you run Elasticsearch on your own hardware, use this API to configure dynamic + cluster settings. Only use `elasticsearch.yml` for static cluster settings and + node settings. The API doesn’t require a restart and ensures a setting’s value + is the same on all nodes. WARNING: Transient cluster settings are no longer recommended. + Use persistent cluster settings instead. If a cluster becomes unstable, transient + settings can clear unexpectedly, resulting in a potentially undesired cluster + configuration. 
``_ @@ -841,9 +894,9 @@ async def remote_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster remote info API allows you to retrieve all of the configured remote - cluster information. It returns connection and endpoint information keyed by - the configured remote cluster alias. + Get remote cluster information. Get all of the configured remote cluster information. + This API returns connection and endpoint information keyed by the configured + remote cluster alias. ``_ """ @@ -888,15 +941,35 @@ async def reroute( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to manually change the allocation of individual shards in the cluster. + Reroute the cluster. Manually change the allocation of individual shards in the + cluster. For example, a shard can be moved from one node to another explicitly, + an allocation can be canceled, and an unassigned shard can be explicitly allocated + to a specific node. It is important to note that after processing any reroute + commands Elasticsearch will perform rebalancing as normal (respecting the values + of settings such as `cluster.routing.rebalance.enable`) in order to remain in + a balanced state. For example, if the requested allocation includes moving a + shard from node1 to node2 then this may cause a shard to be moved from node2 + back to node1 to even things out. The cluster can be set to disable allocations + using the `cluster.routing.allocation.enable` setting. If allocations are disabled + then the only allocations that will be performed are explicit ones given using + the reroute command, and consequent allocations due to rebalancing. The cluster + will attempt to allocate a shard a maximum of `index.allocation.max_retries` + times in a row (defaults to `5`), before giving up and leaving the shard unallocated. 
+ This scenario can be caused by structural problems such as having an analyzer + which refers to a stopwords file which doesn’t exist on all nodes. Once the problem + has been corrected, allocation can be manually retried by calling the reroute + API with the `?retry_failed` URI query parameter, which will attempt a single + retry round for these shards. ``_ :param commands: Defines the commands to perform. - :param dry_run: If true, then the request simulates the operation only and returns - the resulting state. + :param dry_run: If true, then the request simulates the operation. It will calculate + the result of applying the commands to the current cluster state and return + the resulting cluster state after the commands (and rebalancing) have been + applied; it will not actually perform the requested changes. :param explain: If true, then the response contains an explanation of why the - commands can or cannot be executed. + commands can or cannot run. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -975,7 +1048,26 @@ async def state( wait_for_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a comprehensive information about the state of the cluster. + Get the cluster state. Get comprehensive information about the state of the cluster. + The cluster state is an internal data structure which keeps track of a variety + of information needed by every node, including the identity and attributes of + the other nodes in the cluster; cluster-wide settings; index metadata, including + the mapping and settings for each index; the location and status of every shard + copy in the cluster. The elected master node ensures that every node in the cluster + has a copy of the same cluster state. 
This API lets you retrieve a representation + of this internal state for debugging or diagnostic purposes. You may need to + consult the Elasticsearch source code to determine the precise meaning of the + response. By default the API will route requests to the elected master node since + this node is the authoritative source of cluster states. You can also retrieve + the cluster state held on the node handling the API request by adding the `?local=true` + query parameter. Elasticsearch may need to expend significant effort to compute + a response to this API in larger clusters, and the response may comprise a very + large quantity of data. If you use this API repeatedly, your cluster may become + unstable. WARNING: The response is a representation of an internal data structure. + Its format is not subject to the same compatibility guarantees as other more + stable APIs and may change from version to version. Do not query this API using + external monitoring tools. Instead, obtain the information you require using + other more stable cluster APIs. ``_ @@ -1059,9 +1151,9 @@ async def stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster statistics. It returns basic index metrics (shard numbers, store - size, memory usage) and information about the current nodes that form the cluster - (number, roles, os, jvm versions, memory usage, cpu and installed plugins). + Get cluster statistics. Get basic index metrics (shard numbers, store size, memory + usage) and information about the current nodes that form the cluster (number, + roles, os, jvm versions, memory usage, cpu and installed plugins). 
``_ diff --git a/elasticsearch/_async/client/enrich.py b/elasticsearch/_async/client/enrich.py index 6cb22bf26..643ef6b90 100644 --- a/elasticsearch/_async/client/enrich.py +++ b/elasticsearch/_async/client/enrich.py @@ -77,7 +77,7 @@ async def execute_policy( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates the enrich index for an existing enrich policy. + Run an enrich policy. Create the enrich index for an existing enrich policy. ``_ diff --git a/elasticsearch/_async/client/eql.py b/elasticsearch/_async/client/eql.py index e835620fd..1a8239eec 100644 --- a/elasticsearch/_async/client/eql.py +++ b/elasticsearch/_async/client/eql.py @@ -36,8 +36,8 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async EQL search or a stored synchronous EQL search. The API also - deletes results for the search. + Delete an async EQL search. Delete an async EQL search or a stored synchronous + EQL search. The API also deletes results for the search. ``_ @@ -83,8 +83,8 @@ async def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status and available results for an async EQL search or a - stored synchronous EQL search. + Get async EQL search results. Get the current status and available results for + an async EQL search or a stored synchronous EQL search. ``_ @@ -134,8 +134,8 @@ async def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status for an async EQL search or a stored synchronous EQL - search without returning results. + Get the async EQL status. Get the current status for an async EQL search or a + stored synchronous EQL search without returning results. ``_ @@ -223,7 +223,9 @@ async def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns results matching a query expressed in Event Query Language (EQL) + Get EQL search results. 
Returns search results for an Event Query Language (EQL) + query. EQL assumes each document in a data stream or index corresponds to an + event. ``_ diff --git a/elasticsearch/_async/client/esql.py b/elasticsearch/_async/client/esql.py index 68eb37243..b8a39d611 100644 --- a/elasticsearch/_async/client/esql.py +++ b/elasticsearch/_async/client/esql.py @@ -68,7 +68,8 @@ async def query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes an ES|QL request + Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) + query. ``_ diff --git a/elasticsearch/_async/client/graph.py b/elasticsearch/_async/client/graph.py index 2ce75e7f2..df8f3fdbe 100644 --- a/elasticsearch/_async/client/graph.py +++ b/elasticsearch/_async/client/graph.py @@ -45,8 +45,14 @@ async def explore( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Extracts and summarizes information about the documents and terms in an Elasticsearch - data stream or index. + Explore graph analytics. Extract and summarize information about the documents + and terms in an Elasticsearch data stream or index. The easiest way to understand + the behavior of this API is to use the Graph UI to explore connections. An initial + request to the `_explore` API contains a seed query that identifies the documents + of interest and specifies the fields that define the vertices and connections + you want to include in the graph. Subsequent requests enable you to spider out + from one or more vertices of interest. You can exclude vertices that have already + been returned.
``_ diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index e884cd8ff..e7af76ecc 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -1354,7 +1354,7 @@ async def exists_index_template( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Returns information about whether a particular index template exists. + Check index templates. Check whether index templates exist. ``_ @@ -3698,8 +3698,8 @@ async def resolve_index( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resolves the specified name(s) and/or index patterns for indices, aliases, and - data streams. Multiple patterns and remote clusters are supported. + Resolve indices. Resolve the names and/or index patterns for indices, aliases, + and data streams. Multiple patterns and remote clusters are supported. ``_ diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index a3ddb1628..701ba6835 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -20,19 +20,12 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import ( - SKIP_IN_PATH, - Stability, - _quote, - _rewrite_parameters, - _stability_warning, -) +from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class InferenceClient(NamespacedClient): @_rewrite_parameters() - @_stability_warning(Stability.EXPERIMENTAL) async def delete( self, *, @@ -100,7 +93,6 @@ async def delete( ) @_rewrite_parameters() - @_stability_warning(Stability.EXPERIMENTAL) async def get( self, *, @@ -159,7 +151,6 @@ async def get( @_rewrite_parameters( body_fields=("input", "query", "task_settings"), ) - @_stability_warning(Stability.EXPERIMENTAL) async def inference( self, *, @@ -246,7 +237,6 @@ async def inference( @_rewrite_parameters( body_name="inference_config", ) - @_stability_warning(Stability.EXPERIMENTAL) async def 
put( self, *, diff --git a/elasticsearch/_async/client/ingest.py b/elasticsearch/_async/client/ingest.py index 0d78dc03c..ecd516365 100644 --- a/elasticsearch/_async/client/ingest.py +++ b/elasticsearch/_async/client/ingest.py @@ -38,7 +38,8 @@ async def delete_geoip_database( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a geoip database configuration. + Delete GeoIP database configurations. Delete one or more IP geolocation database + configurations. ``_ @@ -89,7 +90,7 @@ async def delete_pipeline( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes one or more existing ingest pipeline. + Delete pipelines. Delete one or more ingest pipelines. ``_ @@ -138,7 +139,8 @@ async def geo_ip_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets download statistics for GeoIP2 databases used with the geoip processor. + Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used + with the GeoIP processor. ``_ """ @@ -175,7 +177,8 @@ async def get_geoip_database( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more geoip database configurations. + Get GeoIP database configurations. Get information about one or more IP geolocation + database configurations. ``_ @@ -227,8 +230,8 @@ async def get_pipeline( summary: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more ingest pipelines. This API returns a local - reference of the pipeline. + Get pipelines. Get information about one or more ingest pipelines. This API returns + a local reference of the pipeline. ``_ @@ -279,10 +282,10 @@ async def processor_grok( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Extracts structured fields out of a single text field within a document. 
You - choose which field to extract matched fields from, as well as the grok pattern - you expect will match. A grok pattern is like a regular expression that supports - aliased expressions that can be reused. + Run a grok processor. Extract structured fields out of a single text field within + a document. You must choose which field to extract matched fields from, as well + as the grok pattern you expect will match. A grok pattern is like a regular expression + that supports aliased expressions that can be reused. ``_ """ @@ -325,7 +328,8 @@ async def put_geoip_database( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more geoip database configurations. + Create or update GeoIP database configurations. Create or update IP geolocation + database configurations. ``_ @@ -411,8 +415,7 @@ async def put_pipeline( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates an ingest pipeline. Changes made using this API take effect - immediately. + Create or update a pipeline. Changes made using this API take effect immediately. ``_ @@ -504,7 +507,9 @@ async def simulate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes an ingest pipeline against a set of provided documents. + Simulate a pipeline. Run an ingest pipeline against a set of provided documents. + You can either specify an existing pipeline to use with the provided documents + or supply a pipeline definition in the body of the request. 
``_ diff --git a/elasticsearch/_async/client/nodes.py b/elasticsearch/_async/client/nodes.py index a7b516588..5aa8aa0be 100644 --- a/elasticsearch/_async/client/nodes.py +++ b/elasticsearch/_async/client/nodes.py @@ -44,8 +44,8 @@ async def clear_repositories_metering_archive( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use this API to clear the archived repositories metering information - in the cluster. + Clear the archived repositories metering. Clear the archived repositories metering + information in the cluster. ``_ @@ -94,11 +94,11 @@ async def get_repositories_metering_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use the cluster repositories metering API to retrieve repositories metering - information in a cluster. This API exposes monotonically non-decreasing counters - and it’s expected that clients would durably store the information needed to - compute aggregations over a period of time. Additionally, the information exposed - by this API is volatile, meaning that it won’t be present after node restarts. + Get cluster repositories metering. Get repositories metering information for + a cluster. This API exposes monotonically non-decreasing counters and it is expected + that clients would durably store the information needed to compute aggregations + over a period of time. Additionally, the information exposed by this API is volatile, + meaning that it will not be present after node restarts. ``_ @@ -151,8 +151,9 @@ async def hot_threads( ] = None, ) -> TextApiResponse: """ - This API yields a breakdown of the hot threads on each selected node in the cluster. - The output is plain text with a breakdown of each node’s top hot threads. + Get the hot threads for nodes. Get a breakdown of the hot threads on each selected + node in the cluster. The output is plain text with a breakdown of the top hot + threads for each node. 
``_ @@ -227,7 +228,8 @@ async def info( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster nodes information. + Get node information. By default, the API returns all attributes and core settings + for cluster nodes. ``_ @@ -296,7 +298,18 @@ async def reload_secure_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reloads the keystore on nodes in the cluster. + Reload the keystore on nodes in the cluster. Secure settings are stored in an + on-disk keystore. Certain of these settings are reloadable. That is, you can + change them on disk and reload them without restarting any nodes in the cluster. + When you have updated reloadable secure settings in your keystore, you can use + this API to reload those settings on each node. When the Elasticsearch keystore + is password protected and not simply obfuscated, you must provide the password + for the keystore when you reload the secure settings. Reloading the settings + for the whole cluster assumes that the keystores for all nodes are protected + with the same password; this method is allowed only when inter-node communications + are encrypted. Alternatively, you can reload the secure settings on each node + by locally accessing the API and passing the node-specific Elasticsearch keystore + password. ``_ @@ -367,7 +380,8 @@ async def stats( types: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster nodes statistics. + Get node statistics. Get statistics for nodes in a cluster. By default, all stats + are returned. You can limit the returned information by using metrics. ``_ @@ -484,7 +498,7 @@ async def usage( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information on the usage of features. + Get feature usage information. 
``_ diff --git a/elasticsearch/_async/client/query_rules.py b/elasticsearch/_async/client/query_rules.py index cbf39e121..02f97bac7 100644 --- a/elasticsearch/_async/client/query_rules.py +++ b/elasticsearch/_async/client/query_rules.py @@ -37,7 +37,7 @@ async def delete_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a query rule within a query ruleset. + Delete a query rule. Delete a query rule within a query ruleset. ``_ @@ -85,7 +85,7 @@ async def delete_ruleset( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a query ruleset. + Delete a query ruleset. ``_ @@ -126,7 +126,7 @@ async def get_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a query rule within a query ruleset + Get a query rule. Get details about a query rule within a query ruleset. ``_ @@ -174,7 +174,7 @@ async def get_ruleset( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a query ruleset + Get a query ruleset. Get details about a query ruleset. ``_ @@ -217,7 +217,7 @@ async def list_rulesets( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns summarized information about existing query rulesets. + Get all query rulesets. Get summarized information about the query rulesets. ``_ @@ -270,7 +270,7 @@ async def put_rule( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a query rule within a query ruleset. + Create or update a query rule. Create or update a query rule within a query ruleset. ``_ @@ -345,7 +345,7 @@ async def put_ruleset( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a query ruleset. + Create or update a query ruleset. ``_ @@ -398,7 +398,8 @@ async def test( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a query ruleset. 
+ Test a query ruleset. Evaluate match criteria against a query ruleset to identify + the rules that would match that criteria. ``_ diff --git a/elasticsearch/_async/client/security.py b/elasticsearch/_async/client/security.py index f7e3d7ff7..2304eb2cf 100644 --- a/elasticsearch/_async/client/security.py +++ b/elasticsearch/_async/client/security.py @@ -2052,6 +2052,7 @@ async def has_privileges( "monitor_ml", "monitor_rollup", "monitor_snapshot", + "monitor_stats", "monitor_text_structure", "monitor_transform", "monitor_watcher", @@ -2392,6 +2393,7 @@ async def put_privileges( "global_", "indices", "metadata", + "remote_cluster", "remote_indices", "run_as", "transient_metadata", @@ -2452,6 +2454,7 @@ async def put_role( "monitor_ml", "monitor_rollup", "monitor_snapshot", + "monitor_stats", "monitor_text_structure", "monitor_transform", "monitor_watcher", @@ -2481,6 +2484,7 @@ async def put_role( refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, + remote_cluster: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, remote_indices: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, run_as: t.Optional[t.Sequence[str]] = None, transient_metadata: t.Optional[t.Mapping[str, t.Any]] = None, @@ -2508,6 +2512,7 @@ async def put_role( :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + :param remote_cluster: A list of remote cluster permissions entries. :param remote_indices: A list of remote indices permissions entries. :param run_as: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. 
For API compatibility, @@ -2549,6 +2554,8 @@ async def put_role( __body["indices"] = indices if metadata is not None: __body["metadata"] = metadata + if remote_cluster is not None: + __body["remote_cluster"] = remote_cluster if remote_indices is not None: __body["remote_indices"] = remote_indices if run_as is not None: diff --git a/elasticsearch/_async/client/sql.py b/elasticsearch/_async/client/sql.py index c4b2f4335..06e8f98a3 100644 --- a/elasticsearch/_async/client/sql.py +++ b/elasticsearch/_async/client/sql.py @@ -39,7 +39,7 @@ async def clear_cursor( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears the SQL cursor + Clear an SQL search cursor. ``_ @@ -84,8 +84,8 @@ async def delete_async( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async SQL search or a stored synchronous SQL search. If the search - is still running, the API cancels it. + Delete an async SQL search. Delete an async SQL search or a stored synchronous + SQL search. If the search is still running, the API cancels it. ``_ @@ -131,8 +131,8 @@ async def get_async( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status and available results for an async SQL search or stored - synchronous SQL search + Get async SQL search results. Get the current status and available results for + an async SQL search or stored synchronous SQL search. ``_ @@ -189,8 +189,8 @@ async def get_async_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status of an async SQL search or a stored synchronous SQL - search + Get the async SQL search status. Get the current status of an async SQL search + or a stored synchronous SQL search. ``_ @@ -273,7 +273,7 @@ async def query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes a SQL request + Get SQL search results. Run an SQL request. 
``_ @@ -383,7 +383,8 @@ async def translate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Translates SQL into Elasticsearch queries + Translate SQL into Elasticsearch queries. Translate an SQL search into a search + API request containing Query DSL. ``_ diff --git a/elasticsearch/_async/client/synonyms.py b/elasticsearch/_async/client/synonyms.py index fac176a30..c86b2c584 100644 --- a/elasticsearch/_async/client/synonyms.py +++ b/elasticsearch/_async/client/synonyms.py @@ -36,7 +36,7 @@ async def delete_synonym( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a synonym set + Delete a synonym set. ``_ @@ -77,7 +77,7 @@ async def delete_synonym_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a synonym rule in a synonym set + Delete a synonym rule. Delete a synonym rule from a synonym set. ``_ @@ -127,7 +127,7 @@ async def get_synonym( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a synonym set + Get a synonym set. ``_ @@ -174,7 +174,7 @@ async def get_synonym_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a synonym rule from a synonym set + Get a synonym rule. Get a synonym rule from a synonym set. ``_ @@ -223,7 +223,7 @@ async def get_synonyms_sets( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a summary of all defined synonym sets + Get all synonym sets. Get a summary of all defined synonym sets. ``_ @@ -272,7 +272,9 @@ async def put_synonym( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a synonym set. + Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 + synonym rules per set. If you need to manage more synonym rules, you can create + multiple synonym sets. 
``_ @@ -325,7 +327,8 @@ async def put_synonym_rule( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a synonym rule in a synonym set + Create or update a synonym rule. Create or update a synonym rule in a synonym + set. ``_ diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index adf877523..12d6f3fc5 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -870,7 +870,7 @@ def count( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns number of documents matching a query. + Count search results. Get the number of documents matching a query. ``_ @@ -2272,7 +2272,26 @@ def health_report( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the health of the cluster. + Get the cluster health. Get a report with the health status of an Elasticsearch + cluster. The report contains a list of indicators that compose Elasticsearch + functionality. Each indicator has a health status of: green, unknown, yellow + or red. The indicator will provide an explanation and metadata describing the + reason for its current health status. The cluster’s status is controlled by the + worst indicator status. In the event that an indicator’s status is non-green, + a list of impacts may be present in the indicator result which detail the functionalities + that are negatively affected by the health issue. Each impact carries with it + a severity level, an area of the system that is affected, and a simple description + of the impact on the system. Some health indicators can determine the root cause + of a health problem and prescribe a set of steps that can be performed in order + to improve the health of the system. The root cause and remediation steps are + encapsulated in a diagnosis. 
A diagnosis contains a cause detailing a root cause + analysis, an action containing a brief description of the steps to take to fix + the problem, the list of affected resources (if applicable), and a detailed step-by-step + troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators + perform root cause analysis of non-green health statuses. This can be computationally + expensive when called frequently. When setting up automated polling of the API + for health status, set verbose to false to disable the more expensive analysis + logic. ``_ @@ -3077,6 +3096,7 @@ def open_point_in_time( *, index: t.Union[str, t.Sequence[str]], keep_alive: t.Union[str, t.Literal[-1], t.Literal[0]], + allow_partial_search_results: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ @@ -3111,6 +3131,10 @@ def open_point_in_time( :param index: A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices :param keep_alive: Extends the time to live of the corresponding point in time. + :param allow_partial_search_results: If `false`, creating a point in time request + when a shard is missing or unavailable will throw an exception. If `true`, + the point in time will contain all the shards that are available at the time + of the request. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. 
Supports comma-separated values, such @@ -3133,6 +3157,8 @@ def open_point_in_time( __body: t.Dict[str, t.Any] = body if body is not None else {} if keep_alive is not None: __query["keep_alive"] = keep_alive + if allow_partial_search_results is not None: + __query["allow_partial_search_results"] = allow_partial_search_results if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: diff --git a/elasticsearch/_sync/client/async_search.py b/elasticsearch/_sync/client/async_search.py index 147553dc3..3a8791e3c 100644 --- a/elasticsearch/_sync/client/async_search.py +++ b/elasticsearch/_sync/client/async_search.py @@ -145,6 +145,7 @@ def status( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -156,6 +157,9 @@ def status( ``_ :param id: A unique identifier for the async search. + :param keep_alive: Specifies how long the async search needs to be available. + Ongoing async searches and any saved search results are deleted after this + period. 
""" if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -168,6 +172,8 @@ def status( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if keep_alive is not None: + __query["keep_alive"] = keep_alive if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -258,7 +264,6 @@ def submit( ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None, - keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, knn: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] @@ -269,7 +274,6 @@ def submit( min_score: t.Optional[float] = None, pit: t.Optional[t.Mapping[str, t.Any]] = None, post_filter: t.Optional[t.Mapping[str, t.Any]] = None, - pre_filter_shard_size: t.Optional[int] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, profile: t.Optional[bool] = None, @@ -283,7 +287,6 @@ def submit( routing: t.Optional[str] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, - scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, search_after: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str, t.Any]] ] = None, @@ -376,9 +379,6 @@ def submit( :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :param indices_boost: Boosts the _score of documents from specified indices. - :param keep_alive: Specifies how long the async search needs to be available. - Ongoing async searches and any saved search results are deleted after this - period. :param keep_on_completion: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. 
:param knn: Defines the approximate kNN search to run. @@ -394,10 +394,6 @@ def submit( :param pit: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an in the request path. :param post_filter: - :param pre_filter_shard_size: The default value cannot be changed, which enforces - the execution of a pre-filter roundtrip to retrieve statistics from each - shard so that the ones that surely don’t hold any document matching the query - get skipped. :param preference: Specify the node or shard the operation should be performed on (default: random) :param profile: @@ -406,13 +402,13 @@ def submit( :param request_cache: Specify if request cache should be used for this request or not, defaults to true :param rescore: - :param rest_total_hits_as_int: + :param rest_total_hits_as_int: Indicates whether hits.total should be rendered + as an integer or an object in the rest search response :param routing: A comma-separated list of specific routing values :param runtime_mappings: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. :param script_fields: Retrieve a script evaluation (based on different fields) for each hit. 
- :param scroll: :param search_after: :param search_type: Search operation type :param seq_no_primary_term: If true, returns sequence number and primary term @@ -509,8 +505,6 @@ def submit( __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable - if keep_alive is not None: - __query["keep_alive"] = keep_alive if keep_on_completion is not None: __query["keep_on_completion"] = keep_on_completion if lenient is not None: @@ -519,8 +513,6 @@ def submit( __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if min_compatible_shard_node is not None: __query["min_compatible_shard_node"] = min_compatible_shard_node - if pre_filter_shard_size is not None: - __query["pre_filter_shard_size"] = pre_filter_shard_size if preference is not None: __query["preference"] = preference if pretty is not None: @@ -533,8 +525,6 @@ def submit( __query["rest_total_hits_as_int"] = rest_total_hits_as_int if routing is not None: __query["routing"] = routing - if scroll is not None: - __query["scroll"] = scroll if search_type is not None: __query["search_type"] = search_type if source_excludes is not None: diff --git a/elasticsearch/_sync/client/autoscaling.py b/elasticsearch/_sync/client/autoscaling.py index 6dc45d2a5..c73f74986 100644 --- a/elasticsearch/_sync/client/autoscaling.py +++ b/elasticsearch/_sync/client/autoscaling.py @@ -33,7 +33,9 @@ def delete_autoscaling_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Delete an autoscaling policy. 
NOTE: This feature is designed for indirect use @@ -43,6 +45,11 @@ def delete_autoscaling_policy( ``_ :param name: the name of the autoscaling policy + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -55,8 +62,12 @@ def delete_autoscaling_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", @@ -74,6 +85,7 @@ def get_autoscaling_capacity( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -91,6 +103,10 @@ def get_autoscaling_capacity( use this information to make autoscaling decisions. ``_ + + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. 
""" __path_parts: t.Dict[str, str] = {} __path = "/_autoscaling/capacity" @@ -101,6 +117,8 @@ def get_autoscaling_capacity( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -121,6 +139,7 @@ def get_autoscaling_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -131,6 +150,9 @@ def get_autoscaling_policy( ``_ :param name: the name of the autoscaling policy + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -143,6 +165,8 @@ def get_autoscaling_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -167,7 +191,9 @@ def put_autoscaling_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Create or update an autoscaling policy. 
NOTE: This feature is designed for indirect @@ -178,6 +204,11 @@ def put_autoscaling_policy( :param name: the name of the autoscaling policy :param policy: + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -196,8 +227,12 @@ def put_autoscaling_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __body = policy if policy is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] diff --git a/elasticsearch/_sync/client/cluster.py b/elasticsearch/_sync/client/cluster.py index d7f60e889..6c1afa6c7 100644 --- a/elasticsearch/_sync/client/cluster.py +++ b/elasticsearch/_sync/client/cluster.py @@ -44,7 +44,13 @@ def allocation_explain( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides explanations for shard allocations in the cluster. + Explain the shard allocations. Get explanations for shard allocations in the + cluster. For unassigned shards, it provides an explanation for why the shard + is unassigned. For assigned shards, it provides an explanation for why the shard + is remaining on its current node and has not moved or rebalanced to another node. + This API can be very useful when attempting to diagnose why a shard is unassigned + or why a shard continues to remain on its current node when you might expect + otherwise. 
``_ @@ -165,7 +171,8 @@ def delete_voting_config_exclusions( wait_for_removal: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears cluster voting config exclusions. + Clear cluster voting config exclusions. Remove master-eligible nodes from the + voting configuration exclusion list. ``_ @@ -331,8 +338,8 @@ def get_settings( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-wide settings. By default, it returns only settings that have - been explicitly defined. + Get cluster-wide settings. By default, it returns only settings that have been + explicitly defined. ``_ @@ -414,14 +421,15 @@ def health( ] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster health API returns a simple status on the health of the cluster. - You can also use the API to get the health status of only specified data streams - and indices. For data streams, the API retrieves the health status of the stream’s - backing indices. The cluster health status is: green, yellow or red. On the shard - level, a red status indicates that the specific shard is not allocated in the - cluster, yellow means that the primary shard is allocated but replicas are not, - and green means that all shards are allocated. The index level status is controlled - by the worst shard status. The cluster status is controlled by the worst index + Get the cluster health status. You can also use the API to get the health status + of only specified data streams and indices. For data streams, the API retrieves + the health status of the stream’s backing indices. The cluster health status + is: green, yellow or red. On the shard level, a red status indicates that the + specific shard is not allocated in the cluster. Yellow means that the primary + shard is allocated but replicas are not. Green means that all shards are allocated. + The index level status is controlled by the worst shard status. 
One of the main + benefits of the API is the ability to wait until the cluster reaches a certain + high watermark health level. The cluster status is controlled by the worst index status. ``_ @@ -568,14 +576,14 @@ def pending_tasks( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-level changes (such as create index, update mapping, allocate - or fail shard) that have not yet been executed. NOTE: This API returns a list - of any pending updates to the cluster state. These are distinct from the tasks - reported by the Task Management API which include periodic tasks and tasks initiated - by the user, such as node stats, search queries, or create index requests. However, - if a user-initiated task such as a create index command causes a cluster state - update, the activity of this task might be reported by both task api and pending - cluster tasks API. + Get the pending cluster tasks. Get information about cluster-level changes (such + as create index, update mapping, allocate or fail shard) that have not yet taken + effect. NOTE: This API returns a list of any pending updates to the cluster state. + These are distinct from the tasks reported by the task management API which include + periodic tasks and tasks initiated by the user, such as node stats, search queries, + or create index requests. However, if a user-initiated task such as a create + index command causes a cluster state update, the activity of this task might + be reported by both task api and pending cluster tasks API. ``_ @@ -623,7 +631,33 @@ def post_voting_config_exclusions( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster voting config exclusions by node ids or node names. + Update voting configuration exclusions. Update the cluster voting config exclusions + by node IDs or node names. 
By default, if there are more than three master-eligible + nodes in the cluster and you remove fewer than half of the master-eligible nodes + in the cluster at once, the voting configuration automatically shrinks. If you + want to shrink the voting configuration to contain fewer than three nodes or + to remove half or more of the master-eligible nodes in the cluster at once, use + this API to remove departing nodes from the voting configuration manually. The + API adds an entry for each specified node to the cluster’s voting configuration + exclusions list. It then waits until the cluster has reconfigured its voting + configuration to exclude the specified nodes. Clusters should have no voting + configuration exclusions in normal operation. Once the excluded nodes have stopped, + clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. + This API waits for the nodes to be fully removed from the cluster before it returns. + If your cluster has voting configuration exclusions for nodes that you no longer + intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` + to clear the voting configuration exclusions without waiting for the nodes to + leave the cluster. A response to `POST /_cluster/voting_config_exclusions` with + an HTTP status code of 200 OK guarantees that the node has been removed from + the voting configuration and will not be reinstated until the voting configuration + exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. + If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response + with an HTTP status code other than 200 OK then the node may not have been removed + from the voting configuration. In that case, you may safely retry the call. NOTE: + Voting exclusions are required only when you remove at least half of the master-eligible + nodes from a cluster in a short time period. 
They are not required when removing + master-ineligible nodes or when removing fewer than half of the master-eligible + nodes. ``_ @@ -787,7 +821,26 @@ def put_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster settings. + Update the cluster settings. Configure and update dynamic settings on a running + cluster. You can also configure dynamic settings locally on an unstarted or shut + down node in `elasticsearch.yml`. Updates made with this API can be persistent, + which apply across cluster restarts, or transient, which reset after a cluster + restart. You can also reset transient or persistent settings by assigning them + a null value. If you configure the same setting using multiple methods, Elasticsearch + applies the settings in following order of precedence: 1) Transient setting; + 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. + For example, you can apply a transient setting to override a persistent setting + or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting + will not override a defined transient or persistent setting. TIP: In Elastic + Cloud, use the user settings feature to configure all cluster settings. This + method automatically rejects unsafe settings that could break your cluster. If + you run Elasticsearch on your own hardware, use this API to configure dynamic + cluster settings. Only use `elasticsearch.yml` for static cluster settings and + node settings. The API doesn’t require a restart and ensures a setting’s value + is the same on all nodes. WARNING: Transient cluster settings are no longer recommended. + Use persistent cluster settings instead. If a cluster becomes unstable, transient + settings can clear unexpectedly, resulting in a potentially undesired cluster + configuration. 
``_ @@ -841,9 +894,9 @@ def remote_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster remote info API allows you to retrieve all of the configured remote - cluster information. It returns connection and endpoint information keyed by - the configured remote cluster alias. + Get remote cluster information. Get all of the configured remote cluster information. + This API returns connection and endpoint information keyed by the configured + remote cluster alias. ``_ """ @@ -888,15 +941,35 @@ def reroute( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to manually change the allocation of individual shards in the cluster. + Reroute the cluster. Manually change the allocation of individual shards in the + cluster. For example, a shard can be moved from one node to another explicitly, + an allocation can be canceled, and an unassigned shard can be explicitly allocated + to a specific node. It is important to note that after processing any reroute + commands Elasticsearch will perform rebalancing as normal (respecting the values + of settings such as `cluster.routing.rebalance.enable`) in order to remain in + a balanced state. For example, if the requested allocation includes moving a + shard from node1 to node2 then this may cause a shard to be moved from node2 + back to node1 to even things out. The cluster can be set to disable allocations + using the `cluster.routing.allocation.enable` setting. If allocations are disabled + then the only allocations that will be performed are explicit ones given using + the reroute command, and consequent allocations due to rebalancing. The cluster + will attempt to allocate a shard a maximum of `index.allocation.max_retries` + times in a row (defaults to `5`), before giving up and leaving the shard unallocated. + This scenario can be caused by structural problems such as having an analyzer + which refers to a stopwords file which doesn’t exist on all nodes. 
Once the problem + has been corrected, allocation can be manually retried by calling the reroute + API with the `?retry_failed` URI query parameter, which will attempt a single + retry round for these shards. ``_ :param commands: Defines the commands to perform. - :param dry_run: If true, then the request simulates the operation only and returns - the resulting state. + :param dry_run: If true, then the request simulates the operation. It will calculate + the result of applying the commands to the current cluster state and return + the resulting cluster state after the commands (and rebalancing) have been + applied; it will not actually perform the requested changes. :param explain: If true, then the response contains an explanation of why the - commands can or cannot be executed. + commands can or cannot run. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -975,7 +1048,26 @@ def state( wait_for_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a comprehensive information about the state of the cluster. + Get the cluster state. Get comprehensive information about the state of the cluster. + The cluster state is an internal data structure which keeps track of a variety + of information needed by every node, including the identity and attributes of + the other nodes in the cluster; cluster-wide settings; index metadata, including + the mapping and settings for each index; the location and status of every shard + copy in the cluster. The elected master node ensures that every node in the cluster + has a copy of the same cluster state. This API lets you retrieve a representation + of this internal state for debugging or diagnostic purposes. You may need to + consult the Elasticsearch source code to determine the precise meaning of the + response. 
By default the API will route requests to the elected master node since + this node is the authoritative source of cluster states. You can also retrieve + the cluster state held on the node handling the API request by adding the `?local=true` + query parameter. Elasticsearch may need to expend significant effort to compute + a response to this API in larger clusters, and the response may comprise a very + large quantity of data. If you use this API repeatedly, your cluster may become + unstable. WARNING: The response is a representation of an internal data structure. + Its format is not subject to the same compatibility guarantees as other more + stable APIs and may change from version to version. Do not query this API using + external monitoring tools. Instead, obtain the information you require using + other more stable cluster APIs. ``_ @@ -1059,9 +1151,9 @@ def stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster statistics. It returns basic index metrics (shard numbers, store - size, memory usage) and information about the current nodes that form the cluster - (number, roles, os, jvm versions, memory usage, cpu and installed plugins). + Get cluster statistics. Get basic index metrics (shard numbers, store size, memory + usage) and information about the current nodes that form the cluster (number, + roles, os, jvm versions, memory usage, cpu and installed plugins). ``_ diff --git a/elasticsearch/_sync/client/enrich.py b/elasticsearch/_sync/client/enrich.py index 099cbf2dd..b33eff34d 100644 --- a/elasticsearch/_sync/client/enrich.py +++ b/elasticsearch/_sync/client/enrich.py @@ -77,7 +77,7 @@ def execute_policy( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates the enrich index for an existing enrich policy. + Run an enrich policy. Create the enrich index for an existing enrich policy. 
``_ diff --git a/elasticsearch/_sync/client/eql.py b/elasticsearch/_sync/client/eql.py index 39af43fb0..2610b3261 100644 --- a/elasticsearch/_sync/client/eql.py +++ b/elasticsearch/_sync/client/eql.py @@ -36,8 +36,8 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async EQL search or a stored synchronous EQL search. The API also - deletes results for the search. + Delete an async EQL search. Delete an async EQL search or a stored synchronous + EQL search. The API also deletes results for the search. ``_ @@ -83,8 +83,8 @@ def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status and available results for an async EQL search or a - stored synchronous EQL search. + Get async EQL search results. Get the current status and available results for + an async EQL search or a stored synchronous EQL search. ``_ @@ -134,8 +134,8 @@ def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status for an async EQL search or a stored synchronous EQL - search without returning results. + Get the async EQL status. Get the current status for an async EQL search or a + stored synchronous EQL search without returning results. ``_ @@ -223,7 +223,9 @@ def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns results matching a query expressed in Event Query Language (EQL) + Get EQL search results. Returns search results for an Event Query Language (EQL) + query. EQL assumes each document in a data stream or index corresponds to an + event. ``_ diff --git a/elasticsearch/_sync/client/esql.py b/elasticsearch/_sync/client/esql.py index fc9fd2a7e..8863d8e84 100644 --- a/elasticsearch/_sync/client/esql.py +++ b/elasticsearch/_sync/client/esql.py @@ -68,7 +68,8 @@ def query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes an ES|QL request + Run an ES|QL query. 
Get search results for an ES|QL (Elasticsearch query language) + query. ``_ diff --git a/elasticsearch/_sync/client/graph.py b/elasticsearch/_sync/client/graph.py index 5a29add0b..f62bbb15a 100644 --- a/elasticsearch/_sync/client/graph.py +++ b/elasticsearch/_sync/client/graph.py @@ -45,8 +45,14 @@ def explore( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Extracts and summarizes information about the documents and terms in an Elasticsearch - data stream or index. + Explore graph analytics. Extract and summarize information about the documents + and terms in an Elasticsearch data stream or index. The easiest way to understand + the behavior of this API is to use the Graph UI to explore connections. An initial + request to the `_explore` API contains a seed query that identifies the documents + of interest and specifies the fields that define the vertices and connections + you want to include in the graph. Subsequent requests enable you to spider out + from one or more vertices of interest. You can exclude vertices that have already + been returned. ``_ diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index 92133311a..b27909af1 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -1354,7 +1354,7 @@ def exists_index_template( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Returns information about whether a particular index template exists. + Check index templates. Check whether index templates exist. ``_ @@ -3698,8 +3698,8 @@ def resolve_index( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resolves the specified name(s) and/or index patterns for indices, aliases, and - data streams. Multiple patterns and remote clusters are supported. + Resolve indices. Resolve the names and/or index patterns for indices, aliases, + and data streams. Multiple patterns and remote clusters are supported. 
``_ diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index 9f58dfbfc..08f9da4aa 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -20,19 +20,12 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import ( - SKIP_IN_PATH, - Stability, - _quote, - _rewrite_parameters, - _stability_warning, -) +from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class InferenceClient(NamespacedClient): @_rewrite_parameters() - @_stability_warning(Stability.EXPERIMENTAL) def delete( self, *, @@ -100,7 +93,6 @@ def delete( ) @_rewrite_parameters() - @_stability_warning(Stability.EXPERIMENTAL) def get( self, *, @@ -159,7 +151,6 @@ def get( @_rewrite_parameters( body_fields=("input", "query", "task_settings"), ) - @_stability_warning(Stability.EXPERIMENTAL) def inference( self, *, @@ -246,7 +237,6 @@ def inference( @_rewrite_parameters( body_name="inference_config", ) - @_stability_warning(Stability.EXPERIMENTAL) def put( self, *, diff --git a/elasticsearch/_sync/client/ingest.py b/elasticsearch/_sync/client/ingest.py index e244e91a3..db211c1c3 100644 --- a/elasticsearch/_sync/client/ingest.py +++ b/elasticsearch/_sync/client/ingest.py @@ -38,7 +38,8 @@ def delete_geoip_database( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a geoip database configuration. + Delete GeoIP database configurations. Delete one or more IP geolocation database + configurations. ``_ @@ -89,7 +90,7 @@ def delete_pipeline( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes one or more existing ingest pipeline. + Delete pipelines. Delete one or more ingest pipelines. 
``_ @@ -138,7 +139,8 @@ def geo_ip_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets download statistics for GeoIP2 databases used with the geoip processor. + Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used + with the GeoIP processor. ``_ """ @@ -175,7 +177,8 @@ def get_geoip_database( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more geoip database configurations. + Get GeoIP database configurations. Get information about one or more IP geolocation + database configurations. ``_ @@ -227,8 +230,8 @@ def get_pipeline( summary: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more ingest pipelines. This API returns a local - reference of the pipeline. + Get pipelines. Get information about one or more ingest pipelines. This API returns + a local reference of the pipeline. ``_ @@ -279,10 +282,10 @@ def processor_grok( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Extracts structured fields out of a single text field within a document. You - choose which field to extract matched fields from, as well as the grok pattern - you expect will match. A grok pattern is like a regular expression that supports - aliased expressions that can be reused. + Run a grok processor. Extract structured fields out of a single text field within + a document. You must choose which field to extract matched fields from, as well + as the grok pattern you expect will match. A grok pattern is like a regular expression + that supports aliased expressions that can be reused. ``_ """ @@ -325,7 +328,8 @@ def put_geoip_database( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more geoip database configurations. + Create or update GeoIP database configurations. Create or update IP geolocation + database configurations. 
``_ @@ -411,8 +415,7 @@ def put_pipeline( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates an ingest pipeline. Changes made using this API take effect - immediately. + Create or update a pipeline. Changes made using this API take effect immediately. ``_ @@ -504,7 +507,9 @@ def simulate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes an ingest pipeline against a set of provided documents. + Simulate a pipeline. Run an ingest pipeline against a set of provided documents. + You can either specify an existing pipeline to use with the provided documents + or supply a pipeline definition in the body of the request. ``_ diff --git a/elasticsearch/_sync/client/nodes.py b/elasticsearch/_sync/client/nodes.py index 5c8e36979..13e5254ef 100644 --- a/elasticsearch/_sync/client/nodes.py +++ b/elasticsearch/_sync/client/nodes.py @@ -44,8 +44,8 @@ def clear_repositories_metering_archive( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use this API to clear the archived repositories metering information - in the cluster. + Clear the archived repositories metering. Clear the archived repositories metering + information in the cluster. ``_ @@ -94,11 +94,11 @@ def get_repositories_metering_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use the cluster repositories metering API to retrieve repositories metering - information in a cluster. This API exposes monotonically non-decreasing counters - and it’s expected that clients would durably store the information needed to - compute aggregations over a period of time. Additionally, the information exposed - by this API is volatile, meaning that it won’t be present after node restarts. + Get cluster repositories metering. Get repositories metering information for + a cluster. 
This API exposes monotonically non-decreasing counters and it is expected + that clients would durably store the information needed to compute aggregations + over a period of time. Additionally, the information exposed by this API is volatile, + meaning that it will not be present after node restarts. ``_ @@ -151,8 +151,9 @@ def hot_threads( ] = None, ) -> TextApiResponse: """ - This API yields a breakdown of the hot threads on each selected node in the cluster. - The output is plain text with a breakdown of each node’s top hot threads. + Get the hot threads for nodes. Get a breakdown of the hot threads on each selected + node in the cluster. The output is plain text with a breakdown of the top hot + threads for each node. ``_ @@ -227,7 +228,8 @@ def info( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster nodes information. + Get node information. By default, the API returns all attributes and core settings + for cluster nodes. ``_ @@ -296,7 +298,18 @@ def reload_secure_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reloads the keystore on nodes in the cluster. + Reload the keystore on nodes in the cluster. Secure settings are stored in an + on-disk keystore. Certain of these settings are reloadable. That is, you can + change them on disk and reload them without restarting any nodes in the cluster. + When you have updated reloadable secure settings in your keystore, you can use + this API to reload those settings on each node. When the Elasticsearch keystore + is password protected and not simply obfuscated, you must provide the password + for the keystore when you reload the secure settings. Reloading the settings + for the whole cluster assumes that the keystores for all nodes are protected + with the same password; this method is allowed only when inter-node communications + are encrypted. 
Alternatively, you can reload the secure settings on each node + by locally accessing the API and passing the node-specific Elasticsearch keystore + password. ``_ @@ -367,7 +380,8 @@ def stats( types: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster nodes statistics. + Get node statistics. Get statistics for nodes in a cluster. By default, all stats + are returned. You can limit the returned information by using metrics. ``_ @@ -484,7 +498,7 @@ def usage( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information on the usage of features. + Get feature usage information. ``_ diff --git a/elasticsearch/_sync/client/query_rules.py b/elasticsearch/_sync/client/query_rules.py index d5aaa2f76..57e2d74ee 100644 --- a/elasticsearch/_sync/client/query_rules.py +++ b/elasticsearch/_sync/client/query_rules.py @@ -37,7 +37,7 @@ def delete_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a query rule within a query ruleset. + Delete a query rule. Delete a query rule within a query ruleset. ``_ @@ -85,7 +85,7 @@ def delete_ruleset( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a query ruleset. + Delete a query ruleset. ``_ @@ -126,7 +126,7 @@ def get_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a query rule within a query ruleset + Get a query rule. Get details about a query rule within a query ruleset. ``_ @@ -174,7 +174,7 @@ def get_ruleset( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a query ruleset + Get a query ruleset. Get details about a query ruleset. ``_ @@ -217,7 +217,7 @@ def list_rulesets( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns summarized information about existing query rulesets. + Get all query rulesets. 
Get summarized information about the query rulesets. ``_ @@ -270,7 +270,7 @@ def put_rule( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a query rule within a query ruleset. + Create or update a query rule. Create or update a query rule within a query ruleset. ``_ @@ -345,7 +345,7 @@ def put_ruleset( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a query ruleset. + Create or update a query ruleset. ``_ @@ -398,7 +398,8 @@ def test( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a query ruleset. + Test a query ruleset. Evaluate match criteria against a query ruleset to identify + the rules that would match that criteria. ``_ diff --git a/elasticsearch/_sync/client/security.py b/elasticsearch/_sync/client/security.py index 35d35a8db..c139f2868 100644 --- a/elasticsearch/_sync/client/security.py +++ b/elasticsearch/_sync/client/security.py @@ -2052,6 +2052,7 @@ def has_privileges( "monitor_ml", "monitor_rollup", "monitor_snapshot", + "monitor_stats", "monitor_text_structure", "monitor_transform", "monitor_watcher", @@ -2392,6 +2393,7 @@ def put_privileges( "global_", "indices", "metadata", + "remote_cluster", "remote_indices", "run_as", "transient_metadata", @@ -2452,6 +2454,7 @@ def put_role( "monitor_ml", "monitor_rollup", "monitor_snapshot", + "monitor_stats", "monitor_text_structure", "monitor_transform", "monitor_watcher", @@ -2481,6 +2484,7 @@ def put_role( refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, + remote_cluster: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, remote_indices: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, run_as: t.Optional[t.Sequence[str]] = None, transient_metadata: t.Optional[t.Mapping[str, t.Any]] = None, @@ -2508,6 +2512,7 @@ def put_role( :param refresh: If `true` (the default) then refresh the affected shards to 
make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + :param remote_cluster: A list of remote cluster permissions entries. :param remote_indices: A list of remote indices permissions entries. :param run_as: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, @@ -2549,6 +2554,8 @@ def put_role( __body["indices"] = indices if metadata is not None: __body["metadata"] = metadata + if remote_cluster is not None: + __body["remote_cluster"] = remote_cluster if remote_indices is not None: __body["remote_indices"] = remote_indices if run_as is not None: diff --git a/elasticsearch/_sync/client/sql.py b/elasticsearch/_sync/client/sql.py index b7da9229c..dc5f238e8 100644 --- a/elasticsearch/_sync/client/sql.py +++ b/elasticsearch/_sync/client/sql.py @@ -39,7 +39,7 @@ def clear_cursor( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears the SQL cursor + Clear an SQL search cursor. ``_ @@ -84,8 +84,8 @@ def delete_async( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async SQL search or a stored synchronous SQL search. If the search - is still running, the API cancels it. + Delete an async SQL search. Delete an async SQL search or a stored synchronous + SQL search. If the search is still running, the API cancels it. ``_ @@ -131,8 +131,8 @@ def get_async( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status and available results for an async SQL search or stored - synchronous SQL search + Get async SQL search results. Get the current status and available results for + an async SQL search or stored synchronous SQL search. 
``_ @@ -189,8 +189,8 @@ def get_async_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status of an async SQL search or a stored synchronous SQL - search + Get the async SQL search status. Get the current status of an async SQL search + or a stored synchronous SQL search. ``_ @@ -273,7 +273,7 @@ def query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes a SQL request + Get SQL search results. Run an SQL request. ``_ @@ -383,7 +383,8 @@ def translate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Translates SQL into Elasticsearch queries + Translate SQL into Elasticsearch queries. Translate an SQL search into a search + API request containing Query DSL. ``_ diff --git a/elasticsearch/_sync/client/synonyms.py b/elasticsearch/_sync/client/synonyms.py index b82ec67a0..ccc4a6d89 100644 --- a/elasticsearch/_sync/client/synonyms.py +++ b/elasticsearch/_sync/client/synonyms.py @@ -36,7 +36,7 @@ def delete_synonym( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a synonym set + Delete a synonym set. ``_ @@ -77,7 +77,7 @@ def delete_synonym_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a synonym rule in a synonym set + Delete a synonym rule. Delete a synonym rule from a synonym set. ``_ @@ -127,7 +127,7 @@ def get_synonym( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a synonym set + Get a synonym set. ``_ @@ -174,7 +174,7 @@ def get_synonym_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a synonym rule from a synonym set + Get a synonym rule. Get a synonym rule from a synonym set. ``_ @@ -223,7 +223,7 @@ def get_synonyms_sets( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a summary of all defined synonym sets + Get all synonym sets. Get a summary of all defined synonym sets. 
``_ @@ -272,7 +272,9 @@ def put_synonym( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a synonym set. + Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 + synonym rules per set. If you need to manage more synonym rules, you can create + multiple synonym sets. ``_ @@ -325,7 +327,8 @@ def put_synonym_rule( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a synonym rule in a synonym set + Create or update a synonym rule. Create or update a synonym rule in a synonym + set. ``_