diff --git a/elasticsearch_dsl/aggs.py b/elasticsearch_dsl/aggs.py
index df677ba2..2f79c308 100644
--- a/elasticsearch_dsl/aggs.py
+++ b/elasticsearch_dsl/aggs.py
@@ -24,17 +24,27 @@
Dict,
Generic,
Iterable,
+ Literal,
+ Mapping,
MutableMapping,
Optional,
+ Sequence,
Union,
cast,
)
+from elastic_transport.client_utils import DEFAULT
+
+from .query import Query
from .response.aggs import AggResponse, BucketData, FieldBucketData, TopHitsData
from .utils import _R, AttrDict, DslBase
if TYPE_CHECKING:
- from .query import Query
+ from elastic_transport.client_utils import DefaultType
+
+ from elasticsearch_dsl import types
+
+ from .document_base import InstrumentedField
from .search_base import SearchBase
@@ -202,334 +212,3520 @@ def to_dict(self) -> Dict[str, Any]:
return d
-class Filter(Bucket[_R]):
- name = "filter"
- _param_defs = {
- "filter": {"type": "query"},
- "aggs": {"type": "agg", "hash": True},
- }
-
- def __init__(self, filter: Optional[Union[str, "Query"]] = None, **params: Any):
- if filter is not None:
- params["filter"] = filter
- super().__init__(**params)
-
- def to_dict(self) -> Dict[str, Any]:
- d = super().to_dict()
- if isinstance(d[self.name], dict):
- n = cast(AttrDict[Any], d[self.name])
- n.update(n.pop("filter", {}))
- return d
-
-
class Pipeline(Agg[_R]):
pass
-# bucket aggregations
-class Filters(Bucket[_R]):
- name = "filters"
+class AdjacencyMatrix(Bucket[_R]):
+ """
+ A bucket aggregation returning a form of adjacency matrix. The request
+ provides a collection of named filter expressions, similar to the
+ `filters` aggregation. Each bucket in the response represents a non-
+ empty cell in the matrix of intersecting filters.
+
+ :arg filters: Filters used to create buckets. At least one filter is
+ required.
+ :arg separator: Separator used to concatenate filter names. Defaults
+ to &.
+ """
+
+ name = "adjacency_matrix"
_param_defs = {
"filters": {"type": "query", "hash": True},
- "aggs": {"type": "agg", "hash": True},
}
+ def __init__(
+ self,
+ *,
+ filters: Union[Mapping[str, Query], "DefaultType"] = DEFAULT,
+ separator: Union[str, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(filters=filters, separator=separator, **kwargs)
+
+
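A minimal usage sketch for the new typed class (index, bucket, and field names here are illustrative, not part of this patch):

    from elasticsearch_dsl import Q, Search

    s = Search(index="interactions")
    s.aggs.bucket(
        "pairs",
        "adjacency_matrix",
        filters={
            "grpA": Q("term", accounts="alice"),
            "grpB": Q("term", accounts="bob"),
        },
    )
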
+class AutoDateHistogram(Bucket[_R]):
+ """
+ A multi-bucket aggregation similar to the date histogram, except
+ instead of providing an interval to use as the width of each bucket, a
+ target number of buckets is provided.
+
+ :arg buckets: The target number of buckets. Defaults to `10` if
+ omitted.
+ :arg field: The field on which to run the aggregation.
+ :arg format: The date format used to format `key_as_string` in the
+ response. If no `format` is specified, the first date format
+ specified in the field mapping is used.
+ :arg minimum_interval: The minimum rounding interval. This can make
+ the collection process more efficient, as the aggregation will not
+ attempt to round at any interval lower than `minimum_interval`.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+    :arg offset: Time zone specified as an ISO 8601 UTC offset.
+ :arg params:
+ :arg script:
+ :arg time_zone: Time zone ID.
+ """
-class Children(Bucket[_R]):
- name = "children"
-
-
-class Parent(Bucket[_R]):
- name = "parent"
-
+ name = "auto_date_histogram"
-class DateHistogram(Bucket[_R]):
- name = "date_histogram"
+ def __init__(
+ self,
+ *,
+ buckets: Union[int, "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ minimum_interval: Union[
+ Literal["second", "minute", "hour", "day", "month", "year"], "DefaultType"
+ ] = DEFAULT,
+ missing: Any = DEFAULT,
+ offset: Union[str, "DefaultType"] = DEFAULT,
+ params: Union[Mapping[str, Any], "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ time_zone: Union[str, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ buckets=buckets,
+ field=field,
+ format=format,
+ minimum_interval=minimum_interval,
+ missing=missing,
+ offset=offset,
+ params=params,
+ script=script,
+ time_zone=time_zone,
+ **kwargs,
+ )
def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
return FieldBucketData(self, search, data)
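A sketch of requesting a target bucket count rather than an interval (index and field names assumed):

    from elasticsearch_dsl import Search

    s = Search(index="logs")
    s.aggs.bucket("timeline", "auto_date_histogram", field="@timestamp", buckets=20)
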
-class AutoDateHistogram(DateHistogram[_R]):
- name = "auto_date_histogram"
-
+class Avg(Agg[_R]):
+ """
+ A single-value metrics aggregation that computes the average of
+ numeric values that are extracted from the aggregated documents.
-class AdjacencyMatrix(Bucket[_R]):
- name = "adjacency_matrix"
+ :arg format:
+ :arg field: The field on which to run the aggregation.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg script:
+ """
+ name = "avg"
-class DateRange(Bucket[_R]):
- name = "date_range"
+ def __init__(
+ self,
+ *,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ format=format, field=field, missing=missing, script=script, **kwargs
+ )
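For example (index and field names illustrative):

    from elasticsearch_dsl import Search

    s = Search(index="sales")
    s.aggs.metric("avg_price", "avg", field="price")
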
-class GeoDistance(Bucket[_R]):
- name = "geo_distance"
+class AvgBucket(Pipeline[_R]):
+ """
+ A sibling pipeline aggregation which calculates the mean value of a
+ specified metric in a sibling aggregation. The specified metric must
+ be numeric and the sibling aggregation must be a multi-bucket
+ aggregation.
+
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+ name = "avg_bucket"
-class GeohashGrid(Bucket[_R]):
- name = "geohash_grid"
+ def __init__(
+ self,
+ *,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
+ )
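A sketch of the sibling-pipeline wiring, assuming a `sales_per_month` date histogram with a `sales` sum metric (all names illustrative):

    from elasticsearch_dsl import Search

    s = Search(index="sales")
    s.aggs.bucket(
        "sales_per_month", "date_histogram", field="date", calendar_interval="month"
    )
    s.aggs["sales_per_month"].metric("sales", "sum", field="price")
    s.aggs.pipeline("avg_monthly_sales", "avg_bucket", buckets_path="sales_per_month>sales")
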
-class GeohexGrid(Bucket[_R]):
- name = "geohex_grid"
+class Boxplot(Agg[_R]):
+ """
+ A metrics aggregation that computes a box plot of numeric values
+ extracted from the aggregated documents.
+
+ :arg compression: Limits the maximum number of nodes used by the
+ underlying TDigest algorithm to `20 * compression`, enabling
+ control of memory usage and approximation error.
+ :arg field: The field on which to run the aggregation.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg script:
+ """
+ name = "boxplot"
-class GeotileGrid(Bucket[_R]):
- name = "geotile_grid"
+ def __init__(
+ self,
+ *,
+ compression: Union[float, "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ compression=compression,
+ field=field,
+ missing=missing,
+ script=script,
+ **kwargs,
+ )
-class GeoCentroid(Bucket[_R]):
- name = "geo_centroid"
+class BucketScript(Pipeline[_R]):
+ """
+ A parent pipeline aggregation which runs a script which can perform
+ per bucket computations on metrics in the parent multi-bucket
+ aggregation.
+
+ :arg script: The script to run for this aggregation.
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+ name = "bucket_script"
-class Global(Bucket[_R]):
- name = "global"
+ def __init__(
+ self,
+ *,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ script=script,
+ format=format,
+ gap_policy=gap_policy,
+ buckets_path=buckets_path,
+ **kwargs,
+ )
-class Histogram(Bucket[_R]):
- name = "histogram"
+class BucketSelector(Pipeline[_R]):
+ """
+ A parent pipeline aggregation which runs a script to determine whether
+ the current bucket will be retained in the parent multi-bucket
+ aggregation.
+
+ :arg script: The script to run for this aggregation.
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
- def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
- return FieldBucketData(self, search, data)
+ name = "bucket_selector"
+ def __init__(
+ self,
+ *,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ script=script,
+ format=format,
+ gap_policy=gap_policy,
+ buckets_path=buckets_path,
+ **kwargs,
+ )
+
+
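A sketch of dropping buckets with a per-bucket script (names illustrative; a plain string is also accepted for `script`):

    from elasticsearch_dsl import Search

    s = Search(index="sales")
    s.aggs.bucket("per_month", "date_histogram", field="date", calendar_interval="month")
    s.aggs["per_month"].metric("total", "sum", field="price")
    s.aggs["per_month"].pipeline(
        "big_months_only",
        "bucket_selector",
        buckets_path={"total": "total"},
        script="params.total > 200",
    )
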
+class BucketSort(Bucket[_R]):
+ """
+ A parent pipeline aggregation which sorts the buckets of its parent
+ multi-bucket aggregation.
+
+ :arg from: Buckets in positions prior to `from` will be truncated.
+ :arg gap_policy: The policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg size: The number of buckets to return. Defaults to all buckets of
+ the parent aggregation.
+ :arg sort: The list of fields to sort on.
+ """
-class IPRange(Bucket[_R]):
- name = "ip_range"
+ name = "bucket_sort"
+ def __init__(
+ self,
+ *,
+ from_: Union[int, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ size: Union[int, "DefaultType"] = DEFAULT,
+ sort: Union[
+ Union[Union[str, "InstrumentedField"], "types.SortOptions"],
+ Sequence[Union[Union[str, "InstrumentedField"], "types.SortOptions"]],
+ Dict[str, Any],
+ "DefaultType",
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ from_=from_, gap_policy=gap_policy, size=size, sort=sort, **kwargs
+ )
+
+
+class BucketCountKsTest(Pipeline[_R]):
+ """
+ A sibling pipeline aggregation which runs a two sample
+ Kolmogorov–Smirnov test ("K-S test") against a provided distribution
+    and the distribution implied by the document counts in the configured
+ sibling aggregation.
+
+ :arg alternative: A list of string values indicating which K-S test
+ alternative to calculate. The valid values are: "greater", "less",
+ "two_sided". This parameter is key for determining the K-S
+ statistic used when calculating the K-S test. Default value is all
+ possible alternative hypotheses.
+ :arg fractions: A list of doubles indicating the distribution of the
+ samples with which to compare to the `buckets_path` results. In
+ typical usage this is the overall proportion of documents in each
+ bucket, which is compared with the actual document proportions in
+ each bucket from the sibling aggregation counts. The default is to
+ assume that overall documents are uniformly distributed on these
+ buckets, which they would be if one used equal percentiles of a
+ metric to define the bucket end points.
+ :arg sampling_method: Indicates the sampling methodology when
+ calculating the K-S test. Note, this is sampling of the returned
+ values. This determines the cumulative distribution function (CDF)
+ points used comparing the two samples. Default is `upper_tail`,
+ which emphasizes the upper end of the CDF points. Valid options
+ are: `upper_tail`, `uniform`, and `lower_tail`.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+
+ name = "bucket_count_ks_test"
+
+ def __init__(
+ self,
+ *,
+ alternative: Union[Sequence[str], "DefaultType"] = DEFAULT,
+ fractions: Union[Sequence[float], "DefaultType"] = DEFAULT,
+ sampling_method: Union[str, "DefaultType"] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ alternative=alternative,
+ fractions=fractions,
+ sampling_method=sampling_method,
+ buckets_path=buckets_path,
+ **kwargs,
+ )
+
+
+class BucketCorrelation(Pipeline[_R]):
+ """
+ A sibling pipeline aggregation which runs a correlation function on
+ the configured sibling multi-bucket aggregation.
+
+ :arg function: (required) The correlation function to execute.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+
+ name = "bucket_correlation"
+
+ def __init__(
+ self,
+ *,
+ function: Union[
+ "types.BucketCorrelationFunction", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(function=function, buckets_path=buckets_path, **kwargs)
-class IPPrefix(Bucket[_R]):
- name = "ip_prefix"
+class Cardinality(Agg[_R]):
+ """
+ A single-value metrics aggregation that calculates an approximate
+ count of distinct values.
+
+    :arg precision_threshold: A unique count below which counts are
+        expected to be close to accurate. This allows trading memory for
+        accuracy. Defaults to `3000` if omitted.
+ :arg rehash:
+    :arg execution_hint: Mechanism by which the cardinality aggregation
+        is run.
+ :arg field: The field on which to run the aggregation.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg script:
+ """
-class Missing(Bucket[_R]):
- name = "missing"
+ name = "cardinality"
+ def __init__(
+ self,
+ *,
+ precision_threshold: Union[int, "DefaultType"] = DEFAULT,
+ rehash: Union[bool, "DefaultType"] = DEFAULT,
+ execution_hint: Union[
+ Literal[
+ "global_ordinals",
+ "segment_ordinals",
+ "direct",
+ "save_memory_heuristic",
+ "save_time_heuristic",
+ ],
+ "DefaultType",
+ ] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ precision_threshold=precision_threshold,
+ rehash=rehash,
+ execution_hint=execution_hint,
+ field=field,
+ missing=missing,
+ script=script,
+ **kwargs,
+ )
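For example, an approximate distinct count with a tuned accuracy threshold (names illustrative):

    from elasticsearch_dsl import Search

    s = Search(index="sales")
    s.aggs.metric("type_count", "cardinality", field="type", precision_threshold=100)
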
-class Nested(Bucket[_R]):
- name = "nested"
+class CategorizeText(Bucket[_R]):
+ """
+ A multi-bucket aggregation that groups semi-structured text into
+ buckets.
+
+ :arg field: (required) The semi-structured text field to categorize.
+ :arg max_unique_tokens: The maximum number of unique tokens at any
+ position up to max_matched_tokens. Must be larger than 1. Smaller
+ values use less memory and create fewer categories. Larger values
+ will use more memory and create narrower categories. Max allowed
+ value is 100. Defaults to `50` if omitted.
+ :arg max_matched_tokens: The maximum number of token positions to
+ match on before attempting to merge categories. Larger values will
+ use more memory and create narrower categories. Max allowed value
+ is 100. Defaults to `5` if omitted.
+ :arg similarity_threshold: The minimum percentage of tokens that must
+ match for text to be added to the category bucket. Must be between
+ 1 and 100. The larger the value the narrower the categories.
+ Larger values will increase memory usage and create narrower
+ categories. Defaults to `50` if omitted.
+ :arg categorization_filters: This property expects an array of regular
+ expressions. The expressions are used to filter out matching
+ sequences from the categorization field values. You can use this
+ functionality to fine tune the categorization by excluding
+ sequences from consideration when categories are defined. For
+ example, you can exclude SQL statements that appear in your log
+ files. This property cannot be used at the same time as
+ categorization_analyzer. If you only want to define simple regular
+ expression filters that are applied prior to tokenization, setting
+ this property is the easiest method. If you also want to customize
+ the tokenizer or post-tokenization filtering, use the
+ categorization_analyzer property instead and include the filters
+ as pattern_replace character filters.
+ :arg categorization_analyzer: The categorization analyzer specifies
+ how the text is analyzed and tokenized before being categorized.
+ The syntax is very similar to that used to define the analyzer in
+        the [Analyze endpoint](https://www.elastic.co/guide/en/elasticsearch/reference/8.0/indices-analyze.html).
+        This property cannot be
+ used at the same time as categorization_filters.
+ :arg shard_size: The number of categorization buckets to return from
+ each shard before merging all the results.
+ :arg size: The number of buckets to return. Defaults to `10` if
+ omitted.
+ :arg min_doc_count: The minimum number of documents in a bucket to be
+ returned to the results.
+ :arg shard_min_doc_count: The minimum number of documents in a bucket
+ to be returned from the shard before merging.
+ """
-class Range(Bucket[_R]):
- name = "range"
+ name = "categorize_text"
+ def __init__(
+ self,
+ *,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ max_unique_tokens: Union[int, "DefaultType"] = DEFAULT,
+ max_matched_tokens: Union[int, "DefaultType"] = DEFAULT,
+ similarity_threshold: Union[int, "DefaultType"] = DEFAULT,
+ categorization_filters: Union[Sequence[str], "DefaultType"] = DEFAULT,
+ categorization_analyzer: Union[
+ str, "types.CustomCategorizeTextAnalyzer", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ shard_size: Union[int, "DefaultType"] = DEFAULT,
+ size: Union[int, "DefaultType"] = DEFAULT,
+ min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+ shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ field=field,
+ max_unique_tokens=max_unique_tokens,
+ max_matched_tokens=max_matched_tokens,
+ similarity_threshold=similarity_threshold,
+ categorization_filters=categorization_filters,
+ categorization_analyzer=categorization_analyzer,
+ shard_size=shard_size,
+ size=size,
+ min_doc_count=min_doc_count,
+ shard_min_doc_count=shard_min_doc_count,
+ **kwargs,
+ )
-class RareTerms(Bucket[_R]):
- name = "rare_terms"
- def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
- return FieldBucketData(self, search, data)
+class Children(Bucket[_R]):
+ """
+ A single bucket aggregation that selects child documents that have the
+ specified type, as defined in a `join` field.
+ :arg type: The child type that should be selected.
+ """
-class ReverseNested(Bucket[_R]):
- name = "reverse_nested"
+ name = "children"
+ def __init__(self, type: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any):
+ super().__init__(type=type, **kwargs)
-class SignificantTerms(Bucket[_R]):
- name = "significant_terms"
+class Composite(Bucket[_R]):
+ """
+ A multi-bucket aggregation that creates composite buckets from
+ different sources. Unlike the other multi-bucket aggregations, you can
+ use the `composite` aggregation to paginate *all* buckets from a
+ multi-level aggregation efficiently.
+
+ :arg after: When paginating, use the `after_key` value returned in the
+ previous response to retrieve the next page.
+ :arg size: The number of composite buckets that should be returned.
+ Defaults to `10` if omitted.
+ :arg sources: The value sources used to build composite buckets. Keys
+ are returned in the order of the `sources` definition.
+ """
-class SignificantText(Bucket[_R]):
- name = "significant_text"
+ name = "composite"
+ def __init__(
+ self,
+ *,
+ after: Union[
+ Mapping[
+ Union[str, "InstrumentedField"], Union[int, float, str, bool, None, Any]
+ ],
+ "DefaultType",
+ ] = DEFAULT,
+ size: Union[int, "DefaultType"] = DEFAULT,
+ sources: Union[Sequence[Mapping[str, Agg[_R]]], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(after=after, size=size, sources=sources, **kwargs)
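A sketch of building composite buckets; `sources` takes a list of single-entry mappings of source name to aggregation (names illustrative):

    from elasticsearch_dsl import A, Search

    s = Search(index="sales")
    s.aggs.bucket(
        "my_buckets",
        "composite",
        size=100,
        sources=[{"product": A("terms", field="product")}],
    )
    # pass the previous response's `after_key` as `after=` to fetch the next page
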
-class Terms(Bucket[_R]):
- name = "terms"
- def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
- return FieldBucketData(self, search, data)
+class CumulativeCardinality(Pipeline[_R]):
+ """
+ A parent pipeline aggregation which calculates the cumulative
+ cardinality in a parent `histogram` or `date_histogram` aggregation.
+
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+ name = "cumulative_cardinality"
-class Sampler(Bucket[_R]):
- name = "sampler"
+ def __init__(
+ self,
+ *,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
+ )
-class DiversifiedSampler(Bucket[_R]):
- name = "diversified_sampler"
+class CumulativeSum(Pipeline[_R]):
+ """
+ A parent pipeline aggregation which calculates the cumulative sum of a
+ specified metric in a parent `histogram` or `date_histogram`
+ aggregation.
+
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+ name = "cumulative_sum"
-class RandomSampler(Bucket[_R]):
- name = "random_sampler"
+ def __init__(
+ self,
+ *,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
+ )
-class Composite(Bucket[_R]):
- name = "composite"
- _param_defs = {
- "sources": {"type": "agg", "hash": True, "multi": True},
- "aggs": {"type": "agg", "hash": True},
- }
+class DateHistogram(Bucket[_R]):
+ """
+ A multi-bucket values source based aggregation that can be applied on
+ date values or date range values extracted from the documents. It
+ dynamically builds fixed size (interval) buckets over the values.
+
+ :arg calendar_interval: Calendar-aware interval. Can be specified
+ using the unit name, such as `month`, or as a single unit
+ quantity, such as `1M`.
+ :arg extended_bounds: Enables extending the bounds of the histogram
+ beyond the data itself.
+ :arg hard_bounds: Limits the histogram to specified bounds.
+    :arg field: The date field whose values are used to build a histogram.
+    :arg fixed_interval: Fixed intervals: a fixed number of SI units that
+        never deviates, regardless of where it falls on the calendar.
+ :arg format: The date format used to format `key_as_string` in the
+ response. If no `format` is specified, the first date format
+ specified in the field mapping is used.
+ :arg interval:
+ :arg min_doc_count: Only returns buckets that have `min_doc_count`
+ number of documents. By default, all buckets between the first
+ bucket that matches documents and the last one are returned.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg offset: Changes the start value of each bucket by the specified
+        positive (`+`) or negative (`-`) offset duration.
+ :arg order: The sort order of the returned buckets.
+ :arg params:
+ :arg script:
+ :arg time_zone: Time zone used for bucketing and rounding. Defaults to
+ Coordinated Universal Time (UTC).
+ :arg keyed: Set to `true` to associate a unique string key with each
+ bucket and return the ranges as a hash rather than an array.
+ """
+ name = "date_histogram"
-class VariableWidthHistogram(Bucket[_R]):
- name = "variable_width_histogram"
+ def __init__(
+ self,
+ *,
+ calendar_interval: Union[
+ Literal[
+ "second", "minute", "hour", "day", "week", "month", "quarter", "year"
+ ],
+ "DefaultType",
+ ] = DEFAULT,
+ extended_bounds: Union[
+ "types.ExtendedBounds", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ hard_bounds: Union[
+ "types.ExtendedBounds", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ fixed_interval: Any = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ interval: Any = DEFAULT,
+ min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+ missing: Any = DEFAULT,
+ offset: Any = DEFAULT,
+ order: Union[
+ Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]],
+ Sequence[Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]]],
+ "DefaultType",
+ ] = DEFAULT,
+ params: Union[Mapping[str, Any], "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ time_zone: Union[str, "DefaultType"] = DEFAULT,
+ keyed: Union[bool, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ calendar_interval=calendar_interval,
+ extended_bounds=extended_bounds,
+ hard_bounds=hard_bounds,
+ field=field,
+ fixed_interval=fixed_interval,
+ format=format,
+ interval=interval,
+ min_doc_count=min_doc_count,
+ missing=missing,
+ offset=offset,
+ order=order,
+ params=params,
+ script=script,
+ time_zone=time_zone,
+ keyed=keyed,
+ **kwargs,
+ )
def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
return FieldBucketData(self, search, data)
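For example (index and field names illustrative):

    from elasticsearch_dsl import Search

    s = Search(index="sales")
    s.aggs.bucket(
        "sales_over_time",
        "date_histogram",
        field="date",
        calendar_interval="month",
        format="yyyy-MM-dd",
    )
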
-class MultiTerms(Bucket[_R]):
- name = "multi_terms"
-
+class DateRange(Bucket[_R]):
+ """
+ A multi-bucket value source based aggregation that enables the user to
+ define a set of date ranges - each representing a bucket.
+
+    :arg field: The date field whose values are used to build ranges.
+ :arg format: The date format used to format `from` and `to` in the
+ response.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg ranges: Array of date ranges.
+ :arg time_zone: Time zone used to convert dates from another time zone
+ to UTC.
+ :arg keyed: Set to `true` to associate a unique string key with each
+        bucket and return the ranges as a hash rather than an array.
+ """
-class CategorizeText(Bucket[_R]):
- name = "categorize_text"
+ name = "date_range"
+ def __init__(
+ self,
+ *,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ ranges: Union[
+ Sequence["types.DateRangeExpression"],
+ Sequence[Dict[str, Any]],
+ "DefaultType",
+ ] = DEFAULT,
+ time_zone: Union[str, "DefaultType"] = DEFAULT,
+ keyed: Union[bool, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ field=field,
+ format=format,
+ missing=missing,
+ ranges=ranges,
+ time_zone=time_zone,
+ keyed=keyed,
+ **kwargs,
+ )
-# metric aggregations
-class TopHits(Agg[_R]):
- name = "top_hits"
- def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
- return TopHitsData(self, search, data)
+class Derivative(Pipeline[_R]):
+ """
+ A parent pipeline aggregation which calculates the derivative of a
+ specified metric in a parent `histogram` or `date_histogram`
+ aggregation.
+
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+ name = "derivative"
-class Avg(Agg[_R]):
- name = "avg"
+ def __init__(
+ self,
+ *,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
+ )
-class WeightedAvg(Agg[_R]):
- name = "weighted_avg"
+class DiversifiedSampler(Bucket[_R]):
+ """
+ A filtering aggregation used to limit any sub aggregations' processing
+ to a sample of the top-scoring documents. Similar to the `sampler`
+ aggregation, but adds the ability to limit the number of matches that
+ share a common value.
+
+ :arg execution_hint: The type of value used for de-duplication.
+ Defaults to `global_ordinals` if omitted.
+ :arg max_docs_per_value: Limits how many documents are permitted per
+ choice of de-duplicating value. Defaults to `1` if omitted.
+ :arg script:
+ :arg shard_size: Limits how many top-scoring documents are collected
+ in the sample processed on each shard. Defaults to `100` if
+ omitted.
+ :arg field: The field used to provide values used for de-duplication.
+ """
+ name = "diversified_sampler"
-class Cardinality(Agg[_R]):
- name = "cardinality"
+ def __init__(
+ self,
+ *,
+ execution_hint: Union[
+ Literal["map", "global_ordinals", "bytes_hash"], "DefaultType"
+ ] = DEFAULT,
+ max_docs_per_value: Union[int, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ shard_size: Union[int, "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ execution_hint=execution_hint,
+ max_docs_per_value=max_docs_per_value,
+ script=script,
+ shard_size=shard_size,
+ field=field,
+ **kwargs,
+ )
class ExtendedStats(Agg[_R]):
+ """
+ A multi-value metrics aggregation that computes stats over numeric
+ values extracted from the aggregated documents.
+
+ :arg sigma: The number of standard deviations above/below the mean to
+ display.
+ :arg format:
+ :arg field: The field on which to run the aggregation.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg script:
+ """
+
name = "extended_stats"
+ def __init__(
+ self,
+ *,
+ sigma: Union[float, "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ sigma=sigma,
+ format=format,
+ field=field,
+ missing=missing,
+ script=script,
+ **kwargs,
+ )
-class Boxplot(Agg[_R]):
- name = "boxplot"
+class ExtendedStatsBucket(Pipeline[_R]):
+ """
+ A sibling pipeline aggregation which calculates a variety of stats
+    across all buckets of a specified metric in a sibling aggregation.
+
+ :arg sigma: The number of standard deviations above/below the mean to
+ display.
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
-class GeoBounds(Agg[_R]):
- name = "geo_bounds"
+ name = "extended_stats_bucket"
+ def __init__(
+ self,
+ *,
+ sigma: Union[float, "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ sigma=sigma,
+ format=format,
+ gap_policy=gap_policy,
+ buckets_path=buckets_path,
+ **kwargs,
+ )
+
+
+class FrequentItemSets(Agg[_R]):
+ """
+ A bucket aggregation which finds frequent item sets, a form of
+ association rules mining that identifies items that often occur
+ together.
+
+ :arg fields: (required) Fields to analyze.
+ :arg minimum_set_size: The minimum size of one item set. Defaults to
+ `1` if omitted.
+ :arg minimum_support: The minimum support of one item set. Defaults to
+ `0.1` if omitted.
+ :arg size: The number of top item sets to return. Defaults to `10` if
+ omitted.
+ :arg filter: Query that filters documents from analysis.
+ """
+
+ name = "frequent_item_sets"
+ _param_defs = {
+ "filter": {"type": "query"},
+ }
-class GeoLine(Agg[_R]):
- name = "geo_line"
+ def __init__(
+ self,
+ *,
+ fields: Union[
+ Sequence["types.FrequentItemSetsField"],
+ Sequence[Dict[str, Any]],
+ "DefaultType",
+ ] = DEFAULT,
+ minimum_set_size: Union[int, "DefaultType"] = DEFAULT,
+ minimum_support: Union[float, "DefaultType"] = DEFAULT,
+ size: Union[int, "DefaultType"] = DEFAULT,
+ filter: Union[Query, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ fields=fields,
+ minimum_set_size=minimum_set_size,
+ minimum_support=minimum_support,
+ size=size,
+ filter=filter,
+ **kwargs,
+ )
-class Max(Agg[_R]):
- name = "max"
+class Filter(Bucket[_R]):
+ """
+ A single bucket aggregation that narrows the set of documents to those
+ that match a query.
+
+    :arg filter: The query used to select the documents placed in the
+        bucket.
+ """
-class MatrixStats(Agg[_R]):
- name = "matrix_stats"
+ name = "filter"
+ _param_defs = {
+ "filter": {"type": "query"},
+ "aggs": {"type": "agg", "hash": True},
+ }
+ def __init__(self, filter: Union[Query, "DefaultType"] = DEFAULT, **kwargs: Any):
+ super().__init__(filter=filter, **kwargs)
-class MedianAbsoluteDeviation(Agg[_R]):
- name = "median_absolute_deviation"
+ def to_dict(self) -> Dict[str, Any]:
+ d = super().to_dict()
+ if isinstance(d[self.name], dict):
+ n = cast(AttrDict[Any], d[self.name])
+ n.update(n.pop("filter", {}))
+ return d
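A sketch showing why `to_dict` flattens the `filter` key: the query is inlined directly in the request body (names illustrative):

    from elasticsearch_dsl import Q, Search

    s = Search(index="articles")
    s.aggs.bucket("published", "filter", filter=Q("term", status="published"))
    s.aggs["published"].metric("avg_views", "avg", field="views")
    # serializes as {"published": {"filter": {"term": {"status": "published"}},
    #                "aggs": {"avg_views": {"avg": {"field": "views"}}}}}
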
-class Min(Agg[_R]):
- name = "min"
+class Filters(Bucket[_R]):
+ """
+ A multi-bucket aggregation where each bucket contains the documents
+ that match a query.
+
+ :arg filters: Collection of queries from which to build buckets.
+ :arg other_bucket: Set to `true` to add a bucket to the response which
+ will contain all documents that do not match any of the given
+ filters.
+ :arg other_bucket_key: The key with which the other bucket is
+ returned. Defaults to `_other_` if omitted.
+ :arg keyed: By default, the named filters aggregation returns the
+ buckets as an object. Set to `false` to return the buckets as an
+ array of objects. Defaults to `True` if omitted.
+ """
+ name = "filters"
+ _param_defs = {
+ "filters": {"type": "query", "hash": True},
+ "aggs": {"type": "agg", "hash": True},
+ }
-class Percentiles(Agg[_R]):
- name = "percentiles"
+ def __init__(
+ self,
+ *,
+ filters: Union[Dict[str, Query], "DefaultType"] = DEFAULT,
+ other_bucket: Union[bool, "DefaultType"] = DEFAULT,
+ other_bucket_key: Union[str, "DefaultType"] = DEFAULT,
+ keyed: Union[bool, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ filters=filters,
+ other_bucket=other_bucket,
+ other_bucket_key=other_bucket_key,
+ keyed=keyed,
+ **kwargs,
+ )
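For example, named buckets plus an automatic `_other_` bucket (names illustrative):

    from elasticsearch_dsl import Q, Search

    s = Search(index="logs")
    s.aggs.bucket(
        "messages",
        "filters",
        filters={
            "errors": Q("match", body="error"),
            "warnings": Q("match", body="warning"),
        },
        other_bucket=True,
    )
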
-class PercentileRanks(Agg[_R]):
- name = "percentile_ranks"
+class GeoBounds(Agg[_R]):
+ """
+ A metric aggregation that computes the geographic bounding box
+ containing all values for a Geopoint or Geoshape field.
+
+ :arg wrap_longitude: Specifies whether the bounding box should be
+ allowed to overlap the international date line. Defaults to `True`
+ if omitted.
+ :arg field: The field on which to run the aggregation.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg script:
+ """
+ name = "geo_bounds"
-class ScriptedMetric(Agg[_R]):
- name = "scripted_metric"
+ def __init__(
+ self,
+ *,
+ wrap_longitude: Union[bool, "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ wrap_longitude=wrap_longitude,
+ field=field,
+ missing=missing,
+ script=script,
+ **kwargs,
+ )
+
+
+class GeoCentroid(Agg[_R]):
+ """
+ A metric aggregation that computes the weighted centroid from all
+ coordinate values for geo fields.
+
+ :arg count:
+ :arg location:
+ :arg field: The field on which to run the aggregation.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg script:
+ """
+ name = "geo_centroid"
-class Stats(Agg[_R]):
- name = "stats"
+ def __init__(
+ self,
+ *,
+ count: Union[int, "DefaultType"] = DEFAULT,
+ location: Union[
+ "types.LatLonGeoLocation",
+ "types.GeoHashLocation",
+ Sequence[float],
+ str,
+ Dict[str, Any],
+ "DefaultType",
+ ] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ count=count,
+ location=location,
+ field=field,
+ missing=missing,
+ script=script,
+ **kwargs,
+ )
-class Sum(Agg[_R]):
- name = "sum"
+class GeoDistance(Bucket[_R]):
+ """
+ A multi-bucket aggregation that works on `geo_point` fields. Evaluates
+ the distance of each document value from an origin point and
+ determines the buckets it belongs to, based on ranges defined in the
+ request.
+
+ :arg distance_type: The distance calculation type. Defaults to `arc`
+ if omitted.
+ :arg field: A field of type `geo_point` used to evaluate the distance.
+ :arg origin: The origin used to evaluate the distance.
+ :arg ranges: An array of ranges used to bucket documents.
+ :arg unit: The distance unit. Defaults to `m` if omitted.
+ """
+ name = "geo_distance"
-class TopMetrics(Agg[_R]):
- name = "top_metrics"
+ def __init__(
+ self,
+ *,
+ distance_type: Union[Literal["arc", "plane"], "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ origin: Union[
+ "types.LatLonGeoLocation",
+ "types.GeoHashLocation",
+ Sequence[float],
+ str,
+ Dict[str, Any],
+ "DefaultType",
+ ] = DEFAULT,
+ ranges: Union[
+ Sequence["types.AggregationRange"], Sequence[Dict[str, Any]], "DefaultType"
+ ] = DEFAULT,
+ unit: Union[
+ Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ distance_type=distance_type,
+ field=field,
+ origin=origin,
+ ranges=ranges,
+ unit=unit,
+ **kwargs,
+ )
-class TTest(Agg[_R]):
- name = "t_test"
+class GeohashGrid(Bucket[_R]):
+ """
+ A multi-bucket aggregation that groups `geo_point` and `geo_shape`
+ values into buckets that represent a grid. Each cell is labeled using
+ a geohash which is of user-definable precision.
+
+ :arg bounds: The bounding box to filter the points in each bucket.
+ :arg field: Field containing indexed `geo_point` or `geo_shape`
+ values. If the field contains an array, `geohash_grid` aggregates
+ all array values.
+ :arg precision: The string length of the geohashes used to define
+ cells/buckets in the results. Defaults to `5` if omitted.
+ :arg shard_size: Allows for more accurate counting of the top cells
+        returned in the final result of the aggregation. Defaults to
+ returning `max(10,(size x number-of-shards))` buckets from each
+ shard.
+ :arg size: The maximum number of geohash buckets to return. Defaults
+ to `10000` if omitted.
+ """
+ name = "geohash_grid"
-class ValueCount(Agg[_R]):
- name = "value_count"
+ def __init__(
+ self,
+ *,
+ bounds: Union[
+ "types.CoordsGeoBounds",
+ "types.TopLeftBottomRightGeoBounds",
+ "types.TopRightBottomLeftGeoBounds",
+ "types.WktGeoBounds",
+ Dict[str, Any],
+ "DefaultType",
+ ] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ precision: Union[float, str, "DefaultType"] = DEFAULT,
+ shard_size: Union[int, "DefaultType"] = DEFAULT,
+ size: Union[int, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ bounds=bounds,
+ field=field,
+ precision=precision,
+ shard_size=shard_size,
+ size=size,
+ **kwargs,
+ )
-# pipeline aggregations
-class AvgBucket(Pipeline[_R]):
- name = "avg_bucket"
+class GeoLine(Agg[_R]):
+ """
+ Aggregates all `geo_point` values within a bucket into a `LineString`
+ ordered by the chosen sort field.
+
+ :arg point: (required) The name of the geo_point field.
+ :arg sort: (required) The name of the numeric field to use as the sort
+ key for ordering the points. When the `geo_line` aggregation is
+ nested inside a `time_series` aggregation, this field defaults to
+        `@timestamp`, and any other value will result in an error.
+ :arg include_sort: When `true`, returns an additional array of the
+ sort values in the feature properties.
+ :arg sort_order: The order in which the line is sorted (ascending or
+ descending). Defaults to `asc` if omitted.
+ :arg size: The maximum length of the line represented in the
+ aggregation. Valid sizes are between 1 and 10000. Defaults to
+ `10000` if omitted.
+ """
+ name = "geo_line"
-class BucketScript(Pipeline[_R]):
- name = "bucket_script"
+ def __init__(
+ self,
+ *,
+ point: Union["types.GeoLinePoint", Dict[str, Any], "DefaultType"] = DEFAULT,
+ sort: Union["types.GeoLineSort", Dict[str, Any], "DefaultType"] = DEFAULT,
+ include_sort: Union[bool, "DefaultType"] = DEFAULT,
+ sort_order: Union[Literal["asc", "desc"], "DefaultType"] = DEFAULT,
+ size: Union[int, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ point=point,
+ sort=sort,
+ include_sort=include_sort,
+ sort_order=sort_order,
+ size=size,
+ **kwargs,
+ )
-class BucketSelector(Pipeline[_R]):
- name = "bucket_selector"
+class GeotileGrid(Bucket[_R]):
+ """
+ A multi-bucket aggregation that groups `geo_point` and `geo_shape`
+ values into buckets that represent a grid. Each cell corresponds to a
+ map tile as used by many online map sites.
+
+ :arg field: Field containing indexed `geo_point` or `geo_shape`
+ values. If the field contains an array, `geotile_grid` aggregates
+ all array values.
+ :arg precision: Integer zoom of the key used to define cells/buckets
+ in the results. Values outside of the range [0,29] will be
+ rejected. Defaults to `7` if omitted.
+ :arg shard_size: Allows for more accurate counting of the top cells
+        returned in the final result of the aggregation. Defaults to
+ returning `max(10,(size x number-of-shards))` buckets from each
+ shard.
+ :arg size: The maximum number of buckets to return. Defaults to
+ `10000` if omitted.
+ :arg bounds: A bounding box to filter the geo-points or geo-shapes in
+ each bucket.
+ """
+ name = "geotile_grid"
-class CumulativeSum(Pipeline[_R]):
- name = "cumulative_sum"
+ def __init__(
+ self,
+ *,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ precision: Union[float, "DefaultType"] = DEFAULT,
+ shard_size: Union[int, "DefaultType"] = DEFAULT,
+ size: Union[int, "DefaultType"] = DEFAULT,
+ bounds: Union[
+ "types.CoordsGeoBounds",
+ "types.TopLeftBottomRightGeoBounds",
+ "types.TopRightBottomLeftGeoBounds",
+ "types.WktGeoBounds",
+ Dict[str, Any],
+ "DefaultType",
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ field=field,
+ precision=precision,
+ shard_size=shard_size,
+ size=size,
+ bounds=bounds,
+ **kwargs,
+ )
-class CumulativeCardinality(Pipeline[_R]):
- name = "cumulative_cardinality"
+class GeohexGrid(Bucket[_R]):
+ """
+ A multi-bucket aggregation that groups `geo_point` and `geo_shape`
+ values into buckets that represent a grid. Each cell corresponds to a
+ H3 cell index and is labeled using the H3Index representation.
+
+ :arg field: (required) Field containing indexed `geo_point` or
+ `geo_shape` values. If the field contains an array, `geohex_grid`
+ aggregates all array values.
+    :arg precision: Integer zoom of the key used to define cells or
+        buckets in the results. Value should be between 0 and 15. Defaults
+        to `6` if omitted.
+ :arg bounds: Bounding box used to filter the geo-points in each
+ bucket.
+ :arg size: Maximum number of buckets to return. Defaults to `10000` if
+ omitted.
+ :arg shard_size: Number of buckets returned from each shard.
+ """
+ name = "geohex_grid"
-class Derivative(Pipeline[_R]):
- name = "derivative"
+ def __init__(
+ self,
+ *,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ precision: Union[int, "DefaultType"] = DEFAULT,
+ bounds: Union[
+ "types.CoordsGeoBounds",
+ "types.TopLeftBottomRightGeoBounds",
+ "types.TopRightBottomLeftGeoBounds",
+ "types.WktGeoBounds",
+ Dict[str, Any],
+ "DefaultType",
+ ] = DEFAULT,
+ size: Union[int, "DefaultType"] = DEFAULT,
+ shard_size: Union[int, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ field=field,
+ precision=precision,
+ bounds=bounds,
+ size=size,
+ shard_size=shard_size,
+ **kwargs,
+ )
-class ExtendedStatsBucket(Pipeline[_R]):
- name = "extended_stats_bucket"
+class Global(Bucket[_R]):
+ """
+ Defines a single bucket of all the documents within the search
+ execution context. This context is defined by the indices and the
+ document types you’re searching on, but is not influenced by the
+ search query itself.
+ """
+ name = "global"
-class Inference(Pipeline[_R]):
- name = "inference"
+ def __init__(self, **kwargs: Any):
+ super().__init__(**kwargs)
-class MaxBucket(Pipeline[_R]):
+class Histogram(Bucket[_R]):
+ """
+ A multi-bucket values source based aggregation that can be applied on
+ numeric values or numeric range values extracted from the documents.
+ It dynamically builds fixed size (interval) buckets over the values.
+
+ :arg extended_bounds: Enables extending the bounds of the histogram
+ beyond the data itself.
+ :arg hard_bounds: Limits the range of buckets in the histogram. It is
+ particularly useful in the case of open data ranges that can
+ result in a very large number of buckets.
+ :arg field: The name of the field to aggregate on.
+ :arg interval: The interval for the buckets. Must be a positive
+ decimal.
+ :arg min_doc_count: Only returns buckets that have `min_doc_count`
+ number of documents. By default, the response will fill gaps in
+ the histogram with empty buckets.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg offset: By default, the bucket keys start with 0 and then
+ continue in even spaced steps of `interval`. The bucket boundaries
+ can be shifted by using the `offset` option.
+ :arg order: The sort order of the returned buckets. By default, the
+ returned buckets are sorted by their key ascending.
+ :arg script:
+ :arg format:
+ :arg keyed: If `true`, returns buckets as a hash instead of an array,
+ keyed by the bucket keys.
+ """
+
+ name = "histogram"
+
+ def __init__(
+ self,
+ *,
+ extended_bounds: Union[
+ "types.ExtendedBounds", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ hard_bounds: Union[
+ "types.ExtendedBounds", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ interval: Union[float, "DefaultType"] = DEFAULT,
+ min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+ missing: Union[float, "DefaultType"] = DEFAULT,
+ offset: Union[float, "DefaultType"] = DEFAULT,
+ order: Union[
+ Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]],
+ Sequence[Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]]],
+ "DefaultType",
+ ] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ keyed: Union[bool, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ extended_bounds=extended_bounds,
+ hard_bounds=hard_bounds,
+ field=field,
+ interval=interval,
+ min_doc_count=min_doc_count,
+ missing=missing,
+ offset=offset,
+ order=order,
+ script=script,
+ format=format,
+ keyed=keyed,
+ **kwargs,
+ )
+
+ def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
+ return FieldBucketData(self, search, data)
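For example (index and field names illustrative):

    from elasticsearch_dsl import Search

    s = Search(index="sales")
    s.aggs.bucket("prices", "histogram", field="price", interval=50, min_doc_count=1)
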
+
+
+class IPRange(Bucket[_R]):
+ """
+ A multi-bucket value source based aggregation that enables the user to
+ define a set of IP ranges - each representing a bucket.
+
+    :arg field: The IP address field whose values are used to build ranges.
+ :arg ranges: Array of IP ranges.
+ """
+
+ name = "ip_range"
+
+ def __init__(
+ self,
+ *,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ ranges: Union[
+ Sequence["types.IpRangeAggregationRange"],
+ Sequence[Dict[str, Any]],
+ "DefaultType",
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(field=field, ranges=ranges, **kwargs)
+
+
+class IPPrefix(Bucket[_R]):
+ """
+ A bucket aggregation that groups documents based on the network or
+ sub-network of an IP address.
+
+    :arg field: (required) The IP address field to aggregate on. The
+ field mapping type must be `ip`.
+ :arg prefix_length: (required) Length of the network prefix. For IPv4
+ addresses the accepted range is [0, 32]. For IPv6 addresses the
+ accepted range is [0, 128].
+ :arg is_ipv6: Defines whether the prefix applies to IPv6 addresses.
+ :arg append_prefix_length: Defines whether the prefix length is
+ appended to IP address keys in the response.
+ :arg keyed: Defines whether buckets are returned as a hash rather than
+ an array in the response.
+ :arg min_doc_count: Minimum number of documents in a bucket for it to
+ be included in the response. Defaults to `1` if omitted.
+ """
+
+ name = "ip_prefix"
+
+ def __init__(
+ self,
+ *,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ prefix_length: Union[int, "DefaultType"] = DEFAULT,
+ is_ipv6: Union[bool, "DefaultType"] = DEFAULT,
+ append_prefix_length: Union[bool, "DefaultType"] = DEFAULT,
+ keyed: Union[bool, "DefaultType"] = DEFAULT,
+ min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ field=field,
+ prefix_length=prefix_length,
+ is_ipv6=is_ipv6,
+ append_prefix_length=append_prefix_length,
+ keyed=keyed,
+ min_doc_count=min_doc_count,
+ **kwargs,
+ )
+
+
+class Inference(Pipeline[_R]):
+ """
+ A parent pipeline aggregation which loads a pre-trained model and
+ performs inference on the collated result fields from the parent
+ bucket aggregation.
+
+ :arg model_id: (required) The ID or alias for the trained model.
+ :arg inference_config: Contains the inference type and its options.
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+
+ name = "inference"
+
+ def __init__(
+ self,
+ *,
+ model_id: Union[str, "DefaultType"] = DEFAULT,
+ inference_config: Union[
+ "types.InferenceConfigContainer", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ model_id=model_id,
+ inference_config=inference_config,
+ format=format,
+ gap_policy=gap_policy,
+ buckets_path=buckets_path,
+ **kwargs,
+ )
+
+
+class Line(Agg[_R]):
+ """
+ :arg point: (required) The name of the geo_point field.
+ :arg sort: (required) The name of the numeric field to use as the sort
+ key for ordering the points. When the `geo_line` aggregation is
+ nested inside a `time_series` aggregation, this field defaults to
+        `@timestamp`, and any other value will result in an error.
+ :arg include_sort: When `true`, returns an additional array of the
+ sort values in the feature properties.
+ :arg sort_order: The order in which the line is sorted (ascending or
+ descending). Defaults to `asc` if omitted.
+ :arg size: The maximum length of the line represented in the
+ aggregation. Valid sizes are between 1 and 10000. Defaults to
+ `10000` if omitted.
+ """
+
+ name = "line"
+
+ def __init__(
+ self,
+ *,
+ point: Union["types.GeoLinePoint", Dict[str, Any], "DefaultType"] = DEFAULT,
+ sort: Union["types.GeoLineSort", Dict[str, Any], "DefaultType"] = DEFAULT,
+ include_sort: Union[bool, "DefaultType"] = DEFAULT,
+ sort_order: Union[Literal["asc", "desc"], "DefaultType"] = DEFAULT,
+ size: Union[int, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ point=point,
+ sort=sort,
+ include_sort=include_sort,
+ sort_order=sort_order,
+ size=size,
+ **kwargs,
+ )
+
+
+class MatrixStats(Agg[_R]):
+ """
+ A numeric aggregation that computes the following statistics over a
+ set of document fields: `count`, `mean`, `variance`, `skewness`,
+    `kurtosis`, `covariance`, and `correlation`.
+
+ :arg mode: Array value the aggregation will use for array or multi-
+ valued fields. Defaults to `avg` if omitted.
+ :arg fields: An array of fields for computing the statistics.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ """
+
+ name = "matrix_stats"
+
+ def __init__(
+ self,
+ *,
+ mode: Union[
+ Literal["min", "max", "sum", "avg", "median"], "DefaultType"
+ ] = DEFAULT,
+ fields: Union[
+ Union[str, "InstrumentedField"],
+ Sequence[Union[str, "InstrumentedField"]],
+ "DefaultType",
+ ] = DEFAULT,
+ missing: Union[
+ Mapping[Union[str, "InstrumentedField"], float], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(mode=mode, fields=fields, missing=missing, **kwargs)
+
+
+class Max(Agg[_R]):
+ """
+ A single-value metrics aggregation that returns the maximum value
+ among the numeric values extracted from the aggregated documents.
+
+ :arg format:
+ :arg field: The field on which to run the aggregation.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg script:
+ """
+
+ name = "max"
+
+ def __init__(
+ self,
+ *,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ format=format, field=field, missing=missing, script=script, **kwargs
+ )
+
+
+class MaxBucket(Pipeline[_R]):
+ """
+ A sibling pipeline aggregation which identifies the bucket(s) with the
+ maximum value of a specified metric in a sibling aggregation and
+ outputs both the value and the key(s) of the bucket(s).
+
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+
name = "max_bucket"
+ def __init__(
+ self,
+ *,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
+ )
+
+
+class MedianAbsoluteDeviation(Agg[_R]):
+ """
+ A single-value aggregation that approximates the median absolute
+ deviation of its search results.
+
+ :arg compression: Limits the maximum number of nodes used by the
+ underlying TDigest algorithm to `20 * compression`, enabling
+ control of memory usage and approximation error. Defaults to
+ `1000` if omitted.
+ :arg format:
+ :arg field: The field on which to run the aggregation.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg script:
+ """
+
+ name = "median_absolute_deviation"
+
+ def __init__(
+ self,
+ *,
+ compression: Union[float, "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ compression=compression,
+ format=format,
+ field=field,
+ missing=missing,
+ script=script,
+ **kwargs,
+ )
+
+
+class Min(Agg[_R]):
+ """
+ A single-value metrics aggregation that returns the minimum value
+ among numeric values extracted from the aggregated documents.
+
+ :arg format:
+ :arg field: The field on which to run the aggregation.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg script:
+ """
+
+ name = "min"
+
+ def __init__(
+ self,
+ *,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ format=format, field=field, missing=missing, script=script, **kwargs
+ )
+
+
class MinBucket(Pipeline[_R]):
+ """
+ A sibling pipeline aggregation which identifies the bucket(s) with the
+ minimum value of a specified metric in a sibling aggregation and
+ outputs both the value and the key(s) of the bucket(s).
+
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+
name = "min_bucket"
+ def __init__(
+ self,
+ *,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
+ )
-class MovingFn(Pipeline[_R]):
- name = "moving_fn"
+
+
+class Missing(Bucket[_R]):
+ """
+ A field data based single bucket aggregation, that creates a bucket of
+ all documents in the current document set context that are missing a
+ field value (effectively, missing a field or having the configured
+ NULL value set).
+
+ :arg field: The name of the field.
+ :arg missing:
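+
+ For example, to count documents without a ``price`` value
+ (illustrative names)::
+
+ s = Search(index="products")
+ s.aggs.bucket("products_without_price", Missing(field="price"))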
+ """
+
+ name = "missing"
+
+ def __init__(
+ self,
+ *,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(field=field, missing=missing, **kwargs)
+
+
class MovingAvg(Pipeline[_R]):
+ """ """
+
name = "moving_avg"
+ def __init__(self, **kwargs: Any):
+ super().__init__(**kwargs)
+
+
+class LinearMovingAverageAggregation(MovingAvg[_R]):
+ """
+ :arg model: (required)
+ :arg settings: (required)
+ :arg minimize:
+ :arg predict:
+ :arg window:
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+
+ def __init__(
+ self,
+ *,
+ model: Any = DEFAULT,
+ settings: Union["types.EmptyObject", Dict[str, Any], "DefaultType"] = DEFAULT,
+ minimize: Union[bool, "DefaultType"] = DEFAULT,
+ predict: Union[int, "DefaultType"] = DEFAULT,
+ window: Union[int, "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ model=model,
+ settings=settings,
+ minimize=minimize,
+ predict=predict,
+ window=window,
+ format=format,
+ gap_policy=gap_policy,
+ buckets_path=buckets_path,
+ **kwargs,
+ )
+
+
+class SimpleMovingAverageAggregation(MovingAvg[_R]):
+ """
+ :arg model: (required)
+ :arg settings: (required)
+ :arg minimize:
+ :arg predict:
+ :arg window:
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+
+ def __init__(
+ self,
+ *,
+ model: Any = DEFAULT,
+ settings: Union["types.EmptyObject", Dict[str, Any], "DefaultType"] = DEFAULT,
+ minimize: Union[bool, "DefaultType"] = DEFAULT,
+ predict: Union[int, "DefaultType"] = DEFAULT,
+ window: Union[int, "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ model=model,
+ settings=settings,
+ minimize=minimize,
+ predict=predict,
+ window=window,
+ format=format,
+ gap_policy=gap_policy,
+ buckets_path=buckets_path,
+ **kwargs,
+ )
+
+
+class EwmaMovingAverageAggregation(MovingAvg[_R]):
+ """
+ :arg model: (required)
+ :arg settings: (required)
+ :arg minimize:
+ :arg predict:
+ :arg window:
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+
+ def __init__(
+ self,
+ *,
+ model: Any = DEFAULT,
+ settings: Union[
+ "types.EwmaModelSettings", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ minimize: Union[bool, "DefaultType"] = DEFAULT,
+ predict: Union[int, "DefaultType"] = DEFAULT,
+ window: Union[int, "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ model=model,
+ settings=settings,
+ minimize=minimize,
+ predict=predict,
+ window=window,
+ format=format,
+ gap_policy=gap_policy,
+ buckets_path=buckets_path,
+ **kwargs,
+ )
+
+
+class HoltMovingAverageAggregation(MovingAvg[_R]):
+ """
+ :arg model: (required)
+ :arg settings: (required)
+ :arg minimize:
+ :arg predict:
+ :arg window:
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+
+ def __init__(
+ self,
+ *,
+ model: Any = DEFAULT,
+ settings: Union[
+ "types.HoltLinearModelSettings", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ minimize: Union[bool, "DefaultType"] = DEFAULT,
+ predict: Union[int, "DefaultType"] = DEFAULT,
+ window: Union[int, "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ model=model,
+ settings=settings,
+ minimize=minimize,
+ predict=predict,
+ window=window,
+ format=format,
+ gap_policy=gap_policy,
+ buckets_path=buckets_path,
+ **kwargs,
+ )
+
+
+class HoltWintersMovingAverageAggregation(MovingAvg[_R]):
+ """
+ :arg model: (required)
+ :arg settings: (required)
+ :arg minimize:
+ :arg predict:
+ :arg window:
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+
+ def __init__(
+ self,
+ *,
+ model: Any = DEFAULT,
+ settings: Union[
+ "types.HoltWintersModelSettings", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ minimize: Union[bool, "DefaultType"] = DEFAULT,
+ predict: Union[int, "DefaultType"] = DEFAULT,
+ window: Union[int, "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ model=model,
+ settings=settings,
+ minimize=minimize,
+ predict=predict,
+ window=window,
+ format=format,
+ gap_policy=gap_policy,
+ buckets_path=buckets_path,
+ **kwargs,
+ )
+
+
class MovingPercentiles(Pipeline[_R]):
+ """
+ Given an ordered series of percentiles, "slides" a window across those
+ percentiles and computes cumulative percentiles.
+
+ :arg window: The size of window to "slide" across the histogram.
+ :arg shift: By default, the window consists of the last n values
+ excluding the current bucket. Increasing `shift` by 1 moves the
+ starting window position by 1 to the right.
+ :arg keyed:
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+
name = "moving_percentiles"
+ def __init__(
+ self,
+ *,
+ window: Union[int, "DefaultType"] = DEFAULT,
+ shift: Union[int, "DefaultType"] = DEFAULT,
+ keyed: Union[bool, "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ window=window,
+ shift=shift,
+ keyed=keyed,
+ format=format,
+ gap_policy=gap_policy,
+ buckets_path=buckets_path,
+ **kwargs,
+ )
+
+
+class MovingFn(Pipeline[_R]):
+ """
+ Given an ordered series of data, "slides" a window across the data and
+ runs a custom script on each window of data. For convenience, a number
+ of common functions are predefined such as `min`, `max`, and moving
+ averages.
+
+ :arg script: The script that should be executed on each window of
+ data.
+ :arg shift: By default, the window consists of the last n values
+ excluding the current bucket. Increasing `shift` by 1 moves the
+ starting window position by 1 to the right.
+ :arg window: The size of window to "slide" across the histogram.
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
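+
+ A sketch using one of the predefined window functions; all names are
+ illustrative::
+
+ s = Search(index="sales")
+ h = s.aggs.bucket(
+ "by_day", DateHistogram(field="date", calendar_interval="day")
+ )
+ h.metric("the_sum", Sum(field="price"))
+ h.pipeline(
+ "the_movavg",
+ MovingFn(
+ buckets_path="the_sum",
+ window=10,
+ script="MovingFunctions.unweightedAvg(values)",
+ ),
+ )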
+ """
+
+ name = "moving_fn"
+
+ def __init__(
+ self,
+ *,
+ script: Union[str, "DefaultType"] = DEFAULT,
+ shift: Union[int, "DefaultType"] = DEFAULT,
+ window: Union[int, "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ script=script,
+ shift=shift,
+ window=window,
+ format=format,
+ gap_policy=gap_policy,
+ buckets_path=buckets_path,
+ **kwargs,
+ )
+
+
+class MultiTerms(Bucket[_R]):
+ """
+ A multi-bucket value source based aggregation where buckets are
+ dynamically built - one per unique set of values.
+
+ :arg terms: (required) The fields from which to generate sets of terms.
+ :arg collect_mode: Specifies the strategy for data collection.
+ Defaults to `breadth_first` if omitted.
+ :arg order: Specifies the sort order of the buckets. Defaults to
+ sorting by descending document count.
+ :arg min_doc_count: The minimum number of documents in a bucket for it
+ to be returned. Defaults to `1` if omitted.
+ :arg shard_min_doc_count: The minimum number of documents in a bucket
+ on each shard for it to be returned. Defaults to `1` if omitted.
+ :arg shard_size: The number of candidate terms produced by each shard.
+ By default, `shard_size` will be automatically estimated based on
+ the number of shards and the `size` parameter.
+ :arg show_term_doc_count_error: Calculates the doc count error on per
+ term basis.
+ :arg size: The number of term buckets that should be returned out of
+ the overall terms list. Defaults to `10` if omitted.
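+
+ For example, to bucket by unique ``(genre, product)`` combinations
+ (illustrative names)::
+
+ s = Search(index="products")
+ s.aggs.bucket(
+ "genres_and_products",
+ MultiTerms(terms=[{"field": "genre"}, {"field": "product"}]),
+ )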
+ """
+
+ name = "multi_terms"
+
+ def __init__(
+ self,
+ *,
+ terms: Union[
+ Sequence["types.MultiTermLookup"], Sequence[Dict[str, Any]], "DefaultType"
+ ] = DEFAULT,
+ collect_mode: Union[
+ Literal["depth_first", "breadth_first"], "DefaultType"
+ ] = DEFAULT,
+ order: Union[
+ Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]],
+ Sequence[Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]]],
+ "DefaultType",
+ ] = DEFAULT,
+ min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+ shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+ shard_size: Union[int, "DefaultType"] = DEFAULT,
+ show_term_doc_count_error: Union[bool, "DefaultType"] = DEFAULT,
+ size: Union[int, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ terms=terms,
+ collect_mode=collect_mode,
+ order=order,
+ min_doc_count=min_doc_count,
+ shard_min_doc_count=shard_min_doc_count,
+ shard_size=shard_size,
+ show_term_doc_count_error=show_term_doc_count_error,
+ size=size,
+ **kwargs,
+ )
+
+
+class Nested(Bucket[_R]):
+ """
+ A special single bucket aggregation that enables aggregating nested
+ documents.
+
+ :arg path: The path to the field of type `nested`.
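+
+ A sketch aggregating over a nested ``resellers`` object (illustrative
+ names)::
+
+ s = Search(index="products")
+ s.aggs.bucket("resellers", Nested(path="resellers")).metric(
+ "min_price", Min(field="resellers.price")
+ )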
+ """
+
+ name = "nested"
+
+ def __init__(
+ self,
+ path: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(path=path, **kwargs)
+
+
class Normalize(Pipeline[_R]):
+ """
+ A parent pipeline aggregation which calculates the specific
+ normalized/rescaled value for a specific bucket value.
+
+ :arg method: The specific method to apply.
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+
name = "normalize"
+ def __init__(
+ self,
+ *,
+ method: Union[
+ Literal[
+ "rescale_0_1",
+ "rescale_0_100",
+ "percent_of_sum",
+ "mean",
+ "z-score",
+ "softmax",
+ ],
+ "DefaultType",
+ ] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ method=method,
+ format=format,
+ gap_policy=gap_policy,
+ buckets_path=buckets_path,
+ **kwargs,
+ )
+
+
+class Parent(Bucket[_R]):
+ """
+ A special single bucket aggregation that selects parent documents that
+ have the specified type, as defined in a `join` field.
+
+ :arg type: The child type that should be selected.
+ """
+
+ name = "parent"
+
+ def __init__(self, type: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any):
+ super().__init__(type=type, **kwargs)
+
+
+class PercentileRanks(Agg[_R]):
+ """
+ A multi-value metrics aggregation that calculates one or more
+ percentile ranks over numeric values extracted from the aggregated
+ documents.
+
+ :arg keyed: By default, the aggregation associates a unique string key
+ with each bucket and returns the ranges as a hash rather than an
+ array. Set to `false` to disable this behavior. Defaults to `True`
+ if omitted.
+ :arg values: An array of values for which to calculate the percentile
+ ranks.
+ :arg hdr: Uses the alternative High Dynamic Range Histogram algorithm
+ to calculate percentile ranks.
+ :arg tdigest: Sets parameters for the default TDigest algorithm used
+ to calculate percentile ranks.
+ :arg format:
+ :arg field: The field on which to run the aggregation.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg script:
+ """
+
+ name = "percentile_ranks"
+
+ def __init__(
+ self,
+ *,
+ keyed: Union[bool, "DefaultType"] = DEFAULT,
+ values: Union[Sequence[float], None, "DefaultType"] = DEFAULT,
+ hdr: Union["types.HdrMethod", Dict[str, Any], "DefaultType"] = DEFAULT,
+ tdigest: Union["types.TDigest", Dict[str, Any], "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ keyed=keyed,
+ values=values,
+ hdr=hdr,
+ tdigest=tdigest,
+ format=format,
+ field=field,
+ missing=missing,
+ script=script,
+ **kwargs,
+ )
+
+
+class Percentiles(Agg[_R]):
+ """
+ A multi-value metrics aggregation that calculates one or more
+ percentiles over numeric values extracted from the aggregated
+ documents.
+
+ :arg keyed: By default, the aggregation associates a unique string key
+ with each bucket and returns the ranges as a hash rather than an
+ array. Set to `false` to disable this behavior. Defaults to `True`
+ if omitted.
+ :arg percents: The percentiles to calculate.
+ :arg hdr: Uses the alternative High Dynamic Range Histogram algorithm
+ to calculate percentiles.
+ :arg tdigest: Sets parameters for the default TDigest algorithm used
+ to calculate percentiles.
+ :arg format:
+ :arg field: The field on which to run the aggregation.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg script:
+ """
+
+ name = "percentiles"
+
+ def __init__(
+ self,
+ *,
+ keyed: Union[bool, "DefaultType"] = DEFAULT,
+ percents: Union[Sequence[float], "DefaultType"] = DEFAULT,
+ hdr: Union["types.HdrMethod", Dict[str, Any], "DefaultType"] = DEFAULT,
+ tdigest: Union["types.TDigest", Dict[str, Any], "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ keyed=keyed,
+ percents=percents,
+ hdr=hdr,
+ tdigest=tdigest,
+ format=format,
+ field=field,
+ missing=missing,
+ script=script,
+ **kwargs,
+ )
+
+
class PercentilesBucket(Pipeline[_R]):
+ """
+ A sibling pipeline aggregation which calculates percentiles across all
+ buckets of a specified metric in a sibling aggregation.
+
+ :arg percents: The list of percentiles to calculate.
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+
name = "percentiles_bucket"
+ def __init__(
+ self,
+ *,
+ percents: Union[Sequence[float], "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ percents=percents,
+ format=format,
+ gap_policy=gap_policy,
+ buckets_path=buckets_path,
+ **kwargs,
+ )
+
+
+class Range(Bucket[_R]):
+ """
+ A multi-bucket value source based aggregation that enables the user to
+ define a set of ranges - each representing a bucket.
+
+ :arg field: The field whose values are used to build ranges.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg ranges: An array of ranges used to bucket documents.
+ :arg script:
+ :arg keyed: Set to `true` to associate a unique string key with each
+ bucket and return the ranges as a hash rather than an array.
+ :arg format:
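+
+ For example, to bucket documents into three price bands (illustrative
+ names)::
+
+ s = Search(index="products")
+ s.aggs.bucket(
+ "price_ranges",
+ Range(
+ field="price",
+ ranges=[{"to": 100}, {"from": 100, "to": 200}, {"from": 200}],
+ ),
+ )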
+ """
+
+ name = "range"
+
+ def __init__(
+ self,
+ *,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[int, "DefaultType"] = DEFAULT,
+ ranges: Union[
+ Sequence["types.AggregationRange"], Sequence[Dict[str, Any]], "DefaultType"
+ ] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ keyed: Union[bool, "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ field=field,
+ missing=missing,
+ ranges=ranges,
+ script=script,
+ keyed=keyed,
+ format=format,
+ **kwargs,
+ )
+
+
+class RareTerms(Bucket[_R]):
+ """
+ A multi-bucket value source based aggregation which finds "rare"
+ terms — terms that are at the long-tail of the distribution and are
+ not frequent.
+
+ :arg exclude: Terms that should be excluded from the aggregation.
+ :arg field: The field from which to return rare terms.
+ :arg include: Terms that should be included in the aggregation.
+ :arg max_doc_count: The maximum number of documents a term should
+ appear in. Defaults to `1` if omitted.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg precision: The precision of the internal CuckooFilters. Smaller
+ precision leads to better approximation, but higher memory usage.
+ Defaults to `0.001` if omitted.
+ :arg value_type:
+ """
+
+ name = "rare_terms"
+
+ def __init__(
+ self,
+ *,
+ exclude: Union[str, Sequence[str], "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ include: Union[
+ str, Sequence[str], "types.TermsPartition", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ max_doc_count: Union[int, "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ precision: Union[float, "DefaultType"] = DEFAULT,
+ value_type: Union[str, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ exclude=exclude,
+ field=field,
+ include=include,
+ max_doc_count=max_doc_count,
+ missing=missing,
+ precision=precision,
+ value_type=value_type,
+ **kwargs,
+ )
+
+
+class Rate(Agg[_R]):
+ """
+ Calculates a rate of documents or a field in each bucket. Can only be
+ used inside a `date_histogram` or `composite` aggregation.
+
+ :arg unit: The interval used to calculate the rate. By default, the
+ interval of the `date_histogram` is used.
+ :arg mode: How the rate is calculated. Defaults to `sum` if omitted.
+ :arg format:
+ :arg field: The field on which to run the aggregation.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg script:
+ """
+
+ name = "rate"
+
+ def __init__(
+ self,
+ *,
+ unit: Union[
+ Literal[
+ "second", "minute", "hour", "day", "week", "month", "quarter", "year"
+ ],
+ "DefaultType",
+ ] = DEFAULT,
+ mode: Union[Literal["sum", "value_count"], "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ unit=unit,
+ mode=mode,
+ format=format,
+ field=field,
+ missing=missing,
+ script=script,
+ **kwargs,
+ )
+
+
+class ReverseNested(Bucket[_R]):
+ """
+ A special single bucket aggregation that enables aggregating on parent
+ documents from nested documents. Should only be defined inside a
+ `nested` aggregation.
+
+ :arg path: Defines the nested object field that should be joined back
+ to. The default is empty, which means that it joins back to the
+ root/main document level.
+ """
+
+ name = "reverse_nested"
+
+ def __init__(
+ self,
+ path: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(path=path, **kwargs)
+
+
+class RandomSampler(Bucket[_R]):
+ """
+ A single bucket aggregation that randomly includes documents in the
+ aggregated results. Sampling provides significant speed improvement at
+ the cost of accuracy.
+
+ :arg probability: (required) The probability that a document will be
+ included in the aggregated data. Must be greater than 0, less than
+ 0.5, or exactly 1. The lower the probability, the fewer documents
+ are matched.
+ :arg seed: The seed to generate the random sampling of documents. When
+ a seed is provided, the random subset of documents is the same
+ between calls.
+ :arg shard_seed: When combined with seed, setting shard_seed ensures
+ 100% consistent sampling over shards where data is exactly the
+ same.
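+
+ A sketch computing an average over roughly 10% of the documents
+ (illustrative names)::
+
+ s = Search(index="logs")
+ s.aggs.bucket("sampling", RandomSampler(probability=0.1)).metric(
+ "mean_load", Avg(field="load_time")
+ )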
+ """
+
+ name = "random_sampler"
+
+ def __init__(
+ self,
+ *,
+ probability: Union[float, "DefaultType"] = DEFAULT,
+ seed: Union[int, "DefaultType"] = DEFAULT,
+ shard_seed: Union[int, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ probability=probability, seed=seed, shard_seed=shard_seed, **kwargs
+ )
+
+
+class Sampler(Bucket[_R]):
+ """
+ A filtering aggregation used to limit any sub aggregations' processing
+ to a sample of the top-scoring documents.
+
+ :arg shard_size: Limits how many top-scoring documents are collected
+ in the sample processed on each shard. Defaults to `100` if
+ omitted.
+ """
+
+ name = "sampler"
+
+ def __init__(self, shard_size: Union[int, "DefaultType"] = DEFAULT, **kwargs: Any):
+ super().__init__(shard_size=shard_size, **kwargs)
+
+
+class ScriptedMetric(Agg[_R]):
+ """
+ A metric aggregation that uses scripts to provide a metric output.
+
+ :arg combine_script: Runs once on each shard after document collection
+ is complete. Allows the aggregation to consolidate the state
+ returned from each shard.
+ :arg init_script: Runs prior to any collection of documents. Allows
+ the aggregation to set up any initial state.
+ :arg map_script: Runs once per document collected. If no
+ `combine_script` is specified, the resulting state needs to be
+ stored in the `state` object.
+ :arg params: A global object with script parameters for `init`, `map`
+ and `combine` scripts. It is shared between the scripts.
+ :arg reduce_script: Runs once on the coordinating node after all
+ shards have returned their results. The script is provided with
+ access to a variable `states`, which is an array of the result of
+ the `combine_script` on each shard.
+ :arg field: The field on which to run the aggregation.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg script:
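+
+ A sketch with illustrative Painless scripts that compute a profit
+ figure across shards::
+
+ s = Search(index="transactions")
+ s.aggs.metric(
+ "profit",
+ ScriptedMetric(
+ init_script="state.transactions = []",
+ map_script="state.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)",
+ combine_script="double profit = 0; for (t in state.transactions) { profit += t } return profit",
+ reduce_script="double profit = 0; for (a in states) { profit += a } return profit",
+ ),
+ )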
+ """
+
+ name = "scripted_metric"
+
+ def __init__(
+ self,
+ *,
+ combine_script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ init_script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ map_script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ params: Union[Mapping[str, Any], "DefaultType"] = DEFAULT,
+ reduce_script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ combine_script=combine_script,
+ init_script=init_script,
+ map_script=map_script,
+ params=params,
+ reduce_script=reduce_script,
+ field=field,
+ missing=missing,
+ script=script,
+ **kwargs,
+ )
+
+
class SerialDiff(Pipeline[_R]):
+ """
+ An aggregation that subtracts values in a time series from themselves
+ at different time lags or periods.
+
+ :arg lag: The historical bucket to subtract from the current value.
+ Must be a positive, non-zero integer.
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+
name = "serial_diff"
+ def __init__(
+ self,
+ *,
+ lag: Union[int, "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ lag=lag,
+ format=format,
+ gap_policy=gap_policy,
+ buckets_path=buckets_path,
+ **kwargs,
+ )
+
+
+class SignificantTerms(Bucket[_R]):
+ """
+ Returns interesting or unusual occurrences of terms in a set.
+
+ :arg background_filter: A background filter that can be used to focus
+ in on significant terms within a narrower context, instead of the
+ entire index.
+ :arg chi_square: Use Chi square, as described in "Information
+ Retrieval", Manning et al., Chapter 13.5.2, as the significance
+ score.
+ :arg exclude: Terms to exclude.
+ :arg execution_hint: Mechanism by which the aggregation should be
+ executed: using field values directly or using global ordinals.
+ :arg field: The field from which to return significant terms.
+ :arg gnd: Use Google normalized distance as described in "The Google
+ Similarity Distance", Cilibrasi and Vitanyi, 2007, as the
+ significance score.
+ :arg include: Terms to include.
+ :arg jlh: Use JLH score as the significance score.
+ :arg min_doc_count: Only return terms that are found in more than
+ `min_doc_count` hits. Defaults to `3` if omitted.
+ :arg mutual_information: Use mutual information as described in
+ "Information Retrieval", Manning et al., Chapter 13.5.1, as the
+ significance score.
+ :arg percentage: A simple calculation of the number of documents in
+ the foreground sample with a term divided by the number of
+ documents in the background with the term.
+ :arg script_heuristic: Customized score, implemented via a script.
+ :arg shard_min_doc_count: Regulates the certainty a shard has whether
+ the term should actually be added to the candidate list with
+ respect to the `min_doc_count`. Terms will only be considered if
+ their local shard frequency within the set is higher than the
+ `shard_min_doc_count`.
+ :arg shard_size: Can be used to control the volumes of candidate terms
+ produced by each shard. By default, `shard_size` will be
+ automatically estimated based on the number of shards and the
+ `size` parameter.
+ :arg size: The number of buckets returned out of the overall terms
+ list.
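+
+ For example, to surface unusually frequent crime types within a
+ filtered document set (illustrative names)::
+
+ s = Search(index="reports").query("match", force="British Transport Police")
+ s.aggs.bucket(
+ "significant_crime_types", SignificantTerms(field="crime_type")
+ )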
+ """
+
+ name = "significant_terms"
+ _param_defs = {
+ "background_filter": {"type": "query"},
+ }
+
+ def __init__(
+ self,
+ *,
+ background_filter: Union[Query, "DefaultType"] = DEFAULT,
+ chi_square: Union[
+ "types.ChiSquareHeuristic", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ exclude: Union[str, Sequence[str], "DefaultType"] = DEFAULT,
+ execution_hint: Union[
+ Literal[
+ "map",
+ "global_ordinals",
+ "global_ordinals_hash",
+ "global_ordinals_low_cardinality",
+ ],
+ "DefaultType",
+ ] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ gnd: Union[
+ "types.GoogleNormalizedDistanceHeuristic", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ include: Union[
+ str, Sequence[str], "types.TermsPartition", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ jlh: Union["types.EmptyObject", Dict[str, Any], "DefaultType"] = DEFAULT,
+ min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+ mutual_information: Union[
+ "types.MutualInformationHeuristic", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ percentage: Union[
+ "types.PercentageScoreHeuristic", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ script_heuristic: Union[
+ "types.ScriptedHeuristic", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+ shard_size: Union[int, "DefaultType"] = DEFAULT,
+ size: Union[int, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ background_filter=background_filter,
+ chi_square=chi_square,
+ exclude=exclude,
+ execution_hint=execution_hint,
+ field=field,
+ gnd=gnd,
+ include=include,
+ jlh=jlh,
+ min_doc_count=min_doc_count,
+ mutual_information=mutual_information,
+ percentage=percentage,
+ script_heuristic=script_heuristic,
+ shard_min_doc_count=shard_min_doc_count,
+ shard_size=shard_size,
+ size=size,
+ **kwargs,
+ )
+
+
+class SignificantText(Bucket[_R]):
+ """
+ Returns interesting or unusual occurrences of free-text terms in a
+ set.
+
+ :arg background_filter: A background filter that can be used to focus
+ in on significant terms within a narrower context, instead of the
+ entire index.
+ :arg chi_square: Use Chi square, as described in "Information
+ Retrieval", Manning et al., Chapter 13.5.2, as the significance
+ score.
+ :arg exclude: Values to exclude.
+ :arg execution_hint: Determines whether the aggregation will use field
+ values directly or global ordinals.
+ :arg field: The field from which to return significant text.
+ :arg filter_duplicate_text: Whether to filter out duplicate text to
+ deal with noisy data.
+ :arg gnd: Use Google normalized distance as described in "The Google
+ Similarity Distance", Cilibrasi and Vitanyi, 2007, as the
+ significance score.
+ :arg include: Values to include.
+ :arg jlh: Use JLH score as the significance score.
+ :arg min_doc_count: Only return values that are found in more than
+ `min_doc_count` hits. Defaults to `3` if omitted.
+ :arg mutual_information: Use mutual information as described in
+ "Information Retrieval", Manning et al., Chapter 13.5.1, as the
+ significance score.
+ :arg percentage: A simple calculation of the number of documents in
+ the foreground sample with a term divided by the number of
+ documents in the background with the term.
+ :arg script_heuristic: Customized score, implemented via a script.
+ :arg shard_min_doc_count: Regulates the certainty a shard has whether
+ the values should actually be added to the candidate list with
+ respect to the min_doc_count. Values will only be considered if
+ their local shard frequency within the set is higher than the
+ `shard_min_doc_count`.
+ :arg shard_size: The number of candidate terms produced by each shard.
+ By default, `shard_size` will be automatically estimated based on
+ the number of shards and the `size` parameter.
+ :arg size: The number of buckets returned out of the overall terms
+ list.
+ :arg source_fields: Overrides the JSON `_source` fields from which
+ text will be analyzed.
+ """
+
+ name = "significant_text"
+ _param_defs = {
+ "background_filter": {"type": "query"},
+ }
+
+ def __init__(
+ self,
+ *,
+ background_filter: Union[Query, "DefaultType"] = DEFAULT,
+ chi_square: Union[
+ "types.ChiSquareHeuristic", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ exclude: Union[str, Sequence[str], "DefaultType"] = DEFAULT,
+ execution_hint: Union[
+ Literal[
+ "map",
+ "global_ordinals",
+ "global_ordinals_hash",
+ "global_ordinals_low_cardinality",
+ ],
+ "DefaultType",
+ ] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ filter_duplicate_text: Union[bool, "DefaultType"] = DEFAULT,
+ gnd: Union[
+ "types.GoogleNormalizedDistanceHeuristic", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ include: Union[
+ str, Sequence[str], "types.TermsPartition", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ jlh: Union["types.EmptyObject", Dict[str, Any], "DefaultType"] = DEFAULT,
+ min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+ mutual_information: Union[
+ "types.MutualInformationHeuristic", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ percentage: Union[
+ "types.PercentageScoreHeuristic", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ script_heuristic: Union[
+ "types.ScriptedHeuristic", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+ shard_size: Union[int, "DefaultType"] = DEFAULT,
+ size: Union[int, "DefaultType"] = DEFAULT,
+ source_fields: Union[
+ Union[str, "InstrumentedField"],
+ Sequence[Union[str, "InstrumentedField"]],
+ "DefaultType",
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ background_filter=background_filter,
+ chi_square=chi_square,
+ exclude=exclude,
+ execution_hint=execution_hint,
+ field=field,
+ filter_duplicate_text=filter_duplicate_text,
+ gnd=gnd,
+ include=include,
+ jlh=jlh,
+ min_doc_count=min_doc_count,
+ mutual_information=mutual_information,
+ percentage=percentage,
+ script_heuristic=script_heuristic,
+ shard_min_doc_count=shard_min_doc_count,
+ shard_size=shard_size,
+ size=size,
+ source_fields=source_fields,
+ **kwargs,
+ )
+
+
+class Stats(Agg[_R]):
+ """
+ A multi-value metrics aggregation that computes stats over numeric
+ values extracted from the aggregated documents.
+
+ :arg format:
+ :arg field: The field on which to run the aggregation.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg script:
+ """
+
+ name = "stats"
+
+ def __init__(
+ self,
+ *,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ format=format, field=field, missing=missing, script=script, **kwargs
+ )
+
+
class StatsBucket(Pipeline[_R]):
+ """
+ A sibling pipeline aggregation which calculates a variety of stats
+ across all buckets of a specified metric in a sibling aggregation.
+
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+
name = "stats_bucket"
+ def __init__(
+ self,
+ *,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
+ )
+
+
+class StringStats(Agg[_R]):
+ """
+ A multi-value metrics aggregation that computes statistics over string
+ values extracted from the aggregated documents.
+
+ :arg show_distribution: Shows the probability distribution for all
+ characters.
+ :arg field: The field on which to run the aggregation.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg script:
+ """
+
+ name = "string_stats"
+
+ def __init__(
+ self,
+ *,
+ show_distribution: Union[bool, "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ show_distribution=show_distribution,
+ field=field,
+ missing=missing,
+ script=script,
+ **kwargs,
+ )
+
+
+class Sum(Agg[_R]):
+ """
+ A single-value metrics aggregation that sums numeric values that are
+ extracted from the aggregated documents.
+
+ :arg format:
+ :arg field: The field on which to run the aggregation.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg script:
+ """
+
+ name = "sum"
+
+ def __init__(
+ self,
+ *,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ format=format, field=field, missing=missing, script=script, **kwargs
+ )
+
+
class SumBucket(Pipeline[_R]):
+ """
+ A sibling pipeline aggregation which calculates the sum of a specified
+ metric across all buckets in a sibling aggregation.
+
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+
name = "sum_bucket"
+ def __init__(
+ self,
+ *,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
+ )
-class BucketSort(Pipeline[_R]):
- name = "bucket_sort"
+
+
+class Terms(Bucket[_R]):
+ """
+ A multi-bucket value source based aggregation where buckets are
+ dynamically built - one per unique value.
+
+ :arg collect_mode: Determines how child aggregations should be
+ calculated: breadth-first or depth-first.
+ :arg exclude: Values to exclude. Accepts regular expressions and
+ partitions.
+ :arg execution_hint: Determines whether the aggregation will use field
+ values directly or global ordinals.
+ :arg field: The field from which to return terms.
+ :arg include: Values to include. Accepts regular expressions and
+ partitions.
+ :arg min_doc_count: Only return values that are found in more than
+ `min_doc_count` hits. Defaults to `1` if omitted.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg missing_order:
+ :arg missing_bucket:
+ :arg value_type: Coerces unmapped fields into the specified type.
+ :arg order: Specifies the sort order of the buckets. Defaults to
+ sorting by descending document count.
+ :arg script:
+ :arg shard_min_doc_count: Regulates the certainty a shard has whether
+ the term should actually be added to the candidate list with
+ respect to the `min_doc_count`. Terms will only be considered if
+ their local shard frequency within the set is higher than the
+ `shard_min_doc_count`.
+ :arg shard_size: The number of candidate terms produced by each shard.
+ By default, `shard_size` will be automatically estimated based on
+ the number of shards and the `size` parameter.
+ :arg show_term_doc_count_error: Set to `true` to return the
+ `doc_count_error_upper_bound`, which is an upper bound to the
+ error on the `doc_count` returned by each shard.
+ :arg size: The number of buckets returned out of the overall terms
+ list. Defaults to `10` if omitted.
+ :arg format:
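+
+ The most common usage is a plain terms facet on a keyword field
+ (illustrative names)::
+
+ s = Search(index="products")
+ s.aggs.bucket(
+ "genres", Terms(field="genre", size=10, order={"_count": "desc"})
+ )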
+ """
+
+ name = "terms"
+
+ def __init__(
+ self,
+ *,
+ collect_mode: Union[
+ Literal["depth_first", "breadth_first"], "DefaultType"
+ ] = DEFAULT,
+ exclude: Union[str, Sequence[str], "DefaultType"] = DEFAULT,
+ execution_hint: Union[
+ Literal[
+ "map",
+ "global_ordinals",
+ "global_ordinals_hash",
+ "global_ordinals_low_cardinality",
+ ],
+ "DefaultType",
+ ] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ include: Union[
+ str, Sequence[str], "types.TermsPartition", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ missing_order: Union[
+ Literal["first", "last", "default"], "DefaultType"
+ ] = DEFAULT,
+ missing_bucket: Union[bool, "DefaultType"] = DEFAULT,
+ value_type: Union[str, "DefaultType"] = DEFAULT,
+ order: Union[
+ Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]],
+ Sequence[Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]]],
+ "DefaultType",
+ ] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+ shard_size: Union[int, "DefaultType"] = DEFAULT,
+ show_term_doc_count_error: Union[bool, "DefaultType"] = DEFAULT,
+ size: Union[int, "DefaultType"] = DEFAULT,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ collect_mode=collect_mode,
+ exclude=exclude,
+ execution_hint=execution_hint,
+ field=field,
+ include=include,
+ min_doc_count=min_doc_count,
+ missing=missing,
+ missing_order=missing_order,
+ missing_bucket=missing_bucket,
+ value_type=value_type,
+ order=order,
+ script=script,
+ shard_min_doc_count=shard_min_doc_count,
+ shard_size=shard_size,
+ show_term_doc_count_error=show_term_doc_count_error,
+ size=size,
+ format=format,
+ **kwargs,
+ )
+
+ def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
+ return FieldBucketData(self, search, data)
+
+
+class TimeSeries(Bucket[_R]):
+ """
+ The time series aggregation queries data created using a time series
+ index. This is typically data such as metrics or other data streams
+ with a time component, and requires creating an index using the time
+ series mode.
+
+ :arg size: The maximum number of results to return. Defaults to
+ `10000` if omitted.
+ :arg keyed: Set to `true` to associate a unique string key with each
+ bucket and return the ranges as a hash rather than an array.
+ """
+
+ name = "time_series"
+
+ def __init__(
+ self,
+ *,
+ size: Union[int, "DefaultType"] = DEFAULT,
+ keyed: Union[bool, "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(size=size, keyed=keyed, **kwargs)
+
+
+class TopHits(Agg[_R]):
+ """
+ A metric aggregation that returns the top matching documents per
+ bucket.
+
+ :arg docvalue_fields: Fields for which to return doc values.
+ :arg explain: If `true`, returns detailed information about score
+ computation as part of a hit.
+ :arg fields: Array of wildcard (*) patterns. The request returns
+ values for field names matching these patterns in the hits.fields
+ property of the response.
+ :arg from: Starting document offset.
+ :arg highlight: Specifies the highlighter to use for retrieving
+ highlighted snippets from one or more fields in the search
+ results.
+ :arg script_fields: Returns the result of one or more script
+ evaluations for each hit.
+ :arg size: The maximum number of top matching hits to return per
+ bucket. Defaults to `3` if omitted.
+ :arg sort: Sort order of the top matching hits. By default, the hits
+ are sorted by the score of the main query.
+ :arg _source: Selects the fields of the source that are returned.
+ :arg stored_fields: Returns values for the specified stored fields
+ (fields that use the `store` mapping option).
+ :arg track_scores: If `true`, calculates and returns document scores,
+ even if the scores are not used for sorting.
+ :arg version: If `true`, returns document version as part of a hit.
+ :arg seq_no_primary_term: If `true`, returns sequence number and
+ primary term of the last modification of each hit.
+ :arg field: The field on which to run the aggregation.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg script:
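+
+ A sketch returning the most expensive product per genre (illustrative
+ names)::
+
+ s = Search(index="products")
+ s.aggs.bucket("by_genre", Terms(field="genre")).metric(
+ "top_product", TopHits(size=1, sort=[{"price": {"order": "desc"}}])
+ )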
+ """
+
+ name = "top_hits"
+
+ def __init__(
+ self,
+ *,
+ docvalue_fields: Union[
+ Sequence["types.FieldAndFormat"], Sequence[Dict[str, Any]], "DefaultType"
+ ] = DEFAULT,
+ explain: Union[bool, "DefaultType"] = DEFAULT,
+ fields: Union[
+ Sequence["types.FieldAndFormat"], Sequence[Dict[str, Any]], "DefaultType"
+ ] = DEFAULT,
+ from_: Union[int, "DefaultType"] = DEFAULT,
+ highlight: Union["types.Highlight", Dict[str, Any], "DefaultType"] = DEFAULT,
+ script_fields: Union[
+ Mapping[str, "types.ScriptField"], Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ size: Union[int, "DefaultType"] = DEFAULT,
+ sort: Union[
+ Union[Union[str, "InstrumentedField"], "types.SortOptions"],
+ Sequence[Union[Union[str, "InstrumentedField"], "types.SortOptions"]],
+ Dict[str, Any],
+ "DefaultType",
+ ] = DEFAULT,
+ _source: Union[
+ bool, "types.SourceFilter", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ stored_fields: Union[
+ Union[str, "InstrumentedField"],
+ Sequence[Union[str, "InstrumentedField"]],
+ "DefaultType",
+ ] = DEFAULT,
+ track_scores: Union[bool, "DefaultType"] = DEFAULT,
+ version: Union[bool, "DefaultType"] = DEFAULT,
+ seq_no_primary_term: Union[bool, "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ docvalue_fields=docvalue_fields,
+ explain=explain,
+ fields=fields,
+ from_=from_,
+ highlight=highlight,
+ script_fields=script_fields,
+ size=size,
+ sort=sort,
+ _source=_source,
+ stored_fields=stored_fields,
+ track_scores=track_scores,
+ version=version,
+ seq_no_primary_term=seq_no_primary_term,
+ field=field,
+ missing=missing,
+ script=script,
+ **kwargs,
+ )
+
+ def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
+ return TopHitsData(self, search, data)
+
+
+class TTest(Agg[_R]):
+ """
+ A metrics aggregation that performs a statistical hypothesis test in
+ which the test statistic follows a Student’s t-distribution under the
+ null hypothesis on numeric values extracted from the aggregated
+ documents.
+
+ :arg a: Test population A.
+ :arg b: Test population B.
+ :arg type: The type of test. Defaults to `heteroscedastic` if omitted.
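+
+ A sketch of a paired t-test between two numeric fields (illustrative
+ names)::
+
+ s = Search(index="node-stats")
+ s.aggs.metric(
+ "startup_time_ttest",
+ TTest(
+ a={"field": "startup_time_before"},
+ b={"field": "startup_time_after"},
+ type="paired",
+ ),
+ )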
+ """
+
+ name = "t_test"
+
+ def __init__(
+ self,
+ *,
+ a: Union["types.TestPopulation", Dict[str, Any], "DefaultType"] = DEFAULT,
+ b: Union["types.TestPopulation", Dict[str, Any], "DefaultType"] = DEFAULT,
+ type: Union[
+ Literal["paired", "homoscedastic", "heteroscedastic"], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(a=a, b=b, type=type, **kwargs)
+
+
+class TopMetrics(Agg[_R]):
+ """
+ A metric aggregation that selects metrics from the document with the
+ largest or smallest sort value.
+
+ :arg metrics: The fields of the top document to return.
+ :arg size: The number of top documents from which to return metrics.
+ Defaults to `1` if omitted.
+ :arg sort: The sort order of the documents.
+ :arg field: The field on which to run the aggregation.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg script:
+ """
+
+ name = "top_metrics"
+
+ def __init__(
+ self,
+ *,
+ metrics: Union[
+ "types.TopMetricsValue",
+ Sequence["types.TopMetricsValue"],
+ Sequence[Dict[str, Any]],
+ "DefaultType",
+ ] = DEFAULT,
+ size: Union[int, "DefaultType"] = DEFAULT,
+ sort: Union[
+ Union[Union[str, "InstrumentedField"], "types.SortOptions"],
+ Sequence[Union[Union[str, "InstrumentedField"], "types.SortOptions"]],
+ Dict[str, Any],
+ "DefaultType",
+ ] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ metrics=metrics,
+ size=size,
+ sort=sort,
+ field=field,
+ missing=missing,
+ script=script,
+ **kwargs,
+ )
+
+
+class ValueCount(Agg[_R]):
+ """
+ A single-value metrics aggregation that counts the number of values
+ that are extracted from the aggregated documents.
+
+ :arg format:
+ :arg field: The field on which to run the aggregation.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ :arg script:
+ """
+
+ name = "value_count"
+
+ def __init__(
+ self,
+ *,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ format=format, field=field, missing=missing, script=script, **kwargs
+ )
+
+
+class WeightedAvg(Agg[_R]):
+ """
+ A single-value metrics aggregation that computes the weighted average
+ of numeric values that are extracted from the aggregated documents.
+
+ :arg format: A numeric response formatter.
+ :arg value: Configuration for the field that provides the values.
+ :arg value_type:
+ :arg weight: Configuration for the field or script that provides the
+ weights.
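+
+ For example, to average grades weighted by a per-document weight
+ (illustrative names)::
+
+ s = Search(index="exams")
+ s.aggs.metric(
+ "weighted_grade",
+ WeightedAvg(value={"field": "grade"}, weight={"field": "weight"}),
+ )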
+ """
+
+ name = "weighted_avg"
+
+ def __init__(
+ self,
+ *,
+ format: Union[str, "DefaultType"] = DEFAULT,
+ value: Union[
+ "types.WeightedAverageValue", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ value_type: Union[
+ Literal[
+ "string",
+ "long",
+ "double",
+ "number",
+ "date",
+ "date_nanos",
+ "ip",
+ "numeric",
+ "geo_point",
+ "boolean",
+ ],
+ "DefaultType",
+ ] = DEFAULT,
+ weight: Union[
+ "types.WeightedAverageValue", Dict[str, Any], "DefaultType"
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ format=format, value=value, value_type=value_type, weight=weight, **kwargs
+ )
+
+
+class VariableWidthHistogram(Bucket[_R]):
+ """
+ A multi-bucket aggregation similar to the histogram, except instead of
+ providing an interval to use as the width of each bucket, a target
+ number of buckets is provided.
+
+ :arg field: The name of the field.
+ :arg buckets: The target number of buckets. Defaults to `10` if
+ omitted.
+ :arg shard_size: The number of buckets that the coordinating node will
+ request from each shard. Defaults to `buckets * 50`.
+ :arg initial_buffer: Specifies the number of individual documents that
+ will be stored in memory on a shard before the initial bucketing
+ algorithm is run. Defaults to `min(10 * shard_size, 50000)`.
+ :arg script:
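+
+ A sketch that lets Elasticsearch choose five variable-width price
+ buckets (illustrative names)::
+
+ s = Search(index="products")
+ s.aggs.bucket("prices", VariableWidthHistogram(field="price", buckets=5))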
+ """
+
+ name = "variable_width_histogram"
+
+ def __init__(
+ self,
+ *,
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ buckets: Union[int, "DefaultType"] = DEFAULT,
+ shard_size: Union[int, "DefaultType"] = DEFAULT,
+ initial_buffer: Union[int, "DefaultType"] = DEFAULT,
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+ **kwargs: Any,
+ ):
+ super().__init__(
+ field=field,
+ buckets=buckets,
+ shard_size=shard_size,
+ initial_buffer=initial_buffer,
+ script=script,
+ **kwargs,
+ )
+
+ def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
+ return FieldBucketData(self, search, data)
diff --git a/elasticsearch_dsl/query.py b/elasticsearch_dsl/query.py
index 4d131d1b..dfb5518b 100644
--- a/elasticsearch_dsl/query.py
+++ b/elasticsearch_dsl/query.py
@@ -624,7 +624,9 @@ def __init__(
Literal["multiply", "replace", "sum", "avg", "max", "min"], "DefaultType"
] = DEFAULT,
functions: Union[
- Sequence["types.FunctionScoreContainer"], Dict[str, Any], "DefaultType"
+ Sequence["types.FunctionScoreContainer"],
+ Sequence[Dict[str, Any]],
+ "DefaultType",
] = DEFAULT,
max_boost: Union[float, "DefaultType"] = DEFAULT,
min_score: Union[float, "DefaultType"] = DEFAULT,
@@ -1688,7 +1690,7 @@ def __init__(
organic: Union[Query, "DefaultType"] = DEFAULT,
ids: Union[Sequence[str], "DefaultType"] = DEFAULT,
docs: Union[
- Sequence["types.PinnedDoc"], Dict[str, Any], "DefaultType"
+ Sequence["types.PinnedDoc"], Sequence[Dict[str, Any]], "DefaultType"
] = DEFAULT,
boost: Union[float, "DefaultType"] = DEFAULT,
_name: Union[str, "DefaultType"] = DEFAULT,
@@ -2372,7 +2374,7 @@ def __init__(
self,
*,
clauses: Union[
- Sequence["types.SpanQuery"], Dict[str, Any], "DefaultType"
+ Sequence["types.SpanQuery"], Sequence[Dict[str, Any]], "DefaultType"
] = DEFAULT,
in_order: Union[bool, "DefaultType"] = DEFAULT,
slop: Union[int, "DefaultType"] = DEFAULT,
@@ -2459,7 +2461,7 @@ def __init__(
self,
*,
clauses: Union[
- Sequence["types.SpanQuery"], Dict[str, Any], "DefaultType"
+ Sequence["types.SpanQuery"], Sequence[Dict[str, Any]], "DefaultType"
] = DEFAULT,
boost: Union[float, "DefaultType"] = DEFAULT,
_name: Union[str, "DefaultType"] = DEFAULT,
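+ # Illustrative note: widening these unions to Sequence[Dict[str, Any]]
+ # lets plain dicts type-check alongside the typed helpers, e.g.:
+ #
+ #     FunctionScore(functions=[{"random_score": {}, "weight": 2}])
+ #     SpanNear(clauses=[{"span_term": {"field": "value"}}], slop=0)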
diff --git a/elasticsearch_dsl/search_base.py b/elasticsearch_dsl/search_base.py
index 76a9b1d9..739635f8 100644
--- a/elasticsearch_dsl/search_base.py
+++ b/elasticsearch_dsl/search_base.py
@@ -949,7 +949,7 @@ def to_dict(self, count: bool = False, **kwargs: Any) -> Dict[str, Any]:
d = {}
if self.query:
- d["query"] = self.query.to_dict()
+ d["query"] = recursive_to_dict(self.query)
if self._knn:
if len(self._knn) == 1:
@@ -963,10 +963,10 @@ def to_dict(self, count: bool = False, **kwargs: Any) -> Dict[str, Any]:
# count request doesn't care for sorting and other things
if not count:
if self.post_filter:
- d["post_filter"] = self.post_filter.to_dict()
+ d["post_filter"] = recursive_to_dict(self.post_filter.to_dict())
if self.aggs.aggs:
- d.update(self.aggs.to_dict())
+ d.update(recursive_to_dict(self.aggs.to_dict()))
if self._sort:
d["sort"] = self._sort
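+ # Illustrative note, assuming utils.recursive_to_dict as used above: the
+ # helper calls to_dict() on any nested DSL object, so typed AttrDict
+ # helpers from types.py serialize even when embedded deep in a body:
+ #
+ #     s = Search().query("nested", path="comments",
+ #                        query=Q("match_all"), inner_hits=InnerHits(size=1))
+ #     s.to_dict()  # InnerHits is converted along with everything else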
diff --git a/elasticsearch_dsl/types.py b/elasticsearch_dsl/types.py
index e75770fb..cfb42791 100644
--- a/elasticsearch_dsl/types.py
+++ b/elasticsearch_dsl/types.py
@@ -26,6 +26,110 @@
PipeSeparatedFlags = str
+class Aggregation(AttrDict[Any]):
+ pass
+
+
+class AggregationRange(AttrDict[Any]):
+ """
+ :arg from: Start of the range (inclusive).
+ :arg key: Custom key to return the range with.
+ :arg to: End of the range (exclusive).
+ """
+
+ from_: Union[float, DefaultType]
+ key: Union[str, DefaultType]
+ to: Union[float, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ from_: Union[float, DefaultType] = DEFAULT,
+ key: Union[str, DefaultType] = DEFAULT,
+ to: Union[float, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if from_ is not DEFAULT:
+ kwargs["from_"] = from_
+ if key is not DEFAULT:
+ kwargs["key"] = key
+ if to is not DEFAULT:
+ kwargs["to"] = to
+ super().__init__(kwargs)
+
+
+class BucketCorrelationFunction(AttrDict[Any]):
+ """
+ :arg count_correlation: (required) The configuration to calculate a
+ count correlation. This function is designed for determining the
+ correlation of a term value and a given metric.
+ """
+
+ count_correlation: Union[
+ "BucketCorrelationFunctionCountCorrelation", Dict[str, Any], DefaultType
+ ]
+
+ def __init__(
+ self,
+ *,
+ count_correlation: Union[
+ "BucketCorrelationFunctionCountCorrelation", Dict[str, Any], DefaultType
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if count_correlation is not DEFAULT:
+ kwargs["count_correlation"] = count_correlation
+ super().__init__(kwargs)
+
+
+class BucketPathAggregation(Aggregation):
+ """
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+
+ buckets_path: Union[str, Sequence[str], Mapping[str, str], DefaultType]
+
+ def __init__(
+ self,
+ *,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], DefaultType
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if buckets_path is not DEFAULT:
+ kwargs["buckets_path"] = buckets_path
+ super().__init__(**kwargs)
+
+
+class ChiSquareHeuristic(AttrDict[Any]):
+ """
+ :arg background_is_superset: (required) Set to `false` if you defined
+ a custom background filter that represents a different set of
+ documents that you want to compare to.
+ :arg include_negatives: (required) Set to `false` to filter out the
+ terms that appear less often in the subset than in documents
+ outside the subset.
+ """
+
+ background_is_superset: Union[bool, DefaultType]
+ include_negatives: Union[bool, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ background_is_superset: Union[bool, DefaultType] = DEFAULT,
+ include_negatives: Union[bool, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if background_is_superset is not DEFAULT:
+ kwargs["background_is_superset"] = background_is_superset
+ if include_negatives is not DEFAULT:
+ kwargs["include_negatives"] = include_negatives
+ super().__init__(kwargs)
+
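+ # Illustrative usage sketch: the `chi_square` heuristic of a
+ # `significant_terms` aggregation, as in the Elasticsearch docs:
+ #
+ #     ChiSquareHeuristic(background_is_superset=False, include_negatives=True)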
+
class QueryBase(AttrDict[Any]):
"""
:arg boost: Floating point number used to decrease or increase the
@@ -143,6 +247,157 @@ def __init__(
super().__init__(kwargs)
+class CustomCategorizeTextAnalyzer(AttrDict[Any]):
+ """
+ :arg char_filter:
+ :arg tokenizer:
+ :arg filter:
+ """
+
+ char_filter: Union[Sequence[str], DefaultType]
+ tokenizer: Union[str, DefaultType]
+ filter: Union[Sequence[str], DefaultType]
+
+ def __init__(
+ self,
+ *,
+ char_filter: Union[Sequence[str], DefaultType] = DEFAULT,
+ tokenizer: Union[str, DefaultType] = DEFAULT,
+ filter: Union[Sequence[str], DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if char_filter is not DEFAULT:
+ kwargs["char_filter"] = char_filter
+ if tokenizer is not DEFAULT:
+ kwargs["tokenizer"] = tokenizer
+ if filter is not DEFAULT:
+ kwargs["filter"] = filter
+ super().__init__(kwargs)
+
+
+class DateRangeExpression(AttrDict[Any]):
+ """
+ :arg from: Start of the range (inclusive).
+ :arg key: Custom key to return the range with.
+ :arg to: End of the range (exclusive).
+ """
+
+ from_: Union[str, float, DefaultType]
+ key: Union[str, DefaultType]
+ to: Union[str, float, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ from_: Union[str, float, DefaultType] = DEFAULT,
+ key: Union[str, DefaultType] = DEFAULT,
+ to: Union[str, float, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if from_ is not DEFAULT:
+ kwargs["from_"] = from_
+ if key is not DEFAULT:
+ kwargs["key"] = key
+ if to is not DEFAULT:
+ kwargs["to"] = to
+ super().__init__(kwargs)
+
+
+class EmptyObject(AttrDict[Any]):
+ pass
+
+
+class EwmaModelSettings(AttrDict[Any]):
+ """
+ :arg alpha:
+ """
+
+ alpha: Union[float, DefaultType]
+
+ def __init__(self, *, alpha: Union[float, DefaultType] = DEFAULT, **kwargs: Any):
+ if alpha is not DEFAULT:
+ kwargs["alpha"] = alpha
+ super().__init__(kwargs)
+
+
+class ExtendedBounds(AttrDict[Any]):
+ """
+ :arg max: Maximum value for the bound.
+ :arg min: Minimum value for the bound.
+ """
+
+ max: Any
+ min: Any
+
+ def __init__(self, *, max: Any = DEFAULT, min: Any = DEFAULT, **kwargs: Any):
+ if max is not DEFAULT:
+ kwargs["max"] = max
+ if min is not DEFAULT:
+ kwargs["min"] = min
+ super().__init__(kwargs)
+
+
+class FieldAndFormat(AttrDict[Any]):
+ """
+ :arg field: (required) Wildcard pattern. The request returns values
+ for field names matching this pattern.
+ :arg format: Format in which the values are returned.
+ :arg include_unmapped:
+ """
+
+ field: Union[str, InstrumentedField, DefaultType]
+ format: Union[str, DefaultType]
+ include_unmapped: Union[bool, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ format: Union[str, DefaultType] = DEFAULT,
+ include_unmapped: Union[bool, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if field is not DEFAULT:
+ kwargs["field"] = str(field)
+ if format is not DEFAULT:
+ kwargs["format"] = format
+ if include_unmapped is not DEFAULT:
+ kwargs["include_unmapped"] = include_unmapped
+ super().__init__(kwargs)
+
+
+class FrequentItemSetsField(AttrDict[Any]):
+ """
+ :arg field: (required)
+ :arg exclude: Values to exclude. Can be regular expression strings or
+ arrays of strings of exact terms.
+ :arg include: Values to include. Can be regular expression strings or
+ arrays of strings of exact terms.
+ """
+
+ field: Union[str, InstrumentedField, DefaultType]
+ exclude: Union[str, Sequence[str], DefaultType]
+ include: Union[str, Sequence[str], "TermsPartition", Dict[str, Any], DefaultType]
+
+ def __init__(
+ self,
+ *,
+ field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ exclude: Union[str, Sequence[str], DefaultType] = DEFAULT,
+ include: Union[
+ str, Sequence[str], "TermsPartition", Dict[str, Any], DefaultType
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if field is not DEFAULT:
+ kwargs["field"] = str(field)
+ if exclude is not DEFAULT:
+ kwargs["exclude"] = exclude
+ if include is not DEFAULT:
+ kwargs["include"] = include
+ super().__init__(kwargs)
+
+
class FunctionScoreContainer(AttrDict[Any]):
"""
:arg exp: Function that scores a document with a exponential decay,
@@ -286,6 +541,43 @@ def __init__(self, *, geohash: Union[str, DefaultType] = DEFAULT, **kwargs: Any)
super().__init__(kwargs)
+class GeoLinePoint(AttrDict[Any]):
+ """
+ :arg field: (required) The name of the geo_point field.
+ """
+
+ field: Union[str, InstrumentedField, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if field is not DEFAULT:
+ kwargs["field"] = str(field)
+ super().__init__(kwargs)
+
+
+class GeoLineSort(AttrDict[Any]):
+ """
+ :arg field: (required) The name of the numeric field to use as the
+ sort key for ordering the points.
+ """
+
+ field: Union[str, InstrumentedField, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if field is not DEFAULT:
+ kwargs["field"] = str(field)
+ super().__init__(kwargs)
+
+
class GeoPolygonPoints(AttrDict[Any]):
"""
:arg points: (required)
@@ -348,86 +640,568 @@ def __init__(
super().__init__(kwargs)
-class InnerHits(AttrDict[Any]):
+class GoogleNormalizedDistanceHeuristic(AttrDict[Any]):
"""
- :arg name: The name for the particular inner hit definition in the
- response. Useful when a search request contains multiple inner
- hits.
- :arg size: The maximum number of hits to return per `inner_hits`.
- Defaults to `3` if omitted.
- :arg from: Inner hit starting document offset.
- :arg collapse:
- :arg docvalue_fields:
- :arg explain:
- :arg highlight:
- :arg ignore_unmapped:
- :arg script_fields:
- :arg seq_no_primary_term:
- :arg fields:
- :arg sort: How the inner hits should be sorted per `inner_hits`. By
- default, inner hits are sorted by score.
- :arg _source:
- :arg stored_fields:
- :arg track_scores:
- :arg version:
+ :arg background_is_superset: Set to `false` if you defined a custom
+ background filter that represents a different set of documents
+ that you want to compare to.
"""
- name: Union[str, DefaultType]
- size: Union[int, DefaultType]
- from_: Union[int, DefaultType]
- collapse: Union["FieldCollapse", Dict[str, Any], DefaultType]
- docvalue_fields: Union[Sequence["FieldAndFormat"], Dict[str, Any], DefaultType]
- explain: Union[bool, DefaultType]
- highlight: Union["Highlight", Dict[str, Any], DefaultType]
- ignore_unmapped: Union[bool, DefaultType]
- script_fields: Union[
- Mapping[Union[str, InstrumentedField], "ScriptField"],
- Dict[str, Any],
- DefaultType,
- ]
- seq_no_primary_term: Union[bool, DefaultType]
- fields: Union[
- Union[str, InstrumentedField],
- Sequence[Union[str, InstrumentedField]],
- DefaultType,
- ]
- sort: Union[
- Union[Union[str, InstrumentedField], "SortOptions"],
- Sequence[Union[Union[str, InstrumentedField], "SortOptions"]],
- Dict[str, Any],
- DefaultType,
- ]
- _source: Union[bool, "SourceFilter", Dict[str, Any], DefaultType]
- stored_fields: Union[
- Union[str, InstrumentedField],
- Sequence[Union[str, InstrumentedField]],
- DefaultType,
- ]
- track_scores: Union[bool, DefaultType]
- version: Union[bool, DefaultType]
+ background_is_superset: Union[bool, DefaultType]
def __init__(
self,
*,
- name: Union[str, DefaultType] = DEFAULT,
- size: Union[int, DefaultType] = DEFAULT,
- from_: Union[int, DefaultType] = DEFAULT,
- collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] = DEFAULT,
- docvalue_fields: Union[
- Sequence["FieldAndFormat"], Dict[str, Any], DefaultType
- ] = DEFAULT,
- explain: Union[bool, DefaultType] = DEFAULT,
- highlight: Union["Highlight", Dict[str, Any], DefaultType] = DEFAULT,
- ignore_unmapped: Union[bool, DefaultType] = DEFAULT,
- script_fields: Union[
- Mapping[Union[str, InstrumentedField], "ScriptField"],
- Dict[str, Any],
- DefaultType,
- ] = DEFAULT,
- seq_no_primary_term: Union[bool, DefaultType] = DEFAULT,
- fields: Union[
- Union[str, InstrumentedField],
- Sequence[Union[str, InstrumentedField]],
+ background_is_superset: Union[bool, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if background_is_superset is not DEFAULT:
+ kwargs["background_is_superset"] = background_is_superset
+ super().__init__(kwargs)
+
+
+class HdrMethod(AttrDict[Any]):
+ """
+ :arg number_of_significant_value_digits: Specifies the resolution of
+ values for the histogram in number of significant digits.
+ """
+
+ number_of_significant_value_digits: Union[int, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ number_of_significant_value_digits: Union[int, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if number_of_significant_value_digits is not DEFAULT:
+ kwargs["number_of_significant_value_digits"] = (
+ number_of_significant_value_digits
+ )
+ super().__init__(kwargs)
+
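+ # Illustrative usage sketch, assuming the generated Percentiles
+ # aggregation class: HDR histogram settings for a percentiles aggregation:
+ #
+ #     Percentiles(field="load_time",
+ #                 hdr=HdrMethod(number_of_significant_value_digits=3))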
+
+class HighlightBase(AttrDict[Any]):
+ """
+ :arg type:
+ :arg boundary_chars: A string that contains each boundary character.
+ Defaults to `.,!? \t\n` if omitted.
+ :arg boundary_max_scan: How far to scan for boundary characters.
+ Defaults to `20` if omitted.
+ :arg boundary_scanner: Specifies how to break the highlighted
+ fragments: chars, sentence, or word. Only valid for the unified
+ and fvh highlighters. Defaults to `sentence` for the `unified`
+ highlighter. Defaults to `chars` for the `fvh` highlighter.
+ :arg boundary_scanner_locale: Controls which locale is used to search
+ for sentence and word boundaries. This parameter takes the form of a
+ language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`.
+ Defaults to `Locale.ROOT` if omitted.
+ :arg force_source:
+ :arg fragmenter: Specifies how text should be broken up in highlight
+ snippets: `simple` or `span`. Only valid for the `plain`
+ highlighter. Defaults to `span` if omitted.
+ :arg fragment_size: The size of the highlighted fragment in
+ characters. Defaults to `100` if omitted.
+ :arg highlight_filter:
+ :arg highlight_query: Highlight matches for a query other than the
+ search query. This is especially useful if you use a rescore query
+ because those are not taken into account by highlighting by
+ default.
+ :arg max_fragment_length:
+ :arg max_analyzed_offset: If set to a non-negative value, highlighting
+ stops at this defined maximum limit. The rest of the text is not
+ processed, thus not highlighted and no error is returned. The
+ `max_analyzed_offset` query setting does not override the
+ `index.highlight.max_analyzed_offset` setting, which prevails when
+ it’s set to a lower value than the query setting.
+ :arg no_match_size: The amount of text you want to return from the
+ beginning of the field if there are no matching fragments to
+ highlight.
+ :arg number_of_fragments: The maximum number of fragments to return.
+ If the number of fragments is set to `0`, no fragments are
+ returned. Instead, the entire field contents are highlighted and
+ returned. This can be handy when you need to highlight short texts
+ such as a title or address, but fragmentation is not required. If
+ `number_of_fragments` is `0`, `fragment_size` is ignored. Defaults
+ to `5` if omitted.
+ :arg options:
+ :arg order: Sorts highlighted fragments by score when set to `score`.
+ By default, fragments will be output in the order they appear in
+ the field (order: `none`). Setting this option to `score` will
+ output the most relevant fragments first. Each highlighter applies
+ its own logic to compute relevancy scores. Defaults to `none` if
+ omitted.
+ :arg phrase_limit: Controls the number of matching phrases in a
+ document that are considered. Prevents the `fvh` highlighter from
+ analyzing too many phrases and consuming too much memory. When
+ using `matched_fields`, `phrase_limit` phrases per matched field
+ are considered. Raising the limit increases query time and
+ consumes more memory. Only supported by the `fvh` highlighter.
+ Defaults to `256` if omitted.
+ :arg post_tags: Use in conjunction with `pre_tags` to define the HTML
+ tags to use for the highlighted text. By default, highlighted text
+ is wrapped in `<em>` and `</em>` tags.
+ :arg pre_tags: Use in conjunction with `post_tags` to define the HTML
+ tags to use for the highlighted text. By default, highlighted text
+ is wrapped in `<em>` and `</em>` tags.
+ :arg require_field_match: By default, only fields that contain a
+ query match are highlighted. Set to `false` to highlight all
+ fields. Defaults to `True` if omitted.
+ :arg tags_schema: Set to `styled` to use the built-in tag schema.
+ """
+
+ type: Union[Literal["plain", "fvh", "unified"], DefaultType]
+ boundary_chars: Union[str, DefaultType]
+ boundary_max_scan: Union[int, DefaultType]
+ boundary_scanner: Union[Literal["chars", "sentence", "word"], DefaultType]
+ boundary_scanner_locale: Union[str, DefaultType]
+ force_source: Union[bool, DefaultType]
+ fragmenter: Union[Literal["simple", "span"], DefaultType]
+ fragment_size: Union[int, DefaultType]
+ highlight_filter: Union[bool, DefaultType]
+ highlight_query: Union[Query, DefaultType]
+ max_fragment_length: Union[int, DefaultType]
+ max_analyzed_offset: Union[int, DefaultType]
+ no_match_size: Union[int, DefaultType]
+ number_of_fragments: Union[int, DefaultType]
+ options: Union[Mapping[str, Any], DefaultType]
+ order: Union[Literal["score"], DefaultType]
+ phrase_limit: Union[int, DefaultType]
+ post_tags: Union[Sequence[str], DefaultType]
+ pre_tags: Union[Sequence[str], DefaultType]
+ require_field_match: Union[bool, DefaultType]
+ tags_schema: Union[Literal["styled"], DefaultType]
+
+ def __init__(
+ self,
+ *,
+ type: Union[Literal["plain", "fvh", "unified"], DefaultType] = DEFAULT,
+ boundary_chars: Union[str, DefaultType] = DEFAULT,
+ boundary_max_scan: Union[int, DefaultType] = DEFAULT,
+ boundary_scanner: Union[
+ Literal["chars", "sentence", "word"], DefaultType
+ ] = DEFAULT,
+ boundary_scanner_locale: Union[str, DefaultType] = DEFAULT,
+ force_source: Union[bool, DefaultType] = DEFAULT,
+ fragmenter: Union[Literal["simple", "span"], DefaultType] = DEFAULT,
+ fragment_size: Union[int, DefaultType] = DEFAULT,
+ highlight_filter: Union[bool, DefaultType] = DEFAULT,
+ highlight_query: Union[Query, DefaultType] = DEFAULT,
+ max_fragment_length: Union[int, DefaultType] = DEFAULT,
+ max_analyzed_offset: Union[int, DefaultType] = DEFAULT,
+ no_match_size: Union[int, DefaultType] = DEFAULT,
+ number_of_fragments: Union[int, DefaultType] = DEFAULT,
+ options: Union[Mapping[str, Any], DefaultType] = DEFAULT,
+ order: Union[Literal["score"], DefaultType] = DEFAULT,
+ phrase_limit: Union[int, DefaultType] = DEFAULT,
+ post_tags: Union[Sequence[str], DefaultType] = DEFAULT,
+ pre_tags: Union[Sequence[str], DefaultType] = DEFAULT,
+ require_field_match: Union[bool, DefaultType] = DEFAULT,
+ tags_schema: Union[Literal["styled"], DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if type is not DEFAULT:
+ kwargs["type"] = type
+ if boundary_chars is not DEFAULT:
+ kwargs["boundary_chars"] = boundary_chars
+ if boundary_max_scan is not DEFAULT:
+ kwargs["boundary_max_scan"] = boundary_max_scan
+ if boundary_scanner is not DEFAULT:
+ kwargs["boundary_scanner"] = boundary_scanner
+ if boundary_scanner_locale is not DEFAULT:
+ kwargs["boundary_scanner_locale"] = boundary_scanner_locale
+ if force_source is not DEFAULT:
+ kwargs["force_source"] = force_source
+ if fragmenter is not DEFAULT:
+ kwargs["fragmenter"] = fragmenter
+ if fragment_size is not DEFAULT:
+ kwargs["fragment_size"] = fragment_size
+ if highlight_filter is not DEFAULT:
+ kwargs["highlight_filter"] = highlight_filter
+ if highlight_query is not DEFAULT:
+ kwargs["highlight_query"] = highlight_query
+ if max_fragment_length is not DEFAULT:
+ kwargs["max_fragment_length"] = max_fragment_length
+ if max_analyzed_offset is not DEFAULT:
+ kwargs["max_analyzed_offset"] = max_analyzed_offset
+ if no_match_size is not DEFAULT:
+ kwargs["no_match_size"] = no_match_size
+ if number_of_fragments is not DEFAULT:
+ kwargs["number_of_fragments"] = number_of_fragments
+ if options is not DEFAULT:
+ kwargs["options"] = options
+ if order is not DEFAULT:
+ kwargs["order"] = order
+ if phrase_limit is not DEFAULT:
+ kwargs["phrase_limit"] = phrase_limit
+ if post_tags is not DEFAULT:
+ kwargs["post_tags"] = post_tags
+ if pre_tags is not DEFAULT:
+ kwargs["pre_tags"] = pre_tags
+ if require_field_match is not DEFAULT:
+ kwargs["require_field_match"] = require_field_match
+ if tags_schema is not DEFAULT:
+ kwargs["tags_schema"] = tags_schema
+ super().__init__(kwargs)
+
+
+class Highlight(HighlightBase):
+ """
+ :arg fields: (required)
+ :arg encoder:
+ :arg type:
+ :arg boundary_chars: A string that contains each boundary character.
+ Defaults to `.,!? \t\n` if omitted.
+ :arg boundary_max_scan: How far to scan for boundary characters.
+ Defaults to `20` if omitted.
+ :arg boundary_scanner: Specifies how to break the highlighted
+ fragments: chars, sentence, or word. Only valid for the unified
+ and fvh highlighters. Defaults to `sentence` for the `unified`
+ highlighter. Defaults to `chars` for the `fvh` highlighter.
+ :arg boundary_scanner_locale: Controls which locale is used to search
+ for sentence and word boundaries. This parameter takes the form of a
+ language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`.
+ Defaults to `Locale.ROOT` if omitted.
+ :arg force_source:
+ :arg fragmenter: Specifies how text should be broken up in highlight
+ snippets: `simple` or `span`. Only valid for the `plain`
+ highlighter. Defaults to `span` if omitted.
+ :arg fragment_size: The size of the highlighted fragment in
+ characters. Defaults to `100` if omitted.
+ :arg highlight_filter:
+ :arg highlight_query: Highlight matches for a query other than the
+ search query. This is especially useful if you use a rescore query
+ because those are not taken into account by highlighting by
+ default.
+ :arg max_fragment_length:
+ :arg max_analyzed_offset: If set to a non-negative value, highlighting
+ stops at this defined maximum limit. The rest of the text is not
+ processed, thus not highlighted and no error is returned. The
+ `max_analyzed_offset` query setting does not override the
+ `index.highlight.max_analyzed_offset` setting, which prevails when
+ it’s set to a lower value than the query setting.
+ :arg no_match_size: The amount of text you want to return from the
+ beginning of the field if there are no matching fragments to
+ highlight.
+ :arg number_of_fragments: The maximum number of fragments to return.
+ If the number of fragments is set to `0`, no fragments are
+ returned. Instead, the entire field contents are highlighted and
+ returned. This can be handy when you need to highlight short texts
+ such as a title or address, but fragmentation is not required. If
+ `number_of_fragments` is `0`, `fragment_size` is ignored. Defaults
+ to `5` if omitted.
+ :arg options:
+ :arg order: Sorts highlighted fragments by score when set to `score`.
+ By default, fragments will be output in the order they appear in
+ the field (order: `none`). Setting this option to `score` will
+ output the most relevant fragments first. Each highlighter applies
+ its own logic to compute relevancy scores. Defaults to `none` if
+ omitted.
+ :arg phrase_limit: Controls the number of matching phrases in a
+ document that are considered. Prevents the `fvh` highlighter from
+ analyzing too many phrases and consuming too much memory. When
+ using `matched_fields`, `phrase_limit` phrases per matched field
+ are considered. Raising the limit increases query time and
+ consumes more memory. Only supported by the `fvh` highlighter.
+ Defaults to `256` if omitted.
+ :arg post_tags: Use in conjunction with `pre_tags` to define the HTML
+ tags to use for the highlighted text. By default, highlighted text
+ is wrapped in `<em>` and `</em>` tags.
+ :arg pre_tags: Use in conjunction with `post_tags` to define the HTML
+ tags to use for the highlighted text. By default, highlighted text
+ is wrapped in `<em>` and `</em>` tags.
+ :arg require_field_match: By default, only fields that contain a
+ query match are highlighted. Set to `false` to highlight all
+ fields. Defaults to `True` if omitted.
+ :arg tags_schema: Set to `styled` to use the built-in tag schema.
+ """
+
+ fields: Union[
+ Mapping[Union[str, InstrumentedField], "HighlightField"],
+ Dict[str, Any],
+ DefaultType,
+ ]
+ encoder: Union[Literal["default", "html"], DefaultType]
+ type: Union[Literal["plain", "fvh", "unified"], DefaultType]
+ boundary_chars: Union[str, DefaultType]
+ boundary_max_scan: Union[int, DefaultType]
+ boundary_scanner: Union[Literal["chars", "sentence", "word"], DefaultType]
+ boundary_scanner_locale: Union[str, DefaultType]
+ force_source: Union[bool, DefaultType]
+ fragmenter: Union[Literal["simple", "span"], DefaultType]
+ fragment_size: Union[int, DefaultType]
+ highlight_filter: Union[bool, DefaultType]
+ highlight_query: Union[Query, DefaultType]
+ max_fragment_length: Union[int, DefaultType]
+ max_analyzed_offset: Union[int, DefaultType]
+ no_match_size: Union[int, DefaultType]
+ number_of_fragments: Union[int, DefaultType]
+ options: Union[Mapping[str, Any], DefaultType]
+ order: Union[Literal["score"], DefaultType]
+ phrase_limit: Union[int, DefaultType]
+ post_tags: Union[Sequence[str], DefaultType]
+ pre_tags: Union[Sequence[str], DefaultType]
+ require_field_match: Union[bool, DefaultType]
+ tags_schema: Union[Literal["styled"], DefaultType]
+
+ def __init__(
+ self,
+ *,
+ fields: Union[
+ Mapping[Union[str, InstrumentedField], "HighlightField"],
+ Dict[str, Any],
+ DefaultType,
+ ] = DEFAULT,
+ encoder: Union[Literal["default", "html"], DefaultType] = DEFAULT,
+ type: Union[Literal["plain", "fvh", "unified"], DefaultType] = DEFAULT,
+ boundary_chars: Union[str, DefaultType] = DEFAULT,
+ boundary_max_scan: Union[int, DefaultType] = DEFAULT,
+ boundary_scanner: Union[
+ Literal["chars", "sentence", "word"], DefaultType
+ ] = DEFAULT,
+ boundary_scanner_locale: Union[str, DefaultType] = DEFAULT,
+ force_source: Union[bool, DefaultType] = DEFAULT,
+ fragmenter: Union[Literal["simple", "span"], DefaultType] = DEFAULT,
+ fragment_size: Union[int, DefaultType] = DEFAULT,
+ highlight_filter: Union[bool, DefaultType] = DEFAULT,
+ highlight_query: Union[Query, DefaultType] = DEFAULT,
+ max_fragment_length: Union[int, DefaultType] = DEFAULT,
+ max_analyzed_offset: Union[int, DefaultType] = DEFAULT,
+ no_match_size: Union[int, DefaultType] = DEFAULT,
+ number_of_fragments: Union[int, DefaultType] = DEFAULT,
+ options: Union[Mapping[str, Any], DefaultType] = DEFAULT,
+ order: Union[Literal["score"], DefaultType] = DEFAULT,
+ phrase_limit: Union[int, DefaultType] = DEFAULT,
+ post_tags: Union[Sequence[str], DefaultType] = DEFAULT,
+ pre_tags: Union[Sequence[str], DefaultType] = DEFAULT,
+ require_field_match: Union[bool, DefaultType] = DEFAULT,
+ tags_schema: Union[Literal["styled"], DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if fields is not DEFAULT:
+ # keep the mapping intact; calling str() on it would corrupt the body
+ kwargs["fields"] = fields
+ if encoder is not DEFAULT:
+ kwargs["encoder"] = encoder
+ if type is not DEFAULT:
+ kwargs["type"] = type
+ if boundary_chars is not DEFAULT:
+ kwargs["boundary_chars"] = boundary_chars
+ if boundary_max_scan is not DEFAULT:
+ kwargs["boundary_max_scan"] = boundary_max_scan
+ if boundary_scanner is not DEFAULT:
+ kwargs["boundary_scanner"] = boundary_scanner
+ if boundary_scanner_locale is not DEFAULT:
+ kwargs["boundary_scanner_locale"] = boundary_scanner_locale
+ if force_source is not DEFAULT:
+ kwargs["force_source"] = force_source
+ if fragmenter is not DEFAULT:
+ kwargs["fragmenter"] = fragmenter
+ if fragment_size is not DEFAULT:
+ kwargs["fragment_size"] = fragment_size
+ if highlight_filter is not DEFAULT:
+ kwargs["highlight_filter"] = highlight_filter
+ if highlight_query is not DEFAULT:
+ kwargs["highlight_query"] = highlight_query
+ if max_fragment_length is not DEFAULT:
+ kwargs["max_fragment_length"] = max_fragment_length
+ if max_analyzed_offset is not DEFAULT:
+ kwargs["max_analyzed_offset"] = max_analyzed_offset
+ if no_match_size is not DEFAULT:
+ kwargs["no_match_size"] = no_match_size
+ if number_of_fragments is not DEFAULT:
+ kwargs["number_of_fragments"] = number_of_fragments
+ if options is not DEFAULT:
+ kwargs["options"] = options
+ if order is not DEFAULT:
+ kwargs["order"] = order
+ if phrase_limit is not DEFAULT:
+ kwargs["phrase_limit"] = phrase_limit
+ if post_tags is not DEFAULT:
+ kwargs["post_tags"] = post_tags
+ if pre_tags is not DEFAULT:
+ kwargs["pre_tags"] = pre_tags
+ if require_field_match is not DEFAULT:
+ kwargs["require_field_match"] = require_field_match
+ if tags_schema is not DEFAULT:
+ kwargs["tags_schema"] = tags_schema
+ super().__init__(**kwargs)
+
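+ # Illustrative usage sketch, mirroring the Elasticsearch highlighting
+ # docs: custom tags around matches in the `body` field:
+ #
+ #     Highlight(fields={"body": {}}, pre_tags=["<em>"], post_tags=["</em>"])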
+
+class HoltLinearModelSettings(AttrDict[Any]):
+ """
+ :arg alpha:
+ :arg beta:
+ """
+
+ alpha: Union[float, DefaultType]
+ beta: Union[float, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ alpha: Union[float, DefaultType] = DEFAULT,
+ beta: Union[float, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if alpha is not DEFAULT:
+ kwargs["alpha"] = alpha
+ if beta is not DEFAULT:
+ kwargs["beta"] = beta
+ super().__init__(kwargs)
+
+
+class HoltWintersModelSettings(AttrDict[Any]):
+ """
+ :arg alpha:
+ :arg beta:
+ :arg gamma:
+ :arg pad:
+ :arg period:
+ :arg type:
+ """
+
+ alpha: Union[float, DefaultType]
+ beta: Union[float, DefaultType]
+ gamma: Union[float, DefaultType]
+ pad: Union[bool, DefaultType]
+ period: Union[int, DefaultType]
+ type: Union[Literal["add", "mult"], DefaultType]
+
+ def __init__(
+ self,
+ *,
+ alpha: Union[float, DefaultType] = DEFAULT,
+ beta: Union[float, DefaultType] = DEFAULT,
+ gamma: Union[float, DefaultType] = DEFAULT,
+ pad: Union[bool, DefaultType] = DEFAULT,
+ period: Union[int, DefaultType] = DEFAULT,
+ type: Union[Literal["add", "mult"], DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if alpha is not DEFAULT:
+ kwargs["alpha"] = alpha
+ if beta is not DEFAULT:
+ kwargs["beta"] = beta
+ if gamma is not DEFAULT:
+ kwargs["gamma"] = gamma
+ if pad is not DEFAULT:
+ kwargs["pad"] = pad
+ if period is not DEFAULT:
+ kwargs["period"] = period
+ if type is not DEFAULT:
+ kwargs["type"] = type
+ super().__init__(kwargs)
+
+
+class InferenceConfigContainer(AttrDict[Any]):
+ """
+ :arg regression: Regression configuration for inference.
+ :arg classification: Classification configuration for inference.
+ """
+
+ regression: Union["RegressionInferenceOptions", Dict[str, Any], DefaultType]
+ classification: Union["ClassificationInferenceOptions", Dict[str, Any], DefaultType]
+
+ def __init__(
+ self,
+ *,
+ regression: Union[
+ "RegressionInferenceOptions", Dict[str, Any], DefaultType
+ ] = DEFAULT,
+ classification: Union[
+ "ClassificationInferenceOptions", Dict[str, Any], DefaultType
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if regression is not DEFAULT:
+ kwargs["regression"] = regression
+ if classification is not DEFAULT:
+ kwargs["classification"] = classification
+ super().__init__(kwargs)
+
+
+class InnerHits(AttrDict[Any]):
+ """
+ :arg name: The name for the particular inner hit definition in the
+ response. Useful when a search request contains multiple inner
+ hits.
+ :arg size: The maximum number of hits to return per `inner_hits`.
+ Defaults to `3` if omitted.
+ :arg from: Inner hit starting document offset.
+ :arg collapse:
+ :arg docvalue_fields:
+ :arg explain:
+ :arg highlight:
+ :arg ignore_unmapped:
+ :arg script_fields:
+ :arg seq_no_primary_term:
+ :arg fields:
+ :arg sort: How the inner hits should be sorted per `inner_hits`. By
+ default, inner hits are sorted by score.
+ :arg _source:
+ :arg stored_fields:
+ :arg track_scores:
+ :arg version:
+ """
+
+ name: Union[str, DefaultType]
+ size: Union[int, DefaultType]
+ from_: Union[int, DefaultType]
+ collapse: Union["FieldCollapse", Dict[str, Any], DefaultType]
+ docvalue_fields: Union[
+ Sequence["FieldAndFormat"], Sequence[Dict[str, Any]], DefaultType
+ ]
+ explain: Union[bool, DefaultType]
+ highlight: Union["Highlight", Dict[str, Any], DefaultType]
+ ignore_unmapped: Union[bool, DefaultType]
+ script_fields: Union[
+ Mapping[Union[str, InstrumentedField], "ScriptField"],
+ Dict[str, Any],
+ DefaultType,
+ ]
+ seq_no_primary_term: Union[bool, DefaultType]
+ fields: Union[
+ Union[str, InstrumentedField],
+ Sequence[Union[str, InstrumentedField]],
+ DefaultType,
+ ]
+ sort: Union[
+ Union[Union[str, InstrumentedField], "SortOptions"],
+ Sequence[Union[Union[str, InstrumentedField], "SortOptions"]],
+ Dict[str, Any],
+ DefaultType,
+ ]
+ _source: Union[bool, "SourceFilter", Dict[str, Any], DefaultType]
+ stored_fields: Union[
+ Union[str, InstrumentedField],
+ Sequence[Union[str, InstrumentedField]],
+ DefaultType,
+ ]
+ track_scores: Union[bool, DefaultType]
+ version: Union[bool, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ name: Union[str, DefaultType] = DEFAULT,
+ size: Union[int, DefaultType] = DEFAULT,
+ from_: Union[int, DefaultType] = DEFAULT,
+ collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] = DEFAULT,
+ docvalue_fields: Union[
+ Sequence["FieldAndFormat"], Sequence[Dict[str, Any]], DefaultType
+ ] = DEFAULT,
+ explain: Union[bool, DefaultType] = DEFAULT,
+ highlight: Union["Highlight", Dict[str, Any], DefaultType] = DEFAULT,
+ ignore_unmapped: Union[bool, DefaultType] = DEFAULT,
+ script_fields: Union[
+ Mapping[Union[str, InstrumentedField], "ScriptField"],
+ Dict[str, Any],
+ DefaultType,
+ ] = DEFAULT,
+ seq_no_primary_term: Union[bool, DefaultType] = DEFAULT,
+ fields: Union[
+ Union[str, InstrumentedField],
+ Sequence[Union[str, InstrumentedField]],
DefaultType,
] = DEFAULT,
sort: Union[
@@ -540,6 +1314,34 @@ def __init__(
super().__init__(**kwargs)
+class IpRangeAggregationRange(AttrDict[Any]):
+ """
+ :arg from: Start of the range.
+ :arg mask: IP range defined as a CIDR mask.
+ :arg to: End of the range.
+ """
+
+ from_: Union[str, None, DefaultType]
+ mask: Union[str, DefaultType]
+ to: Union[str, None, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ from_: Union[str, None, DefaultType] = DEFAULT,
+ mask: Union[str, DefaultType] = DEFAULT,
+ to: Union[str, None, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if from_ is not DEFAULT:
+ kwargs["from_"] = from_
+ if mask is not DEFAULT:
+ kwargs["mask"] = mask
+ if to is not DEFAULT:
+ kwargs["to"] = to
+ super().__init__(kwargs)
+
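+ # Illustrative usage sketch: half-open ranges for an `ip_range`
+ # aggregation, as in the Elasticsearch docs (`from_` avoids the reserved
+ # word `from`):
+ #
+ #     [IpRangeAggregationRange(to="10.0.0.5"),
+ #      IpRangeAggregationRange(from_="10.0.0.5")]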
+
class LatLonGeoLocation(AttrDict[Any]):
"""
:arg lat: (required) Latitude
@@ -876,56 +1678,198 @@ class MatchQuery(QueryBase):
def __init__(
self,
*,
- query: Union[str, float, bool, DefaultType] = DEFAULT,
- analyzer: Union[str, DefaultType] = DEFAULT,
- auto_generate_synonyms_phrase_query: Union[bool, DefaultType] = DEFAULT,
- cutoff_frequency: Union[float, DefaultType] = DEFAULT,
- fuzziness: Union[str, int, DefaultType] = DEFAULT,
- fuzzy_rewrite: Union[str, DefaultType] = DEFAULT,
- fuzzy_transpositions: Union[bool, DefaultType] = DEFAULT,
- lenient: Union[bool, DefaultType] = DEFAULT,
- max_expansions: Union[int, DefaultType] = DEFAULT,
- minimum_should_match: Union[int, str, DefaultType] = DEFAULT,
- operator: Union[Literal["and", "or"], DefaultType] = DEFAULT,
- prefix_length: Union[int, DefaultType] = DEFAULT,
- zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT,
- boost: Union[float, DefaultType] = DEFAULT,
- _name: Union[str, DefaultType] = DEFAULT,
+ query: Union[str, float, bool, DefaultType] = DEFAULT,
+ analyzer: Union[str, DefaultType] = DEFAULT,
+ auto_generate_synonyms_phrase_query: Union[bool, DefaultType] = DEFAULT,
+ cutoff_frequency: Union[float, DefaultType] = DEFAULT,
+ fuzziness: Union[str, int, DefaultType] = DEFAULT,
+ fuzzy_rewrite: Union[str, DefaultType] = DEFAULT,
+ fuzzy_transpositions: Union[bool, DefaultType] = DEFAULT,
+ lenient: Union[bool, DefaultType] = DEFAULT,
+ max_expansions: Union[int, DefaultType] = DEFAULT,
+ minimum_should_match: Union[int, str, DefaultType] = DEFAULT,
+ operator: Union[Literal["and", "or"], DefaultType] = DEFAULT,
+ prefix_length: Union[int, DefaultType] = DEFAULT,
+ zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT,
+ boost: Union[float, DefaultType] = DEFAULT,
+ _name: Union[str, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if query is not DEFAULT:
+ kwargs["query"] = query
+ if analyzer is not DEFAULT:
+ kwargs["analyzer"] = analyzer
+ if auto_generate_synonyms_phrase_query is not DEFAULT:
+ kwargs["auto_generate_synonyms_phrase_query"] = (
+ auto_generate_synonyms_phrase_query
+ )
+ if cutoff_frequency is not DEFAULT:
+ kwargs["cutoff_frequency"] = cutoff_frequency
+ if fuzziness is not DEFAULT:
+ kwargs["fuzziness"] = fuzziness
+ if fuzzy_rewrite is not DEFAULT:
+ kwargs["fuzzy_rewrite"] = fuzzy_rewrite
+ if fuzzy_transpositions is not DEFAULT:
+ kwargs["fuzzy_transpositions"] = fuzzy_transpositions
+ if lenient is not DEFAULT:
+ kwargs["lenient"] = lenient
+ if max_expansions is not DEFAULT:
+ kwargs["max_expansions"] = max_expansions
+ if minimum_should_match is not DEFAULT:
+ kwargs["minimum_should_match"] = minimum_should_match
+ if operator is not DEFAULT:
+ kwargs["operator"] = operator
+ if prefix_length is not DEFAULT:
+ kwargs["prefix_length"] = prefix_length
+ if zero_terms_query is not DEFAULT:
+ kwargs["zero_terms_query"] = zero_terms_query
+ if boost is not DEFAULT:
+ kwargs["boost"] = boost
+ if _name is not DEFAULT:
+ kwargs["_name"] = _name
+ super().__init__(**kwargs)
+
+
+class PipelineAggregationBase(BucketPathAggregation):
+ """
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+
+ format: Union[str, DefaultType]
+ gap_policy: Union[Literal["skip", "insert_zeros", "keep_values"], DefaultType]
+ buckets_path: Union[str, Sequence[str], Mapping[str, str], DefaultType]
+
+ def __init__(
+ self,
+ *,
+ format: Union[str, DefaultType] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], DefaultType
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], DefaultType
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if format is not DEFAULT:
+ kwargs["format"] = format
+ if gap_policy is not DEFAULT:
+ kwargs["gap_policy"] = gap_policy
+ if buckets_path is not DEFAULT:
+ kwargs["buckets_path"] = buckets_path
+ super().__init__(**kwargs)
+
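+ # Illustrative note: `buckets_path` uses the pipeline syntax to point at
+ # the values to operate on, e.g. an `avg_bucket` pipeline might use
+ # {"buckets_path": "sales_per_month>sales", "gap_policy": "skip"}.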
+
+class MovingAverageAggregationBase(PipelineAggregationBase):
+ """
+ :arg minimize:
+ :arg predict:
+ :arg window:
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
+ """
+
+ minimize: Union[bool, DefaultType]
+ predict: Union[int, DefaultType]
+ window: Union[int, DefaultType]
+ format: Union[str, DefaultType]
+ gap_policy: Union[Literal["skip", "insert_zeros", "keep_values"], DefaultType]
+ buckets_path: Union[str, Sequence[str], Mapping[str, str], DefaultType]
+
+ def __init__(
+ self,
+ *,
+ minimize: Union[bool, DefaultType] = DEFAULT,
+ predict: Union[int, DefaultType] = DEFAULT,
+ window: Union[int, DefaultType] = DEFAULT,
+ format: Union[str, DefaultType] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], DefaultType
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], DefaultType
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if minimize is not DEFAULT:
+ kwargs["minimize"] = minimize
+ if predict is not DEFAULT:
+ kwargs["predict"] = predict
+ if window is not DEFAULT:
+ kwargs["window"] = window
+ if format is not DEFAULT:
+ kwargs["format"] = format
+ if gap_policy is not DEFAULT:
+ kwargs["gap_policy"] = gap_policy
+ if buckets_path is not DEFAULT:
+ kwargs["buckets_path"] = buckets_path
+ super().__init__(**kwargs)
+
+
+class MultiTermLookup(AttrDict[Any]):
+ """
+ :arg field: (required) The field from which to retrieve terms.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
+ """
+
+ field: Union[str, InstrumentedField, DefaultType]
+ missing: Union[str, int, float, bool, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ missing: Union[str, int, float, bool, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if field is not DEFAULT:
+ kwargs["field"] = str(field)
+ if missing is not DEFAULT:
+ kwargs["missing"] = missing
+ super().__init__(kwargs)
+
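+ # Illustrative usage sketch: the per-field entries of a `multi_terms`
+ # aggregation, as in the Elasticsearch docs:
+ #
+ #     [MultiTermLookup(field="genre"), MultiTermLookup(field="product")]
+ #     # serializes to [{"field": "genre"}, {"field": "product"}]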
+
+class MutualInformationHeuristic(AttrDict[Any]):
+ """
+ :arg background_is_superset: Set to `false` if you defined a custom
+ background filter that represents a different set of documents
+ that you want to compare to.
+ :arg include_negatives: Set to `false` to filter out the terms that
+ appear less often in the subset than in documents outside the
+ subset.
+ """
+
+ background_is_superset: Union[bool, DefaultType]
+ include_negatives: Union[bool, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ background_is_superset: Union[bool, DefaultType] = DEFAULT,
+ include_negatives: Union[bool, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if query is not DEFAULT:
- kwargs["query"] = query
- if analyzer is not DEFAULT:
- kwargs["analyzer"] = analyzer
- if auto_generate_synonyms_phrase_query is not DEFAULT:
- kwargs["auto_generate_synonyms_phrase_query"] = (
- auto_generate_synonyms_phrase_query
- )
- if cutoff_frequency is not DEFAULT:
- kwargs["cutoff_frequency"] = cutoff_frequency
- if fuzziness is not DEFAULT:
- kwargs["fuzziness"] = fuzziness
- if fuzzy_rewrite is not DEFAULT:
- kwargs["fuzzy_rewrite"] = fuzzy_rewrite
- if fuzzy_transpositions is not DEFAULT:
- kwargs["fuzzy_transpositions"] = fuzzy_transpositions
- if lenient is not DEFAULT:
- kwargs["lenient"] = lenient
- if max_expansions is not DEFAULT:
- kwargs["max_expansions"] = max_expansions
- if minimum_should_match is not DEFAULT:
- kwargs["minimum_should_match"] = minimum_should_match
- if operator is not DEFAULT:
- kwargs["operator"] = operator
- if prefix_length is not DEFAULT:
- kwargs["prefix_length"] = prefix_length
- if zero_terms_query is not DEFAULT:
- kwargs["zero_terms_query"] = zero_terms_query
- if boost is not DEFAULT:
- kwargs["boost"] = boost
- if _name is not DEFAULT:
- kwargs["_name"] = _name
- super().__init__(**kwargs)
+ if background_is_superset is not DEFAULT:
+ kwargs["background_is_superset"] = background_is_superset
+ if include_negatives is not DEFAULT:
+ kwargs["include_negatives"] = include_negatives
+ super().__init__(kwargs)
+
+
+class PercentageScoreHeuristic(AttrDict[Any]):
+ pass
class PinnedDoc(AttrDict[Any]):
@@ -1176,6 +2120,47 @@ def __init__(
super().__init__(kwargs)
+class ScriptField(AttrDict[Any]):
+ """
+ :arg script: (required)
+ :arg ignore_failure:
+ """
+
+ script: Union["Script", Dict[str, Any], DefaultType]
+ ignore_failure: Union[bool, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
+ ignore_failure: Union[bool, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if script is not DEFAULT:
+ kwargs["script"] = script
+ if ignore_failure is not DEFAULT:
+ kwargs["ignore_failure"] = ignore_failure
+ super().__init__(kwargs)
+
+
+class ScriptedHeuristic(AttrDict[Any]):
+ """
+ :arg script: (required)
+ """
+
+ script: Union["Script", Dict[str, Any], DefaultType]
+
+ def __init__(
+ self,
+ *,
+ script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if script is not DEFAULT:
+ kwargs["script"] = script
+ super().__init__(kwargs)
+
+
class ShapeFieldQuery(AttrDict[Any]):
"""
:arg indexed_shape: Queries using a pre-indexed shape.
@@ -1210,6 +2195,86 @@ def __init__(
super().__init__(kwargs)
+class SortOptions(AttrDict[Any]):
+ """
+ :arg _field: The field to use in this query.
+ :arg _value: The query value for the field.
+ :arg _score:
+ :arg _doc:
+ :arg _geo_distance:
+ :arg _script:
+ """
+
+ _field: Union[str, "InstrumentedField", "DefaultType"]
+ _value: Union["FieldSort", Dict[str, Any], "DefaultType"]
+ _score: Union["ScoreSort", Dict[str, Any], DefaultType]
+ _doc: Union["ScoreSort", Dict[str, Any], DefaultType]
+ _geo_distance: Union["GeoDistanceSort", Dict[str, Any], DefaultType]
+ _script: Union["ScriptSort", Dict[str, Any], DefaultType]
+
+ def __init__(
+ self,
+ _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ _value: Union["FieldSort", Dict[str, Any], "DefaultType"] = DEFAULT,
+ *,
+ _score: Union["ScoreSort", Dict[str, Any], DefaultType] = DEFAULT,
+ _doc: Union["ScoreSort", Dict[str, Any], DefaultType] = DEFAULT,
+ _geo_distance: Union["GeoDistanceSort", Dict[str, Any], DefaultType] = DEFAULT,
+ _script: Union["ScriptSort", Dict[str, Any], DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if _field is not DEFAULT:
+ kwargs[str(_field)] = _value
+ if _score is not DEFAULT:
+ kwargs["_score"] = _score
+ if _doc is not DEFAULT:
+ kwargs["_doc"] = _doc
+ if _geo_distance is not DEFAULT:
+ kwargs["_geo_distance"] = _geo_distance
+ if _script is not DEFAULT:
+ kwargs["_script"] = _script
+ super().__init__(kwargs)
+
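+ # Illustrative usage sketch: the positional (_field, _value) pair becomes
+ # the sort key, so:
+ #
+ #     SortOptions("post_date", FieldSort(order="desc"))
+ #     # serializes to {"post_date": {"order": "desc"}}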
+
+class SourceFilter(AttrDict[Any]):
+ """
+ :arg excludes:
+ :arg includes:
+ """
+
+ excludes: Union[
+ Union[str, InstrumentedField],
+ Sequence[Union[str, InstrumentedField]],
+ DefaultType,
+ ]
+ includes: Union[
+ Union[str, InstrumentedField],
+ Sequence[Union[str, InstrumentedField]],
+ DefaultType,
+ ]
+
+ def __init__(
+ self,
+ *,
+ excludes: Union[
+ Union[str, InstrumentedField],
+ Sequence[Union[str, InstrumentedField]],
+ DefaultType,
+ ] = DEFAULT,
+ includes: Union[
+ Union[str, InstrumentedField],
+ Sequence[Union[str, InstrumentedField]],
+ DefaultType,
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ # excludes/includes may be a single pattern or a sequence of patterns;
+ # pass them through unchanged rather than stringifying the whole value
+ if excludes is not DEFAULT:
+ kwargs["excludes"] = excludes
+ if includes is not DEFAULT:
+ kwargs["includes"] = includes
+ super().__init__(kwargs)
+
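+ # Illustrative usage sketch: trimming the returned `_source`:
+ #
+ #     SourceFilter(includes=["obj.*"], excludes=["*.description"])
+ #     # serializes to {"includes": ["obj.*"], "excludes": ["*.description"]}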
+
class SpanQuery(AttrDict[Any]):
"""
:arg span_containing: Accepts a list of span queries, but only returns
@@ -1330,6 +2395,23 @@ def __init__(
super().__init__(**kwargs)
+class TDigest(AttrDict[Any]):
+ """
+ :arg compression: Limits the maximum number of nodes used by the
+ underlying TDigest algorithm to `20 * compression`, enabling
+ control of memory usage and approximation error.
+ """
+
+ compression: Union[int, DefaultType]
+
+ def __init__(
+ self, *, compression: Union[int, DefaultType] = DEFAULT, **kwargs: Any
+ ):
+ if compression is not DEFAULT:
+ kwargs["compression"] = compression
+ super().__init__(kwargs)
+
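+ # Illustrative usage sketch, assuming the generated Percentiles
+ # aggregation class: trading accuracy for memory:
+ #
+ #     Percentiles(field="load_time", tdigest=TDigest(compression=200))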
+
class TermQuery(QueryBase):
"""
:arg value: (required) Term you wish to find in the provided field.
@@ -1403,10 +2485,35 @@ def __init__(
super().__init__(kwargs)
+class TermsPartition(AttrDict[Any]):
+ """
+ :arg num_partitions: (required) The number of partitions.
+ :arg partition: (required) The partition number for this request.
+ """
+
+ num_partitions: Union[int, DefaultType]
+ partition: Union[int, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ num_partitions: Union[int, DefaultType] = DEFAULT,
+ partition: Union[int, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if num_partitions is not DEFAULT:
+ kwargs["num_partitions"] = num_partitions
+ if partition is not DEFAULT:
+ kwargs["partition"] = partition
+ super().__init__(kwargs)
+
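+ # Illustrative usage sketch, assuming the generated Terms aggregation
+ # class: walking a large terms aggregation in slices via `include`
+ # partitioning, as in the Elasticsearch docs:
+ #
+ #     Terms(field="account_id",
+ #           include=TermsPartition(partition=0, num_partitions=20))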
+
class TermsSetQuery(QueryBase):
"""
:arg terms: (required) Array of terms you wish to find in the provided
field.
+ :arg minimum_should_match: Specification describing the number of
+ matching terms required to return a document.
:arg minimum_should_match_field: Numeric field containing the number
of matching terms required to return a document.
:arg minimum_should_match_script: Custom script containing the number
@@ -1420,6 +2527,7 @@ class TermsSetQuery(QueryBase):
"""
terms: Union[Sequence[str], DefaultType]
+ minimum_should_match: Union[int, str, DefaultType]
minimum_should_match_field: Union[str, InstrumentedField, DefaultType]
minimum_should_match_script: Union["Script", Dict[str, Any], DefaultType]
boost: Union[float, DefaultType]
@@ -1429,6 +2537,7 @@ def __init__(
self,
*,
terms: Union[Sequence[str], DefaultType] = DEFAULT,
+ minimum_should_match: Union[int, str, DefaultType] = DEFAULT,
minimum_should_match_field: Union[
str, InstrumentedField, DefaultType
] = DEFAULT,
@@ -1441,6 +2550,8 @@ def __init__(
):
if terms is not DEFAULT:
kwargs["terms"] = terms
+ if minimum_should_match is not DEFAULT:
+ kwargs["minimum_should_match"] = minimum_should_match
if minimum_should_match_field is not DEFAULT:
kwargs["minimum_should_match_field"] = str(minimum_should_match_field)
if minimum_should_match_script is not DEFAULT:
@@ -1452,6 +2563,35 @@ def __init__(
super().__init__(**kwargs)
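+ # Illustrative usage sketch: with the new `minimum_should_match`, a fixed
+ # match count can be given inline instead of via a field or script:
+ #
+ #     TermsSetQuery(terms=["c++", "java", "php"], minimum_should_match=2)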
+class TestPopulation(AttrDict[Any]):
+ """
+ :arg field: (required) The field to aggregate.
+ :arg script:
+ :arg filter: A filter used to define a set of records to run unpaired
+ t-test on.
+ """
+
+ field: Union[str, InstrumentedField, DefaultType]
+ script: Union["Script", Dict[str, Any], DefaultType]
+ filter: Union[Query, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
+ filter: Union[Query, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if field is not DEFAULT:
+ kwargs["field"] = str(field)
+ if script is not DEFAULT:
+ kwargs["script"] = script
+ if filter is not DEFAULT:
+ kwargs["filter"] = filter
+ super().__init__(kwargs)
+
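+ # Illustrative usage sketch: the `a` and `b` populations of a `t_test`
+ # aggregation, as in the Elasticsearch docs:
+ #
+ #     {"t_test": {"a": TestPopulation(field="startup_time_before"),
+ #                 "b": TestPopulation(field="startup_time_after"),
+ #                 "type": "paired"}}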
+
class TextExpansionQuery(QueryBase):
"""
:arg model_id: (required) The text expansion NLP model to use
@@ -1581,6 +2721,24 @@ def __init__(
super().__init__(kwargs)
+class TopMetricsValue(AttrDict[Any]):
+ """
+ :arg field: (required) A field to return as a metric.
+ """
+
+ field: Union[str, InstrumentedField, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if field is not DEFAULT:
+ kwargs["field"] = str(field)
+ super().__init__(kwargs)
+
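+ # Illustrative usage sketch, assuming the generated TopMetrics
+ # aggregation class: selecting which field `top_metrics` reports:
+ #
+ #     TopMetrics(metrics=TopMetricsValue(field="m"), sort={"s": "desc"})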
+
class TopRightBottomLeftGeoBounds(AttrDict[Any]):
"""
:arg top_right: (required)
@@ -1632,6 +2790,34 @@ def __init__(
super().__init__(kwargs)
+class WeightedAverageValue(AttrDict[Any]):
+ """
+ :arg field: The field from which to extract the values or weights.
+ :arg missing: A value or weight to use if the field is missing.
+ :arg script:
+ """
+
+ field: Union[str, InstrumentedField, DefaultType]
+ missing: Union[float, DefaultType]
+ script: Union["Script", Dict[str, Any], DefaultType]
+
+ def __init__(
+ self,
+ *,
+ field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ missing: Union[float, DefaultType] = DEFAULT,
+ script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if field is not DEFAULT:
+ kwargs["field"] = str(field)
+ if missing is not DEFAULT:
+ kwargs["missing"] = missing
+ if script is not DEFAULT:
+ kwargs["script"] = script
+ super().__init__(kwargs)
+
+
class WeightedTokensQuery(QueryBase):
"""
:arg tokens: (required) The tokens representing this query
@@ -1736,272 +2922,71 @@ def __init__(self, *, wkt: Union[str, DefaultType] = DEFAULT, **kwargs: Any):
super().__init__(kwargs)
-class FieldLookup(AttrDict[Any]):
- """
- :arg id: (required) `id` of the document.
- :arg index: Index from which to retrieve the document.
- :arg path: Name of the field.
- :arg routing: Custom routing value.
- """
-
- id: Union[str, DefaultType]
- index: Union[str, DefaultType]
- path: Union[str, InstrumentedField, DefaultType]
- routing: Union[str, DefaultType]
-
- def __init__(
- self,
- *,
- id: Union[str, DefaultType] = DEFAULT,
- index: Union[str, DefaultType] = DEFAULT,
- path: Union[str, InstrumentedField, DefaultType] = DEFAULT,
- routing: Union[str, DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if id is not DEFAULT:
- kwargs["id"] = id
- if index is not DEFAULT:
- kwargs["index"] = index
- if path is not DEFAULT:
- kwargs["path"] = str(path)
- if routing is not DEFAULT:
- kwargs["routing"] = routing
- super().__init__(kwargs)
-
-
-class FieldCollapse(AttrDict[Any]):
+class BucketCorrelationFunctionCountCorrelation(AttrDict[Any]):
"""
- :arg field: (required) The field to collapse the result set on
- :arg inner_hits: The number of inner hits and their sort order
- :arg max_concurrent_group_searches: The number of concurrent requests
- allowed to retrieve the inner_hits per group
- :arg collapse:
+ :arg indicator: (required) The indicator with which to correlate the
+ configured `bucket_path` values.
"""
- field: Union[str, InstrumentedField, DefaultType]
- inner_hits: Union["InnerHits", Sequence["InnerHits"], Dict[str, Any], DefaultType]
- max_concurrent_group_searches: Union[int, DefaultType]
- collapse: Union["FieldCollapse", Dict[str, Any], DefaultType]
+ indicator: Union[
+ "BucketCorrelationFunctionCountCorrelationIndicator",
+ Dict[str, Any],
+ DefaultType,
+ ]
def __init__(
self,
*,
- field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
- inner_hits: Union[
- "InnerHits", Sequence["InnerHits"], Dict[str, Any], DefaultType
+ indicator: Union[
+ "BucketCorrelationFunctionCountCorrelationIndicator",
+ Dict[str, Any],
+ DefaultType,
] = DEFAULT,
- max_concurrent_group_searches: Union[int, DefaultType] = DEFAULT,
- collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if field is not DEFAULT:
- kwargs["field"] = str(field)
- if inner_hits is not DEFAULT:
- kwargs["inner_hits"] = inner_hits
- if max_concurrent_group_searches is not DEFAULT:
- kwargs["max_concurrent_group_searches"] = max_concurrent_group_searches
- if collapse is not DEFAULT:
- kwargs["collapse"] = collapse
- super().__init__(kwargs)
-
-
-class FieldAndFormat(AttrDict[Any]):
- """
- :arg field: (required) Wildcard pattern. The request returns values
- for field names matching this pattern.
- :arg format: Format in which the values are returned.
- :arg include_unmapped:
- """
-
- field: Union[str, InstrumentedField, DefaultType]
- format: Union[str, DefaultType]
- include_unmapped: Union[bool, DefaultType]
-
- def __init__(
- self,
- *,
- field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
- format: Union[str, DefaultType] = DEFAULT,
- include_unmapped: Union[bool, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if field is not DEFAULT:
- kwargs["field"] = str(field)
- if format is not DEFAULT:
- kwargs["format"] = format
- if include_unmapped is not DEFAULT:
- kwargs["include_unmapped"] = include_unmapped
+ if indicator is not DEFAULT:
+ kwargs["indicator"] = indicator
super().__init__(kwargs)
-class HighlightBase(AttrDict[Any]):
+class FieldLookup(AttrDict[Any]):
"""
- :arg type:
- :arg boundary_chars: A string that contains each boundary character.
- Defaults to `.,!? \t\n` if omitted.
- :arg boundary_max_scan: How far to scan for boundary characters.
- Defaults to `20` if omitted.
- :arg boundary_scanner: Specifies how to break the highlighted
- fragments: chars, sentence, or word. Only valid for the unified
- and fvh highlighters. Defaults to `sentence` for the `unified`
- highlighter. Defaults to `chars` for the `fvh` highlighter.
- :arg boundary_scanner_locale: Controls which locale is used to search
- for sentence and word boundaries. This parameter takes a form of a
- language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`.
- Defaults to `Locale.ROOT` if omitted.
- :arg force_source:
- :arg fragmenter: Specifies how text should be broken up in highlight
- snippets: `simple` or `span`. Only valid for the `plain`
- highlighter. Defaults to `span` if omitted.
- :arg fragment_size: The size of the highlighted fragment in
- characters. Defaults to `100` if omitted.
- :arg highlight_filter:
- :arg highlight_query: Highlight matches for a query other than the
- search query. This is especially useful if you use a rescore query
- because those are not taken into account by highlighting by
- default.
- :arg max_fragment_length:
- :arg max_analyzed_offset: If set to a non-negative value, highlighting
- stops at this defined maximum limit. The rest of the text is not
- processed, thus not highlighted and no error is returned The
- `max_analyzed_offset` query setting does not override the
- `index.highlight.max_analyzed_offset` setting, which prevails when
- it’s set to lower value than the query setting.
- :arg no_match_size: The amount of text you want to return from the
- beginning of the field if there are no matching fragments to
- highlight.
- :arg number_of_fragments: The maximum number of fragments to return.
- If the number of fragments is set to `0`, no fragments are
- returned. Instead, the entire field contents are highlighted and
- returned. This can be handy when you need to highlight short texts
- such as a title or address, but fragmentation is not required. If
- `number_of_fragments` is `0`, `fragment_size` is ignored. Defaults
- to `5` if omitted.
- :arg options:
- :arg order: Sorts highlighted fragments by score when set to `score`.
- By default, fragments will be output in the order they appear in
- the field (order: `none`). Setting this option to `score` will
- output the most relevant fragments first. Each highlighter applies
- its own logic to compute relevancy scores. Defaults to `none` if
- omitted.
- :arg phrase_limit: Controls the number of matching phrases in a
- document that are considered. Prevents the `fvh` highlighter from
- analyzing too many phrases and consuming too much memory. When
- using `matched_fields`, `phrase_limit` phrases per matched field
- are considered. Raising the limit increases query time and
- consumes more memory. Only supported by the `fvh` highlighter.
- Defaults to `256` if omitted.
- :arg post_tags: Use in conjunction with `pre_tags` to define the HTML
- tags to use for the highlighted text. By default, highlighted text
- is wrapped in `` and `` tags.
- :arg pre_tags: Use in conjunction with `post_tags` to define the HTML
- tags to use for the highlighted text. By default, highlighted text
- is wrapped in `` and `` tags.
- :arg require_field_match: By default, only fields that contains a
- query match are highlighted. Set to `false` to highlight all
- fields. Defaults to `True` if omitted.
- :arg tags_schema: Set to `styled` to use the built-in tag schema.
+ :arg id: (required) `id` of the document.
+ :arg index: Index from which to retrieve the document.
+ :arg path: Name of the field.
+ :arg routing: Custom routing value.
"""
- type: Union[Literal["plain", "fvh", "unified"], DefaultType]
- boundary_chars: Union[str, DefaultType]
- boundary_max_scan: Union[int, DefaultType]
- boundary_scanner: Union[Literal["chars", "sentence", "word"], DefaultType]
- boundary_scanner_locale: Union[str, DefaultType]
- force_source: Union[bool, DefaultType]
- fragmenter: Union[Literal["simple", "span"], DefaultType]
- fragment_size: Union[int, DefaultType]
- highlight_filter: Union[bool, DefaultType]
- highlight_query: Union[Query, DefaultType]
- max_fragment_length: Union[int, DefaultType]
- max_analyzed_offset: Union[int, DefaultType]
- no_match_size: Union[int, DefaultType]
- number_of_fragments: Union[int, DefaultType]
- options: Union[Mapping[str, Any], DefaultType]
- order: Union[Literal["score"], DefaultType]
- phrase_limit: Union[int, DefaultType]
- post_tags: Union[Sequence[str], DefaultType]
- pre_tags: Union[Sequence[str], DefaultType]
- require_field_match: Union[bool, DefaultType]
- tags_schema: Union[Literal["styled"], DefaultType]
-
- def __init__(
- self,
- *,
- type: Union[Literal["plain", "fvh", "unified"], DefaultType] = DEFAULT,
- boundary_chars: Union[str, DefaultType] = DEFAULT,
- boundary_max_scan: Union[int, DefaultType] = DEFAULT,
- boundary_scanner: Union[
- Literal["chars", "sentence", "word"], DefaultType
- ] = DEFAULT,
- boundary_scanner_locale: Union[str, DefaultType] = DEFAULT,
- force_source: Union[bool, DefaultType] = DEFAULT,
- fragmenter: Union[Literal["simple", "span"], DefaultType] = DEFAULT,
- fragment_size: Union[int, DefaultType] = DEFAULT,
- highlight_filter: Union[bool, DefaultType] = DEFAULT,
- highlight_query: Union[Query, DefaultType] = DEFAULT,
- max_fragment_length: Union[int, DefaultType] = DEFAULT,
- max_analyzed_offset: Union[int, DefaultType] = DEFAULT,
- no_match_size: Union[int, DefaultType] = DEFAULT,
- number_of_fragments: Union[int, DefaultType] = DEFAULT,
- options: Union[Mapping[str, Any], DefaultType] = DEFAULT,
- order: Union[Literal["score"], DefaultType] = DEFAULT,
- phrase_limit: Union[int, DefaultType] = DEFAULT,
- post_tags: Union[Sequence[str], DefaultType] = DEFAULT,
- pre_tags: Union[Sequence[str], DefaultType] = DEFAULT,
- require_field_match: Union[bool, DefaultType] = DEFAULT,
- tags_schema: Union[Literal["styled"], DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if type is not DEFAULT:
- kwargs["type"] = type
- if boundary_chars is not DEFAULT:
- kwargs["boundary_chars"] = boundary_chars
- if boundary_max_scan is not DEFAULT:
- kwargs["boundary_max_scan"] = boundary_max_scan
- if boundary_scanner is not DEFAULT:
- kwargs["boundary_scanner"] = boundary_scanner
- if boundary_scanner_locale is not DEFAULT:
- kwargs["boundary_scanner_locale"] = boundary_scanner_locale
- if force_source is not DEFAULT:
- kwargs["force_source"] = force_source
- if fragmenter is not DEFAULT:
- kwargs["fragmenter"] = fragmenter
- if fragment_size is not DEFAULT:
- kwargs["fragment_size"] = fragment_size
- if highlight_filter is not DEFAULT:
- kwargs["highlight_filter"] = highlight_filter
- if highlight_query is not DEFAULT:
- kwargs["highlight_query"] = highlight_query
- if max_fragment_length is not DEFAULT:
- kwargs["max_fragment_length"] = max_fragment_length
- if max_analyzed_offset is not DEFAULT:
- kwargs["max_analyzed_offset"] = max_analyzed_offset
- if no_match_size is not DEFAULT:
- kwargs["no_match_size"] = no_match_size
- if number_of_fragments is not DEFAULT:
- kwargs["number_of_fragments"] = number_of_fragments
- if options is not DEFAULT:
- kwargs["options"] = options
- if order is not DEFAULT:
- kwargs["order"] = order
- if phrase_limit is not DEFAULT:
- kwargs["phrase_limit"] = phrase_limit
- if post_tags is not DEFAULT:
- kwargs["post_tags"] = post_tags
- if pre_tags is not DEFAULT:
- kwargs["pre_tags"] = pre_tags
- if require_field_match is not DEFAULT:
- kwargs["require_field_match"] = require_field_match
- if tags_schema is not DEFAULT:
- kwargs["tags_schema"] = tags_schema
+ id: Union[str, DefaultType]
+ index: Union[str, DefaultType]
+ path: Union[str, InstrumentedField, DefaultType]
+ routing: Union[str, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ id: Union[str, DefaultType] = DEFAULT,
+ index: Union[str, DefaultType] = DEFAULT,
+ path: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ routing: Union[str, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if id is not DEFAULT:
+ kwargs["id"] = id
+ if index is not DEFAULT:
+ kwargs["index"] = index
+ if path is not DEFAULT:
+ kwargs["path"] = str(path)
+ if routing is not DEFAULT:
+ kwargs["routing"] = routing
super().__init__(kwargs)
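
For reference, a minimal sketch of constructing a `FieldLookup`; the index, document id and field names are hypothetical:

from elasticsearch_dsl import types

# reference the "followers" field of document "u1" in the "users" index
lookup = types.FieldLookup(id="u1", index="users", path="followers")
# serializes to: {"id": "u1", "index": "users", "path": "followers"}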
-class Highlight(HighlightBase):
+class HighlightField(HighlightBase):
"""
- :arg fields: (required)
- :arg encoder:
+ :arg fragment_offset:
+ :arg matched_fields:
+ :arg analyzer:
:arg type:
:arg boundary_chars: A string that contains each boundary character.
Defaults to `.,!? \t\n` if omitted.
@@ -2069,12 +3054,13 @@ class Highlight(HighlightBase):
:arg tags_schema: Set to `styled` to use the built-in tag schema.
"""
- fields: Union[
- Mapping[Union[str, InstrumentedField], "HighlightField"],
- Dict[str, Any],
+ fragment_offset: Union[int, DefaultType]
+ matched_fields: Union[
+ Union[str, InstrumentedField],
+ Sequence[Union[str, InstrumentedField]],
DefaultType,
]
- encoder: Union[Literal["default", "html"], DefaultType]
+ analyzer: Union[str, Dict[str, Any], DefaultType]
type: Union[Literal["plain", "fvh", "unified"], DefaultType]
boundary_chars: Union[str, DefaultType]
boundary_max_scan: Union[int, DefaultType]
@@ -2100,12 +3086,13 @@ class Highlight(HighlightBase):
def __init__(
self,
*,
- fields: Union[
- Mapping[Union[str, InstrumentedField], "HighlightField"],
- Dict[str, Any],
+ fragment_offset: Union[int, DefaultType] = DEFAULT,
+ matched_fields: Union[
+ Union[str, InstrumentedField],
+ Sequence[Union[str, InstrumentedField]],
DefaultType,
] = DEFAULT,
- encoder: Union[Literal["default", "html"], DefaultType] = DEFAULT,
+ analyzer: Union[str, Dict[str, Any], DefaultType] = DEFAULT,
type: Union[Literal["plain", "fvh", "unified"], DefaultType] = DEFAULT,
boundary_chars: Union[str, DefaultType] = DEFAULT,
boundary_max_scan: Union[int, DefaultType] = DEFAULT,
@@ -2131,10 +3118,12 @@ def __init__(
tags_schema: Union[Literal["styled"], DefaultType] = DEFAULT,
**kwargs: Any,
):
- if fields is not DEFAULT:
- kwargs["fields"] = str(fields)
- if encoder is not DEFAULT:
- kwargs["encoder"] = encoder
+ if fragment_offset is not DEFAULT:
+ kwargs["fragment_offset"] = fragment_offset
+ if matched_fields is not DEFAULT:
+ kwargs["matched_fields"] = str(matched_fields)
+ if analyzer is not DEFAULT:
+ kwargs["analyzer"] = analyzer
if type is not DEFAULT:
kwargs["type"] = type
if boundary_chars is not DEFAULT:
@@ -2180,98 +3169,114 @@ def __init__(
super().__init__(**kwargs)
-class ScriptField(AttrDict[Any]):
+class RegressionInferenceOptions(AttrDict[Any]):
"""
- :arg script: (required)
- :arg ignore_failure:
+ :arg results_field: The field that is added to incoming documents to
+ contain the inference prediction. Defaults to predicted_value.
+ :arg num_top_feature_importance_values: Specifies the maximum number
+ of feature importance values per document.
"""
- script: Union["Script", Dict[str, Any], DefaultType]
- ignore_failure: Union[bool, DefaultType]
+ results_field: Union[str, InstrumentedField, DefaultType]
+ num_top_feature_importance_values: Union[int, DefaultType]
def __init__(
self,
*,
- script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
- ignore_failure: Union[bool, DefaultType] = DEFAULT,
+ results_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ num_top_feature_importance_values: Union[int, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if script is not DEFAULT:
- kwargs["script"] = script
- if ignore_failure is not DEFAULT:
- kwargs["ignore_failure"] = ignore_failure
+ if results_field is not DEFAULT:
+ kwargs["results_field"] = str(results_field)
+ if num_top_feature_importance_values is not DEFAULT:
+ kwargs["num_top_feature_importance_values"] = (
+ num_top_feature_importance_values
+ )
super().__init__(kwargs)
-class SortOptions(AttrDict[Any]):
+class ClassificationInferenceOptions(AttrDict[Any]):
"""
- :arg _score:
- :arg _doc:
- :arg _geo_distance:
- :arg _script:
+ :arg num_top_classes: Specifies the number of top class predictions to
+ return. Defaults to 0.
+ :arg num_top_feature_importance_values: Specifies the maximum number
+ of feature importance values per document.
+ :arg prediction_field_type: Specifies the type of the predicted field
+ to write. Acceptable values are: string, number, boolean. When
+ boolean is provided, 1.0 is transformed to true and 0.0 to false.
+ :arg results_field: The field that is added to incoming documents to
+ contain the inference prediction. Defaults to predicted_value.
+ :arg top_classes_results_field: Specifies the field to which the top
+ classes are written. Defaults to top_classes.
"""
- _score: Union["ScoreSort", Dict[str, Any], DefaultType]
- _doc: Union["ScoreSort", Dict[str, Any], DefaultType]
- _geo_distance: Union["GeoDistanceSort", Dict[str, Any], DefaultType]
- _script: Union["ScriptSort", Dict[str, Any], DefaultType]
+ num_top_classes: Union[int, DefaultType]
+ num_top_feature_importance_values: Union[int, DefaultType]
+ prediction_field_type: Union[str, DefaultType]
+ results_field: Union[str, DefaultType]
+ top_classes_results_field: Union[str, DefaultType]
def __init__(
self,
*,
- _score: Union["ScoreSort", Dict[str, Any], DefaultType] = DEFAULT,
- _doc: Union["ScoreSort", Dict[str, Any], DefaultType] = DEFAULT,
- _geo_distance: Union["GeoDistanceSort", Dict[str, Any], DefaultType] = DEFAULT,
- _script: Union["ScriptSort", Dict[str, Any], DefaultType] = DEFAULT,
+ num_top_classes: Union[int, DefaultType] = DEFAULT,
+ num_top_feature_importance_values: Union[int, DefaultType] = DEFAULT,
+ prediction_field_type: Union[str, DefaultType] = DEFAULT,
+ results_field: Union[str, DefaultType] = DEFAULT,
+ top_classes_results_field: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if _score is not DEFAULT:
- kwargs["_score"] = _score
- if _doc is not DEFAULT:
- kwargs["_doc"] = _doc
- if _geo_distance is not DEFAULT:
- kwargs["_geo_distance"] = _geo_distance
- if _script is not DEFAULT:
- kwargs["_script"] = _script
+ if num_top_classes is not DEFAULT:
+ kwargs["num_top_classes"] = num_top_classes
+ if num_top_feature_importance_values is not DEFAULT:
+ kwargs["num_top_feature_importance_values"] = (
+ num_top_feature_importance_values
+ )
+ if prediction_field_type is not DEFAULT:
+ kwargs["prediction_field_type"] = prediction_field_type
+ if results_field is not DEFAULT:
+ kwargs["results_field"] = results_field
+ if top_classes_results_field is not DEFAULT:
+ kwargs["top_classes_results_field"] = top_classes_results_field
super().__init__(kwargs)
-class SourceFilter(AttrDict[Any]):
+class FieldCollapse(AttrDict[Any]):
"""
- :arg excludes:
- :arg includes:
+ :arg field: (required) The field to collapse the result set on
+ :arg inner_hits: The number of inner hits and their sort order
+ :arg max_concurrent_group_searches: The number of concurrent requests
+ allowed to retrieve the inner_hits per group
+ :arg collapse:
"""
- excludes: Union[
- Union[str, InstrumentedField],
- Sequence[Union[str, InstrumentedField]],
- DefaultType,
- ]
- includes: Union[
- Union[str, InstrumentedField],
- Sequence[Union[str, InstrumentedField]],
- DefaultType,
+ field: Union[str, InstrumentedField, DefaultType]
+ inner_hits: Union[
+ "InnerHits", Sequence["InnerHits"], Sequence[Dict[str, Any]], DefaultType
]
+ max_concurrent_group_searches: Union[int, DefaultType]
+ collapse: Union["FieldCollapse", Dict[str, Any], DefaultType]
def __init__(
self,
*,
- excludes: Union[
- Union[str, InstrumentedField],
- Sequence[Union[str, InstrumentedField]],
- DefaultType,
- ] = DEFAULT,
- includes: Union[
- Union[str, InstrumentedField],
- Sequence[Union[str, InstrumentedField]],
- DefaultType,
+ field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ inner_hits: Union[
+ "InnerHits", Sequence["InnerHits"], Sequence[Dict[str, Any]], DefaultType
] = DEFAULT,
+ max_concurrent_group_searches: Union[int, DefaultType] = DEFAULT,
+ collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] = DEFAULT,
**kwargs: Any,
):
- if excludes is not DEFAULT:
- kwargs["excludes"] = str(excludes)
- if includes is not DEFAULT:
- kwargs["includes"] = str(includes)
+ if field is not DEFAULT:
+ kwargs["field"] = str(field)
+ if inner_hits is not DEFAULT:
+ kwargs["inner_hits"] = inner_hits
+ if max_concurrent_group_searches is not DEFAULT:
+ kwargs["max_concurrent_group_searches"] = max_concurrent_group_searches
+ if collapse is not DEFAULT:
+ kwargs["collapse"] = collapse
super().__init__(kwargs)
@@ -2288,7 +3293,9 @@ class IntervalsAllOf(AttrDict[Any]):
:arg filter: Rule used to filter returned intervals.
"""
- intervals: Union[Sequence["IntervalsContainer"], Dict[str, Any], DefaultType]
+ intervals: Union[
+ Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType
+ ]
max_gaps: Union[int, DefaultType]
ordered: Union[bool, DefaultType]
filter: Union["IntervalsFilter", Dict[str, Any], DefaultType]
@@ -2297,7 +3304,7 @@ def __init__(
self,
*,
intervals: Union[
- Sequence["IntervalsContainer"], Dict[str, Any], DefaultType
+ Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType
] = DEFAULT,
max_gaps: Union[int, DefaultType] = DEFAULT,
ordered: Union[bool, DefaultType] = DEFAULT,
@@ -2321,14 +3328,16 @@ class IntervalsAnyOf(AttrDict[Any]):
:arg filter: Rule used to filter returned intervals.
"""
- intervals: Union[Sequence["IntervalsContainer"], Dict[str, Any], DefaultType]
+ intervals: Union[
+ Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType
+ ]
filter: Union["IntervalsFilter", Dict[str, Any], DefaultType]
def __init__(
self,
*,
intervals: Union[
- Sequence["IntervalsContainer"], Dict[str, Any], DefaultType
+ Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType
] = DEFAULT,
filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] = DEFAULT,
**kwargs: Any,
@@ -2489,40 +3498,319 @@ class IntervalsWildcard(AttrDict[Any]):
def __init__(
self,
*,
- pattern: Union[str, DefaultType] = DEFAULT,
- analyzer: Union[str, DefaultType] = DEFAULT,
- use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ pattern: Union[str, DefaultType] = DEFAULT,
+ analyzer: Union[str, DefaultType] = DEFAULT,
+ use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if pattern is not DEFAULT:
+ kwargs["pattern"] = pattern
+ if analyzer is not DEFAULT:
+ kwargs["analyzer"] = analyzer
+ if use_field is not DEFAULT:
+ kwargs["use_field"] = str(use_field)
+ super().__init__(kwargs)
+
+
+class TextEmbedding(AttrDict[Any]):
+ """
+ :arg model_id: (required)
+ :arg model_text: (required)
+ """
+
+ model_id: Union[str, DefaultType]
+ model_text: Union[str, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ model_id: Union[str, DefaultType] = DEFAULT,
+ model_text: Union[str, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if model_id is not DEFAULT:
+ kwargs["model_id"] = model_id
+ if model_text is not DEFAULT:
+ kwargs["model_text"] = model_text
+ super().__init__(kwargs)
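+
A minimal sketch of `TextEmbedding`, assuming a deployed model with the hypothetical id "my-text-embedding-model":

from elasticsearch_dsl import types

embedding = types.TextEmbedding(
    model_id="my-text-embedding-model",  # hypothetical model id
    model_text="best brunch spots in town",
)
# serializes to: {"model_id": "my-text-embedding-model",
#                 "model_text": "best brunch spots in town"}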
+
+
+class FieldSort(AttrDict[Any]):
+ """
+ :arg missing:
+ :arg mode:
+ :arg nested:
+ :arg order:
+ :arg unmapped_type:
+ :arg numeric_type:
+ :arg format:
+ """
+
+ missing: Union[str, int, float, bool, DefaultType]
+ mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType]
+ nested: Union["NestedSortValue", Dict[str, Any], DefaultType]
+ order: Union[Literal["asc", "desc"], DefaultType]
+ unmapped_type: Union[
+ Literal[
+ "none",
+ "geo_point",
+ "geo_shape",
+ "ip",
+ "binary",
+ "keyword",
+ "text",
+ "search_as_you_type",
+ "date",
+ "date_nanos",
+ "boolean",
+ "completion",
+ "nested",
+ "object",
+ "version",
+ "murmur3",
+ "token_count",
+ "percolator",
+ "integer",
+ "long",
+ "short",
+ "byte",
+ "float",
+ "half_float",
+ "scaled_float",
+ "double",
+ "integer_range",
+ "float_range",
+ "long_range",
+ "double_range",
+ "date_range",
+ "ip_range",
+ "alias",
+ "join",
+ "rank_feature",
+ "rank_features",
+ "flattened",
+ "shape",
+ "histogram",
+ "constant_keyword",
+ "aggregate_metric_double",
+ "dense_vector",
+ "semantic_text",
+ "sparse_vector",
+ "match_only_text",
+ "icu_collation_keyword",
+ ],
+ DefaultType,
+ ]
+ numeric_type: Union[Literal["long", "double", "date", "date_nanos"], DefaultType]
+ format: Union[str, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ missing: Union[str, int, float, bool, DefaultType] = DEFAULT,
+ mode: Union[
+ Literal["min", "max", "sum", "avg", "median"], DefaultType
+ ] = DEFAULT,
+ nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT,
+ order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT,
+ unmapped_type: Union[
+ Literal[
+ "none",
+ "geo_point",
+ "geo_shape",
+ "ip",
+ "binary",
+ "keyword",
+ "text",
+ "search_as_you_type",
+ "date",
+ "date_nanos",
+ "boolean",
+ "completion",
+ "nested",
+ "object",
+ "version",
+ "murmur3",
+ "token_count",
+ "percolator",
+ "integer",
+ "long",
+ "short",
+ "byte",
+ "float",
+ "half_float",
+ "scaled_float",
+ "double",
+ "integer_range",
+ "float_range",
+ "long_range",
+ "double_range",
+ "date_range",
+ "ip_range",
+ "alias",
+ "join",
+ "rank_feature",
+ "rank_features",
+ "flattened",
+ "shape",
+ "histogram",
+ "constant_keyword",
+ "aggregate_metric_double",
+ "dense_vector",
+ "semantic_text",
+ "sparse_vector",
+ "match_only_text",
+ "icu_collation_keyword",
+ ],
+ DefaultType,
+ ] = DEFAULT,
+ numeric_type: Union[
+ Literal["long", "double", "date", "date_nanos"], DefaultType
+ ] = DEFAULT,
+ format: Union[str, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if missing is not DEFAULT:
+ kwargs["missing"] = missing
+ if mode is not DEFAULT:
+ kwargs["mode"] = mode
+ if nested is not DEFAULT:
+ kwargs["nested"] = nested
+ if order is not DEFAULT:
+ kwargs["order"] = order
+ if unmapped_type is not DEFAULT:
+ kwargs["unmapped_type"] = unmapped_type
+ if numeric_type is not DEFAULT:
+ kwargs["numeric_type"] = numeric_type
+ if format is not DEFAULT:
+ kwargs["format"] = format
+ super().__init__(kwargs)
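+
A sketch pairing `FieldSort` with `SortOptions`, mirroring the typed usage exercised in the updated `bucket_sort` test later in this diff (the "price" field is hypothetical):

from elasticsearch_dsl import types

sort = types.SortOptions("price", types.FieldSort(order="desc", missing="_last"))
# serializes to: {"price": {"order": "desc", "missing": "_last"}}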
+
+
+class ScoreSort(AttrDict[Any]):
+ """
+ :arg order:
+ """
+
+ order: Union[Literal["asc", "desc"], DefaultType]
+
+ def __init__(
+ self,
+ *,
+ order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if order is not DEFAULT:
+ kwargs["order"] = order
+ super().__init__(kwargs)
+
+
+class GeoDistanceSort(AttrDict[Any]):
+ """
+ :arg _field: The field to use in this sort.
+ :arg _value: The location value for the field.
+ :arg mode:
+ :arg distance_type:
+ :arg ignore_unmapped:
+ :arg order:
+ :arg unit:
+ :arg nested:
+ """
+
+ _field: Union[str, "InstrumentedField", "DefaultType"]
+ _value: Union[
+ Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str],
+ Sequence[Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str]],
+ Dict[str, Any],
+ "DefaultType",
+ ]
+ mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType]
+ distance_type: Union[Literal["arc", "plane"], DefaultType]
+ ignore_unmapped: Union[bool, DefaultType]
+ order: Union[Literal["asc", "desc"], DefaultType]
+ unit: Union[
+ Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], DefaultType
+ ]
+ nested: Union["NestedSortValue", Dict[str, Any], DefaultType]
+
+ def __init__(
+ self,
+ _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ _value: Union[
+ Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str],
+ Sequence[
+ Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str]
+ ],
+ Dict[str, Any],
+ "DefaultType",
+ ] = DEFAULT,
+ *,
+ mode: Union[
+ Literal["min", "max", "sum", "avg", "median"], DefaultType
+ ] = DEFAULT,
+ distance_type: Union[Literal["arc", "plane"], DefaultType] = DEFAULT,
+ ignore_unmapped: Union[bool, DefaultType] = DEFAULT,
+ order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT,
+ unit: Union[
+ Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], DefaultType
+ ] = DEFAULT,
+ nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT,
**kwargs: Any,
):
- if pattern is not DEFAULT:
- kwargs["pattern"] = pattern
- if analyzer is not DEFAULT:
- kwargs["analyzer"] = analyzer
- if use_field is not DEFAULT:
- kwargs["use_field"] = str(use_field)
+ if _field is not DEFAULT:
+ kwargs[str(_field)] = _value
+ if mode is not DEFAULT:
+ kwargs["mode"] = mode
+ if distance_type is not DEFAULT:
+ kwargs["distance_type"] = distance_type
+ if ignore_unmapped is not DEFAULT:
+ kwargs["ignore_unmapped"] = ignore_unmapped
+ if order is not DEFAULT:
+ kwargs["order"] = order
+ if unit is not DEFAULT:
+ kwargs["unit"] = unit
+ if nested is not DEFAULT:
+ kwargs["nested"] = nested
super().__init__(kwargs)
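
Because `_field` and `_value` are positional here, the field name itself becomes the serialized key (per the `kwargs[str(_field)] = _value` logic above); a sketch with a hypothetical field and coordinates:

from elasticsearch_dsl import types

sort = types.GeoDistanceSort("pin.location", [-70.0, 40.0], order="asc", unit="km")
# serializes to: {"pin.location": [-70.0, 40.0], "order": "asc", "unit": "km"}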
-class TextEmbedding(AttrDict[Any]):
+class ScriptSort(AttrDict[Any]):
"""
- :arg model_id: (required)
- :arg model_text: (required)
+ :arg script: (required)
+ :arg order:
+ :arg type:
+ :arg mode:
+ :arg nested:
"""
- model_id: Union[str, DefaultType]
- model_text: Union[str, DefaultType]
+ script: Union["Script", Dict[str, Any], DefaultType]
+ order: Union[Literal["asc", "desc"], DefaultType]
+ type: Union[Literal["string", "number", "version"], DefaultType]
+ mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType]
+ nested: Union["NestedSortValue", Dict[str, Any], DefaultType]
def __init__(
self,
*,
- model_id: Union[str, DefaultType] = DEFAULT,
- model_text: Union[str, DefaultType] = DEFAULT,
+ script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
+ order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT,
+ type: Union[Literal["string", "number", "version"], DefaultType] = DEFAULT,
+ mode: Union[
+ Literal["min", "max", "sum", "avg", "median"], DefaultType
+ ] = DEFAULT,
+ nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT,
**kwargs: Any,
):
- if model_id is not DEFAULT:
- kwargs["model_id"] = model_id
- if model_text is not DEFAULT:
- kwargs["model_text"] = model_text
+ if script is not DEFAULT:
+ kwargs["script"] = script
+ if order is not DEFAULT:
+ kwargs["order"] = order
+ if type is not DEFAULT:
+ kwargs["type"] = type
+ if mode is not DEFAULT:
+ kwargs["mode"] = mode
+ if nested is not DEFAULT:
+ kwargs["nested"] = nested
super().__init__(kwargs)
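
A minimal sketch of `ScriptSort`, assuming the generated `types.Script` helper accepts `source` and `params`:

from elasticsearch_dsl import types

sort = types.ScriptSort(
    script=types.Script(
        source="doc['price'].value * params.factor",  # hypothetical script
        params={"factor": 1.1},
    ),
    type="number",
    order="asc",
)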
@@ -2687,7 +3975,7 @@ class SpanNearQuery(QueryBase):
:arg _name:
"""
- clauses: Union[Sequence["SpanQuery"], Dict[str, Any], DefaultType]
+ clauses: Union[Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType]
in_order: Union[bool, DefaultType]
slop: Union[int, DefaultType]
boost: Union[float, DefaultType]
@@ -2696,7 +3984,9 @@ class SpanNearQuery(QueryBase):
def __init__(
self,
*,
- clauses: Union[Sequence["SpanQuery"], Dict[str, Any], DefaultType] = DEFAULT,
+ clauses: Union[
+ Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType
+ ] = DEFAULT,
in_order: Union[bool, DefaultType] = DEFAULT,
slop: Union[int, DefaultType] = DEFAULT,
boost: Union[float, DefaultType] = DEFAULT,
@@ -2733,408 +4023,154 @@ class SpanNotQuery(QueryBase):
default value of 1.0. A boost value between 0 and 1.0 decreases
the relevance score. A value greater than 1.0 increases the
relevance score. Defaults to `1` if omitted.
- :arg _name:
- """
-
- exclude: Union["SpanQuery", Dict[str, Any], DefaultType]
- include: Union["SpanQuery", Dict[str, Any], DefaultType]
- dist: Union[int, DefaultType]
- post: Union[int, DefaultType]
- pre: Union[int, DefaultType]
- boost: Union[float, DefaultType]
- _name: Union[str, DefaultType]
-
- def __init__(
- self,
- *,
- exclude: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
- include: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
- dist: Union[int, DefaultType] = DEFAULT,
- post: Union[int, DefaultType] = DEFAULT,
- pre: Union[int, DefaultType] = DEFAULT,
- boost: Union[float, DefaultType] = DEFAULT,
- _name: Union[str, DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if exclude is not DEFAULT:
- kwargs["exclude"] = exclude
- if include is not DEFAULT:
- kwargs["include"] = include
- if dist is not DEFAULT:
- kwargs["dist"] = dist
- if post is not DEFAULT:
- kwargs["post"] = post
- if pre is not DEFAULT:
- kwargs["pre"] = pre
- if boost is not DEFAULT:
- kwargs["boost"] = boost
- if _name is not DEFAULT:
- kwargs["_name"] = _name
- super().__init__(**kwargs)
-
-
-class SpanOrQuery(QueryBase):
- """
- :arg clauses: (required) Array of one or more other span type queries.
- :arg boost: Floating point number used to decrease or increase the
- relevance scores of the query. Boost values are relative to the
- default value of 1.0. A boost value between 0 and 1.0 decreases
- the relevance score. A value greater than 1.0 increases the
- relevance score. Defaults to `1` if omitted.
- :arg _name:
- """
-
- clauses: Union[Sequence["SpanQuery"], Dict[str, Any], DefaultType]
- boost: Union[float, DefaultType]
- _name: Union[str, DefaultType]
-
- def __init__(
- self,
- *,
- clauses: Union[Sequence["SpanQuery"], Dict[str, Any], DefaultType] = DEFAULT,
- boost: Union[float, DefaultType] = DEFAULT,
- _name: Union[str, DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if clauses is not DEFAULT:
- kwargs["clauses"] = clauses
- if boost is not DEFAULT:
- kwargs["boost"] = boost
- if _name is not DEFAULT:
- kwargs["_name"] = _name
- super().__init__(**kwargs)
-
-
-class SpanWithinQuery(QueryBase):
- """
- :arg big: (required) Can be any span query. Matching spans from
- `little` that are enclosed within `big` are returned.
- :arg little: (required) Can be any span query. Matching spans from
- `little` that are enclosed within `big` are returned.
- :arg boost: Floating point number used to decrease or increase the
- relevance scores of the query. Boost values are relative to the
- default value of 1.0. A boost value between 0 and 1.0 decreases
- the relevance score. A value greater than 1.0 increases the
- relevance score. Defaults to `1` if omitted.
- :arg _name:
- """
-
- big: Union["SpanQuery", Dict[str, Any], DefaultType]
- little: Union["SpanQuery", Dict[str, Any], DefaultType]
- boost: Union[float, DefaultType]
- _name: Union[str, DefaultType]
-
- def __init__(
- self,
- *,
- big: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
- little: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
- boost: Union[float, DefaultType] = DEFAULT,
- _name: Union[str, DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if big is not DEFAULT:
- kwargs["big"] = big
- if little is not DEFAULT:
- kwargs["little"] = little
- if boost is not DEFAULT:
- kwargs["boost"] = boost
- if _name is not DEFAULT:
- kwargs["_name"] = _name
- super().__init__(**kwargs)
-
-
-class HighlightField(HighlightBase):
- """
- :arg fragment_offset:
- :arg matched_fields:
- :arg analyzer:
- :arg type:
- :arg boundary_chars: A string that contains each boundary character.
- Defaults to `.,!? \t\n` if omitted.
- :arg boundary_max_scan: How far to scan for boundary characters.
- Defaults to `20` if omitted.
- :arg boundary_scanner: Specifies how to break the highlighted
- fragments: chars, sentence, or word. Only valid for the unified
- and fvh highlighters. Defaults to `sentence` for the `unified`
- highlighter. Defaults to `chars` for the `fvh` highlighter.
- :arg boundary_scanner_locale: Controls which locale is used to search
- for sentence and word boundaries. This parameter takes a form of a
- language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`.
- Defaults to `Locale.ROOT` if omitted.
- :arg force_source:
- :arg fragmenter: Specifies how text should be broken up in highlight
- snippets: `simple` or `span`. Only valid for the `plain`
- highlighter. Defaults to `span` if omitted.
- :arg fragment_size: The size of the highlighted fragment in
- characters. Defaults to `100` if omitted.
- :arg highlight_filter:
- :arg highlight_query: Highlight matches for a query other than the
- search query. This is especially useful if you use a rescore query
- because those are not taken into account by highlighting by
- default.
- :arg max_fragment_length:
- :arg max_analyzed_offset: If set to a non-negative value, highlighting
- stops at this defined maximum limit. The rest of the text is not
- processed, thus not highlighted and no error is returned The
- `max_analyzed_offset` query setting does not override the
- `index.highlight.max_analyzed_offset` setting, which prevails when
- it’s set to lower value than the query setting.
- :arg no_match_size: The amount of text you want to return from the
- beginning of the field if there are no matching fragments to
- highlight.
- :arg number_of_fragments: The maximum number of fragments to return.
- If the number of fragments is set to `0`, no fragments are
- returned. Instead, the entire field contents are highlighted and
- returned. This can be handy when you need to highlight short texts
- such as a title or address, but fragmentation is not required. If
- `number_of_fragments` is `0`, `fragment_size` is ignored. Defaults
- to `5` if omitted.
- :arg options:
- :arg order: Sorts highlighted fragments by score when set to `score`.
- By default, fragments will be output in the order they appear in
- the field (order: `none`). Setting this option to `score` will
- output the most relevant fragments first. Each highlighter applies
- its own logic to compute relevancy scores. Defaults to `none` if
- omitted.
- :arg phrase_limit: Controls the number of matching phrases in a
- document that are considered. Prevents the `fvh` highlighter from
- analyzing too many phrases and consuming too much memory. When
- using `matched_fields`, `phrase_limit` phrases per matched field
- are considered. Raising the limit increases query time and
- consumes more memory. Only supported by the `fvh` highlighter.
- Defaults to `256` if omitted.
- :arg post_tags: Use in conjunction with `pre_tags` to define the HTML
- tags to use for the highlighted text. By default, highlighted text
- is wrapped in `` and `` tags.
- :arg pre_tags: Use in conjunction with `post_tags` to define the HTML
- tags to use for the highlighted text. By default, highlighted text
- is wrapped in `` and `` tags.
- :arg require_field_match: By default, only fields that contains a
- query match are highlighted. Set to `false` to highlight all
- fields. Defaults to `True` if omitted.
- :arg tags_schema: Set to `styled` to use the built-in tag schema.
+ :arg _name:
"""
- fragment_offset: Union[int, DefaultType]
- matched_fields: Union[
- Union[str, InstrumentedField],
- Sequence[Union[str, InstrumentedField]],
- DefaultType,
- ]
- analyzer: Union[str, Dict[str, Any], DefaultType]
- type: Union[Literal["plain", "fvh", "unified"], DefaultType]
- boundary_chars: Union[str, DefaultType]
- boundary_max_scan: Union[int, DefaultType]
- boundary_scanner: Union[Literal["chars", "sentence", "word"], DefaultType]
- boundary_scanner_locale: Union[str, DefaultType]
- force_source: Union[bool, DefaultType]
- fragmenter: Union[Literal["simple", "span"], DefaultType]
- fragment_size: Union[int, DefaultType]
- highlight_filter: Union[bool, DefaultType]
- highlight_query: Union[Query, DefaultType]
- max_fragment_length: Union[int, DefaultType]
- max_analyzed_offset: Union[int, DefaultType]
- no_match_size: Union[int, DefaultType]
- number_of_fragments: Union[int, DefaultType]
- options: Union[Mapping[str, Any], DefaultType]
- order: Union[Literal["score"], DefaultType]
- phrase_limit: Union[int, DefaultType]
- post_tags: Union[Sequence[str], DefaultType]
- pre_tags: Union[Sequence[str], DefaultType]
- require_field_match: Union[bool, DefaultType]
- tags_schema: Union[Literal["styled"], DefaultType]
+ exclude: Union["SpanQuery", Dict[str, Any], DefaultType]
+ include: Union["SpanQuery", Dict[str, Any], DefaultType]
+ dist: Union[int, DefaultType]
+ post: Union[int, DefaultType]
+ pre: Union[int, DefaultType]
+ boost: Union[float, DefaultType]
+ _name: Union[str, DefaultType]
def __init__(
self,
*,
- fragment_offset: Union[int, DefaultType] = DEFAULT,
- matched_fields: Union[
- Union[str, InstrumentedField],
- Sequence[Union[str, InstrumentedField]],
- DefaultType,
- ] = DEFAULT,
- analyzer: Union[str, Dict[str, Any], DefaultType] = DEFAULT,
- type: Union[Literal["plain", "fvh", "unified"], DefaultType] = DEFAULT,
- boundary_chars: Union[str, DefaultType] = DEFAULT,
- boundary_max_scan: Union[int, DefaultType] = DEFAULT,
- boundary_scanner: Union[
- Literal["chars", "sentence", "word"], DefaultType
- ] = DEFAULT,
- boundary_scanner_locale: Union[str, DefaultType] = DEFAULT,
- force_source: Union[bool, DefaultType] = DEFAULT,
- fragmenter: Union[Literal["simple", "span"], DefaultType] = DEFAULT,
- fragment_size: Union[int, DefaultType] = DEFAULT,
- highlight_filter: Union[bool, DefaultType] = DEFAULT,
- highlight_query: Union[Query, DefaultType] = DEFAULT,
- max_fragment_length: Union[int, DefaultType] = DEFAULT,
- max_analyzed_offset: Union[int, DefaultType] = DEFAULT,
- no_match_size: Union[int, DefaultType] = DEFAULT,
- number_of_fragments: Union[int, DefaultType] = DEFAULT,
- options: Union[Mapping[str, Any], DefaultType] = DEFAULT,
- order: Union[Literal["score"], DefaultType] = DEFAULT,
- phrase_limit: Union[int, DefaultType] = DEFAULT,
- post_tags: Union[Sequence[str], DefaultType] = DEFAULT,
- pre_tags: Union[Sequence[str], DefaultType] = DEFAULT,
- require_field_match: Union[bool, DefaultType] = DEFAULT,
- tags_schema: Union[Literal["styled"], DefaultType] = DEFAULT,
+ exclude: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
+ include: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
+ dist: Union[int, DefaultType] = DEFAULT,
+ post: Union[int, DefaultType] = DEFAULT,
+ pre: Union[int, DefaultType] = DEFAULT,
+ boost: Union[float, DefaultType] = DEFAULT,
+ _name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if fragment_offset is not DEFAULT:
- kwargs["fragment_offset"] = fragment_offset
- if matched_fields is not DEFAULT:
- kwargs["matched_fields"] = str(matched_fields)
- if analyzer is not DEFAULT:
- kwargs["analyzer"] = analyzer
- if type is not DEFAULT:
- kwargs["type"] = type
- if boundary_chars is not DEFAULT:
- kwargs["boundary_chars"] = boundary_chars
- if boundary_max_scan is not DEFAULT:
- kwargs["boundary_max_scan"] = boundary_max_scan
- if boundary_scanner is not DEFAULT:
- kwargs["boundary_scanner"] = boundary_scanner
- if boundary_scanner_locale is not DEFAULT:
- kwargs["boundary_scanner_locale"] = boundary_scanner_locale
- if force_source is not DEFAULT:
- kwargs["force_source"] = force_source
- if fragmenter is not DEFAULT:
- kwargs["fragmenter"] = fragmenter
- if fragment_size is not DEFAULT:
- kwargs["fragment_size"] = fragment_size
- if highlight_filter is not DEFAULT:
- kwargs["highlight_filter"] = highlight_filter
- if highlight_query is not DEFAULT:
- kwargs["highlight_query"] = highlight_query
- if max_fragment_length is not DEFAULT:
- kwargs["max_fragment_length"] = max_fragment_length
- if max_analyzed_offset is not DEFAULT:
- kwargs["max_analyzed_offset"] = max_analyzed_offset
- if no_match_size is not DEFAULT:
- kwargs["no_match_size"] = no_match_size
- if number_of_fragments is not DEFAULT:
- kwargs["number_of_fragments"] = number_of_fragments
- if options is not DEFAULT:
- kwargs["options"] = options
- if order is not DEFAULT:
- kwargs["order"] = order
- if phrase_limit is not DEFAULT:
- kwargs["phrase_limit"] = phrase_limit
- if post_tags is not DEFAULT:
- kwargs["post_tags"] = post_tags
- if pre_tags is not DEFAULT:
- kwargs["pre_tags"] = pre_tags
- if require_field_match is not DEFAULT:
- kwargs["require_field_match"] = require_field_match
- if tags_schema is not DEFAULT:
- kwargs["tags_schema"] = tags_schema
+ if exclude is not DEFAULT:
+ kwargs["exclude"] = exclude
+ if include is not DEFAULT:
+ kwargs["include"] = include
+ if dist is not DEFAULT:
+ kwargs["dist"] = dist
+ if post is not DEFAULT:
+ kwargs["post"] = post
+ if pre is not DEFAULT:
+ kwargs["pre"] = pre
+ if boost is not DEFAULT:
+ kwargs["boost"] = boost
+ if _name is not DEFAULT:
+ kwargs["_name"] = _name
super().__init__(**kwargs)
-class ScoreSort(AttrDict[Any]):
+class SpanOrQuery(QueryBase):
"""
- :arg order:
+ :arg clauses: (required) Array of one or more other span type queries.
+ :arg boost: Floating point number used to decrease or increase the
+ relevance scores of the query. Boost values are relative to the
+ default value of 1.0. A boost value between 0 and 1.0 decreases
+ the relevance score. A value greater than 1.0 increases the
+ relevance score. Defaults to `1` if omitted.
+ :arg _name:
"""
- order: Union[Literal["asc", "desc"], DefaultType]
+ clauses: Union[Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType]
+ boost: Union[float, DefaultType]
+ _name: Union[str, DefaultType]
def __init__(
self,
*,
- order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT,
+ clauses: Union[
+ Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType
+ ] = DEFAULT,
+ boost: Union[float, DefaultType] = DEFAULT,
+ _name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if order is not DEFAULT:
- kwargs["order"] = order
- super().__init__(kwargs)
+ if clauses is not DEFAULT:
+ kwargs["clauses"] = clauses
+ if boost is not DEFAULT:
+ kwargs["boost"] = boost
+ if _name is not DEFAULT:
+ kwargs["_name"] = _name
+ super().__init__(**kwargs)
-class GeoDistanceSort(AttrDict[Any]):
+class SpanWithinQuery(QueryBase):
"""
- :arg mode:
- :arg distance_type:
- :arg ignore_unmapped:
- :arg order:
- :arg unit:
- :arg nested:
+ :arg big: (required) Can be any span query. Matching spans from
+ `little` that are enclosed within `big` are returned.
+ :arg little: (required) Can be any span query. Matching spans from
+ `little` that are enclosed within `big` are returned.
+ :arg boost: Floating point number used to decrease or increase the
+ relevance scores of the query. Boost values are relative to the
+ default value of 1.0. A boost value between 0 and 1.0 decreases
+ the relevance score. A value greater than 1.0 increases the
+ relevance score. Defaults to `1` if omitted.
+ :arg _name:
"""
- mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType]
- distance_type: Union[Literal["arc", "plane"], DefaultType]
- ignore_unmapped: Union[bool, DefaultType]
- order: Union[Literal["asc", "desc"], DefaultType]
- unit: Union[
- Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], DefaultType
- ]
- nested: Union["NestedSortValue", Dict[str, Any], DefaultType]
+ big: Union["SpanQuery", Dict[str, Any], DefaultType]
+ little: Union["SpanQuery", Dict[str, Any], DefaultType]
+ boost: Union[float, DefaultType]
+ _name: Union[str, DefaultType]
def __init__(
self,
*,
- mode: Union[
- Literal["min", "max", "sum", "avg", "median"], DefaultType
- ] = DEFAULT,
- distance_type: Union[Literal["arc", "plane"], DefaultType] = DEFAULT,
- ignore_unmapped: Union[bool, DefaultType] = DEFAULT,
- order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT,
- unit: Union[
- Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], DefaultType
- ] = DEFAULT,
- nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT,
+ big: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
+ little: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
+ boost: Union[float, DefaultType] = DEFAULT,
+ _name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if mode is not DEFAULT:
- kwargs["mode"] = mode
- if distance_type is not DEFAULT:
- kwargs["distance_type"] = distance_type
- if ignore_unmapped is not DEFAULT:
- kwargs["ignore_unmapped"] = ignore_unmapped
- if order is not DEFAULT:
- kwargs["order"] = order
- if unit is not DEFAULT:
- kwargs["unit"] = unit
- if nested is not DEFAULT:
- kwargs["nested"] = nested
- super().__init__(kwargs)
+ if big is not DEFAULT:
+ kwargs["big"] = big
+ if little is not DEFAULT:
+ kwargs["little"] = little
+ if boost is not DEFAULT:
+ kwargs["boost"] = boost
+ if _name is not DEFAULT:
+ kwargs["_name"] = _name
+ super().__init__(**kwargs)
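+
Since `clauses` now also accepts a sequence of plain dicts (see the `Sequence[Dict[str, Any]]` union members above), a minimal `SpanOrQuery` sketch with hypothetical field and terms:

from elasticsearch_dsl import types

q = types.SpanOrQuery(
    clauses=[
        {"span_term": {"text": "quick"}},
        {"span_term": {"text": "fast"}},
    ]
)
# serializes to: {"clauses": [{"span_term": {"text": "quick"}},
#                             {"span_term": {"text": "fast"}}]}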
-class ScriptSort(AttrDict[Any]):
+class BucketCorrelationFunctionCountCorrelationIndicator(AttrDict[Any]):
"""
- :arg script: (required)
- :arg order:
- :arg type:
- :arg mode:
- :arg nested:
+ :arg doc_count: (required) The total number of documents that
+ initially created the expectations. It’s required to be greater
+ than or equal to the sum of all values in the buckets_path as this
+ is the originating superset of data to which the term values are
+ correlated.
+ :arg expectations: (required) An array of numbers with which to
+ correlate the configured `bucket_path` values. The length of this
+ value must always equal the number of buckets returned by the
+ `bucket_path`.
+ :arg fractions: An array of fractions to use when averaging and
+ calculating variance. This should be used if the pre-calculated
+ data and the buckets_path have known gaps. The length of
+ fractions, if provided, must equal the length of expectations.
"""
- script: Union["Script", Dict[str, Any], DefaultType]
- order: Union[Literal["asc", "desc"], DefaultType]
- type: Union[Literal["string", "number", "version"], DefaultType]
- mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType]
- nested: Union["NestedSortValue", Dict[str, Any], DefaultType]
+ doc_count: Union[int, DefaultType]
+ expectations: Union[Sequence[float], DefaultType]
+ fractions: Union[Sequence[float], DefaultType]
def __init__(
self,
*,
- script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
- order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT,
- type: Union[Literal["string", "number", "version"], DefaultType] = DEFAULT,
- mode: Union[
- Literal["min", "max", "sum", "avg", "median"], DefaultType
- ] = DEFAULT,
- nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT,
+ doc_count: Union[int, DefaultType] = DEFAULT,
+ expectations: Union[Sequence[float], DefaultType] = DEFAULT,
+ fractions: Union[Sequence[float], DefaultType] = DEFAULT,
**kwargs: Any,
):
- if script is not DEFAULT:
- kwargs["script"] = script
- if order is not DEFAULT:
- kwargs["order"] = order
- if type is not DEFAULT:
- kwargs["type"] = type
- if mode is not DEFAULT:
- kwargs["mode"] = mode
- if nested is not DEFAULT:
- kwargs["nested"] = nested
+ if doc_count is not DEFAULT:
+ kwargs["doc_count"] = doc_count
+ if expectations is not DEFAULT:
+ kwargs["expectations"] = expectations
+ if fractions is not DEFAULT:
+ kwargs["fractions"] = fractions
super().__init__(kwargs)
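
A sketch combining the two correlation helpers; the counts below are hypothetical, and `doc_count` must cover the originating superset as described above:

from elasticsearch_dsl import types

indicator = types.BucketCorrelationFunctionCountCorrelationIndicator(
    doc_count=200,
    expectations=[10.0, 25.0, 50.0, 100.0],  # one value per bucket of the bucket_path
)
function = types.BucketCorrelationFunctionCountCorrelation(indicator=indicator)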
diff --git a/elasticsearch_dsl/utils.py b/elasticsearch_dsl/utils.py
index 3617589c..e85a17e0 100644
--- a/elasticsearch_dsl/utils.py
+++ b/elasticsearch_dsl/utils.py
@@ -365,6 +365,7 @@ def __setattr__(self, name: str, value: Any) -> None:
def _setattr(self, name: str, value: Any) -> None:
# if this attribute has special type assigned to it...
+ name = AttrDict.RESERVED.get(name, name)
if name in self._param_defs:
pinfo = self._param_defs[name]
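
The `RESERVED` lookup lets attributes whose Elasticsearch names collide with Python keywords be set with a trailing underscore, as the updated `bucket_sort` test below demonstrates; a sketch (assuming `AttrDict.RESERVED` maps `"from_"` to `"from"`):

from elasticsearch_dsl import aggs

agg = aggs.BucketSort(from_=1, size=1)
# the attribute is stored under its Elasticsearch name:
# {"bucket_sort": {"from": 1, "size": 1}}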
diff --git a/examples/async/composite_agg.py b/examples/async/composite_agg.py
index af9a01aa..5188ce4c 100644
--- a/examples/async/composite_agg.py
+++ b/examples/async/composite_agg.py
@@ -17,19 +17,19 @@
import asyncio
import os
-from typing import Any, AsyncIterator, Dict, List, Optional, Union
+from typing import Any, AsyncIterator, Dict, Mapping, Sequence
from elasticsearch.helpers import async_bulk
-from elasticsearch_dsl import A, Agg, AsyncSearch, Response, async_connections
+from elasticsearch_dsl import Agg, AsyncSearch, Response, aggs, async_connections
from tests.test_integration.test_data import DATA, GIT_INDEX
async def scan_aggs(
search: AsyncSearch,
- source_aggs: Union[Dict[str, Agg], List[Dict[str, Agg]]],
+ source_aggs: Sequence[Mapping[str, Agg]],
inner_aggs: Dict[str, Agg] = {},
- size: Optional[int] = 10,
+ size: int = 10,
) -> AsyncIterator[Response]:
"""
Helper function used to iterate over all possible bucket combinations of
@@ -40,7 +40,12 @@ async def scan_aggs(
async def run_search(**kwargs: Any) -> Response:
s = search[:0]
bucket = s.aggs.bucket(
- "comp", "composite", sources=source_aggs, size=size, **kwargs
+ "comp",
+ aggs.Composite(
+ sources=source_aggs,
+ size=size,
+ **kwargs,
+ ),
)
for agg_name, agg in inner_aggs.items():
bucket[agg_name] = agg
@@ -72,8 +77,8 @@ async def main() -> None:
# run some aggregations on the data
async for b in scan_aggs(
AsyncSearch(index="git"),
- {"files": A("terms", field="files")},
- {"first_seen": A("min", field="committed_date")},
+ [{"files": aggs.Terms(field="files")}],
+ {"first_seen": aggs.Min(field="committed_date")},
):
print(
"File %s has been modified %d times, first seen at %s."
diff --git a/examples/composite_agg.py b/examples/composite_agg.py
index e35103f9..f93560ec 100644
--- a/examples/composite_agg.py
+++ b/examples/composite_agg.py
@@ -16,19 +16,19 @@
# under the License.
import os
-from typing import Any, Dict, Iterator, List, Optional, Union
+from typing import Any, Dict, Iterator, Mapping, Sequence
from elasticsearch.helpers import bulk
-from elasticsearch_dsl import A, Agg, Response, Search, connections
+from elasticsearch_dsl import Agg, Response, Search, aggs, connections
from tests.test_integration.test_data import DATA, GIT_INDEX
def scan_aggs(
search: Search,
- source_aggs: Union[Dict[str, Agg], List[Dict[str, Agg]]],
+ source_aggs: Sequence[Mapping[str, Agg]],
inner_aggs: Dict[str, Agg] = {},
- size: Optional[int] = 10,
+ size: int = 10,
) -> Iterator[Response]:
"""
Helper function used to iterate over all possible bucket combinations of
@@ -39,7 +39,12 @@ def scan_aggs(
def run_search(**kwargs: Any) -> Response:
s = search[:0]
bucket = s.aggs.bucket(
- "comp", "composite", sources=source_aggs, size=size, **kwargs
+ "comp",
+ aggs.Composite(
+ sources=source_aggs,
+ size=size,
+ **kwargs,
+ ),
)
for agg_name, agg in inner_aggs.items():
bucket[agg_name] = agg
@@ -69,8 +74,8 @@ def main() -> None:
# run some aggregations on the data
for b in scan_aggs(
Search(index="git"),
- {"files": A("terms", field="files")},
- {"first_seen": A("min", field="committed_date")},
+ [{"files": aggs.Terms(field="files")}],
+ {"first_seen": aggs.Min(field="committed_date")},
):
print(
"File %s has been modified %d times, first seen at %s."
diff --git a/tests/test_aggs.py b/tests/test_aggs.py
index 30b39c3a..38f43aaf 100644
--- a/tests/test_aggs.py
+++ b/tests/test_aggs.py
@@ -17,7 +17,7 @@
from pytest import raises
-from elasticsearch_dsl import aggs, query
+from elasticsearch_dsl import aggs, query, types
def test_repr() -> None:
@@ -220,7 +220,14 @@ def test_filters_correctly_identifies_the_hash() -> None:
def test_bucket_sort_agg() -> None:
- bucket_sort_agg = aggs.BucketSort(sort=[{"total_sales": {"order": "desc"}}], size=3)
+ # test the dictionary (type ignored) and fully typed alternatives
+ bucket_sort_agg = aggs.BucketSort(sort=[{"total_sales": {"order": "desc"}}], size=3) # type: ignore
+ assert bucket_sort_agg.to_dict() == {
+ "bucket_sort": {"sort": [{"total_sales": {"order": "desc"}}], "size": 3}
+ }
+ bucket_sort_agg = aggs.BucketSort(
+ sort=[types.SortOptions("total_sales", types.FieldSort(order="desc"))], size=3
+ )
assert bucket_sort_agg.to_dict() == {
"bucket_sort": {"sort": [{"total_sales": {"order": "desc"}}], "size": 3}
}
@@ -245,7 +252,10 @@ def test_bucket_sort_agg() -> None:
def test_bucket_sort_agg_only_trnunc() -> None:
- bucket_sort_agg = aggs.BucketSort(False, **{"from": 1, "size": 1})
+ # test the dictionary (type ignored) and fully typed alternatives
+ bucket_sort_agg = aggs.BucketSort(**{"from": 1, "size": 1, "_expand__to_dot": False}) # type: ignore
+ assert bucket_sort_agg.to_dict() == {"bucket_sort": {"from": 1, "size": 1}}
+ bucket_sort_agg = aggs.BucketSort(from_=1, size=1, _expand__to_dot=False)
assert bucket_sort_agg.to_dict() == {"bucket_sort": {"from": 1, "size": 1}}
a = aggs.DateHistogram(field="date", interval="month")
@@ -257,20 +267,26 @@ def test_bucket_sort_agg_only_trnunc() -> None:
def test_geohash_grid_aggregation() -> None:
- a = aggs.GeohashGrid(**{"field": "centroid", "precision": 3})
-
+ # test the dictionary (type ignored) and fully typed alternatives
+ a = aggs.GeohashGrid(**{"field": "centroid", "precision": 3}) # type: ignore
+ assert {"geohash_grid": {"field": "centroid", "precision": 3}} == a.to_dict()
+ a = aggs.GeohashGrid(field="centroid", precision=3)
assert {"geohash_grid": {"field": "centroid", "precision": 3}} == a.to_dict()
def test_geohex_grid_aggregation() -> None:
- a = aggs.GeohexGrid(**{"field": "centroid", "precision": 3})
-
+ # test the dictionary (type ignored) and fully typed alternatives
+ a = aggs.GeohexGrid(**{"field": "centroid", "precision": 3}) # type: ignore
+ assert {"geohex_grid": {"field": "centroid", "precision": 3}} == a.to_dict()
+ a = aggs.GeohexGrid(field="centroid", precision=3)
assert {"geohex_grid": {"field": "centroid", "precision": 3}} == a.to_dict()
def test_geotile_grid_aggregation() -> None:
- a = aggs.GeotileGrid(**{"field": "centroid", "precision": 3})
-
+ # test the dictionary (type ignored) and fully typed alternatives
+ a = aggs.GeotileGrid(**{"field": "centroid", "precision": 3}) # type: ignore
+ assert {"geotile_grid": {"field": "centroid", "precision": 3}} == a.to_dict()
+ a = aggs.GeotileGrid(field="centroid", precision=3)
assert {"geotile_grid": {"field": "centroid", "precision": 3}} == a.to_dict()
@@ -307,19 +323,15 @@ def test_variable_width_histogram_aggregation() -> None:
def test_ip_prefix_aggregation() -> None:
- a = aggs.IPPrefix(**{"field": "ipv4", "prefix_length": 24})
-
+ # test the dictionary (type ignored) and fully typed alternatives
+ a = aggs.IPPrefix(**{"field": "ipv4", "prefix_length": 24}) # type: ignore
+ assert {"ip_prefix": {"field": "ipv4", "prefix_length": 24}} == a.to_dict()
+ a = aggs.IPPrefix(field="ipv4", prefix_length=24)
assert {"ip_prefix": {"field": "ipv4", "prefix_length": 24}} == a.to_dict()
def test_ip_prefix_aggregation_extra() -> None:
- a = aggs.IPPrefix(
- **{
- "field": "ipv6",
- "prefix_length": 64,
- "is_ipv6": True,
- }
- )
+ a = aggs.IPPrefix(field="ipv6", prefix_length=64, is_ipv6=True)
assert {
"ip_prefix": {
@@ -340,6 +352,20 @@ def test_multi_terms_aggregation() -> None:
]
}
} == a.to_dict()
+ a = aggs.MultiTerms(
+ terms=[
+ types.MultiTermLookup(field="tags"),
+ types.MultiTermLookup(field="author.row"),
+ ]
+ )
+ assert {
+ "multi_terms": {
+ "terms": [
+ {"field": "tags"},
+ {"field": "author.row"},
+ ]
+ }
+ } == a.to_dict()
def test_categorize_text_aggregation() -> None:
@@ -452,11 +478,21 @@ def test_random_sampler_aggregation() -> None:
def test_adjancecy_matrix_aggregation() -> None:
+ a = aggs.AdjacencyMatrix(filters={"grpA": {"terms": {"accounts": ["hillary", "sidney"]}}, "grpB": {"terms": {"accounts": ["donald", "mitt"]}}, "grpC": {"terms": {"accounts": ["vladimir", "nigel"]}}}) # type: ignore
+ assert {
+ "adjacency_matrix": {
+ "filters": {
+ "grpA": {"terms": {"accounts": ["hillary", "sidney"]}},
+ "grpB": {"terms": {"accounts": ["donald", "mitt"]}},
+ "grpC": {"terms": {"accounts": ["vladimir", "nigel"]}},
+ }
+ }
+ } == a.to_dict()
a = aggs.AdjacencyMatrix(
filters={
- "grpA": {"terms": {"accounts": ["hillary", "sidney"]}},
- "grpB": {"terms": {"accounts": ["donald", "mitt"]}},
- "grpC": {"terms": {"accounts": ["vladimir", "nigel"]}},
+ "grpA": query.Terms(accounts=["hillary", "sidney"]),
+ "grpB": query.Terms(accounts=["donald", "mitt"]),
+ "grpC": query.Terms(accounts=["vladimir", "nigel"]),
}
)
assert {
@@ -471,11 +507,18 @@ def test_adjancecy_matrix_aggregation() -> None:
def test_top_metrics_aggregation() -> None:
- a = aggs.TopMetrics(metrics={"field": "m"}, sort={"s": "desc"})
-
+ # test the dictionary (type ignored) and fully typed alternatives
+ a = aggs.TopMetrics(metrics={"field": "m"}, sort={"s": "desc"}) # type: ignore
assert {
"top_metrics": {"metrics": {"field": "m"}, "sort": {"s": "desc"}}
} == a.to_dict()
+ a = aggs.TopMetrics(
+ metrics=types.TopMetricsValue(field="m"),
+ sort=types.SortOptions("s", types.FieldSort(order="desc")),
+ )
+ assert {
+ "top_metrics": {"metrics": {"field": "m"}, "sort": {"s": {"order": "desc"}}}
+ } == a.to_dict()
def test_bucket_agg_with_filter() -> None:
diff --git a/tests/test_integration/test_examples/_async/test_composite_aggs.py b/tests/test_integration/test_examples/_async/test_composite_aggs.py
index 86c88cc0..770872f5 100644
--- a/tests/test_integration/test_examples/_async/test_composite_aggs.py
+++ b/tests/test_integration/test_examples/_async/test_composite_aggs.py
@@ -28,7 +28,7 @@ async def test_scan_aggs_exhausts_all_files(
async_data_client: AsyncElasticsearch,
) -> None:
s = AsyncSearch(index="flat-git")
- key_aggs = {"files": A("terms", field="files")}
+ key_aggs = [{"files": A("terms", field="files")}]
file_list = [f async for f in scan_aggs(s, key_aggs)]
assert len(file_list) == 26
diff --git a/tests/test_integration/test_examples/_sync/test_composite_aggs.py b/tests/test_integration/test_examples/_sync/test_composite_aggs.py
index 990987dd..3c4f0c95 100644
--- a/tests/test_integration/test_examples/_sync/test_composite_aggs.py
+++ b/tests/test_integration/test_examples/_sync/test_composite_aggs.py
@@ -28,7 +28,7 @@ def test_scan_aggs_exhausts_all_files(
data_client: Elasticsearch,
) -> None:
s = Search(index="flat-git")
- key_aggs = {"files": A("terms", field="files")}
+ key_aggs = [{"files": A("terms", field="files")}]
file_list = [f for f in scan_aggs(s, key_aggs)]
assert len(file_list) == 26
diff --git a/utils/generator.py b/utils/generator.py
index c0a4f3a0..aeeaa9d8 100644
--- a/utils/generator.py
+++ b/utils/generator.py
@@ -32,6 +32,7 @@
lstrip_blocks=True,
)
query_py = jinja_env.get_template("query.py.tpl")
+aggs_py = jinja_env.get_template("aggs.py.tpl")
types_py = jinja_env.get_template("types.py.tpl")
# map with name replacements for Elasticsearch attributes
@@ -43,6 +44,22 @@
"_types.query_dsl:DistanceFeatureQuery": "_types.query_dsl:DistanceFeatureQueryBase",
}
+# the base class of some aggregation types is difficult to determine from the
+# schema, so it is given explicitly here
+AGG_TYPES = {
+ "bucket_count_ks_test": "Pipeline",
+ "bucket_correlation": "Pipeline",
+ "bucket_sort": "Bucket",
+ "categorize_text": "Bucket",
+ "filter": "Bucket",
+ "moving_avg": "Pipeline",
+ "variable_width_histogram": "Bucket",
+}
+
+
+def property_to_class_name(name):
+ """Convert a property name such as "ip_range" into a class name such as
+ "IPRange", keeping the "ip" abbreviation fully capitalized."""
+ return "".join([w.title() if w != "ip" else "IP" for w in name.split("_")])
+
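Worked examples of the helper above (the special case keeps `IP` capitalized):

```python
# "ip" is special-cased so that e.g. ip_range becomes IPRange, not IpRange
assert property_to_class_name("adjacency_matrix") == "AdjacencyMatrix"
assert property_to_class_name("ip_range") == "IPRange"
```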
def wrapped_doc(text, width=70, initial_indent="", subsequent_indent=""):
"""Formats a docstring as a list of lines of up to the request width."""
@@ -63,6 +80,15 @@ def add_dict_type(type_):
return type_
+def add_seq_dict_type(type_):
+ """Add List[Dict[str, Any]] to a Python type hint."""
+ if type_.startswith("Union["):
+ type_ = f"{type_[:-1]}, Sequence[Dict[str, Any]]]"
+ else:
+ type_ = f"Union[{type_}, Sequence[Dict[str, Any]]]"
+ return type_
+
+
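Worked examples of the helper above, using `types.SortOptions` (which appears elsewhere in this patch) as the wrapped interface:

```python
# non-Union hints are wrapped in a new Union
assert (
    add_seq_dict_type('Sequence["types.SortOptions"]')
    == 'Union[Sequence["types.SortOptions"], Sequence[Dict[str, Any]]]'
)
# existing Unions get the sequence-of-dicts alternative appended
assert (
    add_seq_dict_type("Union[str, int]")
    == "Union[str, int, Sequence[Dict[str, Any]]]"
)
```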
def add_not_set(type_):
"""Add DefaultType to a Python type hint."""
if type_.startswith("Union["):
@@ -101,6 +127,18 @@ def find_type(self, name, namespace=None):
):
return t
+ def inherits_from(self, type_, name, namespace=None):
+ """Return True if the given schema type inherits, directly or
+ indirectly, from the type with the given name and namespace."""
+ while "inherits" in type_:
+ type_ = self.find_type(
+ type_["inherits"]["type"]["name"],
+ type_["inherits"]["type"]["namespace"],
+ )
+ if type_["name"]["name"] == name and (
+ namespace is None or type_["name"]["namespace"] == namespace
+ ):
+ return True
+ return False
+
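A self-contained sketch of the `inherits_from` walk above; the two entries mirror real schema names but are simplified, fabricated stand-ins for `schema.json` records:

```python
# Fabricated mini-schema: avg_bucket is a pipeline aggregation in the spec.
FAKE_SCHEMA = {
    "AvgBucketAggregation": {
        "name": {"name": "AvgBucketAggregation", "namespace": "_types.aggregations"},
        "inherits": {
            "type": {
                "name": "PipelineAggregationBase",
                "namespace": "_types.aggregations",
            }
        },
    },
    "PipelineAggregationBase": {
        "name": {"name": "PipelineAggregationBase", "namespace": "_types.aggregations"}
    },
}


def fake_inherits_from(type_, name, namespace=None):
    # same loop as inherits_from(), but looking ancestors up in FAKE_SCHEMA
    while "inherits" in type_:
        type_ = FAKE_SCHEMA[type_["inherits"]["type"]["name"]]
        if type_["name"]["name"] == name and (
            namespace is None or type_["name"]["namespace"] == namespace
        ):
            return True
    return False


assert fake_inherits_from(FAKE_SCHEMA["AvgBucketAggregation"], "PipelineAggregationBase")
```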
def get_python_type(self, schema_type):
"""Obtain Python typing details for a given schema type
@@ -137,6 +175,17 @@ def get_python_type(self, schema_type):
):
# QueryContainer maps to the DSL's Query class
return "Query", {"type": "query"}
+ elif (
+ type_name["namespace"] == "_types.aggregations"
+ and type_name["name"] == "Buckets"
+ ):
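+ # a Buckets instance is a dictionary of named Query objects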
+ return "Dict[str, Query]", {"type": "query", "hash": True}
+ elif (
+ type_name["namespace"] == "_types.aggregations"
+ and type_name["name"] == "CompositeAggregationSource"
+ ):
+ # CompositeAggregationSource maps to the DSL's Agg class
+ return "Agg[_R]", None
else:
# for any other instances we get the type and recurse
type_ = self.find_type(type_name["name"], type_name["namespace"])
@@ -156,7 +205,9 @@ def get_python_type(self, schema_type):
# for dicts we use Mapping[key_type, value_type]
key_type, key_param = self.get_python_type(schema_type["key"])
value_type, value_param = self.get_python_type(schema_type["value"])
- return f"Mapping[{key_type}, {value_type}]", None
+ return f"Mapping[{key_type}, {value_type}]", (
+ {**value_param, "hash": True} if value_param else None
+ )
elif schema_type["kind"] == "union_of":
if (
@@ -258,7 +309,9 @@ def add_attribute(self, k, arg, for_types_py=False):
type_ = "Any"
param = None
if type_ != "Any":
- if "types." in type_:
+ if 'Sequence["types.' in type_:
+ type_ = add_seq_dict_type(type_) # lists of interfaces can be given as lists of dicts
+ elif "types." in type_:
type_ = add_dict_type(type_) # interfaces can be given as dicts
type_ = add_not_set(type_)
if for_types_py:
@@ -302,6 +355,59 @@ def add_attribute(self, k, arg, for_types_py=False):
if param and "params" in k:
k["params"].append(param)
+ def add_behaviors(self, type_, k, for_types_py=False):
+ """Add behaviors reported in the specification of the given type to the
+ class representation.
+ """
+ if "behaviors" in type_:
+ for behavior in type_["behaviors"]:
+ if (
+ behavior["type"]["name"] != "AdditionalProperty"
+ or behavior["type"]["namespace"] != "_spec_utils"
+ ):
+ # we do not support this behavior, so we ignore it
+ continue
+ key_type, _ = schema.get_python_type(behavior["generics"][0])
+ if "InstrumentedField" in key_type:
+ value_type, _ = schema.get_python_type(behavior["generics"][1])
+ if for_types_py:
+ value_type = value_type.replace('"DefaultType"', "DefaultType")
+ value_type = value_type.replace(
+ '"InstrumentedField"', "InstrumentedField"
+ )
+ value_type = re.sub(
+ r'"(function\.[a-zA-Z0-9_]+)"', r"\1", value_type
+ )
+ value_type = re.sub(
+ r'"types\.([a-zA-Z0-9_]+)"', r'"\1"', value_type
+ )
+ value_type = re.sub(
+ r'"(wrappers\.[a-zA-Z0-9_]+)"', r"\1", value_type
+ )
+ k["args"].append(
+ {
+ "name": "_field",
+ "type": add_not_set(key_type),
+ "doc": [":arg _field: The field to use in this query."],
+ "required": False,
+ "positional": True,
+ }
+ )
+ k["args"].append(
+ {
+ "name": "_value",
+ "type": add_not_set(add_dict_type(value_type)),
+ "doc": [":arg _value: The query value for the field."],
+ "required": False,
+ "positional": True,
+ }
+ )
+ k["is_single_field"] = True
+ else:
+ raise RuntimeError(
+ f"Non-field AdditionalProperty is not supported for interface {type_['name']['namespace']}:{type_['name']['name']}."
+ )
+
def property_to_python_class(self, p):
"""Return a dictionary with template data necessary to render a schema
property as a Python class.
@@ -334,59 +440,41 @@ def property_to_python_class(self, p):
"""
k = {
"property_name": p["name"],
- "name": "".join([w.title() for w in p["name"].split("_")]),
+ "name": property_to_class_name(p["name"]),
}
k["docstring"] = wrapped_doc(p.get("description") or "")
+ other_classes = []
kind = p["type"]["kind"]
if kind == "instance_of":
namespace = p["type"]["type"]["namespace"]
name = p["type"]["type"]["name"]
if f"{namespace}:{name}" in TYPE_REPLACEMENTS:
namespace, name = TYPE_REPLACEMENTS[f"{namespace}:{name}"].split(":")
- type_ = schema.find_type(name, namespace)
+ if name == "QueryContainer" and namespace == "_types.query_dsl":
+ type_ = {
+ "kind": "interface",
+ "properties": [p],
+ }
+ else:
+ type_ = schema.find_type(name, namespace)
+ if p["name"] in AGG_TYPES:
+ k["parent"] = AGG_TYPES[p["name"]]
+
if type_["kind"] == "interface":
+ # set the correct parent for bucket and pipeline aggregations
+ if self.inherits_from(
+ type_, "PipelineAggregationBase", "_types.aggregations"
+ ):
+ k["parent"] = "Pipeline"
+ elif self.inherits_from(
+ type_, "BucketAggregationBase", "_types.aggregations"
+ ):
+ k["parent"] = "Bucket"
+
+ # generate class attributes
k["args"] = []
k["params"] = []
- if "behaviors" in type_:
- for behavior in type_["behaviors"]:
- if (
- behavior["type"]["name"] != "AdditionalProperty"
- or behavior["type"]["namespace"] != "_spec_utils"
- ):
- # we do not support this behavior, so we ignore it
- continue
- key_type, _ = schema.get_python_type(behavior["generics"][0])
- if "InstrumentedField" in key_type:
- value_type, _ = schema.get_python_type(
- behavior["generics"][1]
- )
- k["args"].append(
- {
- "name": "_field",
- "type": add_not_set(key_type),
- "doc": [
- ":arg _field: The field to use in this query."
- ],
- "required": False,
- "positional": True,
- }
- )
- k["args"].append(
- {
- "name": "_value",
- "type": add_not_set(add_dict_type(value_type)),
- "doc": [
- ":arg _value: The query value for the field."
- ],
- "required": False,
- "positional": True,
- }
- )
- k["is_single_field"] = True
- else:
- raise RuntimeError(
- f"Non-field AdditionalProperty are not supported for interface {namespace}:{name}."
- )
+ self.add_behaviors(type_, k)
while True:
for arg in type_["properties"]:
self.add_attribute(k, arg)
@@ -397,6 +485,21 @@ def property_to_python_class(self, p):
)
else:
break
+
+ elif type_["kind"] == "type_alias":
+ if type_["type"]["kind"] == "union_of":
+ # for unions we create sub-classes
+ for other in type_["type"]["items"]:
+ other_class = self.interface_to_python_class(
+ other["type"]["name"], self.interfaces, for_types_py=False
+ )
+ other_class["parent"] = k["name"]
+ other_classes.append(other_class)
+ else:
+ raise RuntimeError(
+ "Cannot generate code for type_alias instances that are not unions."
+ )
+
else:
raise RuntimeError(
f"Cannot generate code for instances of kind '{type_['kind']}'"
@@ -444,9 +547,9 @@ def property_to_python_class(self, p):
else:
raise RuntimeError(f"Cannot generate code for type {p['type']}")
- return k
+ return [k] + other_classes
- def interface_to_python_class(self, interface, interfaces):
+ def interface_to_python_class(self, interface, interfaces, for_types_py=True):
"""Return a dictionary with template data necessary to render an
interface as a Python class.
@@ -475,9 +578,10 @@ def interface_to_python_class(self, interface, interfaces):
if type_["kind"] != "interface":
raise RuntimeError(f"Type {interface} is not an interface")
k = {"name": interface, "args": []}
+ self.add_behaviors(type_, k, for_types_py=for_types_py)
while True:
for arg in type_["properties"]:
- schema.add_attribute(k, arg, for_types_py=True)
+ schema.add_attribute(k, arg, for_types_py=for_types_py)
if "inherits" not in type_ or "type" not in type_["inherits"]:
break
@@ -500,13 +604,28 @@ def generate_query_py(schema, filename):
classes = []
query_container = schema.find_type("QueryContainer", "_types.query_dsl")
for p in query_container["properties"]:
- classes.append(schema.property_to_python_class(p))
+ classes += schema.property_to_python_class(p)
with open(filename, "wt") as f:
f.write(query_py.render(classes=classes, parent="Query"))
print(f"Generated {filename}.")
+def generate_aggs_py(schema, filename):
+ """Generate aggs.py with all the properties of `AggregationContainer` as
+ Python classes.
+ """
+ classes = []
+ aggs_container = schema.find_type("AggregationContainer", "_types.aggregations")
+ for p in aggs_container["properties"]:
+ if "containerProperty" not in p or not p["containerProperty"]:
+ classes += schema.property_to_python_class(p)
+
+ with open(filename, "wt") as f:
+ f.write(aggs_py.render(classes=classes, parent="Agg"))
+ print(f"Generated {filename}.")
+
+
def generate_types_py(schema, filename):
"""Generate types.py"""
classes = {}
@@ -542,4 +661,5 @@ def generate_types_py(schema, filename):
if __name__ == "__main__":
schema = ElasticsearchSchema()
generate_query_py(schema, "elasticsearch_dsl/query.py")
+ generate_aggs_py(schema, "elasticsearch_dsl/aggs.py")
generate_types_py(schema, "elasticsearch_dsl/types.py")
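To regenerate only the aggregations module, a minimal sketch, assuming `utils` is importable from the repository root and the schema copy consumed by `ElasticsearchSchema()` is in place:

```python
# Hedged usage sketch: render elasticsearch_dsl/aggs.py from the schema.
from utils.generator import ElasticsearchSchema, generate_aggs_py

schema = ElasticsearchSchema()
generate_aggs_py(schema, "elasticsearch_dsl/aggs.py")
```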
diff --git a/utils/templates/aggs.py.tpl b/utils/templates/aggs.py.tpl
new file mode 100644
index 00000000..47f9937a
--- /dev/null
+++ b/utils/templates/aggs.py.tpl
@@ -0,0 +1,320 @@
+# Licensed to Elasticsearch B.V. under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch B.V. licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import collections.abc
+from copy import deepcopy
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ ClassVar,
+ Dict,
+ Generic,
+ Iterable,
+ Literal,
+ Mapping,
+ MutableMapping,
+ Optional,
+ Sequence,
+ Union,
+ cast,
+)
+
+from elastic_transport.client_utils import DEFAULT
+
+from .query import Query
+from .response.aggs import AggResponse, BucketData, FieldBucketData, TopHitsData
+from .utils import _R, AttrDict, DslBase
+
+if TYPE_CHECKING:
+ from elastic_transport.client_utils import DefaultType
+ from .document_base import InstrumentedField
+ from .search_base import SearchBase
+ from elasticsearch_dsl import types
+
+
+def A(
+ name_or_agg: Union[MutableMapping[str, Any], "Agg[_R]", str],
+ filter: Optional[Union[str, "Query"]] = None,
+ **params: Any,
+) -> "Agg[_R]":
+ if filter is not None:
+ if name_or_agg != "filter":
+ raise ValueError(
+ "Aggregation %r doesn't accept positional argument 'filter'."
+ % name_or_agg
+ )
+ params["filter"] = filter
+
+ # {"terms": {"field": "tags"}, "aggs": {...}}
+ if isinstance(name_or_agg, collections.abc.MutableMapping):
+ if params:
+ raise ValueError("A() cannot accept parameters when passing in a dict.")
+ # copy to avoid modifying in-place
+ agg = deepcopy(name_or_agg)
+ # pop out nested aggs
+ aggs = agg.pop("aggs", None)
+ # pop out meta data
+ meta = agg.pop("meta", None)
+ # should be {"terms": {"field": "tags"}}
+ if len(agg) != 1:
+ raise ValueError(
+ 'A() can only accept dict with an aggregation ({"terms": {...}}). '
+ "Instead it got (%r)" % name_or_agg
+ )
+ agg_type, params = agg.popitem()
+ if aggs:
+ params = params.copy()
+ params["aggs"] = aggs
+ if meta:
+ params = params.copy()
+ params["meta"] = meta
+ return Agg[_R].get_dsl_class(agg_type)(_expand__to_dot=False, **params)
+
+ # Terms(...) just return the nested agg
+ elif isinstance(name_or_agg, Agg):
+ if params:
+ raise ValueError(
+ "A() cannot accept parameters when passing in an Agg object."
+ )
+ return name_or_agg
+
+ # "terms", field="tags"
+ return Agg[_R].get_dsl_class(name_or_agg)(**params)
+
+
+class Agg(DslBase, Generic[_R]):
+ _type_name = "agg"
+ _type_shortcut = staticmethod(A)
+ name = ""
+
+ def __contains__(self, key: str) -> bool:
+ return False
+
+ def to_dict(self) -> Dict[str, Any]:
+ d = super().to_dict()
+ if isinstance(d[self.name], dict):
+ n = cast(Dict[str, Any], d[self.name])
+ if "meta" in n:
+ d["meta"] = n.pop("meta")
+ return d
+
+ def result(self, search: "SearchBase[_R]", data: Dict[str, Any]) -> AttrDict[Any]:
+ return AggResponse[_R](self, search, data)
+
+
+class AggBase(Generic[_R]):
+ aggs: Dict[str, Agg[_R]]
+ _base: Agg[_R]
+ _params: Dict[str, Any]
+ _param_defs: ClassVar[Dict[str, Any]] = {
+ "aggs": {"type": "agg", "hash": True},
+ }
+
+ def __contains__(self, key: str) -> bool:
+ return key in self._params.get("aggs", {})
+
+ def __getitem__(self, agg_name: str) -> Agg[_R]:
+ agg = cast(
+ Agg[_R], self._params.setdefault("aggs", {})[agg_name]
+ ) # propagate KeyError
+
+ # make sure we're not mutating a shared state - whenever accessing a
+ # bucket, return a shallow copy of it to be safe
+ if isinstance(agg, Bucket):
+ agg = A(agg.name, **agg._params)
+ # be sure to store the copy so any modifications to it will affect us
+ self._params["aggs"][agg_name] = agg
+
+ return agg
+
+ def __setitem__(self, agg_name: str, agg: Agg[_R]) -> None:
+ self.aggs[agg_name] = A(agg)
+
+ def __iter__(self) -> Iterable[str]:
+ return iter(self.aggs)
+
+ def _agg(
+ self,
+ bucket: bool,
+ name: str,
+ agg_type: Union[Dict[str, Any], Agg[_R], str],
+ *args: Any,
+ **params: Any,
+ ) -> Agg[_R]:
+ agg = self[name] = A(agg_type, *args, **params)
+
+ # For chaining - when creating new buckets return them...
+ if bucket:
+ return agg
+ # otherwise return self._base so we can keep chaining
+ else:
+ return self._base
+
+ def metric(
+ self,
+ name: str,
+ agg_type: Union[Dict[str, Any], Agg[_R], str],
+ *args: Any,
+ **params: Any,
+ ) -> Agg[_R]:
+ return self._agg(False, name, agg_type, *args, **params)
+
+ def bucket(
+ self,
+ name: str,
+ agg_type: Union[Dict[str, Any], Agg[_R], str],
+ *args: Any,
+ **params: Any,
+ ) -> "Bucket[_R]":
+ return cast("Bucket[_R]", self._agg(True, name, agg_type, *args, **params))
+
+ def pipeline(
+ self,
+ name: str,
+ agg_type: Union[Dict[str, Any], Agg[_R], str],
+ *args: Any,
+ **params: Any,
+ ) -> "Pipeline[_R]":
+ return cast("Pipeline[_R]", self._agg(False, name, agg_type, *args, **params))
+
+ def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
+ return BucketData(self, search, data) # type: ignore
+
+
+class Bucket(AggBase[_R], Agg[_R]):
+ def __init__(self, **params: Any):
+ super().__init__(**params)
+ # remember self for chaining
+ self._base = self
+
+ def to_dict(self) -> Dict[str, Any]:
+ d = super(AggBase, self).to_dict()
+ if isinstance(d[self.name], dict):
+ n = cast(AttrDict[Any], d[self.name])
+ if "aggs" in n:
+ d["aggs"] = n.pop("aggs")
+ return d
+
+
+class Pipeline(Agg[_R]):
+ pass
+
+
+{% for k in classes %}
+class {{ k.name }}({{ k.parent if k.parent else parent }}[_R]):
+ """
+ {% for line in k.docstring %}
+ {{ line }}
+ {% endfor %}
+ {% if k.args %}
+ {% if k.docstring %}
+
+ {% endif %}
+ {% for kwarg in k.args %}
+ {% for line in kwarg.doc %}
+ {{ line }}
+ {% endfor %}
+ {% endfor %}
+ {% endif %}
+ """
+ {% if k.property_name %}
+ name = "{{ k.property_name }}"
+ {% endif %}
+ {% if k.params %}
+ _param_defs = {
+ {% for param in k.params %}
+ "{{ param.name }}": {{ param.param }},
+ {% endfor %}
+ {% if k.name == "Filter" or k.name == "Filters" or k.name == "Composite" %}
+ {# these aggregations define their own _param_defs, which hides the "aggs" entry inherited from AggBase, so it is restored here #}
+ "aggs": {"type": "agg", "hash": True},
+ {% endif %}
+ }
+ {% endif %}
+
+ def __init__(
+ self,
+ {% if k.args | length != 1 %}
+ {% for arg in k.args %}
+ {% if arg.positional %}
+ {{ arg.name }}: {{ arg.type }} = DEFAULT,
+ {% endif %}
+ {% endfor %}
+ {% if k.args and not k.args[-1].positional %}
+ *,
+ {% endif %}
+ {% for arg in k.args %}
+ {% if not arg.positional %}
+ {{ arg.name }}: {{ arg.type }} = DEFAULT,
+ {% endif %}
+ {% endfor %}
+ {% else %}
+ {# when we have just one argument, we allow it as positional or keyword #}
+ {% for arg in k.args %}
+ {{ arg.name }}: {{ arg.type }} = DEFAULT,
+ {% endfor %}
+ {% endif %}
+ **kwargs: Any
+ ):
+ {% if k.name == "FunctionScore" %}
+ {# continuation of the FunctionScore shortcut property support from above #}
+ if functions is DEFAULT:
+ functions = []
+ for name in ScoreFunction._classes:
+ if name in kwargs:
+ functions.append({name: kwargs.pop(name)}) # type: ignore
+ {% elif k.is_single_field %}
+ if _field is not DEFAULT:
+ kwargs[str(_field)] = _value
+ {% elif k.is_multi_field %}
+ if _fields is not DEFAULT:
+ for field, value in _fields.items():
+ kwargs[str(field)] = value
+ {% endif %}
+ super().__init__(
+ {% for arg in k.args %}
+ {% if not arg.positional %}
+ {{ arg.name }}={{ arg.name }},
+ {% endif %}
+ {% endfor %}
+ **kwargs
+ )
+
+ {# what follows is a set of Pythonic enhancements to some of the aggregation
+ classes which are outside the scope of the code generator #}
+ {% if k.name == "Filter" %}
+ def to_dict(self) -> Dict[str, Any]:
+ d = super().to_dict()
+ if isinstance(d[self.name], dict):
+ n = cast(AttrDict[Any], d[self.name])
+ n.update(n.pop("filter", {}))
+ return d
+
+ {% elif k.name == "Histogram" or k.name == "DateHistogram" or k.name == "AutoDateHistogram" or k.name == "VariableWidthHistogram" %}
+ def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
+ return FieldBucketData(self, search, data)
+
+ {% elif k.name == "Terms" %}
+ def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
+ return FieldBucketData(self, search, data)
+
+ {% elif k.name == "TopHits" %}
+ def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
+ return TopHitsData(self, search, data)
+
+ {% endif %}
+{% endfor %}
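A short usage sketch of the shortcuts this template reproduces: `A()` resolves a registered name to its class, `metric()` returns the parent bucket so calls can be chained, and `bucket()` returns the newly created bucket.

```python
# Hedged sketch of the chaining behavior defined in AggBase above.
from elasticsearch_dsl import A

a = A("terms", field="tags")
a.metric("max_lines", "max", field="lines")
a.bucket("per_author", "terms", field="author")

assert a.to_dict() == {
    "terms": {"field": "tags"},
    "aggs": {
        "max_lines": {"max": {"field": "lines"}},
        "per_author": {"terms": {"field": "author"}},
    },
}
```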
diff --git a/utils/templates/types.py.tpl b/utils/templates/types.py.tpl
index ea2a5c11..8f854b0c 100644
--- a/utils/templates/types.py.tpl
+++ b/utils/templates/types.py.tpl
@@ -43,13 +43,17 @@ class {{ k.name }}({{ k.parent if k.parent else "AttrDict[Any]" }}):
def __init__(
self,
{% for arg in k.args %}
- {% if arg.positional %}{{ arg.name }}: {{ arg.type }} = DEFAULT,{% endif %}
+ {% if arg.positional %}
+ {{ arg.name }}: {{ arg.type }} = DEFAULT,
+ {% endif %}
{% endfor %}
{% if k.args and not k.args[-1].positional %}
*,
{% endif %}
{% for arg in k.args %}
- {% if not arg.positional %}{{ arg.name }}: {{ arg.type }} = DEFAULT,{% endif %}
+ {% if not arg.positional %}
+ {{ arg.name }}: {{ arg.type }} = DEFAULT,
+ {% endif %}
{% endfor %}
**kwargs: Any
):