diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 87fda361e5041..5981d0020f112 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -56,7 +56,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.27", "8.15.6", "8.16.2", "8.17.0", "8.18.0"] + BWC_VERSION: ["7.17.27", "8.16.2", "8.17.0", "8.18.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index 5bdda62fe6287..a6285d9da9991 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -561,8 +561,8 @@ steps: env: BWC_VERSION: 8.14.3 - - label: "{{matrix.image}} / 8.15.6 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.6 + - label: "{{matrix.image}} / 8.15.5 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.5 timeout_in_minutes: 300 matrix: setup: @@ -575,7 +575,7 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.15.6 + BWC_VERSION: 8.15.5 - label: "{{matrix.image}} / 8.16.2 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.2 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index fc6cf96bcd42a..6d19de6615c7d 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -629,8 +629,8 @@ steps: - signal_reason: agent_stop limit: 3 - - label: 8.15.6 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.6#bwcTest + - label: 8.15.5 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.5#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -639,7 +639,7 @@ steps: buildDirectory: /dev/shm/bk preemptible: true env: - BWC_VERSION: 8.15.6 + BWC_VERSION: 8.15.5 retry: automatic: - exit_status: "-1" @@ -771,7 +771,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk17 - BWC_VERSION: ["7.17.27", "8.15.6", "8.16.2", "8.17.0", "8.18.0"] + BWC_VERSION: ["7.17.27", "8.16.2", "8.17.0", "8.18.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -819,7 +819,7 @@ steps: - openjdk21 - openjdk22 - openjdk23 - BWC_VERSION: ["7.17.27", "8.15.6", "8.16.2", "8.17.0", "8.18.0"] + BWC_VERSION: ["7.17.27", "8.16.2", "8.17.0", "8.18.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index aa9ac9776dcc4..73de0f3e1f798 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -32,7 +32,7 @@ BWC_VERSION: - "8.12.2" - "8.13.4" - "8.14.3" - - "8.15.6" + - "8.15.5" - "8.16.2" - "8.17.0" - "8.18.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 77892cd297f3e..e8ee5a6fa75e8 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,6 +1,5 @@ BWC_VERSION: - "7.17.27" - - "8.15.6" - "8.16.2" - "8.17.0" - "8.18.0" diff --git a/branches.json b/branches.json index 0e23a795664dd..95fbdb1efd655 100644 --- a/branches.json +++ b/branches.json @@ -13,9 +13,6 @@ { "branch": "8.x" }, - { - "branch": "8.15" - }, { "branch": "7.17" } diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java 
b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java index 41c0b4d67e1df..ea9009172c7e2 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java @@ -17,8 +17,6 @@ import org.gradle.api.Project; import java.io.File; -import java.util.Arrays; -import java.util.Map; /** * This plugin configures formatting for Java source using Spotless @@ -66,8 +64,7 @@ public void apply(Project project) { java.importOrderFile(new File(elasticsearchWorkspace, importOrderPath)); // Most formatting is done through the Eclipse formatter - java.eclipse().withP2Mirrors(Map.of("https://download.eclipse.org/", "https://mirror.umd.edu/eclipse/")) - .configFile(new File(elasticsearchWorkspace, formatterConfigPath)); + java.eclipse().configFile(new File(elasticsearchWorkspace, formatterConfigPath)); // Ensure blank lines are actually empty. Since formatters are applied in // order, apply this one last, otherwise non-empty blank lines can creep diff --git a/build.gradle b/build.gradle index a5c518afef94e..cf7bcc96330a1 100644 --- a/build.gradle +++ b/build.gradle @@ -290,7 +290,10 @@ allprojects { if (project.path.contains(":distribution:docker")) { enabled = false } - + if (project.path.contains(":libs:cli")) { + // ensure we resolve p2 dependencies for the spotless eclipse formatter + dependsOn "spotlessJavaCheck" + } } plugins.withId('lifecycle-base') { diff --git a/docs/changelog/117839.yaml b/docs/changelog/117839.yaml new file mode 100644 index 0000000000000..98c97b5078c02 --- /dev/null +++ b/docs/changelog/117839.yaml @@ -0,0 +1,5 @@ +pr: 117839 +summary: Add match support for `semantic_text` fields +area: "Search" +type: enhancement +issues: [] diff --git a/docs/changelog/118035.yaml b/docs/changelog/118035.yaml new file mode 100644 index 0000000000000..fdeaa184723b9 --- /dev/null +++ b/docs/changelog/118035.yaml @@ -0,0 +1,6 @@ +pr: 118035 +summary: Include hidden indices in `DeprecationInfoAction` +area: Indices APIs +type: bug +issues: + - 118020 diff --git a/docs/changelog/118102.yaml b/docs/changelog/118102.yaml new file mode 100644 index 0000000000000..e5ec32cdddbec --- /dev/null +++ b/docs/changelog/118102.yaml @@ -0,0 +1,5 @@ +pr: 118102 +summary: "ESQL: Enterprise license enforcement for CCS" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/118114.yaml b/docs/changelog/118114.yaml new file mode 100644 index 0000000000000..1b7532d5df981 --- /dev/null +++ b/docs/changelog/118114.yaml @@ -0,0 +1,5 @@ +pr: 118114 +summary: Enable physical plan verification +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/118166.yaml b/docs/changelog/118166.yaml new file mode 100644 index 0000000000000..99e3fcafd5805 --- /dev/null +++ b/docs/changelog/118166.yaml @@ -0,0 +1,5 @@ +pr: 118166 +summary: Update minimum supported snapshot version for Machine Learning jobs to 8.3.0 +area: Machine Learning +type: upgrade +issues: [] diff --git a/docs/changelog/118173.yaml b/docs/changelog/118173.yaml new file mode 100644 index 0000000000000..a3c9054674ba5 --- /dev/null +++ b/docs/changelog/118173.yaml @@ -0,0 +1,5 @@ +pr: 118173 +summary: ES|QL categorize with multiple groupings +area: Machine Learning +type: feature +issues: [] diff --git a/docs/changelog/118194.yaml b/docs/changelog/118194.yaml new file mode 100644 index 
0000000000000..0e5eca55d597c --- /dev/null +++ b/docs/changelog/118194.yaml @@ -0,0 +1,5 @@ +pr: 118194 +summary: Retry on `ClusterBlockException` on transform destination index +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/118435.yaml b/docs/changelog/118435.yaml new file mode 100644 index 0000000000000..8bccbeb54698d --- /dev/null +++ b/docs/changelog/118435.yaml @@ -0,0 +1,6 @@ +pr: 118435 +summary: '`_score` should not be a reserved attribute in ES|QL' +area: ES|QL +type: enhancement +issues: + - 118460 diff --git a/docs/reference/connector/docs/connectors-release-notes.asciidoc b/docs/reference/connector/docs/connectors-release-notes.asciidoc index e1ed082365c00..ff3d859e1a888 100644 --- a/docs/reference/connector/docs/connectors-release-notes.asciidoc +++ b/docs/reference/connector/docs/connectors-release-notes.asciidoc @@ -9,8 +9,76 @@ Prior to version *8.16.0*, the connector release notes were published as part of the {enterprise-search-ref}/changelog.html[Enterprise Search documentation]. ==== -*Release notes*: +[discrete] +[[es-connectors-release-notes-8-17-0]] +=== 8.17.0 -* <> +No notable changes in this release. -include::release-notes/connectors-release-notes-8.16.0.asciidoc[] +[discrete] +[[es-connectors-release-notes-8-16-1]] +=== 8.16.1 + +[discrete] +[[es-connectors-release-notes-8-16-1-bug-fixes]] +==== Bug fixes + +* Fixed a bug in the Outlook Connector where having deactivated users could cause the sync to fail. +See https://github.com/elastic/connectors/pull/2967[*PR 2967*]. +* Fixed a bug where the Confluence connector was not downloading some blog post documents due to unexpected response format. +See https://github.com/elastic/connectors/pull/2984[*PR 2984*]. + +[discrete] +[[es-connectors-release-notes-8-16-0]] +=== 8.16.0 + +[discrete] +[[es-connectors-release-notes-deprecation-notice]] +==== Deprecation notices + +* *Direct index access for connectors and sync jobs* ++ +IMPORTANT: Directly accessing connector and sync job state through `.elastic-connectors*` indices is deprecated, and will be disallowed entirely in a future release. + +* Instead, the Elasticsearch Connector APIs should be used. Connectors framework code now uses the <> by default. +See https://github.com/elastic/connectors/pull/2884[*PR 2902*]. + +* *Docker `enterprise-search` namespace deprecation* ++ +IMPORTANT: The `enterprise-search` Docker namespace is deprecated and will be discontinued in a future release. ++ +Starting in `8.16.0`, Docker images are being transitioned to the new `integrations` namespace, which will become the sole location for future releases. This affects the https://github.com/elastic/connectors[Elastic Connectors] and https://github.com/elastic/data-extraction-service[Elastic Data Extraction Service]. ++ +During this transition period, images are published to both namespaces: ++ +** *Example*: ++ +Deprecated namespace:: +`docker.elastic.co/enterprise-search/elastic-connectors:v8.16.0` ++ +New namespace:: +`docker.elastic.co/integrations/elastic-connectors:v8.16.0` ++ +Users should migrate to the new `integrations` namespace as soon as possible to ensure continued access to future releases. + +[discrete] +[[es-connectors-release-notes-8-16-0-enhancements]] +==== Enhancements + +* Docker images now use Chainguard's Wolfi base image (`docker.elastic.co/wolfi/jdk:openjdk-11-dev`), replacing the previous `ubuntu:focal` base. 
+ +* The Sharepoint Online connector now works with the `Sites.Selected` permission instead of the broader permission `Sites.Read.All`. +See https://github.com/elastic/connectors/pull/2762[*PR 2762*]. + +* Starting in 8.16.0, connectors will start using proper SEMVER, with `MAJOR.MINOR.PATCH`, which aligns with Elasticsearch/Kibana versions. This drops the previous `.BUILD` suffix, which we used to release connectors between Elastic stack releases. Going forward, these inter-stack-release releases will be suffixed instead with `+`, aligning with Elastic Agent and conforming to SEMVER. +See https://github.com/elastic/connectors/pull/2749[*PR 2749*]. + +* Connector logs now use UTC timestamps, instead of machine-local timestamps. This only impacts logging output. +See https://github.com/elastic/connectors/pull/2695[*PR 2695*]. + +[discrete] +[[es-connectors-release-notes-8-16-0-bug-fixes]] +==== Bug fixes + +* The Dropbox connector now fetches the files from team shared folders. +See https://github.com/elastic/connectors/pull/2718[*PR 2718*]. diff --git a/docs/reference/connector/docs/release-notes/connectors-release-notes-8.16.0.asciidoc b/docs/reference/connector/docs/release-notes/connectors-release-notes-8.16.0.asciidoc deleted file mode 100644 index 7608336073176..0000000000000 --- a/docs/reference/connector/docs/release-notes/connectors-release-notes-8.16.0.asciidoc +++ /dev/null @@ -1,53 +0,0 @@ -[[es-connectors-release-notes-8-16-0]] -=== 8.16.0 connectors release notes - -[discrete] -[[es-connectors-release-notes-deprecation-notice]] -==== Deprecation notices - -* *Direct index access for connectors and sync jobs* -+ -IMPORTANT: Directly accessing connector and sync job state through `.elastic-connectors*` indices is deprecated, and will be disallowed entirely in a future release. - -* Instead, the Elasticsearch Connector APIs should be used. Connectors framework code now uses the <> by default. -See https://github.com/elastic/connectors/pull/2884[*PR 2902*]. - -* *Docker `enterprise-search` namespace deprecation* -+ -IMPORTANT: The `enterprise-search` Docker namespace is deprecated and will be discontinued in a future release. -+ -Starting in `8.16.0`, Docker images are being transitioned to the new `integrations` namespace, which will become the sole location for future releases. This affects the https://github.com/elastic/connectors[Elastic Connectors] and https://github.com/elastic/data-extraction-service[Elastic Data Extraction Service]. -+ -During this transition period, images are published to both namespaces: -+ -** *Example*: -+ -Deprecated namespace:: -`docker.elastic.co/enterprise-search/elastic-connectors:v8.16.0` -+ -New namespace:: -`docker.elastic.co/integrations/elastic-connectors:v8.16.0` -+ -Users should migrate to the new `integrations` namespace as soon as possible to ensure continued access to future releases. - -[discrete] -[[es-connectors-release-notes-8-16-0-enhancements]] -==== Enhancements - -* Docker images now use Chainguard's Wolfi base image (`docker.elastic.co/wolfi/jdk:openjdk-11-dev`), replacing the previous `ubuntu:focal` base. - -* The Sharepoint Online connector now works with the `Sites.Selected` permission instead of the broader permission `Sites.Read.All`. -See https://github.com/elastic/connectors/pull/2762[*PR 2762*]. - -* Starting in 8.16.0, connectors will start using proper SEMVER, with `MAJOR.MINOR.PATCH`, which aligns with Elasticsearch/Kibana versions. 
This drops the previous `.BUILD` suffix, which we used to release connectors between Elastic stack releases. Going forward, these inter-stack-release releases will be suffixed instead with `+`, aligning with Elastic Agent and conforming to SEMVER. -See https://github.com/elastic/connectors/pull/2749[*PR 2749*]. - -* Connector logs now use UTC timestamps, instead of machine-local timestamps. This only impacts logging output. -See https://github.com/elastic/connectors/pull/2695[*PR 2695*]. - -[discrete] -[[es-connectors-release-notes-8-16-0-bug-fixes]] -==== Bug fixes - -* The Dropbox connector now fetches the files from team shared folders. -See https://github.com/elastic/connectors/pull/2718[*PR 2718*]. \ No newline at end of file diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index ca6a7e489449b..a212c4e152b0e 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -71,7 +71,7 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=refresh] include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=routing] `_source`:: -(Optional, list) Set to `false` to disable source retrieval (default: `true`). +(Optional, list) Set to `true` to enable source retrieval (default: `false`). You can also specify a comma-separated list of the fields you want to retrieve. `_source_excludes`:: diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index d9b8f8802a04b..73e2db6e45e34 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -113,7 +113,7 @@ Index mode supports the following values: `standard`::: Standard indexing with default settings. -`tsds`::: _(data streams only)_ Index mode optimized for storage of metrics. For more information, see <>. +`time_series`::: _(data streams only)_ Index mode optimized for storage of metrics. For more information, see <>. `logsdb`::: _(data streams only)_ Index mode optimized for <>. diff --git a/docs/reference/mapping/params/index-prefixes.asciidoc b/docs/reference/mapping/params/index-prefixes.asciidoc index a143c5531c81b..1d5e844467b6f 100644 --- a/docs/reference/mapping/params/index-prefixes.asciidoc +++ b/docs/reference/mapping/params/index-prefixes.asciidoc @@ -54,3 +54,30 @@ PUT my-index-000001 } } -------------------------------- + +`index_prefixes` parameter instructs {ES} to create a subfield "._index_prefix". This +field will be used to do fast prefix queries. 
When doing highlighting, add the "._index_prefix" +subfield to the `matched_fields` parameter to highlight the main field based on the +matches found in the prefix field, like in the request below: + +[source,console] +-------------------------------- +GET my-index-000001/_search +{ + "query": { + "prefix": { + "full_name": { + "value": "ki" + } + } + }, + "highlight": { + "fields": { + "full_name": { + "matched_fields": ["full_name._index_prefix"] + } + } + } +} +-------------------------------- +// TEST[continued] diff --git a/docs/reference/mapping/types/search-as-you-type.asciidoc b/docs/reference/mapping/types/search-as-you-type.asciidoc index 3c71389f4cebb..c2673a614c265 100644 --- a/docs/reference/mapping/types/search-as-you-type.asciidoc +++ b/docs/reference/mapping/types/search-as-you-type.asciidoc @@ -97,11 +97,21 @@ GET my-index-000001/_search "my_field._3gram" ] } + }, + "highlight": { + "fields": { + "my_field": { + "matched_fields": ["my_field._index_prefix"] <1> + } + } } } -------------------------------------------------- // TEST[continued] +<1> Adding "my_field._index_prefix" to the `matched_fields` allows highlighting + "my_field" based also on matches from the "my_field._index_prefix" field. + [source,console-result] -------------------------------------------------- { @@ -126,6 +136,11 @@ GET my-index-000001/_search "_score" : 0.8630463, "_source" : { "my_field" : "quick brown fox jump lazy dog" + }, + "highlight": { + "my_field": [ + "quick brown fox jump lazy dog" + ] + } } ] diff --git a/docs/reference/migration/migrate_8_17.asciidoc b/docs/reference/migration/migrate_8_17.asciidoc index 15bc6431c60ba..d499f1ce30497 100644 --- a/docs/reference/migration/migrate_8_17.asciidoc +++ b/docs/reference/migration/migrate_8_17.asciidoc @@ -18,3 +18,54 @@ coming::[8.17.0] There are no breaking changes in {es} 8.17. + +[discrete] +[[deprecated-8.17]] +=== Deprecations + +The following functionality has been deprecated in {es} 8.17 +and will be removed in a future version. +While this won't have an immediate impact on your applications, +we strongly encourage you to take the described steps to update your code +after upgrading to 8.17. + +To find out if you are using any deprecated functionality, +enable <<deprecation-logging, deprecation logging>>. + +[discrete] +[[deprecations_817_mapping]] +==== Mapping deprecations + +[[deprecate_source_mode_in_mappings]] +.Deprecate `_source.mode` in mappings +[%collapsible] +==== +*Details* + +Configuring `_source.mode` in mappings is deprecated and will be removed in future versions. Use the `index.mapping.source.mode` index setting instead. + +*Impact* + +Use the `index.mapping.source.mode` index setting instead. +==== + +[discrete] +[[deprecations_817_rest_api]] +==== REST API deprecations + +[[format_of_non_detailed_error_responses_changing_in_v9]] +.The format of non-detailed error responses is changing in v9 +[%collapsible] +==== +*Details* + +When an error occurs while processing a request, Elasticsearch returns information on that error in the REST response. +If `http.detailed_errors.enabled: false` is specified in node settings with the v8 REST API and below, +the format of this response changes significantly. +Starting with the v9 REST API, the JSON structure of responses with errors when the `http.detailed_errors.enabled: false` option is set +will be the same as when detailed errors are enabled (which is the default). +To keep using the existing format for non-detailed error responses, use the v8 REST API.
+ +*Impact* + +If you have set `http.detailed_errors.enabled: false` (the default is `true`), +the JSON structure of responses when any exceptions occur will change with the v9 REST API. +To keep using the existing format, use the v8 REST API. +==== + diff --git a/docs/reference/release-notes/8.16.0.asciidoc b/docs/reference/release-notes/8.16.0.asciidoc index 88ae9f9e5b599..fd7ef963d9ff7 100644 --- a/docs/reference/release-notes/8.16.0.asciidoc +++ b/docs/reference/release-notes/8.16.0.asciidoc @@ -270,7 +270,6 @@ ES|QL:: * Push down filters even in case of renames in Evals {es-pull}114411[#114411] * Speed up CASE for some parameters {es-pull}112295[#112295] * Speed up grouping by bytes {es-pull}114021[#114021] -* Support INLINESTATS grouped on expressions {es-pull}111690[#111690] * Use less memory in listener {es-pull}114358[#114358] * Add support for cached strings in plan serialization {es-pull}112929[#112929] * Add Telemetry API and track top functions {es-pull}111226[#111226] @@ -462,7 +461,6 @@ ES|QL:: * Add boolean support to Max and Min aggs {es-pull}110527[#110527] * Add boolean support to TOP aggregation {es-pull}110718[#110718] * Added `mv_percentile` function {es-pull}111749[#111749] (issue: {es-issue}111591[#111591]) -* INLINESTATS {es-pull}109583[#109583] (issue: {es-issue}107589[#107589]) * Introduce per agg filter {es-pull}113735[#113735] * Strings support for MAX and MIN aggregations {es-pull}111544[#111544] * Support IP fields in MAX and MIN aggregations {es-pull}110921[#110921] diff --git a/docs/reference/release-notes/8.17.0.asciidoc b/docs/reference/release-notes/8.17.0.asciidoc index 59962fd83e9b7..9ddfd69c4343d 100644 --- a/docs/reference/release-notes/8.17.0.asciidoc +++ b/docs/reference/release-notes/8.17.0.asciidoc @@ -1,8 +1,204 @@ [[release-notes-8.17.0]] == {es} version 8.17.0 -coming[8.17.0] - Also see <<breaking-changes-8.17,Breaking changes in 8.17>>. +[[license-8.17.0]] +[float] +=== License changes + +[float] +==== Change to synthetic `_source` licensing + +Starting with this release, the <<synthetic-source,synthetic `_source`>> feature is available exclusively with the Enterprise subscription. Synthetic `_source` is used in logs data streams (`logsdb` index mode), time series data streams (TSDS, using `time_series` index mode), application performance monitoring (APM), and Universal Profiling. + +If you are using these capabilities and are not on an Enterprise license, the change will result in increased storage requirements for new data, as the synthetic `_source` setting will be ignored. Existing indices that used synthetic `_source` will remain seamlessly accessible. + +Refer to the subscription page for https://www.elastic.co/subscriptions/cloud[Elastic Cloud] and {subscriptions}[Elastic Stack/self-managed] for the breakdown of available features and their associated subscription tiers. For further details and subscription options, contact your Elastic sales representative or https://www.elastic.co/contact[contact us].
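A minimal sketch (assumptions: an 8.17 cluster and the server's `CreateIndexRequest` API; the index name `logs-example` is illustrative and not from this change) showing the `index.mapping.source.mode` index setting that the deprecation note above recommends over the mapping-level `_source.mode`:

[source,java]
----
// Sketch under the stated assumptions; "logs-example" is a hypothetical index name.
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.common.settings.Settings;

public class SyntheticSourceSketch {
    public static CreateIndexRequest syntheticSourceIndex() {
        // Preferred form: configure synthetic source through the index setting.
        return new CreateIndexRequest("logs-example").settings(
            Settings.builder().put("index.mapping.source.mode", "synthetic")
        );
        // Deprecated equivalent in the mappings, per the 8.17 note above:
        //   "mappings": { "_source": { "mode": "synthetic" } }
    }
}
----

Per the license change above, on a non-Enterprise license the synthetic `_source` setting is ignored for new data, so the same request would effectively fall back to stored `_source`.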
+ +[[bug-8.17.0]] +[float] +=== Bug fixes + +Analysis:: +* Adjust analyze limit exception to be a `bad_request` {es-pull}116325[#116325] + +CCS:: +* Fix long metric deserialize & add - auto-resize needs to be set manually {es-pull}117105[#117105] (issue: {es-issue}116914[#116914]) + +CRUD:: +* Standardize error code when bulk body is invalid {es-pull}114869[#114869] + +Data streams:: +* Acquire stats searcher for data stream stats {es-pull}117953[#117953] + +EQL:: +* Don't use a `BytesStreamOutput` to copy keys in `BytesRefBlockHash` {es-pull}114819[#114819] (issue: {es-issue}114599[#114599]) + +ES|QL:: +* Added stricter range type checks and runtime warnings for ENRICH {es-pull}115091[#115091] (issues: {es-issue}107357[#107357], {es-issue}116799[#116799]) +* Don't return TEXT type for functions that take TEXT {es-pull}114334[#114334] (issues: {es-issue}111537[#111537], {es-issue}114333[#114333]) +* ESQL: Fix sorts containing `_source` {es-pull}116980[#116980] (issue: {es-issue}116659[#116659]) +* ES|QL: Fix stats by constant expression {es-pull}114899[#114899] +* Fix BWC for ES|QL cluster request {es-pull}117865[#117865] +* Fix CCS exchange when multi cluster aliases point to same cluster {es-pull}117297[#117297] +* Fix COUNT filter pushdown {es-pull}117503[#117503] (issue: {es-issue}115522[#115522]) +* Fix NPE in `EnrichLookupService` on mixed clusters with <8.14 versions {es-pull}116583[#116583] (issues: {es-issue}116529[#116529], {es-issue}116544[#116544]) +* Fix stats by constant expression with alias {es-pull}117551[#117551] +* Fix validation of SORT by aggregate functions {es-pull}117316[#117316] +* Fixing remote ENRICH by pushing the Enrich inside `FragmentExec` {es-pull}114665[#114665] (issue: {es-issue}105095[#105095]) +* Ignore cancellation exceptions {es-pull}117657[#117657] +* Limit size of `Literal#toString` {es-pull}117842[#117842] +* Opt into extra data stream resolution {es-pull}118378[#118378] +* Use `SearchStats` instead of field.isAggregatable in data node planning {es-pull}115744[#115744] (issue: {es-issue}115737[#115737]) +* [ESQL] Fix Binary Comparisons on Date Nanos {es-pull}116346[#116346] +* [ES|QL] To_DatePeriod and To_TimeDuration return better error messages on `union_type` fields {es-pull}114934[#114934] + +Infra/CLI:: +* Fix NPE on plugin sync {es-pull}115640[#115640] (issue: {es-issue}114818[#114818]) + +Ingest Node:: +* Fix enrich cache size setting name {es-pull}117575[#117575] +* Fix log message format bugs {es-pull}118354[#118354] +* Fix reconstituting version string from components {es-pull}117213[#117213] (issue: {es-issue}116950[#116950]) +* Reducing error-level stack trace logging for normal events in `GeoIpDownloader` {es-pull}114924[#114924] + +License:: +* Distinguish `LicensedFeature` by family field {es-pull}116809[#116809] + +Logs:: +* Prohibit changes to index mode, source, and sort settings during resize {es-pull}115812[#115812] + +Machine Learning:: +* Fix deberta tokenizer bug caused by bug in normalizer {es-pull}117189[#117189] +* Fix for Deberta tokenizer when input sequence exceeds 512 tokens {es-pull}117595[#117595] +* Hides `hugging_face_elser` service from the `GET _inference/_services API` {es-pull}116664[#116664] (issue: {es-issue}116644[#116644]) +* Mitigate IOSession timeouts {es-pull}115414[#115414] (issues: {es-issue}114385[#114385], {es-issue}114327[#114327], {es-issue}114105[#114105], {es-issue}114232[#114232]) +* Propagate scoring function through random sampler {es-pull}116957[#116957] (issue: {es-issue}110134[#110134]) +* 
Wait for the worker service to shutdown before closing task processor {es-pull}117920[#117920] (issue: {es-issue}117563[#117563]) + +Mapping:: +* Address mapping and compute engine runtime field issues {es-pull}117792[#117792] (issue: {es-issue}117644[#117644]) +* Always Emit Inference ID in Semantic Text Mapping {es-pull}117294[#117294] +* Fix concurrency issue with `ReinitializingSourceProvider` {es-pull}118370[#118370] (issue: {es-issue}118238[#118238]) +* Fix false positive date detection with trailing dot {es-pull}116953[#116953] (issue: {es-issue}116946[#116946]) +* Parse the contents of dynamic objects for [subobjects:false] {es-pull}117762[#117762] (issue: {es-issue}117544[#117544]) + +Network:: +* Use underlying `ByteBuf` `refCount` for `ReleasableBytesReference` {es-pull}116211[#116211] + +Ranking:: +* Fix for propagating filters from compound to inner retrievers {es-pull}117914[#117914] + +Search:: +* Add missing `async_search` query parameters to rest-api-spec {es-pull}117312[#117312] +* Don't skip shards in coord rewrite if timestamp is an alias {es-pull}117271[#117271] +* Fields caps does not honour ignore_unavailable {es-pull}116021[#116021] (issue: {es-issue}107767[#107767]) +* _validate does not honour ignore_unavailable {es-pull}116656[#116656] (issue: {es-issue}116594[#116594]) + +Vector Search:: +* Correct bit * byte and bit * float script comparisons {es-pull}117404[#117404] + +Watcher:: +* Watch Next Run Interval Resets On Shard Move or Node Restart {es-pull}115102[#115102] (issue: {es-issue}111433[#111433]) + +[[deprecation-8.17.0]] +[float] +=== Deprecations + +Infra/REST API:: +* Add a basic deprecation warning that the JSON format for non-detailed error responses is changing in v9 {es-pull}114739[#114739] (issue: {es-issue}89387[#89387]) + +Mapping:: +* Deprecate `_source.mode` in mappings {es-pull}116689[#116689] + +[[enhancement-8.17.0]] +[float] +=== Enhancements + +Authorization:: +* Add a `monitor_stats` privilege and allow that privilege for remote cluster privileges {es-pull}114964[#114964] + +Data streams:: +* Adding a deprecation info API warning for data streams with old indices {es-pull}116447[#116447] + +ES|QL:: +* Add ES|QL `bit_length` function {es-pull}115792[#115792] +* ESQL: Honor skip_unavailable setting for nonmatching indices errors at planning time {es-pull}116348[#116348] (issue: {es-issue}114531[#114531]) +* ESQL: Remove parent from `FieldAttribute` {es-pull}112881[#112881] +* ESQL: extract common filter from aggs {es-pull}115678[#115678] +* ESQL: optimise aggregations filtered by false/null into evals {es-pull}115858[#115858] +* ES|QL CCS uses `skip_unavailable` setting for handling disconnected remote clusters {es-pull}115266[#115266] (issue: {es-issue}114531[#114531]) +* ES|QL: add metrics for functions {es-pull}114620[#114620] +* Esql Enable Date Nanos (tech preview) {es-pull}117080[#117080] +* [ES|QL] Implicit casting string literal to intervals {es-pull}115814[#115814] (issue: {es-issue}115352[#115352]) + +Indices APIs:: +* Ensure class resource stream is closed in `ResourceUtils` {es-pull}116437[#116437] + +Inference:: +* Add version prefix to Inference Service API path {es-pull}117366[#117366] +* Update sparse text embeddings API route for Inference Service {es-pull}118368[#118368] + +Infra/Core:: +* Support for unsigned 64 bit numbers in Cpu stats {es-pull}114681[#114681] (issue: {es-issue}112274[#112274]) + +Ingest Node:: +* Adding support for additional mapping to simulate ingest API {es-pull}114742[#114742] +* Adding support for 
simulate ingest mapping addition for indices with mappings that do not come from templates {es-pull}115359[#115359] + +Logs:: +* Add logsdb telemetry {es-pull}115994[#115994] +* Add num docs and size to logsdb telemetry {es-pull}116128[#116128] +* Feature: re-structure document ID generation favoring _id inverted index compression {es-pull}104683[#104683] + +Machine Learning:: +* Add special case for elastic reranker in inference API {es-pull}116962[#116962] +* Adding inference endpoint validation for `AzureAiStudioService` {es-pull}113713[#113713] +* Adds support for `input_type` field to Vertex inference service {es-pull}116431[#116431] +* Enable built-in Inference Endpoints and default for Semantic Text {es-pull}116931[#116931] +* Increase default `queue_capacity` to 10_000 and decrease max `queue_capacity` to 100_000 {es-pull}115041[#115041] +* [Inference API] Add API to get configuration of inference services {es-pull}114862[#114862] +* [Inference API] Improve chunked results error message {es-pull}115807[#115807] + +Recovery:: +* Attempt to clean up index before remote transfer {es-pull}115142[#115142] (issue: {es-issue}104473[#104473]) + +Relevance:: +* Add query rules retriever {es-pull}114855[#114855] + +Search:: +* Add Search Phase APM metrics {es-pull}113194[#113194] +* Add `docvalue_fields` Support for `dense_vector` Fields {es-pull}114484[#114484] (issue: {es-issue}108470[#108470]) +* Add initial support for `semantic_text` field type {es-pull}113920[#113920] +* Adds access to flags no_sub_matches and no_overlapping_matches to hyphenation-decompounder-tokenfilter {es-pull}115459[#115459] (issue: {es-issue}97849[#97849]) +* Better sizing `BytesRef` for Strings in Queries {es-pull}115655[#115655] +* Enable `_tier` based coordinator rewrites for all indices (not just mounted indices) {es-pull}115797[#115797] + +Vector Search:: +* Add support for bitwise inner-product in painless {es-pull}116082[#116082] +* Improve halfbyte transposition performance, marginally improving bbq performance {es-pull}117350[#117350] + +[[feature-8.17.0]] +[float] +=== New features + +Data streams:: +* Add default ILM policies and switch to ILM for apm-data plugin {es-pull}115687[#115687] + +ES|QL:: +* Add support for `BYTE_LENGTH` scalar function {es-pull}116591[#116591] +* Esql/lookup join grammar {es-pull}116515[#116515] +* Remove snapshot build restriction for match and qstr functions {es-pull}114482[#114482] + +Search:: +* ESQL - Add match operator (:) {es-pull}116819[#116819] + +[[upgrade-8.17.0]] +[float] +=== Upgrades + +Security:: +* Upgrade Bouncy Castle FIPS dependencies {es-pull}112989[#112989] + diff --git a/muted-tests.yml b/muted-tests.yml index fedecb7b4f77c..c744258a1e42d 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -382,8 +382,11 @@ tests: - class: "org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT" method: "test {scoring.*}" issue: https://github.com/elastic/elasticsearch/issues/117641 -- class: org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT - method: test {scoring.QstrWithFieldAndScoringSortedEval} +- class: "org.elasticsearch.xpack.esql.qa.mixed.MultilusterEsqlSpecIT" + method: "test {scoring.*}" + issue: https://github.com/elastic/elasticsearch/issues/118460 +- class: "org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT" + method: "test {scoring.*}" issue: https://github.com/elastic/elasticsearch/issues/117751 - class: org.elasticsearch.search.ccs.CrossClusterIT method: testCancel @@ -440,3 +443,27 @@ tests: issue:
https://github.com/elastic/elasticsearch/issues/116694 - class: org.elasticsearch.repositories.blobstore.testkit.analyze.MinioRepositoryAnalysisRestIT issue: https://github.com/elastic/elasticsearch/issues/118548 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=migrate/10_reindex/Test Reindex With Existing Data Stream} + issue: https://github.com/elastic/elasticsearch/issues/118575 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=migrate/20_reindex_status/Test Reindex With Existing Data Stream} + issue: https://github.com/elastic/elasticsearch/issues/118576 +- class: org.elasticsearch.discovery.ec2.DiscoveryEc2AvailabilityZoneAttributeNoImdsIT + method: testAvailabilityZoneAttribute + issue: https://github.com/elastic/elasticsearch/issues/118564 +- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT + method: test {grok.OverwriteName SYNC} + issue: https://github.com/elastic/elasticsearch/issues/118631 +- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT + method: test {stats.ByStringAndLongWithAlias SYNC} + issue: https://github.com/elastic/elasticsearch/issues/118632 +- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT + method: test {dissect.OverwriteNameWhere SYNC} + issue: https://github.com/elastic/elasticsearch/issues/118633 +- class: org.elasticsearch.xpack.searchablesnapshots.RetrySearchIntegTests + method: testRetryPointInTime + issue: https://github.com/elastic/elasticsearch/issues/118514 +- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT + method: test {grok.OverwriteNameWhere SYNC} + issue: https://github.com/elastic/elasticsearch/issues/118638 \ No newline at end of file diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 7092d7f30f1b9..5f23316b31988 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -474,5 +474,5 @@ exports org.elasticsearch.lucene.spatial; exports org.elasticsearch.inference.configuration; exports org.elasticsearch.monitor.metrics; - + exports org.elasticsearch.plugins.internal.rewriter to org.elasticsearch.inference; } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 7151791d0519a..fcd8b4fb09ba7 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -144,6 +144,7 @@ static TransportVersion def(int id) { public static final TransportVersion RETRIES_AND_OPERATIONS_IN_BLOBSTORE_STATS = def(8_804_00_0); public static final TransportVersion ADD_DATA_STREAM_OPTIONS_TO_TEMPLATES = def(8_805_00_0); public static final TransportVersion KNN_QUERY_RESCORE_OVERSAMPLE = def(8_806_00_0); + public static final TransportVersion SEMANTIC_QUERY_LENIENT = def(8_807_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 9ae0636752b98..04fc9c78fbf84 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -190,7 +190,6 @@ public class Version implements VersionId<Version>, ToXContentFragment { public static final Version V_8_15_3 = new Version(8_15_03_99); public static final Version V_8_15_4 = new Version(8_15_04_99); public static final Version V_8_15_5 = new Version(8_15_05_99); - public static final Version V_8_15_6 = new Version(8_15_06_99); public static final Version V_8_16_0 = new Version(8_16_00_99); public static final Version V_8_16_1 = new Version(8_16_01_99); public static final Version V_8_16_2 = new Version(8_16_02_99); diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index ce7eb9b911fb3..35d83586ce177 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -59,6 +59,7 @@ import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.IndexStorePlugin; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; import org.elasticsearch.threadpool.ThreadPool; @@ -483,7 +484,8 @@ public IndexService newIndexService( IdFieldMapper idFieldMapper, ValuesSourceRegistry valuesSourceRegistry, IndexStorePlugin.IndexFoldersDeletionListener indexFoldersDeletionListener, - Map<String, IndexStorePlugin.SnapshotCommitSupplier> snapshotCommitSuppliers + Map<String, IndexStorePlugin.SnapshotCommitSupplier> snapshotCommitSuppliers, + QueryRewriteInterceptor queryRewriteInterceptor ) throws IOException { final IndexEventListener eventListener = freeze(); Function<IndexService, CheckedFunction<DirectoryReader, DirectoryReader, IOException>> readerWrapperFactory = indexReaderWrapper @@ -545,7 +547,8 @@ public IndexService newIndexService( indexFoldersDeletionListener, snapshotCommitSupplier, indexCommitListener.get(), - mapperMetrics + mapperMetrics, + queryRewriteInterceptor ); success = true; return indexService; diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 571bbd76a49dd..a5b3991d89bc4 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -85,6 +85,7 @@ import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.IndexStorePlugin; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; import org.elasticsearch.threadpool.ThreadPool; @@ -162,6 +163,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final Supplier<Sort> indexSortSupplier; private final ValuesSourceRegistry valuesSourceRegistry; private final MapperMetrics mapperMetrics; + private final QueryRewriteInterceptor queryRewriteInterceptor; @SuppressWarnings("this-escape") public IndexService( @@ -196,7 +198,8 @@ public IndexService( IndexStorePlugin.IndexFoldersDeletionListener indexFoldersDeletionListener, IndexStorePlugin.SnapshotCommitSupplier snapshotCommitSupplier, Engine.IndexCommitListener indexCommitListener, - MapperMetrics mapperMetrics + MapperMetrics mapperMetrics, + QueryRewriteInterceptor queryRewriteInterceptor ) { super(indexSettings); assert indexCreationContext != IndexCreationContext.RELOAD_ANALYZERS @@ -271,6 +274,7 @@ public IndexService( this.indexingOperationListeners = Collections.unmodifiableList(indexingOperationListeners); this.indexCommitListener = indexCommitListener; this.mapperMetrics = mapperMetrics; + this.queryRewriteInterceptor = queryRewriteInterceptor; try (var ignored = threadPool.getThreadContext().clearTraceContext()) { // kick off async ops for the first shard in this index this.refreshTask = new AsyncRefreshTask(this); @@ -802,6 +806,7 @@ public QueryRewriteContext newQueryRewriteContext( allowExpensiveQueries, scriptService, null, + null, null ); } diff --git a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java index f00e6904feac7..05262798bac2a 100644 --- a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.SuggestingErrorOnUnknown; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; import org.elasticsearch.xcontent.AbstractObjectParser; import org.elasticsearch.xcontent.FilterXContentParser; import org.elasticsearch.xcontent.FilterXContentParserWrapper; @@ -278,6 +279,14 @@ protected static List<QueryBuilder> readQueries(StreamInput in) throws IOExcepti @Override public final QueryBuilder rewrite(QueryRewriteContext queryRewriteContext) throws IOException { + QueryRewriteInterceptor queryRewriteInterceptor = queryRewriteContext.getQueryRewriteInterceptor(); + if (queryRewriteInterceptor != null) { + var rewritten = queryRewriteInterceptor.interceptAndRewrite(queryRewriteContext, this); + if (rewritten != this) { + return new InterceptedQueryBuilderWrapper(rewritten); + } + } + QueryBuilder rewritten = doRewrite(queryRewriteContext); if (rewritten == this) { return rewritten; diff --git a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java index b0d3065ba3a3f..e166731d47057 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java @@ -104,6 +104,7 @@ public CoordinatorRewriteContext( null, null, null, + null, null ); this.dateFieldRangeInfo = dateFieldRangeInfo; diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java index aacb4b4129c73..31bc7dddacb7f 100644 --- a/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java @@ -66,6 +66,9 @@ public InnerHitBuilder innerHitBuilder() { public static void extractInnerHits(QueryBuilder query, Map<String, InnerHitContextBuilder> innerHitBuilders) { if (query instanceof AbstractQueryBuilder) { ((AbstractQueryBuilder<?>) query).extractInnerHitBuilders(innerHitBuilders); + } else if (query instanceof InterceptedQueryBuilderWrapper interceptedQuery) {
// Unwrap an intercepted query here + extractInnerHits(interceptedQuery.queryBuilder, innerHitBuilders); } else { throw new IllegalStateException( "provided query builder [" + query.getClass() + "] class should inherit from AbstractQueryBuilder, but it doesn't" diff --git a/server/src/main/java/org/elasticsearch/index/query/InterceptedQueryBuilderWrapper.java b/server/src/main/java/org/elasticsearch/index/query/InterceptedQueryBuilderWrapper.java new file mode 100644 index 0000000000000..b1030e4a76d97 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/query/InterceptedQueryBuilderWrapper.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.search.Query; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Wrapper for instances of {@link QueryBuilder} that have been intercepted using the {@link QueryRewriteInterceptor} to + * break out of the rewrite phase. These instances are unwrapped on serialization. + */ +class InterceptedQueryBuilderWrapper implements QueryBuilder { + + protected final QueryBuilder queryBuilder; + + InterceptedQueryBuilderWrapper(QueryBuilder queryBuilder) { + super(); + this.queryBuilder = queryBuilder; + } + + @Override + public QueryBuilder rewrite(QueryRewriteContext queryRewriteContext) throws IOException { + QueryRewriteInterceptor queryRewriteInterceptor = queryRewriteContext.getQueryRewriteInterceptor(); + try { + queryRewriteContext.setQueryRewriteInterceptor(null); + QueryBuilder rewritten = queryBuilder.rewrite(queryRewriteContext); + return rewritten != queryBuilder ? 
new InterceptedQueryBuilderWrapper(rewritten) : this; + } finally { + queryRewriteContext.setQueryRewriteInterceptor(queryRewriteInterceptor); + } + } + + @Override + public String getWriteableName() { + return queryBuilder.getWriteableName(); + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return queryBuilder.getMinimalSupportedVersion(); + } + + @Override + public Query toQuery(SearchExecutionContext context) throws IOException { + return queryBuilder.toQuery(context); + } + + @Override + public QueryBuilder queryName(String queryName) { + queryBuilder.queryName(queryName); + return this; + } + + @Override + public String queryName() { + return queryBuilder.queryName(); + } + + @Override + public float boost() { + return queryBuilder.boost(); + } + + @Override + public QueryBuilder boost(float boost) { + queryBuilder.boost(boost); + return this; + } + + @Override + public String getName() { + return queryBuilder.getName(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + queryBuilder.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return queryBuilder.toXContent(builder, params); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o instanceof InterceptedQueryBuilderWrapper == false) return false; + return Objects.equals(queryBuilder, ((InterceptedQueryBuilderWrapper) o).queryBuilder); + } + + @Override + public int hashCode() { + return Objects.hashCode(queryBuilder); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index fce74aa60ab16..265a0c52593bd 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -28,6 +28,7 @@ import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TextFieldMapper; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; import org.elasticsearch.script.ScriptCompiler; import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; import org.elasticsearch.search.builder.PointInTimeBuilder; @@ -70,6 +71,7 @@ public class QueryRewriteContext { protected Predicate<String> allowedFields; private final ResolvedIndices resolvedIndices; private final PointInTimeBuilder pit; + private QueryRewriteInterceptor queryRewriteInterceptor; public QueryRewriteContext( final XContentParserConfiguration parserConfiguration, @@ -86,7 +88,8 @@ public QueryRewriteContext( final BooleanSupplier allowExpensiveQueries, final ScriptCompiler scriptService, final ResolvedIndices resolvedIndices, - final PointInTimeBuilder pit + final PointInTimeBuilder pit, + final QueryRewriteInterceptor queryRewriteInterceptor ) { this.parserConfiguration = parserConfiguration; @@ -105,6 +108,7 @@ public QueryRewriteContext( this.scriptService = scriptService; this.resolvedIndices = resolvedIndices; this.pit = pit; + this.queryRewriteInterceptor = queryRewriteInterceptor; } public QueryRewriteContext(final XContentParserConfiguration parserConfiguration, final Client client, final LongSupplier nowInMillis) { @@ -123,6 +127,7 @@ public QueryRewriteContext(final XContentParserConfiguration parserConfiguration null, null, null, + null, null ); } @@ -132,7 +137,8 @@ public QueryRewriteContext( final Client client, final LongSupplier nowInMillis, final ResolvedIndices resolvedIndices, - final PointInTimeBuilder pit + final PointInTimeBuilder pit, + final QueryRewriteInterceptor queryRewriteInterceptor ) { this( parserConfiguration, @@ -149,7 +155,8 @@ public QueryRewriteContext( null, null, resolvedIndices, - pit + pit, + queryRewriteInterceptor ); } @@ -428,4 +435,13 @@ public String getTierPreference() { // It was decided we should only test the first of these potentially multiple preferences. return value.split(",")[0].trim(); } + + public QueryRewriteInterceptor getQueryRewriteInterceptor() { + return queryRewriteInterceptor; + } + + public void setQueryRewriteInterceptor(QueryRewriteInterceptor queryRewriteInterceptor) { + this.queryRewriteInterceptor = queryRewriteInterceptor; + } + } diff --git a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java index fbc3696d40221..b2ee6134a7728 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java @@ -271,6 +271,7 @@ private SearchExecutionContext( allowExpensiveQueries, scriptService, null, + null, null ); this.shardId = shardId; diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStream.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStream.java index 1edd69a6443a7..2486cc66fd4c9 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStream.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStream.java @@ -171,6 +171,10 @@ public void reset() throws IOException { if (markedSlice < 0 || markedSliceOffset < 0) { throw new IOException("Mark has not been set"); } + if (initialized && nextSlice == markedSlice + 1 && currentSliceOffset == markedSliceOffset) { + // Reset at the marked offset should return immediately without re-opening the slice + return; + } nextSlice = markedSlice; initialized = true; diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 87488de1a0e6a..e6d8290286a78 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -137,6 +137,7 @@ import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; @@ -265,6 +266,7 @@ public class IndicesService extends AbstractLifecycleComponent private final CheckedBiConsumer<ShardSearchRequest, StreamOutput, IOException> requestCacheKeyDifferentiator; private final MapperMetrics mapperMetrics; private final List<SearchOperationListener> searchOperationListeners; + private final QueryRewriteInterceptor queryRewriteInterceptor; @Override protected void doStart() { @@ -333,6 +335,7 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon this.indexFoldersDeletionListeners = new CompositeIndexFoldersDeletionListener(builder.indexFoldersDeletionListeners); this.snapshotCommitSuppliers = builder.snapshotCommitSuppliers; this.requestCacheKeyDifferentiator = builder.requestCacheKeyDifferentiator; + this.queryRewriteInterceptor = builder.queryRewriteInterceptor; this.mapperMetrics = builder.mapperMetrics; // doClose() is called when shutting down a node, yet there might still be ongoing requests // that we need to wait for before closing some resources such as the caches. In order to @@ -781,7 +784,8 @@ private synchronized IndexService createIndexService( idFieldMappers.apply(idxSettings.getMode()), valuesSourceRegistry, indexFoldersDeletionListeners, - snapshotCommitSuppliers + snapshotCommitSuppliers, + queryRewriteInterceptor ); } @@ -1764,7 +1768,7 @@ public AliasFilter buildAliasFilter(ClusterState state, String index, Set requestCacheKeyDifferentiator; MapperMetrics mapperMetrics; List<SearchOperationListener> searchOperationListener = List.of(); + QueryRewriteInterceptor queryRewriteInterceptor = null; public IndicesServiceBuilder settings(Settings settings) { this.settings = settings; @@ -239,6 +242,27 @@ public IndicesService build() { .flatMap(m -> m.entrySet().stream()) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + var queryRewriteInterceptors = pluginsService.filterPlugins(SearchPlugin.class) + .map(SearchPlugin::getQueryRewriteInterceptors) + .flatMap(List::stream) + .collect(Collectors.toMap(QueryRewriteInterceptor::getQueryName, interceptor -> { + if (interceptor.getQueryName() == null) { + throw new IllegalArgumentException("QueryRewriteInterceptor [" + interceptor.getClass().getName() + "] requires name"); + } + return interceptor; + }, (a, b) -> { + throw new IllegalStateException( + "Conflicting rewrite interceptors [" + + a.getQueryName() + + "] found in [" + + a.getClass().getName() + + "] and [" + + b.getClass().getName() + + "]" + ); + })); + queryRewriteInterceptor = QueryRewriteInterceptor.multi(queryRewriteInterceptors); + return new IndicesService(this); } } diff --git a/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java b/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java index f5670ebd8a543..e87e9ee85b29c 100644 --- a/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java @@ -23,6 +23,7 @@ import org.elasticsearch.index.query.QueryParser; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionParser; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; import org.elasticsearch.search.SearchExtBuilder; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -128,6 +129,14 @@ default List<QuerySpec<?>> getQueries() { return emptyList(); } + /** + * @return Applicable {@link QueryRewriteInterceptor}s configured for this plugin. + * Note: This is internal to Elasticsearch's API and not extensible by external plugins. + */ + default List<QueryRewriteInterceptor> getQueryRewriteInterceptors() { + return emptyList(); + } + /** * The new {@link Aggregation}s added by this plugin. */ diff --git a/server/src/main/java/org/elasticsearch/plugins/internal/rewriter/QueryRewriteInterceptor.java b/server/src/main/java/org/elasticsearch/plugins/internal/rewriter/QueryRewriteInterceptor.java new file mode 100644 index 0000000000000..8f4fb2ce7491a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/plugins/internal/rewriter/QueryRewriteInterceptor.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V.
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.plugins.internal.rewriter; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; + +import java.util.Map; + +/** + * Enables modules and plugins to intercept and rewrite queries during the query rewrite phase on the coordinator node. + */ +public interface QueryRewriteInterceptor { + + /** + * Intercepts and returns a rewritten query if modifications are required; otherwise, + * returns the same provided {@link QueryBuilder} instance unchanged. + * + * @param context the {@link QueryRewriteContext} providing the context for the rewrite operation + * @param queryBuilder the original {@link QueryBuilder} to potentially rewrite + * @return the rewritten {@link QueryBuilder}, or the original instance if no rewrite was needed + */ + QueryBuilder interceptAndRewrite(QueryRewriteContext context, QueryBuilder queryBuilder); + + /** + * Name of the query to be intercepted and rewritten. + */ + String getQueryName(); + + static QueryRewriteInterceptor multi(Map interceptors) { + return interceptors.isEmpty() ? new NoOpQueryRewriteInterceptor() : new CompositeQueryRewriteInterceptor(interceptors); + } + + class CompositeQueryRewriteInterceptor implements QueryRewriteInterceptor { + final String NAME = "composite"; + private final Map interceptors; + + private CompositeQueryRewriteInterceptor(Map interceptors) { + this.interceptors = interceptors; + } + + @Override + public String getQueryName() { + return NAME; + } + + @Override + public QueryBuilder interceptAndRewrite(QueryRewriteContext context, QueryBuilder queryBuilder) { + QueryRewriteInterceptor interceptor = interceptors.get(queryBuilder.getName()); + if (interceptor != null) { + return interceptor.interceptAndRewrite(context, queryBuilder); + } + return queryBuilder; + } + } + + class NoOpQueryRewriteInterceptor implements QueryRewriteInterceptor { + @Override + public QueryBuilder interceptAndRewrite(QueryRewriteContext context, QueryBuilder queryBuilder) { + return queryBuilder; + } + + @Override + public String getQueryName() { + return null; + } + } +} diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index ed3d26141fe04..dcf64996e6617 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -1744,7 +1744,9 @@ protected void doWriteTo(StreamOutput out) throws IOException { NodeClient client = new NodeClient(settings, threadPool); SearchService searchService = mock(SearchService.class); - when(searchService.getRewriteContext(any(), any(), any())).thenReturn(new QueryRewriteContext(null, null, null, null, null)); + when(searchService.getRewriteContext(any(), any(), any())).thenReturn( + new QueryRewriteContext(null, null, null, null, null, null) + ); ClusterService clusterService = new ClusterService( settings, new ClusterSettings(settings, 
ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java index 8036a964071d2..4abd0c4a9d469 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java @@ -690,7 +690,12 @@ public static Map randomInferenceFields() { } private static InferenceFieldMetadata randomInferenceFieldMetadata(String name) { - return new InferenceFieldMetadata(name, randomIdentifier(), randomSet(1, 5, ESTestCase::randomIdentifier).toArray(String[]::new)); + return new InferenceFieldMetadata( + name, + randomIdentifier(), + randomIdentifier(), + randomSet(1, 5, ESTestCase::randomIdentifier).toArray(String[]::new) + ); } private IndexMetadataStats randomIndexStats(int numberOfShards) { diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 49a4d519c0ea4..c519d4834148d 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -86,6 +86,7 @@ import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.IndexStorePlugin; +import org.elasticsearch.plugins.internal.rewriter.MockQueryRewriteInterceptor; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.test.ClusterServiceUtils; @@ -223,7 +224,8 @@ private IndexService newIndexService(IndexModule module) throws IOException { module.indexSettings().getMode().idFieldMapperWithoutFieldData(), null, indexDeletionListener, - emptyMap() + emptyMap(), + new MockQueryRewriteInterceptor() ); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupInferenceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupInferenceFieldMapperTests.java index 809fb161fcbe5..b1470c1ee5b3b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupInferenceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupInferenceFieldMapperTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; -import java.io.IOException; import java.util.Collection; import java.util.List; import java.util.Map; @@ -94,6 +93,7 @@ private static class TestInferenceFieldMapper extends FieldMapper implements Inf public static final TypeParser PARSER = new TypeParser((n, c) -> new Builder(n)); public static final String INFERENCE_ID = "test_inference_id"; + public static final String SEARCH_INFERENCE_ID = "test_search_inference_id"; public static final String CONTENT_TYPE = "test_inference_field"; TestInferenceFieldMapper(String simpleName) { @@ -102,7 +102,7 @@ private static class TestInferenceFieldMapper extends FieldMapper implements Inf @Override public InferenceFieldMetadata getMetadata(Set sourcePaths) { - return new InferenceFieldMetadata(fullPath(), INFERENCE_ID, sourcePaths.toArray(new String[0])); + return new InferenceFieldMetadata(fullPath(), INFERENCE_ID, SEARCH_INFERENCE_ID, sourcePaths.toArray(new String[0])); } @Override @@ -111,7 +111,7 @@ public Object getOriginalValue(Map sourceAsMap) { } 
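For orientation, here is a minimal sketch of how a bundled module is expected to consume the extension point added above, registering an interceptor through SearchPlugin#getQueryRewriteInterceptors. The plugin class and its rewrite logic are invented for illustration (they mirror the test interceptor further below), and, as the SearchPlugin Javadoc notes, the hook is internal to Elasticsearch and not extensible by external plugins.

```java
import org.elasticsearch.index.query.MatchQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.SearchPlugin;
import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor;

import java.util.List;

// Hypothetical internal module, not part of this change.
public class ExampleInterceptorPlugin extends Plugin implements SearchPlugin {

    @Override
    public List<QueryRewriteInterceptor> getQueryRewriteInterceptors() {
        return List.of(new QueryRewriteInterceptor() {
            @Override
            public QueryBuilder interceptAndRewrite(QueryRewriteContext context, QueryBuilder queryBuilder) {
                // The composite interceptor dispatches by query name, so only match
                // queries arrive here; re-checking the type keeps the cast safe.
                if (queryBuilder instanceof MatchQueryBuilder match) {
                    return new MatchQueryBuilder(match.fieldName(), "intercepted");
                }
                return queryBuilder;
            }

            @Override
            public String getQueryName() {
                // Must be non-null and unique: IndicesServiceBuilder rejects missing
                // names and conflicting registrations at node startup.
                return MatchQueryBuilder.NAME;
            }
        });
    }
}
```

At rewrite time the coordinator wraps all registered interceptors with QueryRewriteInterceptor.multi(...), which routes each query to the interceptor registered under its query name and falls back to the no-op implementation when none are registered.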
@Override - protected void parseCreateField(DocumentParserContext context) throws IOException {} + protected void parseCreateField(DocumentParserContext context) {} @Override public Builder getMergeBuilder() { diff --git a/server/src/test/java/org/elasticsearch/index/query/InterceptedQueryBuilderWrapperTests.java b/server/src/test/java/org/elasticsearch/index/query/InterceptedQueryBuilderWrapperTests.java new file mode 100644 index 0000000000000..6c570e0e71725 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/query/InterceptedQueryBuilderWrapperTests.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.index.query; + +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; + +public class InterceptedQueryBuilderWrapperTests extends ESTestCase { + + private TestThreadPool threadPool; + private NoOpClient client; + + @Before + public void setup() { + threadPool = createThreadPool(); + client = new NoOpClient(threadPool); + } + + @After + public void cleanup() { + threadPool.close(); + } + + public void testQueryNameReturnsWrappedQueryBuilder() { + MatchAllQueryBuilder matchAllQueryBuilder = new MatchAllQueryBuilder(); + InterceptedQueryBuilderWrapper interceptedQueryBuilderWrapper = new InterceptedQueryBuilderWrapper(matchAllQueryBuilder); + String queryName = randomAlphaOfLengthBetween(5, 10); + QueryBuilder namedQuery = interceptedQueryBuilderWrapper.queryName(queryName); + assertTrue(namedQuery instanceof InterceptedQueryBuilderWrapper); + assertEquals(queryName, namedQuery.queryName()); + } + + public void testQueryBoostReturnsWrappedQueryBuilder() { + MatchAllQueryBuilder matchAllQueryBuilder = new MatchAllQueryBuilder(); + InterceptedQueryBuilderWrapper interceptedQueryBuilderWrapper = new InterceptedQueryBuilderWrapper(matchAllQueryBuilder); + float boost = randomFloat(); + QueryBuilder boostedQuery = interceptedQueryBuilderWrapper.boost(boost); + assertTrue(boostedQuery instanceof InterceptedQueryBuilderWrapper); + assertEquals(boost, boostedQuery.boost(), 0.0001f); + } + + public void testRewrite() throws IOException { + QueryRewriteContext context = new QueryRewriteContext(null, client, null); + context.setQueryRewriteInterceptor(myMatchInterceptor); + + // Queries that are not intercepted behave normally + TermQueryBuilder termQueryBuilder = new TermQueryBuilder("field", "value"); + QueryBuilder rewritten = termQueryBuilder.rewrite(context); + assertTrue(rewritten instanceof TermQueryBuilder); + + // Queries that should be intercepted are intercepted, and the right thing happens + MatchQueryBuilder matchQueryBuilder = new MatchQueryBuilder("field", "value"); + rewritten = matchQueryBuilder.rewrite(context); + assertTrue(rewritten instanceof InterceptedQueryBuilderWrapper); + assertTrue(((InterceptedQueryBuilderWrapper) rewritten).queryBuilder instanceof
MatchQueryBuilder); + MatchQueryBuilder rewrittenMatchQueryBuilder = (MatchQueryBuilder) ((InterceptedQueryBuilderWrapper) rewritten).queryBuilder; + assertEquals("intercepted", rewrittenMatchQueryBuilder.value()); + + // An additional rewrite on an already intercepted query returns the same query + QueryBuilder rewrittenAgain = rewritten.rewrite(context); + assertTrue(rewrittenAgain instanceof InterceptedQueryBuilderWrapper); + assertEquals(rewritten, rewrittenAgain); + } + + private final QueryRewriteInterceptor myMatchInterceptor = new QueryRewriteInterceptor() { + @Override + public QueryBuilder interceptAndRewrite(QueryRewriteContext context, QueryBuilder queryBuilder) { + if (queryBuilder instanceof MatchQueryBuilder matchQueryBuilder) { + return new MatchQueryBuilder(matchQueryBuilder.fieldName(), "intercepted"); + } + return queryBuilder; + } + + @Override + public String getQueryName() { + return MatchQueryBuilder.NAME; + } + }; +} diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java index d07bcf54fdf09..5dd231ab97886 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java @@ -52,6 +52,7 @@ public void testGetTierPreference() { null, null, null, + null, null ); @@ -79,6 +80,7 @@ public void testGetTierPreference() { null, null, null, + null, null ); diff --git a/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java b/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java index c31a68f36de71..256d0f269edb4 100644 --- a/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java +++ b/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java @@ -155,9 +155,10 @@ protected InputStream openSlice(int slice) throws IOException { // Mark input.mark(randomNonNegativeInt()); + int slicesOpenedAtMark = streamsOpened.size(); // Read or skip up to another random point - final int moreBytes = randomIntBetween(0, bytes.length - mark); + int moreBytes = randomIntBetween(0, bytes.length - mark); if (moreBytes > 0) { if (randomBoolean()) { final var moreBytesRead = new byte[moreBytes]; @@ -171,11 +172,13 @@ protected InputStream openSlice(int slice) throws IOException { // Randomly read to EOF if (randomBoolean()) { - input.readAllBytes(); + moreBytes += input.readAllBytes().length; } // Reset input.reset(); + int slicesOpenedAfterReset = streamsOpened.size(); + assert moreBytes > 0 || mark == 0 || slicesOpenedAfterReset == slicesOpenedAtMark : "Reset at mark should not re-open slices"; // Read all remaining bytes, which should be the bytes from mark up to the end final int remainingBytes = bytes.length - mark; diff --git a/test/framework/src/main/java/org/elasticsearch/plugins/internal/rewriter/MockQueryRewriteInterceptor.java b/test/framework/src/main/java/org/elasticsearch/plugins/internal/rewriter/MockQueryRewriteInterceptor.java new file mode 100644 index 0000000000000..196e5bd4f4a2d --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/plugins/internal/rewriter/MockQueryRewriteInterceptor.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.plugins.internal.rewriter; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; + +public class MockQueryRewriteInterceptor implements QueryRewriteInterceptor { + + @Override + public QueryBuilder interceptAndRewrite(QueryRewriteContext context, QueryBuilder queryBuilder) { + return queryBuilder; + } + + @Override + public String getQueryName() { + return this.getClass().getSimpleName(); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index bdf323afb8d96..20cb66affddee 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -71,6 +71,8 @@ import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.ScriptPlugin; import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.plugins.internal.rewriter.MockQueryRewriteInterceptor; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; import org.elasticsearch.plugins.scanners.StablePluginsRegistry; import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.MockScriptService; @@ -629,7 +631,8 @@ QueryRewriteContext createQueryRewriteContext() { () -> true, scriptService, createMockResolvedIndices(), - null + null, + createMockQueryRewriteInterceptor() ); } @@ -670,5 +673,9 @@ private ResolvedIndices createMockResolvedIndices() { Map.of(index, indexMetadata) ); } + + private QueryRewriteInterceptor createMockQueryRewriteInterceptor() { + return new MockQueryRewriteInterceptor(); + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensedFeature.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensedFeature.java index d86c15aa14bc9..558303f7e0f0f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensedFeature.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensedFeature.java @@ -104,7 +104,7 @@ public boolean isNeedsActive() { return needsActive; } - /** Create a momentary feature for hte given license level */ + /** Create a momentary feature for the given license level */ public static Momentary momentary(String family, String name, License.OperationMode licenseLevel) { return new Momentary(family, name, licenseLevel, true); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index 4f8a18e28aea1..3c7b089b4cd63 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -106,6 +106,7 @@ public class XPackLicenseState { messages.put(XPackField.CCR, XPackLicenseState::ccrAcknowledgementMessages); messages.put(XPackField.ENTERPRISE_SEARCH, XPackLicenseState::enterpriseSearchAcknowledgementMessages); messages.put(XPackField.REDACT_PROCESSOR, 
XPackLicenseState::redactProcessorAcknowledgementMessages); + messages.put(XPackField.ESQL, XPackLicenseState::esqlAcknowledgementMessages); ACKNOWLEDGMENT_MESSAGES = Collections.unmodifiableMap(messages); } @@ -243,6 +244,26 @@ private static String[] enterpriseSearchAcknowledgementMessages(OperationMode cu return Strings.EMPTY_ARRAY; } + private static String[] esqlAcknowledgementMessages(OperationMode currentMode, OperationMode newMode) { + /* + * Provide an acknowledgement warning to customers who downgrade from Trial or Enterprise to a lower + * license level (Basic, Standard, Gold or Platinum) that they will no longer be able to use CCS in ES|QL. + */ + switch (newMode) { + case BASIC: + case STANDARD: + case GOLD: + case PLATINUM: + switch (currentMode) { + case TRIAL: + case ENTERPRISE: + return new String[] { "ES|QL cross-cluster search will be disabled." }; + } + break; + } + return Strings.EMPTY_ARRAY; + } + private static String[] machineLearningAcknowledgementMessages(OperationMode currentMode, OperationMode newMode) { switch (newMode) { case BASIC: diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocateAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocateAction.java index 311f3484900f2..bc9c3474ee63a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocateAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocateAction.java @@ -20,8 +20,6 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -73,17 +71,17 @@ public AllocateAction( Map require ) { if (include == null) { - this.include = Collections.emptyMap(); + this.include = Map.of(); } else { this.include = include; } if (exclude == null) { - this.exclude = Collections.emptyMap(); + this.exclude = Map.of(); } else { this.exclude = exclude; } if (require == null) { - this.require = Collections.emptyMap(); + this.require = Map.of(); } else { this.require = require; } @@ -201,7 +199,7 @@ public List toSteps(Client client, String phase, StepKey nextStepKey) { } UpdateSettingsStep allocateStep = new UpdateSettingsStep(allocateKey, allocationRoutedKey, client, newSettings.build()); AllocationRoutedStep routedCheckStep = new AllocationRoutedStep(allocationRoutedKey, nextStepKey); - return Arrays.asList(allocateStep, routedCheckStep); + return List.of(allocateStep, routedCheckStep); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStep.java index 7cdef6207c487..bc3fc0ccae02c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStep.java @@ -22,7 +22,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; -import java.util.Collections; +import java.util.List; import static org.elasticsearch.xpack.core.ilm.step.info.AllocationInfo.allShardsActiveAllocationInfo; import static org.elasticsearch.xpack.core.ilm.step.info.AllocationInfo.waitingForActiveShardsAllocationInfo; @@ -62,7 +62,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) { } AllocationDeciders allocationDeciders = new AllocationDeciders( -
Collections.singletonList( + List.of( new FilterAllocationDecider( clusterState.getMetadata().settings(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteAction.java index d212492f14d01..8712cefac5d31 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteAction.java @@ -17,7 +17,6 @@ import java.io.IOException; import java.time.Instant; -import java.util.Arrays; import java.util.List; import java.util.Objects; @@ -99,7 +98,7 @@ public List toSteps(Client client, String phase, Step.StepKey nextStepKey) ); CleanupSnapshotStep cleanupSnapshotStep = new CleanupSnapshotStep(cleanSnapshotKey, deleteStepKey, client); DeleteStep deleteStep = new DeleteStep(deleteStepKey, nextStepKey, client); - return Arrays.asList(waitForNoFollowersStep, waitUntilTimeSeriesEndTimeStep, cleanupSnapshotStep, deleteStep); + return List.of(waitForNoFollowersStep, waitUntilTimeSeriesEndTimeStep, cleanupSnapshotStep, deleteStep); } else { WaitForNoFollowersStep waitForNoFollowersStep = new WaitForNoFollowersStep( waitForNoFollowerStepKey, @@ -113,7 +112,7 @@ public List toSteps(Client client, String phase, Step.StepKey nextStepKey) client ); DeleteStep deleteStep = new DeleteStep(deleteStepKey, nextStepKey, client); - return Arrays.asList(waitForNoFollowersStep, waitUntilTimeSeriesEndTimeStep, deleteStep); + return List.of(waitForNoFollowersStep, waitUntilTimeSeriesEndTimeStep, deleteStep); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ForceMergeStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ForceMergeStep.java index f3afe9e4d52cc..741fff63f61f5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ForceMergeStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ForceMergeStep.java @@ -20,7 +20,6 @@ import java.util.Arrays; import java.util.Objects; -import java.util.stream.Collectors; /** * Invokes a force merge on a single index. @@ -67,10 +66,7 @@ public void performAction( policyName, failures == null ? 
"n/a" - : Strings.collectionToDelimitedString( - Arrays.stream(failures).map(Strings::toString).collect(Collectors.toList()), - "," - ), + : Strings.collectionToDelimitedString(Arrays.stream(failures).map(Strings::toString).toList(), ","), NAME ); logger.warn(errorMessage); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/FreezeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/FreezeAction.java index 67763e781e5a5..09e625b96135c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/FreezeAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/FreezeAction.java @@ -18,7 +18,6 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.io.IOException; -import java.util.Arrays; import java.util.List; /** @@ -98,7 +97,7 @@ public List toSteps(Client client, String phase, StepKey nextStepKey) { ); CheckNotDataStreamWriteIndexStep checkNoWriteIndexStep = new CheckNotDataStreamWriteIndexStep(checkNotWriteIndex, freezeStepKey); FreezeStep freezeStep = new FreezeStep(freezeStepKey, nextStepKey, client); - return Arrays.asList(conditionalSkipFreezeStep, checkNoWriteIndexStep, freezeStep); + return List.of(conditionalSkipFreezeStep, checkNoWriteIndexStep, freezeStep); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java index 1a64e589d20b5..6a272b0d2271e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java @@ -26,7 +26,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -import java.util.stream.Collectors; /** * A utility class used for index lifecycle policies @@ -121,7 +120,7 @@ public static ItemUsage calculateUsage( .stream() .filter(indexMetadata -> policyName.equals(indexMetadata.getLifecyclePolicyName())) .map(indexMetadata -> indexMetadata.getIndex().getName()) - .collect(Collectors.toList()); + .toList(); final List allDataStreams = indexNameExpressionResolver.dataStreamNames( state, @@ -136,12 +135,12 @@ public static ItemUsage calculateUsage( } else { return false; } - }).collect(Collectors.toList()); + }).toList(); final List composableTemplates = state.metadata().templatesV2().keySet().stream().filter(templateName -> { Settings settings = MetadataIndexTemplateService.resolveSettings(state.metadata(), templateName); return policyName.equals(LifecycleSettings.LIFECYCLE_NAME_SETTING.get(settings)); - }).collect(Collectors.toList()); + }).toList(); return new ItemUsage(indices, dataStreams, composableTemplates); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ReadOnlyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ReadOnlyAction.java index 117abecafeab3..2b03dc77eb5b6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ReadOnlyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ReadOnlyAction.java @@ -17,7 +17,6 @@ import java.io.IOException; import java.time.Instant; -import java.util.Arrays; import java.util.List; /** @@ -72,7 +71,7 @@ public List toSteps(Client client, String phase, StepKey nextStepKey) { client ); ReadOnlyStep readOnlyStep = new ReadOnlyStep(readOnlyKey, nextStepKey, client); - return 
Arrays.asList(checkNotWriteIndexStep, waitUntilTimeSeriesEndTimeStep, readOnlyStep); + return List.of(checkNotWriteIndexStep, waitUntilTimeSeriesEndTimeStep, readOnlyStep); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java index 515941bce841a..f3c72004d6cc9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java @@ -22,7 +22,6 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.io.IOException; -import java.util.Arrays; import java.util.List; import java.util.Objects; @@ -172,7 +171,7 @@ public List toSteps(Client client, String phase, Step.StepKey nextStepKey) client, INDEXING_COMPLETE ); - return Arrays.asList(waitForRolloverReadyStep, rolloverStep, waitForActiveShardsStep, updateDateStep, setIndexingCompleteStep); + return List.of(waitForRolloverReadyStep, rolloverStep, waitForActiveShardsStep, updateDateStep, setIndexingCompleteStep); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SegmentCountStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SegmentCountStep.java index ad8f450fb0849..95ca049740c73 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SegmentCountStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SegmentCountStep.java @@ -67,10 +67,7 @@ public void evaluateCondition(Metadata metadata, Index index, Listener listener, response.getFailedShards(), failures == null ? "n/a" - : Strings.collectionToDelimitedString( - Arrays.stream(failures).map(Strings::toString).collect(Collectors.toList()), - "," - ) + : Strings.collectionToDelimitedString(Arrays.stream(failures).map(Strings::toString).toList(), ",") ); listener.onResponse(true, new Info(-1)); } else { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetPriorityAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetPriorityAction.java index 376567bc2004c..5f7c1d0c3bf3a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetPriorityAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetPriorityAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.io.IOException; -import java.util.Collections; import java.util.List; import java.util.Objects; @@ -101,7 +100,7 @@ public List toSteps(Client client, String phase, StepKey nextStepKey) { Settings indexPriority = recoveryPriority == null ? 
NULL_PRIORITY_SETTINGS : Settings.builder().put(IndexMetadata.INDEX_PRIORITY_SETTING.getKey(), recoveryPriority).build(); - return Collections.singletonList(new UpdateSettingsStep(key, nextStepKey, client, indexPriority)); + return List.of(new UpdateSettingsStep(key, nextStepKey, client, indexPriority)); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkAction.java index 401d87f853360..70ec5da1d8a2a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkAction.java @@ -31,7 +31,6 @@ import java.time.Instant; import java.util.List; import java.util.Objects; -import java.util.stream.Collectors; import java.util.stream.Stream; import static org.elasticsearch.xpack.core.ilm.ShrinkIndexNameSupplier.SHRUNKEN_INDEX_PREFIX; @@ -329,7 +328,7 @@ public List toSteps(Client client, String phase, Step.StepKey nextStepKey) allowWriteAfterShrinkStep ); - return steps.filter(Objects::nonNull).collect(Collectors.toList()); + return steps.filter(Objects::nonNull).toList(); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleType.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleType.java index 48a0e65bddf22..0fd280f440f39 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleType.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleType.java @@ -14,7 +14,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -30,8 +29,6 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static java.util.stream.Collectors.toList; - /** * Represents the lifecycle of an index from creation to deletion. A * {@link TimeseriesLifecycleType} is made up of a set of {@link Phase}s which it will @@ -114,7 +111,7 @@ public class TimeseriesLifecycleType implements LifecycleType { // Set of actions that cannot be defined (executed) after the managed index has been mounted as searchable snapshot. // It's ordered to produce consistent error messages which can be unit tested. 
public static final Set ACTIONS_CANNOT_FOLLOW_SEARCHABLE_SNAPSHOT = Collections.unmodifiableSet( - new LinkedHashSet<>(Arrays.asList(ForceMergeAction.NAME, FreezeAction.NAME, ShrinkAction.NAME, DownsampleAction.NAME)) + new LinkedHashSet<>(List.of(ForceMergeAction.NAME, FreezeAction.NAME, ShrinkAction.NAME, DownsampleAction.NAME)) ); private TimeseriesLifecycleType() {} @@ -180,11 +177,11 @@ public static boolean shouldInjectMigrateStepForPhase(Phase phase) { public List getOrderedActions(Phase phase) { Map actions = phase.getActions(); return switch (phase.getName()) { - case HOT_PHASE -> ORDERED_VALID_HOT_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).collect(toList()); - case WARM_PHASE -> ORDERED_VALID_WARM_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).collect(toList()); - case COLD_PHASE -> ORDERED_VALID_COLD_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).collect(toList()); - case FROZEN_PHASE -> ORDERED_VALID_FROZEN_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).collect(toList()); - case DELETE_PHASE -> ORDERED_VALID_DELETE_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).collect(toList()); + case HOT_PHASE -> ORDERED_VALID_HOT_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).toList(); + case WARM_PHASE -> ORDERED_VALID_WARM_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).toList(); + case COLD_PHASE -> ORDERED_VALID_COLD_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).toList(); + case FROZEN_PHASE -> ORDERED_VALID_FROZEN_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).toList(); + case DELETE_PHASE -> ORDERED_VALID_DELETE_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).toList(); default -> throw new IllegalArgumentException("lifecycle type [" + TYPE + "] does not support phase [" + phase.getName() + "]"); }; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UnfollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UnfollowAction.java index 31aaba551a3f3..6bb0178f1471e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UnfollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UnfollowAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.io.IOException; -import java.util.Arrays; import java.util.List; import java.util.Map; @@ -72,7 +71,7 @@ public List toSteps(Client client, String phase, StepKey nextStepKey) { UnfollowFollowerIndexStep step5 = new UnfollowFollowerIndexStep(unfollowFollowerIndex, openFollowerIndex, client); OpenIndexStep step6 = new OpenIndexStep(openFollowerIndex, waitForYellowStep, client); WaitForIndexColorStep step7 = new WaitForIndexColorStep(waitForYellowStep, nextStepKey, ClusterHealthStatus.YELLOW); - return Arrays.asList(conditionalSkipUnfollowStep, step1, step2, step3, step4, step5, step6, step7); + return List.of(conditionalSkipUnfollowStep, step1, step2, step3, step4, step5, step6, step7); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStep.java index 224319722297c..590890405b8d7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStep.java @@ -22,7 +22,6 @@ 
import java.io.IOException; import java.util.List; import java.util.Map; -import java.util.stream.Collectors; import static org.elasticsearch.xpack.core.ilm.UnfollowAction.CCR_METADATA_KEY; @@ -78,7 +77,7 @@ static void handleResponse(FollowStatsAction.StatsResponses responses, Listener status.followerGlobalCheckpoint() ) ) - .collect(Collectors.toList()); + .toList(); listener.onResponse(false, new Info(shardFollowTaskInfos)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotAction.java index 08a884f0b8f3c..2633656d7c30c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.io.IOException; -import java.util.Collections; import java.util.List; import java.util.Objects; @@ -62,7 +61,7 @@ public String getPolicy() { @Override public List toSteps(Client client, String phase, StepKey nextStepKey) { StepKey waitForSnapshotKey = new StepKey(phase, NAME, WaitForSnapshotStep.NAME); - return Collections.singletonList(new WaitForSnapshotStep(waitForSnapshotKey, nextStepKey, client, policy)); + return List.of(new WaitForSnapshotStep(waitForSnapshotKey, nextStepKey, client, policy)); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java index 6c49cadb8d189..25f5e0f0617c5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java @@ -67,12 +67,12 @@ public final class MachineLearningField { License.OperationMode.PLATINUM ); - // Ideally this would be 7.0.0, but it has to be 6.4.0 because due to an oversight it's impossible + // Ideally this would be 8.3.0, but it has to be 6.4.0 because due to an oversight it's impossible // for the Java code to distinguish the model states for versions 6.4.0 to 7.9.3 inclusive. public static final MlConfigVersion MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION = MlConfigVersion.fromString("6.4.0"); - // We tell the user we support model snapshots newer than 7.0.0 as that's the major version - // boundary, even though behind the scenes we have to support back to 6.4.0. - public static final MlConfigVersion MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION = MlConfigVersion.V_7_0_0; + // We tell the user we support model snapshots newer than 8.3.0 as that's the version with the last format change, + // even though behind the scenes we have to support back to 6.4.0. 
+ public static final MlConfigVersion MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION = MlConfigVersion.V_8_3_0; private MachineLearningField() {} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java index e889d25cd7a96..d788a0b5abd37 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.xpack.core.XPackField; import java.util.Arrays; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; @@ -59,6 +60,12 @@ void assertAckMessages(String feature, OperationMode from, OperationMode to, int assertEquals(expectedMessages, gotMessages.length); } + void assertAckMessages(String feature, OperationMode from, OperationMode to, Set expectedMessages) { + String[] gotMessages = XPackLicenseState.ACKNOWLEDGMENT_MESSAGES.get(feature).apply(from, to); + Set actualMessages = Arrays.stream(gotMessages).collect(Collectors.toSet()); + assertThat(actualMessages, equalTo(expectedMessages)); + } + static T randomFrom(T[] values, Predicate filter) { return randomFrom(Arrays.stream(values).filter(filter).collect(Collectors.toList())); } @@ -143,6 +150,16 @@ public void testCcrAckTrialOrPlatinumToNotTrialOrPlatinum() { assertAckMessages(XPackField.CCR, randomTrialOrPlatinumMode(), randomBasicStandardOrGold(), 1); } + public void testEsqlAckToTrialOrPlatinum() { + assertAckMessages(XPackField.ESQL, randomMode(), randomFrom(TRIAL, ENTERPRISE), 0); + } + + public void testEsqlAckTrialOrEnterpriseToNotTrialOrEnterprise() { + for (OperationMode to : List.of(BASIC, STANDARD, GOLD, PLATINUM)) { + assertAckMessages(XPackField.ESQL, randomFrom(TRIAL, ENTERPRISE), to, Set.of("ES|QL cross-cluster search will be disabled.")); + } + } + public void testExpiredLicense() { // use standard feature which would normally be allowed at all license levels LicensedFeature feature = LicensedFeature.momentary("family", "enterpriseFeature", STANDARD); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocateActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocateActionTests.java index 1fc0afafde353..c5a8185f8511b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocateActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocateActionTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ilm.Step.StepKey; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -44,20 +43,20 @@ static AllocateAction randomInstance() { includes = randomAllocationRoutingMap(1, 100); hasAtLeastOneMap = true; } else { - includes = randomBoolean() ? null : Collections.emptyMap(); + includes = randomBoolean() ? null : Map.of(); } Map excludes; if (randomBoolean()) { hasAtLeastOneMap = true; excludes = randomAllocationRoutingMap(1, 100); } else { - excludes = randomBoolean() ? null : Collections.emptyMap(); + excludes = randomBoolean() ? null : Map.of(); } Map requires; if (hasAtLeastOneMap == false || randomBoolean()) { requires = randomAllocationRoutingMap(1, 100); } else { - requires = randomBoolean() ? 
null : Collections.emptyMap(); + requires = randomBoolean() ? null : Map.of(); } Integer numberOfReplicas = randomBoolean() ? null : randomIntBetween(0, 10); Integer totalShardsPerNode = randomBoolean() ? null : randomIntBetween(-1, 10); @@ -97,9 +96,9 @@ protected AllocateAction mutateInstance(AllocateAction instance) { } public void testAllMapsNullOrEmpty() { - Map include = randomBoolean() ? null : Collections.emptyMap(); - Map exclude = randomBoolean() ? null : Collections.emptyMap(); - Map require = randomBoolean() ? null : Collections.emptyMap(); + Map include = randomBoolean() ? null : Map.of(); + Map exclude = randomBoolean() ? null : Map.of(); + Map require = randomBoolean() ? null : Map.of(); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, () -> new AllocateAction(null, null, include, exclude, require) @@ -124,8 +123,8 @@ public void testAllMapsNullOrEmpty() { public void testInvalidNumberOfReplicas() { Map include = randomAllocationRoutingMap(1, 5); - Map exclude = randomBoolean() ? null : Collections.emptyMap(); - Map require = randomBoolean() ? null : Collections.emptyMap(); + Map exclude = randomBoolean() ? null : Map.of(); + Map require = randomBoolean() ? null : Map.of(); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, () -> new AllocateAction(randomIntBetween(-1000, -1), randomIntBetween(0, 300), include, exclude, require) @@ -135,8 +134,8 @@ public void testInvalidNumberOfReplicas() { public void testInvalidTotalShardsPerNode() { Map include = randomAllocationRoutingMap(1, 5); - Map exclude = randomBoolean() ? null : Collections.emptyMap(); - Map require = randomBoolean() ? null : Collections.emptyMap(); + Map exclude = randomBoolean() ? null : Map.of(); + Map require = randomBoolean() ? 
null : Map.of(); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, () -> new AllocateAction(randomIntBetween(0, 300), randomIntBetween(-1000, -2), include, exclude, require) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStepTests.java index afad708ddbe2c..708c3630b8b8a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStepTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.xpack.core.ilm.ClusterStateWaitStep.Result; import org.elasticsearch.xpack.core.ilm.Step.StepKey; -import java.util.Collections; import java.util.Map; import static org.elasticsearch.cluster.routing.TestShardRouting.buildUnassignedInfo; @@ -109,7 +108,7 @@ public void testConditionMet() { public void testRequireConditionMetOnlyOneCopyAllocated() { Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); - Map requires = Collections.singletonMap(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "foo", "bar"); + Map requires = Map.of(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "foo", "bar"); Settings.Builder existingSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()); @@ -187,7 +186,7 @@ public void testClusterExcludeFiltersConditionMetOnlyOneCopyAllocated() { public void testExcludeConditionMetOnlyOneCopyAllocated() { Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); - Map excludes = Collections.singletonMap(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "foo", "bar"); + Map excludes = Map.of(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "foo", "bar"); Settings.Builder existingSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()); @@ -218,7 +217,7 @@ public void testExcludeConditionMetOnlyOneCopyAllocated() { public void testIncludeConditionMetOnlyOneCopyAllocated() { Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); - Map includes = Collections.singletonMap(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "foo", "bar"); + Map includes = Map.of(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "foo", "bar"); Settings.Builder existingSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStepTests.java index 8dcd8fc7ddd55..72bf7cedb2fb9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStepTests.java @@ -29,9 +29,9 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.node.Node; -import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Set; import static 
org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata.Type.SIGTERM; import static org.elasticsearch.cluster.routing.TestShardRouting.shardRoutingBuilder; @@ -340,7 +340,7 @@ public void testExecuteAllocateReplicaUnassigned() { */ public void testExecuteReplicasNotAllocatedOnSingleNode() { Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); - Map requires = Collections.singletonMap("_id", "node1"); + Map requires = Map.of("_id", "node1"); Settings.Builder existingSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._id", "node1") @@ -376,7 +376,7 @@ public void testExecuteReplicasNotAllocatedOnSingleNode() { public void testExecuteReplicasButCopiesNotPresent() { Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); - Map requires = Collections.singletonMap("_id", "node1"); + Map requires = Map.of("_id", "node1"); Settings.Builder existingSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._id", "node1") @@ -458,7 +458,7 @@ public void testStepCompletableIfAllShardsActive() { .putCustom( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( - Collections.singletonMap( + Map.of( "node1", SingleNodeShutdownMetadata.builder() .setType(type) @@ -537,7 +537,7 @@ public void testStepBecomesUncompletable() { .putCustom( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( - Collections.singletonMap( + Map.of( "node1", SingleNodeShutdownMetadata.builder() .setType(type) @@ -649,7 +649,7 @@ public static UnassignedInfo randomUnassignedInfo(String message) { System.currentTimeMillis(), delayed, UnassignedInfo.AllocationStatus.NO_ATTEMPT, - Collections.emptySet(), + Set.of(), lastAllocatedNodeId ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CloseFollowerIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CloseFollowerIndexStepTests.java index 7ce078826b49a..ef7325be0a496 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CloseFollowerIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CloseFollowerIndexStepTests.java @@ -13,7 +13,8 @@ import org.elasticsearch.index.IndexVersion; import org.mockito.Mockito; -import java.util.Collections; +import java.util.List; +import java.util.Map; import static org.elasticsearch.xpack.core.ilm.UnfollowAction.CCR_METADATA_KEY; import static org.hamcrest.Matchers.equalTo; @@ -24,7 +25,7 @@ public class CloseFollowerIndexStepTests extends AbstractStepTestCase listener = (ActionListener) invocation.getArguments()[1]; - listener.onResponse(new CloseIndexResponse(true, true, Collections.emptyList())); + listener.onResponse(new CloseIndexResponse(true, true, List.of())); return null; }).when(indicesClient).close(Mockito.any(), Mockito.any()); @@ -54,7 +55,7 @@ public void testRequestNotAcknowledged() { assertThat(closeIndexRequest.indices()[0], equalTo("follower-index")); @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[1]; - listener.onResponse(new CloseIndexResponse(false, false, Collections.emptyList())); + listener.onResponse(new CloseIndexResponse(false, false, List.of())); return null; }).when(indicesClient).close(Mockito.any(), Mockito.any()); @@ -85,7 +86,7 @@ public void 
testCloseFollowingIndexFailed() { public void testCloseFollowerIndexIsNoopForAlreadyClosedIndex() throws Exception { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .state(IndexMetadata.State.CLOSE) .numberOfShards(1) .numberOfReplicas(0) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CloseIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CloseIndexStepTests.java index 02fb49ac71adf..b546aeaa20687 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CloseIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CloseIndexStepTests.java @@ -20,7 +20,7 @@ import org.mockito.Mockito; import org.mockito.stubbing.Answer; -import java.util.Collections; +import java.util.List; import static org.hamcrest.Matchers.equalTo; @@ -77,9 +77,7 @@ public void testPerformAction() { @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[1]; assertThat(request.indices(), equalTo(new String[] { indexMetadata.getIndex().getName() })); - listener.onResponse( - new CloseIndexResponse(true, true, Collections.singletonList(new CloseIndexResponse.IndexResult(indexMetadata.getIndex()))) - ); + listener.onResponse(new CloseIndexResponse(true, true, List.of(new CloseIndexResponse.IndexResult(indexMetadata.getIndex())))); return null; }).when(indicesClient).close(Mockito.any(), Mockito.any()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStepTests.java index f24f0f86de7db..eeddda4199665 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStepTests.java @@ -20,7 +20,6 @@ import java.time.Clock; import java.time.Instant; import java.time.ZoneId; -import java.util.Collections; import java.util.Map; import java.util.UUID; @@ -83,7 +82,7 @@ public void testIsConditionMetForUnderlyingStep() { .put(LifecycleSettings.LIFECYCLE_STEP_WAIT_TIME_THRESHOLD, "480h") ) .putCustom(ILM_CUSTOM_METADATA_KEY, Map.of("step_time", String.valueOf(System.currentTimeMillis()))) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -107,7 +106,7 @@ public void testIsConditionMetForUnderlyingStep() { .put(LifecycleSettings.LIFECYCLE_STEP_WAIT_TIME_THRESHOLD, "48h") ) .putCustom(ILM_CUSTOM_METADATA_KEY, Map.of("step_time", String.valueOf(System.currentTimeMillis()))) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -140,7 +139,7 @@ public void testIsConditionMetForUnderlyingStep() { settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true") .put(LifecycleSettings.LIFECYCLE_STEP_WAIT_TIME_THRESHOLD, "1s") ) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .putCustom(ILM_CUSTOM_METADATA_KEY, Map.of("step_time", String.valueOf(1234L))) 
.numberOfShards(1) .numberOfReplicas(0) @@ -168,7 +167,7 @@ public void testIsConditionMetForUnderlyingStep() { settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "false") .put(LifecycleSettings.LIFECYCLE_STEP_WAIT_TIME_THRESHOLD, "1h") ) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .putCustom(ILM_CUSTOM_METADATA_KEY, Map.of("step_time", String.valueOf(1234L))) .numberOfShards(1) .numberOfReplicas(0) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponseTests.java index 937502281b64d..c4138d228719e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponseTests.java @@ -18,7 +18,6 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -81,7 +80,7 @@ protected boolean assertToXContentEquivalence() { protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry( - Arrays.asList(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)) + List.of(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)) ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeActionTests.java index aecf029a1357a..b8d480200fb5d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeActionTests.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.List; -import java.util.stream.Collectors; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; @@ -109,7 +108,7 @@ private void assertBestCompression(ForceMergeAction instance) { // available .skip(1) .map(s -> new Tuple<>(s.getKey(), s.getNextStepKey())) - .collect(Collectors.toList()); + .toList(); StepKey checkNotWriteIndex = new StepKey(phase, ForceMergeAction.NAME, CheckNotDataStreamWriteIndexStep.NAME); StepKey waitTimeSeriesEndTimePassesKey = new StepKey(phase, ForceMergeAction.NAME, WaitUntilTimeSeriesEndTimePassesStep.NAME); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStepTests.java index bee6351582bc9..908e7b764f136 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStepTests.java @@ -17,7 +17,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexVersion; -import java.util.Collections; +import java.util.List; import java.util.Locale; import static org.elasticsearch.cluster.metadata.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; @@ -82,7 +82,7 @@ private void testPerformAction(String policyName, String expectedPolicyName) { .metadata( Metadata.builder() .put(indexMetadata, false) - 
.putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(Collections.singletonList(repo))) + .putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(List.of(repo))) .build() ) .build(); @@ -167,7 +167,7 @@ public void testPerformActionWillOverwriteCachedRepository() { .metadata( Metadata.builder() .put(indexMetadata, false) - .putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(Collections.singletonList(repo))) + .putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(List.of(repo))) .build() ) .build(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponseTests.java index ea3c9cc5926ab..6fc98d4c2c728 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponseTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Arrays; +import java.util.List; import java.util.Objects; import java.util.function.Supplier; @@ -292,7 +292,7 @@ protected IndexLifecycleExplainResponse mutateInstance(IndexLifecycleExplainResp protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry( - Arrays.asList(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)) + List.of(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)) ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java index 753edfbe334b9..7dd6bfd209660 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.xpack.core.ClientHelper; import org.mockito.Mockito; -import java.util.Collections; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -56,7 +55,7 @@ public void testExecuteWithHeadersAsyncNoHeaders() throws InterruptedException { SearchRequest request = new SearchRequest("foo"); - final var policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, Collections.emptyMap()); + final var policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, Map.of()); policyClient.execute(TransportSearchAction.TYPE, request, listener); latch.await(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyMetadataTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyMetadataTests.java index 3e9fd0105feae..b58d7184f741c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyMetadataTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyMetadataTests.java @@ -18,7 +18,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -37,7 +36,7 @@ public void setup() { @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { return new 
NamedWriteableRegistry( - Arrays.asList( + List.of( new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new), new NamedWriteableRegistry.Entry( LifecycleType.class, @@ -65,7 +64,7 @@ protected NamedWriteableRegistry getNamedWriteableRegistry() { protected NamedXContentRegistry xContentRegistry() { List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); entries.addAll( - Arrays.asList( + List.of( new NamedXContentRegistry.Entry( LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyTests.java index 70f75f1cfcdfa..1bea0ac6d192c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyTests.java @@ -20,8 +20,6 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; @@ -30,7 +28,6 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.function.Function; -import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -48,7 +45,7 @@ protected LifecyclePolicy doParseInstance(XContentParser parser) { @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry( - Arrays.asList( + List.of( new NamedWriteableRegistry.Entry( LifecycleType.class, TimeseriesLifecycleType.TYPE, @@ -75,7 +72,7 @@ protected NamedWriteableRegistry getNamedWriteableRegistry() { protected NamedXContentRegistry xContentRegistry() { List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); entries.addAll( - Arrays.asList( + List.of( new NamedXContentRegistry.Entry( LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE), @@ -150,7 +147,7 @@ public static LifecyclePolicy randomTimeseriesLifecyclePolicy(@Nullable String l ).stream() // Remove the frozen phase, we'll randomly re-add it later .filter(pn -> TimeseriesLifecycleType.FROZEN_PHASE.equals(pn) == false) - .collect(Collectors.toList()); + .toList(); // let's order the phases so we can reason about actions in a previous phase in order to generate a random *valid* policy List orderedPhases = new ArrayList<>(phaseNames.size()); @@ -218,7 +215,7 @@ public static LifecyclePolicy randomTimeseriesLifecyclePolicy(@Nullable String l new Phase( TimeseriesLifecycleType.FROZEN_PHASE, frozenTime, - Collections.singletonMap( + Map.of( SearchableSnapshotAction.NAME, new SearchableSnapshotAction( randomAlphaOfLength(10), @@ -300,11 +297,11 @@ protected LifecyclePolicy mutateInstance(LifecyclePolicy instance) { () -> randomFrom( TimeseriesLifecycleType.ORDERED_VALID_PHASES.stream() .filter(pn -> TimeseriesLifecycleType.FROZEN_PHASE.equals(pn) == false) - .collect(Collectors.toList()) + .toList() ) ); phases = new LinkedHashMap<>(phases); - phases.put(phaseName, new Phase(phaseName, null, Collections.emptyMap())); + phases.put(phaseName, new Phase(phaseName, null, Map.of())); } case 2 -> metadata = randomValueOtherThan(metadata, LifecyclePolicyTests::randomMeta); case 3 -> deprecated = instance.isDeprecated() ? 
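The .collect(Collectors.toList()) → .toList() hunks above are behavior-preserving only because the resulting lists are never mutated afterwards: Stream.toList() (Java 16+) is specified to return an unmodifiable list, while Collectors.toList() currently yields a mutable ArrayList with no such guarantee. A sketch of the difference:

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class StreamToListDemo {
    public static void main(String[] args) {
        // Collectors.toList() currently produces a mutable ArrayList (not guaranteed by spec).
        List<Integer> collected = Stream.of(1, 2, 3).collect(Collectors.toList());
        collected.add(4); // works today

        // Stream.toList() is specified to return an unmodifiable list.
        List<Integer> unmodifiable = Stream.of(1, 2, 3).toList();
        try {
            unmodifiable.add(4);
        } catch (UnsupportedOperationException e) {
            System.out.println("Stream.toList() results cannot be mutated");
        }
    }
}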
randomFrom(false, null) : true; @@ -337,8 +334,8 @@ public void testToStepsWithOneStep() { lifecycleName = randomAlphaOfLengthBetween(1, 20); Map phases = new LinkedHashMap<>(); - LifecycleAction firstAction = new MockAction(Arrays.asList(mockStep)); - Map actions = Collections.singletonMap(MockAction.NAME, firstAction); + LifecycleAction firstAction = new MockAction(List.of(mockStep)); + Map actions = Map.of(MockAction.NAME, firstAction); Phase firstPhase = new Phase("test", TimeValue.ZERO, actions); phases.put(firstPhase.getName(), firstPhase); LifecyclePolicy policy = new LifecyclePolicy(TestLifecycleType.INSTANCE, lifecycleName, phases, randomMeta()); @@ -372,10 +369,10 @@ public void testToStepsWithTwoPhases() { lifecycleName = randomAlphaOfLengthBetween(1, 20); Map phases = new LinkedHashMap<>(); - LifecycleAction firstAction = new MockAction(Arrays.asList(firstActionStep, firstActionAnotherStep)); - LifecycleAction secondAction = new MockAction(Arrays.asList(secondActionStep)); - Map firstActions = Collections.singletonMap(MockAction.NAME, firstAction); - Map secondActions = Collections.singletonMap(MockAction.NAME, secondAction); + LifecycleAction firstAction = new MockAction(List.of(firstActionStep, firstActionAnotherStep)); + LifecycleAction secondAction = new MockAction(List.of(secondActionStep)); + Map firstActions = Map.of(MockAction.NAME, firstAction); + Map secondActions = Map.of(MockAction.NAME, secondAction); Phase firstPhase = new Phase("first_phase", TimeValue.ZERO, firstActions); Phase secondPhase = new Phase("second_phase", TimeValue.ZERO, secondActions); phases.put(firstPhase.getName(), firstPhase); @@ -401,10 +398,10 @@ public void testToStepsWithTwoPhases() { public void testIsActionSafe() { Map phases = new LinkedHashMap<>(); - LifecycleAction firstAction = new MockAction(Collections.emptyList(), true); - LifecycleAction secondAction = new MockAction(Collections.emptyList(), false); - Map firstActions = Collections.singletonMap(MockAction.NAME, firstAction); - Map secondActions = Collections.singletonMap(MockAction.NAME, secondAction); + LifecycleAction firstAction = new MockAction(List.of(), true); + LifecycleAction secondAction = new MockAction(List.of(), false); + Map firstActions = Map.of(MockAction.NAME, firstAction); + Map secondActions = Map.of(MockAction.NAME, secondAction); Phase firstPhase = new Phase("first_phase", TimeValue.ZERO, firstActions); Phase secondPhase = new Phase("second_phase", TimeValue.ZERO, secondActions); phases.put(firstPhase.getName(), firstPhase); @@ -458,12 +455,9 @@ public void testValidatePolicyName() { public static Map randomMeta() { if (randomBoolean()) { if (randomBoolean()) { - return Collections.singletonMap(randomAlphaOfLength(4), randomAlphaOfLength(4)); + return Map.of(randomAlphaOfLength(4), randomAlphaOfLength(4)); } else { - return Collections.singletonMap( - randomAlphaOfLength(5), - Collections.singletonMap(randomAlphaOfLength(4), randomAlphaOfLength(4)) - ); + return Map.of(randomAlphaOfLength(5), Map.of(randomAlphaOfLength(4), randomAlphaOfLength(4))); } } else { return null; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java index 3efe2dc04ea19..978486c6c0d39 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java +++ 
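Note that in the hunks above only the single-entry maps become Map.of(...), while the multi-phase maps stay new LinkedHashMap<>(): Map.of makes no iteration-order promise (OpenJDK even randomizes it per JVM run), and order matters when phases are walked in sequence. A sketch of that distinction (phase names illustrative):

import java.util.LinkedHashMap;
import java.util.Map;

public class MapOrderingDemo {
    public static void main(String[] args) {
        // Map.of: iteration order is unspecified and varies between runs.
        Map<String, Integer> unordered = Map.of("hot", 1, "warm", 2, "cold", 3);
        System.out.println(unordered.keySet()); // order varies

        // LinkedHashMap: preserves insertion order, so iteration is deterministic.
        Map<String, Integer> ordered = new LinkedHashMap<>();
        ordered.put("hot", 1);
        ordered.put("warm", 2);
        ordered.put("cold", 3);
        System.out.println(ordered.keySet()); // always [hot, warm, cold]
    }
}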
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java @@ -23,8 +23,8 @@ import org.elasticsearch.indices.EmptySystemIndices; import org.elasticsearch.test.ESTestCase; -import java.util.Arrays; -import java.util.Collections; +import java.util.List; +import java.util.Map; import static org.hamcrest.Matchers.equalTo; @@ -40,7 +40,7 @@ public void testCalculateUsage() { ClusterState state = ClusterState.builder(new ClusterName("mycluster")).build(); assertThat( LifecyclePolicyUtils.calculateUsage(iner, state, "mypolicy"), - equalTo(new ItemUsage(Collections.emptyList(), Collections.emptyList(), Collections.emptyList())) + equalTo(new ItemUsage(List.of(), List.of(), List.of())) ); } @@ -52,7 +52,7 @@ public void testCalculateUsage() { .putCustom( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( - Collections.singletonMap("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), + Map.of("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), OperationMode.RUNNING ) ) @@ -61,7 +61,7 @@ public void testCalculateUsage() { .build(); assertThat( LifecyclePolicyUtils.calculateUsage(iner, state, "mypolicy"), - equalTo(new ItemUsage(Collections.emptyList(), Collections.emptyList(), Collections.emptyList())) + equalTo(new ItemUsage(List.of(), List.of(), List.of())) ); } @@ -73,7 +73,7 @@ public void testCalculateUsage() { .putCustom( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( - Collections.singletonMap("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), + Map.of("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), OperationMode.RUNNING ) ) @@ -86,7 +86,7 @@ public void testCalculateUsage() { .build(); assertThat( LifecyclePolicyUtils.calculateUsage(iner, state, "mypolicy"), - equalTo(new ItemUsage(Collections.singleton("myindex"), Collections.emptyList(), Collections.emptyList())) + equalTo(new ItemUsage(List.of("myindex"), List.of(), List.of())) ); } @@ -98,7 +98,7 @@ public void testCalculateUsage() { .putCustom( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( - Collections.singletonMap("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), + Map.of("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), OperationMode.RUNNING ) ) @@ -109,10 +109,10 @@ public void testCalculateUsage() { .putCustom( ComposableIndexTemplateMetadata.TYPE, new ComposableIndexTemplateMetadata( - Collections.singletonMap( + Map.of( "mytemplate", ComposableIndexTemplate.builder() - .indexPatterns(Collections.singletonList("myds")) + .indexPatterns(List.of("myds")) .template( new Template( Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, "mypolicy").build(), @@ -130,7 +130,7 @@ public void testCalculateUsage() { .build(); assertThat( LifecyclePolicyUtils.calculateUsage(iner, state, "mypolicy"), - equalTo(new ItemUsage(Collections.singleton("myindex"), Collections.emptyList(), Collections.singleton("mytemplate"))) + equalTo(new ItemUsage(List.of("myindex"), List.of(), List.of("mytemplate"))) ); } @@ -139,7 +139,7 @@ public void testCalculateUsage() { .putCustom( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( - Collections.singletonMap("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), + Map.of("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), OperationMode.RUNNING ) ) @@ -159,10 +159,10 @@ public void 
testCalculateUsage() { .putCustom( ComposableIndexTemplateMetadata.TYPE, new ComposableIndexTemplateMetadata( - Collections.singletonMap( + Map.of( "mytemplate", ComposableIndexTemplate.builder() - .indexPatterns(Collections.singletonList("myds")) + .indexPatterns(List.of("myds")) .template( new Template(Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, "mypolicy").build(), null, null) ) @@ -172,15 +172,13 @@ public void testCalculateUsage() { ) ); // Need to get the real Index instance of myindex: - mBuilder.put(DataStreamTestHelper.newInstance("myds", Collections.singletonList(mBuilder.get("myindex").getIndex()))); + mBuilder.put(DataStreamTestHelper.newInstance("myds", List.of(mBuilder.get("myindex").getIndex()))); // Test where policy exists and is used by an index, datastream, and template ClusterState state = ClusterState.builder(new ClusterName("mycluster")).metadata(mBuilder.build()).build(); assertThat( LifecyclePolicyUtils.calculateUsage(iner, state, "mypolicy"), - equalTo( - new ItemUsage(Arrays.asList("myindex", "another"), Collections.singleton("myds"), Collections.singleton("mytemplate")) - ) + equalTo(new ItemUsage(List.of("myindex", "another"), List.of("myds"), List.of("mytemplate"))) ); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MockAction.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MockAction.java index 0de234615f4c7..79f8a051abe25 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MockAction.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MockAction.java @@ -15,10 +15,8 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Objects; -import java.util.stream.Collectors; public class MockAction implements LifecycleAction { public static final String NAME = "TEST_ACTION"; @@ -32,7 +30,7 @@ public static MockAction parse(XContentParser parser) { } public MockAction() { - this(Collections.emptyList()); + this(List.of()); } public MockAction(List steps) { @@ -77,7 +75,7 @@ public List toSteps(Client client, String phase, Step.StepKey nextStepKey) @Override public void writeTo(StreamOutput out) throws IOException { - out.writeCollection(steps.stream().map(MockStep::new).collect(Collectors.toList())); + out.writeCollection(steps.stream().map(MockStep::new).toList()); out.writeBoolean(safe); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTaskTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTaskTests.java index 9871cb79b595b..475161676f2e8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTaskTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTaskTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; import org.elasticsearch.xpack.core.slm.SnapshotLifecycleStats; -import java.util.Collections; import java.util.Map; import java.util.Optional; @@ -97,9 +96,9 @@ private OperationMode executeILMUpdate( OperationMode requestMode, boolean assertSameClusterState ) { - IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(Collections.emptyMap(), currentMode); + IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(Map.of(), currentMode); SnapshotLifecycleMetadata snapshotLifecycleMetadata = new 
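The ItemUsage hunks above replace Collections.singleton(...) and Collections.singletonList(...) (a Set and a List) uniformly with List.of(...). That is presumably safe here because the constructor appears to accept any Collection and equality is decided by contents rather than by collection type; a hypothetical sketch of that pattern (Usage is a stand-in, not the real ItemUsage):

import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;

public class CollectionParameterDemo {
    // Hypothetical value type that normalizes whatever Collection it is given,
    // so callers may pass Collections.singleton(...) or List.of(...) interchangeably.
    record Usage(Set<String> indices) {
        Usage(Collection<String> indices) {
            this(Set.copyOf(indices));
        }
    }

    public static void main(String[] args) {
        Usage fromSet = new Usage(Collections.singleton("myindex"));
        Usage fromList = new Usage(List.of("myindex"));
        System.out.println(fromSet.equals(fromList)); // true: contents decide equality
    }
}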
SnapshotLifecycleMetadata( - Collections.emptyMap(), + Map.of(), currentMode, new SnapshotLifecycleStats() ); @@ -131,9 +130,9 @@ private OperationMode executeSLMUpdate( OperationMode requestMode, boolean assertSameClusterState ) { - IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(Collections.emptyMap(), currentMode); + IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(Map.of(), currentMode); SnapshotLifecycleMetadata snapshotLifecycleMetadata = new SnapshotLifecycleMetadata( - Collections.emptyMap(), + Map.of(), currentMode, new SnapshotLifecycleStats() ); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStepTests.java index 51ebc98176955..da5d6eddfc72d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStepTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.xpack.core.ccr.action.ShardFollowTask; import org.mockito.Mockito; -import java.util.Collections; +import java.util.Map; import static org.elasticsearch.xpack.core.ilm.UnfollowAction.CCR_METADATA_KEY; import static org.hamcrest.Matchers.equalTo; @@ -38,7 +38,7 @@ protected PauseFollowerIndexStep newInstance(Step.StepKey key, Step.StepKey next public void testPauseFollowingIndex() throws Exception { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -60,7 +60,7 @@ public void testPauseFollowingIndex() throws Exception { public void testRequestNotAcknowledged() { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -81,7 +81,7 @@ public void testRequestNotAcknowledged() { public void testPauseFollowingIndexFailed() { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -107,7 +107,7 @@ public void testPauseFollowingIndexFailed() { public final void testNoShardFollowPersistentTasks() throws Exception { IndexMetadata indexMetadata = IndexMetadata.builder("managed-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -138,7 +138,7 @@ public final void testNoShardFollowTasksForManagedIndex() throws Exception { IndexMetadata followerIndex = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current())) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -171,7 +171,7 @@ private 
static ClusterState setupClusterStateWithFollowingIndex(IndexMetadata fo new ByteSizeValue(512, ByteSizeUnit.MB), TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10), - Collections.emptyMap() + Map.of() ), null ); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseCacheManagementTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseCacheManagementTests.java index 952741032fc90..7e78a81776a7a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseCacheManagementTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseCacheManagementTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.xcontent.ParseField; import java.io.IOException; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -84,9 +83,9 @@ public void testRefreshPhaseJson() throws IOException { actions.put("rollover", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)); actions.put("set_priority", new SetPriorityAction(100)); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = Map.of("hot", hotPhase); LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); - LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(newPolicy, Collections.emptyMap(), 2L, 2L); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(newPolicy, Map.of(), 2L, 2L); ClusterState existingState = ClusterState.builder(ClusterState.EMPTY_STATE) .metadata(Metadata.builder(Metadata.EMPTY_METADATA).put(meta, false).build()) @@ -315,7 +314,7 @@ public void testIndexCanBeSafelyUpdated() { actions.put("rollover", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)); actions.put("set_priority", new SetPriorityAction(100)); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = Map.of("hot", hotPhase); LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); assertTrue(isIndexPhaseDefinitionUpdatable(REGISTRY, client, meta, newPolicy, null)); @@ -351,7 +350,7 @@ public void testIndexCanBeSafelyUpdated() { Map actions = new HashMap<>(); actions.put("set_priority", new SetPriorityAction(150)); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = Map.of("hot", hotPhase); LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); assertFalse(isIndexPhaseDefinitionUpdatable(REGISTRY, client, meta, newPolicy, null)); @@ -390,7 +389,7 @@ public void testIndexCanBeSafelyUpdated() { new RolloverAction(null, null, TimeValue.timeValueSeconds(5), null, null, null, null, null, null, null) ); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = Map.of("hot", hotPhase); LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); assertFalse(isIndexPhaseDefinitionUpdatable(REGISTRY, client, meta, newPolicy, null)); @@ -422,7 +421,7 @@ public void testIndexCanBeSafelyUpdated() { actions.put("rollover", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)); actions.put("set_priority", new SetPriorityAction(100)); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = 
Map.of("hot", hotPhase); LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); assertFalse(isIndexPhaseDefinitionUpdatable(REGISTRY, client, meta, newPolicy, null)); @@ -443,7 +442,7 @@ public void testIndexCanBeSafelyUpdated() { actions.put("rollover", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)); actions.put("set_priority", new SetPriorityAction(100)); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = Map.of("hot", hotPhase); LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); assertFalse(isIndexPhaseDefinitionUpdatable(REGISTRY, client, meta, newPolicy, null)); @@ -482,16 +481,16 @@ public void testUpdateIndicesForPolicy() throws IOException { oldActions.put("rollover", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)); oldActions.put("set_priority", new SetPriorityAction(100)); Phase oldHotPhase = new Phase("hot", TimeValue.ZERO, oldActions); - Map oldPhases = Collections.singletonMap("hot", oldHotPhase); + Map oldPhases = Map.of("hot", oldHotPhase); LifecyclePolicy oldPolicy = new LifecyclePolicy("my-policy", oldPhases); Map actions = new HashMap<>(); actions.put("rollover", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)); actions.put("set_priority", new SetPriorityAction(100)); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = Map.of("hot", hotPhase); LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); - LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(newPolicy, Collections.emptyMap(), 2L, 2L); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(newPolicy, Map.of(), 2L, 2L); assertTrue(isIndexPhaseDefinitionUpdatable(REGISTRY, client, meta, newPolicy, null)); @@ -509,9 +508,9 @@ public void testUpdateIndicesForPolicy() throws IOException { actions.put("rollover", new RolloverAction(null, null, null, 2L, null, null, null, null, null, null)); actions.put("set_priority", new SetPriorityAction(150)); hotPhase = new Phase("hot", TimeValue.ZERO, actions); - phases = Collections.singletonMap("hot", hotPhase); + phases = Map.of("hot", hotPhase); newPolicy = new LifecyclePolicy("my-policy", phases); - policyMetadata = new LifecyclePolicyMetadata(newPolicy, Collections.emptyMap(), 2L, 2L); + policyMetadata = new LifecyclePolicyMetadata(newPolicy, Map.of(), 2L, 2L); logger.info("--> update with changed policy, but not configured in settings"); updatedState = updateIndicesForPolicy(existingState, REGISTRY, client, oldPolicy, policyMetadata, null); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseExecutionInfoTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseExecutionInfoTests.java index 7622118d2b99f..ce477a07c2f0b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseExecutionInfoTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseExecutionInfoTests.java @@ -18,7 +18,7 @@ import org.junit.Before; import java.io.IOException; -import java.util.Arrays; +import java.util.List; public class PhaseExecutionInfoTests extends AbstractXContentSerializingTestCase { @@ -71,7 +71,7 @@ protected PhaseExecutionInfo mutateInstance(PhaseExecutionInfo instance) { protected NamedWriteableRegistry getNamedWriteableRegistry() { return new 
NamedWriteableRegistry( - Arrays.asList(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)) + List.of(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)) ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseTests.java index bf925c4282fc1..5a194b48f7701 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseTests.java @@ -18,9 +18,8 @@ import org.junit.Before; import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -42,9 +41,9 @@ static Phase randomTestPhase(String phaseName) { if (randomBoolean()) { after = randomTimeValue(0, 1_000_000_000, TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS); } - Map actions = Collections.emptyMap(); + Map actions = Map.of(); if (randomBoolean()) { - actions = Collections.singletonMap(MockAction.NAME, new MockAction()); + actions = Map.of(MockAction.NAME, new MockAction()); } return new Phase(phaseName, after, actions); } @@ -61,7 +60,7 @@ protected Reader instanceReader() { protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry( - Arrays.asList(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)) + List.of(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)) ); } @@ -85,7 +84,7 @@ protected Phase mutateInstance(Phase instance) { case 1 -> after = TimeValue.timeValueSeconds(after.getSeconds() + randomIntBetween(1, 1000)); case 2 -> { actions = new HashMap<>(actions); - actions.put(MockAction.NAME + "another", new MockAction(Collections.emptyList())); + actions.put(MockAction.NAME + "another", new MockAction(List.of())); } default -> throw new AssertionError("Illegal randomisation branch"); } @@ -93,7 +92,7 @@ protected Phase mutateInstance(Phase instance) { } public void testDefaultAfter() { - Phase phase = new Phase(randomAlphaOfLength(20), null, Collections.emptyMap()); + Phase phase = new Phase(randomAlphaOfLength(20), null, Map.of()); assertEquals(TimeValue.ZERO, phase.getMinimumAge()); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/RolloverStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/RolloverStepTests.java index 4af25d094f5fe..3683690763d93 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/RolloverStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/RolloverStepTests.java @@ -23,9 +23,9 @@ import org.hamcrest.Matchers; import org.mockito.Mockito; -import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.Map; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.newInstance; import static org.mockito.Mockito.verifyNoMoreInteractions; @@ -185,7 +185,7 @@ private void mockClientRolloverCall(String rolloverTarget) { @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[1]; assertRolloverIndexRequest(request, rolloverTarget); - listener.onResponse(new RolloverResponse(null, null, Collections.emptyMap(), request.isDryRun(), true, true, true, 
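The mutateInstance hunk above copies into new HashMap<>(actions) before calling put, which is exactly what makes the unmodifiable Map.of(...) inputs workable in a mutation-based test. A minimal sketch of the copy-before-mutate pattern:

import java.util.HashMap;
import java.util.Map;

public class CopyBeforeMutate {
    public static void main(String[] args) {
        Map<String, String> actions = Map.of("rollover", "configured");

        // actions.put("shrink", "configured"); // would throw UnsupportedOperationException

        // Copy into a mutable map first, then mutate the copy.
        Map<String, String> mutated = new HashMap<>(actions);
        mutated.put("shrink", "configured");
        System.out.println(mutated.size()); // 2
    }
}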
false)); + listener.onResponse(new RolloverResponse(null, null, Map.of(), request.isDryRun(), true, true, true, false)); return null; }).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any()); } @@ -214,11 +214,7 @@ public void testPerformActionSkipsRolloverForAlreadyRolledIndex() throws Excepti .putAlias(AliasMetadata.builder(rolloverAlias)) .settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, rolloverAlias)) .putRolloverInfo( - new RolloverInfo( - rolloverAlias, - Collections.singletonList(new MaxSizeCondition(ByteSizeValue.ofBytes(2L))), - System.currentTimeMillis() - ) + new RolloverInfo(rolloverAlias, List.of(new MaxSizeCondition(ByteSizeValue.ofBytes(2L))), System.currentTimeMillis()) ) .numberOfShards(randomIntBetween(1, 5)) .numberOfReplicas(randomIntBetween(0, 5)) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SegmentCountStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SegmentCountStepTests.java index 1d14bfb261fc2..9f04e202022c9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SegmentCountStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SegmentCountStepTests.java @@ -77,14 +77,14 @@ public void testIsConditionMet() { ShardSegments shardSegmentsOne = Mockito.mock(ShardSegments.class); ShardSegments[] shardSegmentsArray = new ShardSegments[] { shardSegmentsOne }; IndexShardSegments indexShardSegments = new IndexShardSegments(ShardId.fromString("[idx][123]"), shardSegmentsArray); - Map indexShards = Collections.singletonMap(0, indexShardSegments); + Map indexShards = Map.of(0, indexShardSegments); Spliterator iss = indexShards.values().spliterator(); List segments = new ArrayList<>(); for (int i = 0; i < maxNumSegments - randomIntBetween(0, 3); i++) { segments.add(null); } Mockito.when(indicesSegmentResponse.getStatus()).thenReturn(RestStatus.OK); - Mockito.when(indicesSegmentResponse.getIndices()).thenReturn(Collections.singletonMap(index.getName(), indexSegments)); + Mockito.when(indicesSegmentResponse.getIndices()).thenReturn(Map.of(index.getName(), indexSegments)); Mockito.when(indexSegments.spliterator()).thenReturn(iss); Mockito.when(shardSegmentsOne.getSegments()).thenReturn(segments); @@ -129,14 +129,14 @@ public void testIsConditionIsTrueEvenWhenMoreSegments() { ShardSegments shardSegmentsOne = Mockito.mock(ShardSegments.class); ShardSegments[] shardSegmentsArray = new ShardSegments[] { shardSegmentsOne }; IndexShardSegments indexShardSegments = new IndexShardSegments(ShardId.fromString("[idx][123]"), shardSegmentsArray); - Map indexShards = Collections.singletonMap(0, indexShardSegments); + Map indexShards = Map.of(0, indexShardSegments); Spliterator iss = indexShards.values().spliterator(); List segments = new ArrayList<>(); for (int i = 0; i < maxNumSegments + randomIntBetween(1, 3); i++) { segments.add(null); } Mockito.when(indicesSegmentResponse.getStatus()).thenReturn(RestStatus.OK); - Mockito.when(indicesSegmentResponse.getIndices()).thenReturn(Collections.singletonMap(index.getName(), indexSegments)); + Mockito.when(indicesSegmentResponse.getIndices()).thenReturn(Map.of(index.getName(), indexSegments)); Mockito.when(indexSegments.spliterator()).thenReturn(iss); Mockito.when(shardSegmentsOne.getSegments()).thenReturn(segments); @@ -181,7 +181,7 @@ public void testFailedToRetrieveSomeSegments() { ShardSegments shardSegmentsOne = Mockito.mock(ShardSegments.class); ShardSegments[] 
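The Mockito stubs above now return immutable maps. Assuming the code under test only reads the stubbed map (as these steps do), nothing observable changes; a self-contained sketch of stubbing with an immutable return value (SegmentSource is a hypothetical interface; requires mockito-core on the classpath):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Map;

public class MockitoImmutableStubDemo {
    interface SegmentSource {
        Map<String, Integer> getIndices();
    }

    public static void main(String[] args) {
        SegmentSource response = mock(SegmentSource.class);
        // An immutable single-entry map behaves identically to
        // Collections.singletonMap for callers that only read it.
        when(response.getIndices()).thenReturn(Map.of("idx", 3));
        System.out.println(response.getIndices().get("idx")); // 3
    }
}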
shardSegmentsArray = new ShardSegments[] { shardSegmentsOne }; IndexShardSegments indexShardSegments = new IndexShardSegments(ShardId.fromString("[idx][123]"), shardSegmentsArray); - Map indexShards = Collections.singletonMap(0, indexShardSegments); + Map indexShards = Map.of(0, indexShardSegments); Spliterator iss = indexShards.values().spliterator(); List segments = new ArrayList<>(); for (int i = 0; i < maxNumSegments + randomIntBetween(1, 3); i++) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkActionTests.java index a33d6e3332a40..60fa69708e111 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkActionTests.java @@ -28,8 +28,8 @@ import org.mockito.Mockito; import java.io.IOException; -import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -203,11 +203,11 @@ public void assertPerformAction( LifecyclePolicy policy = new LifecyclePolicy( lifecycleName, - Collections.singletonMap("warm", new Phase("warm", TimeValue.ZERO, Collections.singletonMap(action.getWriteableName(), action))) + Map.of("warm", new Phase("warm", TimeValue.ZERO, Map.of(action.getWriteableName(), action))) ); LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata( policy, - Collections.emptyMap(), + Map.of(), randomNonNegativeLong(), randomNonNegativeLong() ); @@ -216,10 +216,7 @@ public void assertPerformAction( Metadata.builder() .putCustom( IndexLifecycleMetadata.TYPE, - new IndexLifecycleMetadata( - Collections.singletonMap(policyMetadata.getName(), policyMetadata), - OperationMode.RUNNING - ) + new IndexLifecycleMetadata(Map.of(policyMetadata.getName(), policyMetadata), OperationMode.RUNNING) ) .put( indexMetadataBuilder.putCustom( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkSetAliasStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkSetAliasStepTests.java index 7a03343b461de..c8efce288260f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkSetAliasStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkSetAliasStepTests.java @@ -17,7 +17,6 @@ import org.mockito.Mockito; import org.mockito.stubbing.Answer; -import java.util.Arrays; import java.util.List; import static org.elasticsearch.xpack.core.ilm.ShrinkIndexNameSupplier.SHRUNKEN_INDEX_PREFIX; @@ -73,7 +72,7 @@ public void testPerformAction() throws Exception { String sourceIndex = indexMetadata.getIndex().getName(); String shrunkenIndex = SHRUNKEN_INDEX_PREFIX + sourceIndex; - List expectedAliasActions = Arrays.asList( + List expectedAliasActions = List.of( IndicesAliasesRequest.AliasActions.removeIndex().index(sourceIndex), IndicesAliasesRequest.AliasActions.add().index(shrunkenIndex).alias(sourceIndex), IndicesAliasesRequest.AliasActions.add() diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkStepTests.java index 257df32b4d950..b138339c25197 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkStepTests.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkStepTests.java @@ -21,8 +21,8 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import org.mockito.Mockito; -import java.util.Collections; import java.util.Map; +import java.util.Set; import static org.elasticsearch.cluster.metadata.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; import static org.elasticsearch.common.IndexNameGenerator.generateValidIndexName; @@ -101,7 +101,7 @@ public void testPerformAction() throws Exception { @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[1]; assertThat(request.getSourceIndex(), equalTo(sourceIndexMetadata.getIndex().getName())); - assertThat(request.getTargetIndexRequest().aliases(), equalTo(Collections.emptySet())); + assertThat(request.getTargetIndexRequest().aliases(), equalTo(Set.of())); Settings.Builder builder = Settings.builder(); builder.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, sourceIndexMetadata.getNumberOfReplicas()) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStepTests.java index f9f06b10ad2f9..1a99043b86ad7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStepTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ilm.Step.StepKey; -import java.util.Arrays; import java.util.List; import java.util.function.BiFunction; @@ -92,7 +91,7 @@ public void testPerformAction() { String targetIndexPrefix = "index_prefix"; String targetIndexName = targetIndexPrefix + sourceIndexName; - List expectedAliasActions = Arrays.asList( + List expectedAliasActions = List.of( AliasActions.removeIndex().index(sourceIndexName), AliasActions.add().index(targetIndexName).alias(sourceIndexName), AliasActions.add() diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleTypeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleTypeTests.java index 55fa3792fa6c7..f7d1ff5294f58 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleTypeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleTypeTests.java @@ -13,9 +13,7 @@ import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -51,13 +49,7 @@ public class TimeseriesLifecycleTypeTests extends ESTestCase { - private static final AllocateAction TEST_ALLOCATE_ACTION = new AllocateAction( - 2, - 20, - Collections.singletonMap("node", "node1"), - null, - null - ); + private static final AllocateAction TEST_ALLOCATE_ACTION = new AllocateAction(2, 20, Map.of("node", "node1"), null, null); private static final DeleteAction TEST_DELETE_ACTION = DeleteAction.WITH_SNAPSHOT_DELETE; private static final WaitForSnapshotAction TEST_WAIT_FOR_SNAPSHOT_ACTION = new WaitForSnapshotAction("policy"); @@ -91,7 +83,7 @@ public void testValidatePhases() { if (invalid) { phaseName += randomAlphaOfLength(5); } - Map phases = 
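The equalTo(Set.of()) assertion above keeps passing because Set equality is defined by contents, not by which factory produced the set:

import java.util.Collections;
import java.util.Set;

public class EmptySetEquality {
    public static void main(String[] args) {
        // Any two empty sets are equal, so equalTo(Set.of())
        // matches a Collections.emptySet() result and vice versa.
        System.out.println(Set.of().equals(Collections.emptySet())); // true
        System.out.println(Collections.emptySet().equals(Set.of())); // true
    }
}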
Collections.singletonMap(phaseName, new Phase(phaseName, TimeValue.ZERO, Collections.emptyMap())); + Map phases = Map.of(phaseName, new Phase(phaseName, TimeValue.ZERO, Map.of())); if (invalid) { Exception e = expectThrows(IllegalArgumentException.class, () -> TimeseriesLifecycleType.INSTANCE.validate(phases.values())); assertThat(e.getMessage(), equalTo("Timeseries lifecycle does not support phase [" + phaseName + "]")); @@ -109,7 +101,7 @@ public void testValidateHotPhase() { invalidAction = getTestAction(randomFrom("allocate", "delete", "freeze")); actions.put(invalidAction.getWriteableName(), invalidAction); } - Map hotPhase = Collections.singletonMap("hot", new Phase("hot", TimeValue.ZERO, actions)); + Map hotPhase = Map.of("hot", new Phase("hot", TimeValue.ZERO, actions)); if (invalidAction != null) { Exception e = expectThrows(IllegalArgumentException.class, () -> TimeseriesLifecycleType.INSTANCE.validate(hotPhase.values())); @@ -123,14 +115,14 @@ public void testValidateHotPhase() { final Map hotActionMap = hotActions.stream() .map(this::getTestAction) .collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity())); - TimeseriesLifecycleType.INSTANCE.validate(Collections.singleton(new Phase("hot", TimeValue.ZERO, hotActionMap))); + TimeseriesLifecycleType.INSTANCE.validate(List.of(new Phase("hot", TimeValue.ZERO, hotActionMap))); }; - validateHotActions.accept(Arrays.asList(RolloverAction.NAME)); - validateHotActions.accept(Arrays.asList(RolloverAction.NAME, ForceMergeAction.NAME)); + validateHotActions.accept(List.of(RolloverAction.NAME)); + validateHotActions.accept(List.of(RolloverAction.NAME, ForceMergeAction.NAME)); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> validateHotActions.accept(Arrays.asList(ForceMergeAction.NAME)) + () -> validateHotActions.accept(List.of(ForceMergeAction.NAME)) ); assertThat( e.getMessage(), @@ -148,7 +140,7 @@ public void testValidateWarmPhase() { invalidAction = getTestAction(randomFrom("rollover", "delete", "freeze")); actions.put(invalidAction.getWriteableName(), invalidAction); } - Map warmPhase = Collections.singletonMap("warm", new Phase("warm", TimeValue.ZERO, actions)); + Map warmPhase = Map.of("warm", new Phase("warm", TimeValue.ZERO, actions)); if (invalidAction != null) { Exception e = expectThrows(IllegalArgumentException.class, () -> TimeseriesLifecycleType.INSTANCE.validate(warmPhase.values())); @@ -167,7 +159,7 @@ public void testValidateColdPhase() { invalidAction = getTestAction(randomFrom("rollover", "delete", "forcemerge", "shrink")); actions.put(invalidAction.getWriteableName(), invalidAction); } - Map coldPhase = Collections.singletonMap("cold", new Phase("cold", TimeValue.ZERO, actions)); + Map coldPhase = Map.of("cold", new Phase("cold", TimeValue.ZERO, actions)); if (invalidAction != null) { Exception e = expectThrows(IllegalArgumentException.class, () -> TimeseriesLifecycleType.INSTANCE.validate(coldPhase.values())); @@ -188,7 +180,7 @@ public void testValidateFrozenPhase() { invalidAction = getTestAction(randomFrom("rollover", "delete", "forcemerge", "shrink")); actions.put(invalidAction.getWriteableName(), invalidAction); } - Map frozenPhase = Collections.singletonMap("frozen", new Phase("frozen", TimeValue.ZERO, actions)); + Map frozenPhase = Map.of("frozen", new Phase("frozen", TimeValue.ZERO, actions)); if (invalidAction != null) { Exception e = expectThrows( @@ -210,7 +202,7 @@ public void testValidateDeletePhase() { invalidAction = 
getTestAction(randomFrom("allocate", "rollover", "forcemerge", "shrink", "freeze", "set_priority")); actions.put(invalidAction.getWriteableName(), invalidAction); } - Map deletePhase = Collections.singletonMap("delete", new Phase("delete", TimeValue.ZERO, actions)); + Map deletePhase = Map.of("delete", new Phase("delete", TimeValue.ZERO, actions)); if (invalidAction != null) { Exception e = expectThrows( @@ -459,7 +451,7 @@ public void testValidateDownsamplingAction() { public void testGetOrderedPhases() { Map phaseMap = new HashMap<>(); for (String phaseName : randomSubsetOf(randomIntBetween(0, ORDERED_VALID_PHASES.size()), ORDERED_VALID_PHASES)) { - phaseMap.put(phaseName, new Phase(phaseName, TimeValue.ZERO, Collections.emptyMap())); + phaseMap.put(phaseName, new Phase(phaseName, TimeValue.ZERO, Map.of())); } assertTrue(isSorted(TimeseriesLifecycleType.INSTANCE.getOrderedPhases(phaseMap), Phase::getName, ORDERED_VALID_PHASES)); @@ -509,7 +501,7 @@ private boolean isUnfollowInjected(String phaseName, String actionName) { public void testGetOrderedActionsInvalidPhase() { IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, - () -> TimeseriesLifecycleType.INSTANCE.getOrderedActions(new Phase("invalid", TimeValue.ZERO, Collections.emptyMap())) + () -> TimeseriesLifecycleType.INSTANCE.getOrderedActions(new Phase("invalid", TimeValue.ZERO, Map.of())) ); assertThat(exception.getMessage(), equalTo("lifecycle type [timeseries] does not support phase [invalid]")); } @@ -583,25 +575,25 @@ public void testShouldMigrateDataToTiers() { { // not inject in hot phase - Phase phase = new Phase(HOT_PHASE, TimeValue.ZERO, Collections.emptyMap()); + Phase phase = new Phase(HOT_PHASE, TimeValue.ZERO, Map.of()); assertThat(TimeseriesLifecycleType.shouldInjectMigrateStepForPhase(phase), is(false)); } { // not inject in frozen phase - Phase phase = new Phase(FROZEN_PHASE, TimeValue.ZERO, Collections.emptyMap()); + Phase phase = new Phase(FROZEN_PHASE, TimeValue.ZERO, Map.of()); assertThat(TimeseriesLifecycleType.shouldInjectMigrateStepForPhase(phase), is(false)); } { // not inject in delete phase - Phase phase = new Phase(DELETE_PHASE, TimeValue.ZERO, Collections.emptyMap()); + Phase phase = new Phase(DELETE_PHASE, TimeValue.ZERO, Map.of()); assertThat(TimeseriesLifecycleType.shouldInjectMigrateStepForPhase(phase), is(false)); } { // return false for invalid phase - Phase phase = new Phase(HOT_PHASE + randomAlphaOfLength(5), TimeValue.ZERO, Collections.emptyMap()); + Phase phase = new Phase(HOT_PHASE + randomAlphaOfLength(5), TimeValue.ZERO, Map.of()); assertThat(TimeseriesLifecycleType.shouldInjectMigrateStepForPhase(phase), is(false)); } } @@ -620,7 +612,7 @@ public void testValidatingSearchableSnapshotRepos() { Phase coldPhase = new Phase(HOT_PHASE, TimeValue.ZERO, coldActions); Phase frozenPhase = new Phase(HOT_PHASE, TimeValue.ZERO, frozenActions); - validateAllSearchableSnapshotActionsUseSameRepository(Arrays.asList(hotPhase, coldPhase, frozenPhase)); + validateAllSearchableSnapshotActionsUseSameRepository(List.of(hotPhase, coldPhase, frozenPhase)); } { @@ -634,7 +626,7 @@ public void testValidatingSearchableSnapshotRepos() { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> validateAllSearchableSnapshotActionsUseSameRepository(Arrays.asList(hotPhase, coldPhase, frozenPhase)) + () -> validateAllSearchableSnapshotActionsUseSameRepository(List.of(hotPhase, coldPhase, frozenPhase)) ); assertThat( e.getMessage(), @@ -649,25 +641,25 @@ public void 
testValidatingSearchableSnapshotRepos() { public void testValidatingIncreasingAges() { { - Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); - Phase warmPhase = new Phase(WARM_PHASE, TimeValue.ZERO, Collections.emptyMap()); - Phase coldPhase = new Phase(COLD_PHASE, TimeValue.ZERO, Collections.emptyMap()); - Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.ZERO, Collections.emptyMap()); - Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.ZERO, Collections.emptyMap()); + Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Map.of()); + Phase warmPhase = new Phase(WARM_PHASE, TimeValue.ZERO, Map.of()); + Phase coldPhase = new Phase(COLD_PHASE, TimeValue.ZERO, Map.of()); + Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.ZERO, Map.of()); + Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.ZERO, Map.of()); assertFalse( Strings.hasText( - validateMonotonicallyIncreasingPhaseTimings(Arrays.asList(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase)) + validateMonotonicallyIncreasingPhaseTimings(List.of(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase)) ) ); } { - Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); - Phase warmPhase = new Phase(WARM_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); - Phase coldPhase = new Phase(COLD_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); - Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); - Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); + Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Map.of()); + Phase warmPhase = new Phase(WARM_PHASE, TimeValue.timeValueDays(1), Map.of()); + Phase coldPhase = new Phase(COLD_PHASE, TimeValue.timeValueDays(1), Map.of()); + Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(1), Map.of()); + Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(1), Map.of()); List phases = new ArrayList<>(); phases.add(hotPhase); @@ -687,15 +679,13 @@ public void testValidatingIncreasingAges() { } { - Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); - Phase warmPhase = new Phase(WARM_PHASE, TimeValue.ZERO, Collections.emptyMap()); - Phase coldPhase = new Phase(COLD_PHASE, TimeValue.timeValueHours(12), Collections.emptyMap()); - Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.ZERO, Collections.emptyMap()); - Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.ZERO, Collections.emptyMap()); + Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Map.of()); + Phase warmPhase = new Phase(WARM_PHASE, TimeValue.ZERO, Map.of()); + Phase coldPhase = new Phase(COLD_PHASE, TimeValue.timeValueHours(12), Map.of()); + Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.ZERO, Map.of()); + Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.ZERO, Map.of()); - String err = validateMonotonicallyIncreasingPhaseTimings( - Arrays.asList(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase) - ); + String err = validateMonotonicallyIncreasingPhaseTimings(List.of(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase)); assertThat( err, @@ -708,15 +698,13 @@ public void testValidatingIncreasingAges() { } { - Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); - Phase warmPhase = new Phase(WARM_PHASE, TimeValue.timeValueDays(3), Collections.emptyMap()); - Phase 
coldPhase = new Phase(COLD_PHASE, null, Collections.emptyMap()); - Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); - Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(2), Collections.emptyMap()); + Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Map.of()); + Phase warmPhase = new Phase(WARM_PHASE, TimeValue.timeValueDays(3), Map.of()); + Phase coldPhase = new Phase(COLD_PHASE, null, Map.of()); + Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(1), Map.of()); + Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(2), Map.of()); - String err = validateMonotonicallyIncreasingPhaseTimings( - Arrays.asList(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase) - ); + String err = validateMonotonicallyIncreasingPhaseTimings(List.of(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase)); assertThat( err, @@ -729,15 +717,13 @@ public void testValidatingIncreasingAges() { } { - Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); - Phase warmPhase = new Phase(WARM_PHASE, TimeValue.timeValueDays(3), Collections.emptyMap()); - Phase coldPhase = new Phase(COLD_PHASE, null, Collections.emptyMap()); - Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(2), Collections.emptyMap()); - Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); + Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Map.of()); + Phase warmPhase = new Phase(WARM_PHASE, TimeValue.timeValueDays(3), Map.of()); + Phase coldPhase = new Phase(COLD_PHASE, null, Map.of()); + Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(2), Map.of()); + Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(1), Map.of()); - String err = validateMonotonicallyIncreasingPhaseTimings( - Arrays.asList(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase) - ); + String err = validateMonotonicallyIncreasingPhaseTimings(List.of(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase)); assertThat( err, @@ -750,15 +736,13 @@ public void testValidatingIncreasingAges() { } { - Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(3), Collections.emptyMap()); - Phase warmPhase = new Phase(WARM_PHASE, TimeValue.timeValueDays(2), Collections.emptyMap()); - Phase coldPhase = new Phase(COLD_PHASE, null, Collections.emptyMap()); - Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(2), Collections.emptyMap()); - Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); + Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(3), Map.of()); + Phase warmPhase = new Phase(WARM_PHASE, TimeValue.timeValueDays(2), Map.of()); + Phase coldPhase = new Phase(COLD_PHASE, null, Map.of()); + Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(2), Map.of()); + Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(1), Map.of()); - String err = validateMonotonicallyIncreasingPhaseTimings( - Arrays.asList(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase) - ); + String err = validateMonotonicallyIncreasingPhaseTimings(List.of(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase)); assertThat( err, @@ -772,15 +756,13 @@ public void testValidatingIncreasingAges() { } { - Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(3), Collections.emptyMap()); - Phase warmPhase = new Phase(WARM_PHASE, 
TimeValue.timeValueDays(2), Collections.emptyMap()); - Phase coldPhase = new Phase(COLD_PHASE, TimeValue.timeValueDays(2), Collections.emptyMap()); - Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(2), Collections.emptyMap()); - Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); + Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(3), Map.of()); + Phase warmPhase = new Phase(WARM_PHASE, TimeValue.timeValueDays(2), Map.of()); + Phase coldPhase = new Phase(COLD_PHASE, TimeValue.timeValueDays(2), Map.of()); + Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(2), Map.of()); + Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(1), Map.of()); - String err = validateMonotonicallyIncreasingPhaseTimings( - Arrays.asList(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase) - ); + String err = validateMonotonicallyIncreasingPhaseTimings(List.of(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase)); assertThat( err, @@ -799,7 +781,7 @@ public void testValidateFrozenPhaseHasSearchableSnapshot() { Map frozenActions = new HashMap<>(); frozenActions.put(SearchableSnapshotAction.NAME, new SearchableSnapshotAction("repo1", randomBoolean())); Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.ZERO, frozenActions); - validateFrozenPhaseHasSearchableSnapshotAction(Collections.singleton(frozenPhase)); + validateFrozenPhaseHasSearchableSnapshotAction(List.of(frozenPhase)); } { @@ -807,7 +789,7 @@ public void testValidateFrozenPhaseHasSearchableSnapshot() { Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.ZERO, frozenActions); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> validateFrozenPhaseHasSearchableSnapshotAction(Collections.singleton(frozenPhase)) + () -> validateFrozenPhaseHasSearchableSnapshotAction(List.of(frozenPhase)) ); assertThat( e.getMessage(), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/UnfollowFollowerIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/UnfollowFollowerIndexStepTests.java index 71f7ea2925f16..8e40d3af86d81 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/UnfollowFollowerIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/UnfollowFollowerIndexStepTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.core.ccr.action.UnfollowAction; import org.mockito.Mockito; -import java.util.Collections; +import java.util.Map; import static org.elasticsearch.xpack.core.ilm.UnfollowAction.CCR_METADATA_KEY; import static org.hamcrest.Matchers.equalTo; @@ -30,7 +30,7 @@ protected UnfollowFollowerIndexStep newInstance(Step.StepKey key, Step.StepKey n public void testUnFollow() throws Exception { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -51,7 +51,7 @@ public void testUnFollow() throws Exception { public void testRequestNotAcknowledged() { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) 
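These CCR_METADATA_KEY hunks reuse Map.of() across many index builders. On current OpenJDK that is a single shared immutable instance, so the reuse allocates nothing and cannot leak state between tests; the identity itself is not guaranteed by the spec, hence the hedge in the comment:

import java.util.Map;

public class SharedEmptyMap {
    public static void main(String[] args) {
        Map<String, String> a = Map.of();
        Map<String, String> b = Map.of();
        // true on current OpenJDK (one canonical empty-map instance);
        // only contents-equality, not identity, is guaranteed by the spec.
        System.out.println(a == b);
        System.out.println(a.equals(b)); // true, always
    }
}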
.numberOfShards(1) .numberOfReplicas(0) .build(); @@ -71,7 +71,7 @@ public void testRequestNotAcknowledged() { public void testUnFollowUnfollowFailed() { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -93,7 +93,7 @@ public void testUnFollowUnfollowFailed() { public void testFailureToReleaseRetentionLeases() throws Exception { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/UpdateRolloverLifecycleDateStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/UpdateRolloverLifecycleDateStepTests.java index e4bcfd88737f2..3ede4d7668cd0 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/UpdateRolloverLifecycleDateStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/UpdateRolloverLifecycleDateStepTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.xpack.core.ilm.Step.StepKey; -import java.util.Collections; import java.util.List; import java.util.function.LongSupplier; @@ -68,7 +67,7 @@ public void testPerformAction() { .numberOfReplicas(randomIntBetween(0, 5)) .build(); IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10)) - .putRolloverInfo(new RolloverInfo(alias, Collections.emptyList(), rolloverTime)) + .putRolloverInfo(new RolloverInfo(alias, List.of(), rolloverTime)) .settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) .numberOfShards(randomIntBetween(1, 5)) .numberOfReplicas(randomIntBetween(0, 5)) @@ -88,7 +87,7 @@ public void testPerformActionOnDataStream() { long rolloverTime = randomValueOtherThan(creationDate, () -> randomNonNegativeLong()); String dataStreamName = "test-datastream"; IndexMetadata originalIndexMeta = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName, 1)) - .putRolloverInfo(new RolloverInfo(dataStreamName, Collections.emptyList(), rolloverTime)) + .putRolloverInfo(new RolloverInfo(dataStreamName, List.of(), rolloverTime)) .settings(settings(IndexVersion.current())) .numberOfShards(randomIntBetween(1, 5)) .numberOfReplicas(randomIntBetween(0, 5)) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForDataTierStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForDataTierStepTests.java index 00012575ea5de..2635e14b52eb4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForDataTierStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForDataTierStepTests.java @@ -64,9 +64,7 @@ protected WaitForDataTierStep copyInstance(WaitForDataTierStep instance) { public void testConditionMet() { String notIncludedTier = randomFrom(DataTier.ALL_DATA_TIERS); - List otherTiers = DataTier.ALL_DATA_TIERS.stream() - .filter(tier -> notIncludedTier.equals(tier) == false) - .collect(Collectors.toList()); + List otherTiers = 
DataTier.ALL_DATA_TIERS.stream().filter(tier -> notIncludedTier.equals(tier) == false).toList(); List includedTiers = randomSubsetOf(between(1, otherTiers.size()), otherTiers); String tierPreference = String.join(",", includedTiers); WaitForDataTierStep step = new WaitForDataTierStep(randomStepKey(), randomStepKey(), tierPreference); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStepTests.java index 4ac5511a247c9..ba94508667776 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStepTests.java @@ -16,9 +16,9 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import org.mockito.Mockito; -import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Map; import static org.elasticsearch.xpack.core.ilm.UnfollowAction.CCR_METADATA_KEY; import static org.hamcrest.Matchers.equalTo; @@ -57,11 +57,11 @@ protected WaitForFollowShardTasksStep copyInstance(WaitForFollowShardTasksStep i public void testConditionMet() { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(2) .numberOfReplicas(0) .build(); - List statsResponses = Arrays.asList( + List statsResponses = List.of( new FollowStatsAction.StatsResponse(createShardFollowTaskStatus(0, 9, 9)), new FollowStatsAction.StatsResponse(createShardFollowTaskStatus(1, 3, 3)) ); @@ -96,11 +96,11 @@ public void onFailure(Exception e) { public void testConditionNotMetShardsNotInSync() { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(2) .numberOfReplicas(0) .build(); - List statsResponses = Arrays.asList( + List statsResponses = List.of( new FollowStatsAction.StatsResponse(createShardFollowTaskStatus(0, 9, 9)), new FollowStatsAction.StatsResponse(createShardFollowTaskStatus(1, 8, 3)) ); @@ -214,7 +214,7 @@ private void mockFollowStatsCall(String expectedIndexName, List listener = (ActionListener) invocationOnMock .getArguments()[2]; - listener.onResponse(new FollowStatsAction.StatsResponses(Collections.emptyList(), Collections.emptyList(), statsResponses)); + listener.onResponse(new FollowStatsAction.StatsResponses(List.of(), List.of(), statsResponses)); return null; }).when(client).execute(Mockito.eq(FollowStatsAction.INSTANCE), Mockito.any(), Mockito.any()); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexingCompleteStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexingCompleteStepTests.java index 2f91393b451d7..a0982e72b11af 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexingCompleteStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexingCompleteStepTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.index.IndexVersion; import 
org.elasticsearch.xpack.core.ilm.Step.StepKey; -import java.util.Collections; +import java.util.Map; import static org.elasticsearch.xpack.core.ilm.UnfollowAction.CCR_METADATA_KEY; import static org.hamcrest.Matchers.equalTo; @@ -54,7 +54,7 @@ protected WaitForIndexingCompleteStep copyInstance(WaitForIndexingCompleteStep i public void testConditionMet() { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -93,7 +93,7 @@ public void testConditionNotMet() { } IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(indexSettings) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java index 0264f7b09c6fd..db0c2957b3ccb 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java @@ -38,7 +38,6 @@ import org.mockito.ArgumentCaptor; import org.mockito.Mockito; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Locale; @@ -396,11 +395,7 @@ public void testEvaluateDoesntTriggerRolloverForIndexManuallyRolledOnLifecycleRo .putAlias(AliasMetadata.builder(rolloverAlias)) .settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, rolloverAlias)) .putRolloverInfo( - new RolloverInfo( - rolloverAlias, - Collections.singletonList(new MaxSizeCondition(ByteSizeValue.ofBytes(2L))), - System.currentTimeMillis() - ) + new RolloverInfo(rolloverAlias, List.of(new MaxSizeCondition(ByteSizeValue.ofBytes(2L))), System.currentTimeMillis()) ) .numberOfShards(randomIntBetween(1, 5)) .numberOfReplicas(randomIntBetween(0, 5)) @@ -432,7 +427,7 @@ public void testEvaluateTriggersRolloverForIndexManuallyRolledOnDifferentAlias() .putRolloverInfo( new RolloverInfo( randomAlphaOfLength(5), - Collections.singletonList(new MaxSizeCondition(ByteSizeValue.ofBytes(2L))), + List.of(new MaxSizeCondition(ByteSizeValue.ofBytes(2L))), System.currentTimeMillis() ) ) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/GetLifecycleResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/GetLifecycleResponseTests.java index 05c637a3a66c9..1dc8b24c3231d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/GetLifecycleResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/GetLifecycleResponseTests.java @@ -24,7 +24,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import java.util.Map; @@ -90,7 +89,7 @@ protected Writeable.Reader instanceReader() { protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry( - Arrays.asList( + List.of( new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new), new NamedWriteableRegistry.Entry(LifecycleType.class, 
TestLifecycleType.TYPE, in -> TestLifecycleType.INSTANCE) ) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleRequestTests.java index feb5ca24a021d..b87a4e41258b8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleRequestTests.java @@ -34,7 +34,6 @@ import org.junit.Before; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; public class PutLifecycleRequestTests extends AbstractXContentSerializingTestCase { @@ -78,7 +77,7 @@ public String getPolicyName() { @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry( - Arrays.asList( + List.of( new NamedWriteableRegistry.Entry( LifecycleType.class, TimeseriesLifecycleType.TYPE, @@ -105,7 +104,7 @@ protected NamedWriteableRegistry getNamedWriteableRegistry() { protected NamedXContentRegistry xContentRegistry() { List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); entries.addAll( - Arrays.asList( + List.of( new NamedXContentRegistry.Entry( LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/RemoveIndexLifecyclePolicyResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/RemoveIndexLifecyclePolicyResponseTests.java index 76f4d732f4ae7..44fed3d4b488b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/RemoveIndexLifecyclePolicyResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/RemoveIndexLifecyclePolicyResponseTests.java @@ -14,15 +14,13 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.List; public class RemoveIndexLifecyclePolicyResponseTests extends AbstractXContentSerializingTestCase { @Override protected Response createTestInstance() { - List failedIndexes = Arrays.asList(generateRandomStringArray(20, 20, false)); + List failedIndexes = List.of(generateRandomStringArray(20, 20, false)); return new Response(failedIndexes); } @@ -35,7 +33,7 @@ protected Writeable.Reader instanceReader() { protected Response mutateInstance(Response instance) { List failedIndices = randomValueOtherThan( instance.getFailedIndexes(), - () -> Arrays.asList(generateRandomStringArray(20, 20, false)) + () -> List.of(generateRandomStringArray(20, 20, false)) ); return new Response(failedIndices); } @@ -53,7 +51,7 @@ public void testNullFailedIndices() { public void testHasFailures() { Response response = new Response(new ArrayList<>()); assertFalse(response.hasFailures()); - assertEquals(Collections.emptyList(), response.getFailedIndexes()); + assertEquals(List.of(), response.getFailedIndexes()); int size = randomIntBetween(1, 10); List failedIndexes = new ArrayList<>(size); diff --git a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java index 6d95038e2cbcc..54a48ab34e991 100644 --- a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java +++ 
b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java @@ -63,7 +63,7 @@ public void testMlDeprecationChecks() throws Exception { indexDoc( ".ml-anomalies-.write-" + jobId, jobId + "_model_snapshot_2", - "{\"job_id\":\"deprecation_check_job\",\"snapshot_id\":\"2\",\"snapshot_doc_count\":1,\"min_version\":\"8.0.0\"}" + "{\"job_id\":\"deprecation_check_job\",\"snapshot_id\":\"2\",\"snapshot_doc_count\":1,\"min_version\":\"8.3.0\"}" ); client().performRequest(new Request("POST", "/.ml-anomalies-*/_refresh")); diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java index 87d0bfb93e18c..7ad0758d99832 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java @@ -366,7 +366,7 @@ private static ClusterState removeSkippedSettings(ClusterState state, String[] i public static class Request extends MasterNodeReadRequest implements IndicesRequest.Replaceable { - private static final IndicesOptions INDICES_OPTIONS = IndicesOptions.fromOptions(false, true, true, true); + private static final IndicesOptions INDICES_OPTIONS = IndicesOptions.fromOptions(false, true, true, true, true); private String[] indices; public Request(TimeValue masterNodeTimeout, String... indices) { diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml index b0f850d09f76d..094d9cbf43089 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml @@ -58,7 +58,7 @@ setup: connector.put: connector_id: test-connector-native body: - index_name: search-test + index_name: content-search-test is_native: true - match: { result: 'created' } @@ -68,7 +68,7 @@ setup: connector_id: test-connector-native - match: { id: test-connector-native } - - match: { index_name: search-test } + - match: { index_name: content-search-test } - match: { is_native: true } - match: { sync_now: false } - match: { status: needs_configuration } @@ -151,6 +151,7 @@ setup: is_native: false service_type: super-connector + --- 'Create Connector - Id returned as part of response': - do: diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/130_connector_update_index_name.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/130_connector_update_index_name.yml index 4ffa5435a3d7b..f804dc02a9e01 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/130_connector_update_index_name.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/130_connector_update_index_name.yml @@ -125,3 +125,29 @@ setup: connector_id: test-connector - match: { index_name: search-1-test } + + +--- +"Update Managed Connector Index Name": + - do: + connector.put: + connector_id: test-connector-1 + body: + 
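# index_name is deliberately omitted on creation; this test assigns it below via connector.update_index_name +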
is_native: true + service_type: super-connector + + - do: + connector.update_index_name: + connector_id: test-connector-1 + body: + index_name: content-search-2-test + + + - match: { result: updated } + + - do: + connector.get: + connector_id: test-connector-1 + + - match: { index_name: content-search-2-test } + diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/140_connector_update_native.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/140_connector_update_native.yml index 77c57532ad479..f8cd24d175312 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/140_connector_update_native.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/140_connector_update_native.yml @@ -7,7 +7,7 @@ setup: connector.put: connector_id: test-connector body: - index_name: search-1-test + index_name: content-search-1-test name: my-connector language: pl is_native: false @@ -29,7 +29,6 @@ setup: connector_id: test-connector - match: { is_native: true } - - match: { status: configured } - do: connector.update_native: @@ -44,7 +43,6 @@ setup: connector_id: test-connector - match: { is_native: false } - - match: { status: configured } --- "Update Connector Native - 404 when connector doesn't exist": diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/15_connector_post.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/15_connector_post.yml index 1cbff6a35e18b..634f99cd53fde 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/15_connector_post.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/15_connector_post.yml @@ -71,7 +71,7 @@ setup: - do: connector.post: body: - index_name: search-test + index_name: content-search-test is_native: true - set: { id: id } @@ -82,7 +82,7 @@ setup: connector_id: $id - match: { id: $id } - - match: { index_name: search-test } + - match: { index_name: content-search-test } - match: { is_native: true } - match: { sync_now: false } - match: { status: needs_configuration } @@ -102,6 +102,7 @@ setup: is_native: false service_type: super-connector + --- 'Create Connector - Index name used by another connector': - do: diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/20_connector_list.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/20_connector_list.yml index 10e4620ca5603..697b0ee419181 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/20_connector_list.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/20_connector_list.yml @@ -26,7 +26,7 @@ setup: connector.put: connector_id: connector-b body: - index_name: search-2-test + index_name: content-search-2-test name: my-connector-2 language: en is_native: true @@ -40,13 +40,13 @@ setup: - match: { count: 3 } # Alphabetical order by index_name for results - - match: { results.0.id: "connector-a" } - - match: { results.0.index_name: "search-1-test" } - - match: { results.0.language: "pl" } + - match: { results.0.id: "connector-b" } + - match: { 
results.0.index_name: "content-search-2-test" } + - match: { results.0.language: "en" } - - match: { results.1.id: "connector-b" } - - match: { results.1.index_name: "search-2-test" } - - match: { results.1.language: "en" } + - match: { results.1.id: "connector-a" } + - match: { results.1.index_name: "search-1-test" } + - match: { results.1.language: "pl" } - match: { results.2.id: "connector-c" } - match: { results.2.index_name: "search-3-test" } @@ -62,9 +62,9 @@ setup: - match: { count: 3 } # Alphabetical order by index_name for results - - match: { results.0.id: "connector-b" } - - match: { results.0.index_name: "search-2-test" } - - match: { results.0.language: "en" } + - match: { results.0.id: "connector-a" } + - match: { results.0.index_name: "search-1-test" } + - match: { results.0.language: "pl" } - match: { results.1.id: "connector-c" } - match: { results.1.index_name: "search-3-test" } @@ -79,13 +79,13 @@ setup: - match: { count: 3 } # Alphabetical order by index_name for results - - match: { results.0.id: "connector-a" } - - match: { results.0.index_name: "search-1-test" } - - match: { results.0.language: "pl" } + - match: { results.0.id: "connector-b" } + - match: { results.0.index_name: "content-search-2-test" } + - match: { results.0.language: "en" } - - match: { results.1.id: "connector-b" } - - match: { results.1.index_name: "search-2-test" } - - match: { results.1.language: "en" } + - match: { results.1.id: "connector-a" } + - match: { results.1.index_name: "search-1-test" } + - match: { results.1.language: "pl" } --- "List Connector - empty": @@ -118,11 +118,11 @@ setup: - do: connector.list: - index_name: search-1-test,search-2-test + index_name: search-1-test,content-search-2-test - match: { count: 2 } - - match: { results.0.index_name: "search-1-test" } - - match: { results.1.index_name: "search-2-test" } + - match: { results.0.index_name: "content-search-2-test" } + - match: { results.1.index_name: "search-1-test" } --- @@ -147,8 +147,8 @@ setup: connector_name: my-connector-1,my-connector-2 - match: { count: 2 } - - match: { results.0.name: "my-connector-1" } - - match: { results.1.name: "my-connector-2" } + - match: { results.0.name: "my-connector-2" } + - match: { results.1.name: "my-connector-1" } --- @@ -156,10 +156,10 @@ setup: - do: connector.list: connector_name: my-connector-1,my-connector-2 - index_name: search-2-test + index_name: content-search-2-test - match: { count: 1 } - - match: { results.0.index_name: "search-2-test" } + - match: { results.0.index_name: "content-search-2-test" } - match: { results.0.name: "my-connector-2" } @@ -230,13 +230,13 @@ setup: - match: { count: 3 } # Alphabetical order by index_name for results - - match: { results.0.id: "connector-a" } - - match: { results.0.index_name: "search-1-test" } - - match: { results.0.language: "pl" } + - match: { results.0.id: "connector-b" } + - match: { results.0.index_name: "content-search-2-test" } + - match: { results.0.language: "en" } - - match: { results.1.id: "connector-b" } - - match: { results.1.index_name: "search-2-test" } - - match: { results.1.language: "en" } + - match: { results.1.id: "connector-a" } + - match: { results.1.index_name: "search-1-test" } + - match: { results.1.language: "pl" } - match: { results.2.id: "connector-c" } - match: { results.2.index_name: "search-3-test" } @@ -255,13 +255,13 @@ setup: - match: { count: 3 } # Alphabetical order by index_name for results - - match: { results.0.id: "connector-a" } - - match: { results.0.index_name: "search-1-test" } - - 
match: { results.0.language: "pl" } + - match: { results.0.id: "connector-b" } + - match: { results.0.index_name: "content-search-2-test" } + - match: { results.0.language: "en" } - - match: { results.1.id: "connector-b" } - - match: { results.1.index_name: "search-2-test" } - - match: { results.1.language: "en" } + - match: { results.1.id: "connector-a" } + - match: { results.1.index_name: "search-1-test" } + - match: { results.1.language: "pl" } - match: { results.2.id: "connector-c" } - match: { results.2.index_name: "search-3-test" } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Attribute.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Attribute.java index 53debedafc3d8..829943d245149 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Attribute.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Attribute.java @@ -49,8 +49,9 @@ public Attribute(Source source, String name, Nullability nullability, @Nullable this.nullability = nullability; } - public static String rawTemporaryName(String inner, String outer, String suffix) { - return SYNTHETIC_ATTRIBUTE_NAME_PREFIX + inner + "$" + outer + "$" + suffix; + public static String rawTemporaryName(String... parts) { + var name = String.join("$", parts); + return name.isEmpty() || name.startsWith(SYNTHETIC_ATTRIBUTE_NAME_PREFIX) ? name : SYNTHETIC_ATTRIBUTE_NAME_PREFIX + name; } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BlockHash.java index 9b53e6558f4db..191d6443264ca 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BlockHash.java @@ -180,13 +180,16 @@ public static BlockHash buildCategorizeBlockHash( List groups, AggregatorMode aggregatorMode, BlockFactory blockFactory, - AnalysisRegistry analysisRegistry + AnalysisRegistry analysisRegistry, + int emitBatchSize ) { - if (groups.size() != 1) { - throw new IllegalArgumentException("only a single CATEGORIZE group can used"); + if (groups.size() == 1) { + return new CategorizeBlockHash(blockFactory, groups.get(0).channel, aggregatorMode, analysisRegistry); + } else { + assert groups.get(0).isCategorize(); + assert groups.subList(1, groups.size()).stream().noneMatch(GroupSpec::isCategorize); + return new CategorizePackedValuesBlockHash(groups, blockFactory, aggregatorMode, analysisRegistry, emitBatchSize); } - - return new CategorizeBlockHash(blockFactory, groups.get(0).channel, aggregatorMode, analysisRegistry); } /** diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHash.java index 35c6faf84e623..f83776fbdbc85 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHash.java @@ -44,7 +44,7 @@ import java.util.Objects; /** - * Base BlockHash implementation for {@code Categorize} grouping function. 
+ * BlockHash implementation for {@code Categorize} grouping function. */ public class CategorizeBlockHash extends BlockHash { @@ -53,11 +53,9 @@ public class CategorizeBlockHash extends BlockHash { ); private static final int NULL_ORD = 0; - // TODO: this should probably also take an emitBatchSize private final int channel; private final AggregatorMode aggregatorMode; private final TokenListCategorizer.CloseableTokenListCategorizer categorizer; - private final CategorizeEvaluator evaluator; /** @@ -95,12 +93,14 @@ public class CategorizeBlockHash extends BlockHash { } } + boolean seenNull() { + return seenNull; + } + @Override public void add(Page page, GroupingAggregatorFunction.AddInput addInput) { - if (aggregatorMode.isInputPartial() == false) { - addInitial(page, addInput); - } else { - addIntermediate(page, addInput); + try (IntBlock block = add(page)) { + addInput.add(0, block); } } @@ -129,50 +129,38 @@ public void close() { Releasables.close(evaluator, categorizer); } + private IntBlock add(Page page) { + return aggregatorMode.isInputPartial() == false ? addInitial(page) : addIntermediate(page); + } + /** * Adds initial (raw) input to the state. */ - private void addInitial(Page page, GroupingAggregatorFunction.AddInput addInput) { - try (IntBlock result = (IntBlock) evaluator.eval(page.getBlock(channel))) { - addInput.add(0, result); - } + IntBlock addInitial(Page page) { + return (IntBlock) evaluator.eval(page.getBlock(channel)); } /** * Adds intermediate state to the state. */ - private void addIntermediate(Page page, GroupingAggregatorFunction.AddInput addInput) { + private IntBlock addIntermediate(Page page) { if (page.getPositionCount() == 0) { - return; + return null; } BytesRefBlock categorizerState = page.getBlock(channel); if (categorizerState.areAllValuesNull()) { seenNull = true; - try (var newIds = blockFactory.newConstantIntVector(NULL_ORD, 1)) { - addInput.add(0, newIds); - } - return; - } - - Map idMap = readIntermediate(categorizerState.getBytesRef(0, new BytesRef())); - try (IntBlock.Builder newIdsBuilder = blockFactory.newIntBlockBuilder(idMap.size())) { - int fromId = idMap.containsKey(0) ? 0 : 1; - int toId = fromId + idMap.size(); - for (int i = fromId; i < toId; i++) { - newIdsBuilder.appendInt(idMap.get(i)); - } - try (IntBlock newIds = newIdsBuilder.build()) { - addInput.add(0, newIds); - } + return blockFactory.newConstantIntBlockWith(NULL_ORD, 1); } + return recategorize(categorizerState.getBytesRef(0, new BytesRef()), null).asBlock(); } /** - * Read intermediate state from a block. - * - * @return a map from the old category id to the new one. The old ids go from 0 to {@code size - 1}. + * Reads the intermediate state from a block and recategorizes the provided IDs. + * If no IDs are provided, the IDs are the IDs in the categorizer's state in order. + * (So 0...N-1 or 1...N, depending on whether null is present.) */ - private Map readIntermediate(BytesRef bytes) { + IntVector recategorize(BytesRef bytes, IntVector ids) { Map idMap = new HashMap<>(); try (StreamInput in = new BytesArray(bytes).streamInput()) { if (in.readBoolean()) { @@ -185,10 +173,22 @@ private Map readIntermediate(BytesRef bytes) { // +1 because the 0 ordinal is reserved for null idMap.put(oldCategoryId + 1, newCategoryId + 1); } - return idMap; } catch (IOException e) { throw new RuntimeException(e); } + try (IntVector.Builder newIdsBuilder = blockFactory.newIntVectorBuilder(idMap.size())) { + if (ids == null) { + int idOffset = idMap.containsKey(0) ? 
0 : 1; + for (int i = 0; i < idMap.size(); i++) { + newIdsBuilder.appendInt(idMap.get(i + idOffset)); + } + } else { + for (int i = 0; i < ids.getPositionCount(); i++) { + newIdsBuilder.appendInt(idMap.get(ids.getInt(i))); + } + } + return newIdsBuilder.build(); + } } /** @@ -198,15 +198,20 @@ private Block buildIntermediateBlock() { if (categorizer.getCategoryCount() == 0) { return blockFactory.newConstantNullBlock(seenNull ? 1 : 0); } + int positionCount = categorizer.getCategoryCount() + (seenNull ? 1 : 0); + // We're returning a block with N positions just because the Page must have all blocks with the same position count! + return blockFactory.newConstantBytesRefBlockWith(serializeCategorizer(), positionCount); + } + + BytesRef serializeCategorizer() { + // TODO: This BytesStreamOutput is not accounted for by the circuit breaker. Fix that! try (BytesStreamOutput out = new BytesStreamOutput()) { out.writeBoolean(seenNull); out.writeVInt(categorizer.getCategoryCount()); for (SerializableTokenListCategory category : categorizer.toCategoriesById()) { category.writeTo(out); } - // We're returning a block with N positions just because the Page must have all blocks with the same position count! - int positionCount = categorizer.getCategoryCount() + (seenNull ? 1 : 0); - return blockFactory.newConstantBytesRefBlockWith(out.bytes().toBytesRef(), positionCount); + return out.bytes().toBytesRef(); } catch (IOException e) { throw new RuntimeException(e); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/CategorizePackedValuesBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/CategorizePackedValuesBlockHash.java new file mode 100644 index 0000000000000..20874cb10ceb8 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/CategorizePackedValuesBlockHash.java @@ -0,0 +1,170 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation.blockhash; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.compute.aggregation.AggregatorMode; +import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.index.analysis.AnalysisRegistry; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/** + * BlockHash implementation for {@code Categorize} grouping function as first + * grouping expression, followed by one or more other grouping expressions. + *
+ * <p>
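+ * For example: {@code STATS count=COUNT() BY category=CATEGORIZE(message), client_ip}, as exercised by the categorize.csv-spec tests in this change.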
+ * For the first grouping (the {@code Categorize} grouping function), a + * {@code CategorizeBlockHash} is used, which outputs integers (category IDs). + * Next, a {@code PackedValuesBlockHash} is used on the category IDs and the + * other groupings (which are not {@code Categorize}s). + */ +public class CategorizePackedValuesBlockHash extends BlockHash { + + private final List specs; + private final AggregatorMode aggregatorMode; + private final Block[] blocks; + private final CategorizeBlockHash categorizeBlockHash; + private final PackedValuesBlockHash packedValuesBlockHash; + + CategorizePackedValuesBlockHash( + List specs, + BlockFactory blockFactory, + AggregatorMode aggregatorMode, + AnalysisRegistry analysisRegistry, + int emitBatchSize + ) { + super(blockFactory); + this.specs = specs; + this.aggregatorMode = aggregatorMode; + blocks = new Block[specs.size()]; + + List delegateSpecs = new ArrayList<>(); + delegateSpecs.add(new GroupSpec(0, ElementType.INT)); + for (int i = 1; i < specs.size(); i++) { + delegateSpecs.add(new GroupSpec(i, specs.get(i).elementType())); + } + + boolean success = false; + try { + categorizeBlockHash = new CategorizeBlockHash(blockFactory, specs.get(0).channel(), aggregatorMode, analysisRegistry); + packedValuesBlockHash = new PackedValuesBlockHash(delegateSpecs, blockFactory, emitBatchSize); + success = true; + } finally { + if (success == false) { + close(); + } + } + } + + @Override + public void add(Page page, GroupingAggregatorFunction.AddInput addInput) { + try (IntBlock categories = getCategories(page)) { + blocks[0] = categories; + for (int i = 1; i < specs.size(); i++) { + blocks[i] = page.getBlock(specs.get(i).channel()); + } + packedValuesBlockHash.add(new Page(blocks), addInput); + } + } + + private IntBlock getCategories(Page page) { + if (aggregatorMode.isInputPartial() == false) { + return categorizeBlockHash.addInitial(page); + } else { + BytesRefBlock stateBlock = page.getBlock(0); + BytesRef stateBytes = stateBlock.getBytesRef(0, new BytesRef()); + try (StreamInput in = new BytesArray(stateBytes).streamInput()) { + BytesRef categorizerState = in.readBytesRef(); + try (IntVector ids = IntVector.readFrom(blockFactory, in)) { + return categorizeBlockHash.recategorize(categorizerState, ids).asBlock(); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } + + @Override + public Block[] getKeys() { + Block[] keys = packedValuesBlockHash.getKeys(); + if (aggregatorMode.isOutputPartial() == false) { + // For final output, the keys are the category regexes. + try ( + BytesRefBlock regexes = (BytesRefBlock) categorizeBlockHash.getKeys()[0]; + BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(keys[0].getPositionCount()) + ) { + IntVector idsVector = (IntVector) keys[0].asVector(); + int idsOffset = categorizeBlockHash.seenNull() ? 0 : -1; + BytesRef scratch = new BytesRef(); + for (int i = 0; i < idsVector.getPositionCount(); i++) { + int id = idsVector.getInt(i); + if (id == 0) { + builder.appendNull(); + } else { + builder.appendBytesRef(regexes.getBytesRef(id + idsOffset, scratch)); + } + } + keys[0].close(); + keys[0] = builder.build(); + } + } else { + // For intermediate output, the keys are the delegate PackedValuesBlockHash's + // keys, with the category IDs replaced by the categorizer's internal state + // together with the list of category IDs. + BytesRef state; + // TODO: This BytesStreamOutput is not accounted for by the circuit breaker. Fix that! 
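+ // The layout written below mirrors what getCategories() reads back on the next stage: a length-prefixed BytesRef holding the serialized categorizer state (in.readBytesRef()), followed by the category IDs as an IntVector (IntVector.readFrom(blockFactory, in)).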
+ try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeBytesRef(categorizeBlockHash.serializeCategorizer()); + ((IntVector) keys[0].asVector()).writeTo(out); + state = out.bytes().toBytesRef(); + } catch (IOException e) { + throw new RuntimeException(e); + } + keys[0].close(); + keys[0] = blockFactory.newConstantBytesRefBlockWith(state, keys[0].getPositionCount()); + } + return keys; + } + + @Override + public IntVector nonEmpty() { + return packedValuesBlockHash.nonEmpty(); + } + + @Override + public BitArray seenGroupIds(BigArrays bigArrays) { + return packedValuesBlockHash.seenGroupIds(bigArrays); + } + + @Override + public final ReleasableIterator lookup(Page page, ByteSizeValue targetBlockSize) { + throw new UnsupportedOperationException(); + } + + @Override + public void close() { + Releasables.close(categorizeBlockHash, packedValuesBlockHash); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java index 6f8386ec08de1..ccddfdf5cc74a 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java @@ -51,7 +51,13 @@ public Operator get(DriverContext driverContext) { if (groups.stream().anyMatch(BlockHash.GroupSpec::isCategorize)) { return new HashAggregationOperator( aggregators, - () -> BlockHash.buildCategorizeBlockHash(groups, aggregatorMode, driverContext.blockFactory(), analysisRegistry), + () -> BlockHash.buildCategorizeBlockHash( + groups, + aggregatorMode, + driverContext.blockFactory(), + analysisRegistry, + maxPageSize + ), driverContext ); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHashTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHashTests.java index f8428b7c33568..587deda650a23 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHashTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHashTests.java @@ -130,9 +130,6 @@ public void close() { } finally { page.releaseBlocks(); } - - // TODO: randomize values? May give wrong results - // TODO: assert the categorizer state after adding pages. } public void testCategorizeRawMultivalue() { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/CategorizePackedValuesBlockHashTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/CategorizePackedValuesBlockHashTests.java new file mode 100644 index 0000000000000..cfa023af3d18a --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/CategorizePackedValuesBlockHashTests.java @@ -0,0 +1,248 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation.blockhash; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.compute.aggregation.AggregatorMode; +import org.elasticsearch.compute.aggregation.ValuesBytesRefAggregatorFunctionSupplier; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.CannedSourceOperator; +import org.elasticsearch.compute.operator.Driver; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.HashAggregationOperator; +import org.elasticsearch.compute.operator.LocalSourceOperator; +import org.elasticsearch.compute.operator.PageConsumerOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.analysis.AnalysisRegistry; +import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.plugins.scanners.StablePluginsRegistry; +import org.elasticsearch.xpack.ml.MachineLearning; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.compute.operator.OperatorTestCase.runDriver; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class CategorizePackedValuesBlockHashTests extends BlockHashTestCase { + + private AnalysisRegistry analysisRegistry; + + @Before + private void initAnalysisRegistry() throws IOException { + analysisRegistry = new AnalysisModule( + TestEnvironment.newEnvironment( + Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build() + ), + List.of(new MachineLearning(Settings.EMPTY), new CommonAnalysisPlugin()), + new StablePluginsRegistry() + ).getAnalysisRegistry(); + } + + public void testCategorize_withDriver() { + BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofMb(256)).withCircuitBreaking(); + CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); + DriverContext driverContext = new DriverContext(bigArrays, new BlockFactory(breaker, bigArrays)); + boolean withNull = randomBoolean(); + boolean withMultivalues = randomBoolean(); + + List groupSpecs = List.of( + new BlockHash.GroupSpec(0, ElementType.BYTES_REF, true), + new BlockHash.GroupSpec(1, ElementType.INT, false) + ); + + LocalSourceOperator.BlockSupplier input1 = () -> { + try ( + BytesRefBlock.Builder messagesBuilder = driverContext.blockFactory().newBytesRefBlockBuilder(10); + IntBlock.Builder idsBuilder = driverContext.blockFactory().newIntBlockBuilder(10) + ) { + if 
(withMultivalues) { + messagesBuilder.beginPositionEntry(); + } + messagesBuilder.appendBytesRef(new BytesRef("connected to 1.1.1")); + messagesBuilder.appendBytesRef(new BytesRef("connected to 1.1.2")); + if (withMultivalues) { + messagesBuilder.endPositionEntry(); + } + idsBuilder.appendInt(7); + if (withMultivalues == false) { + idsBuilder.appendInt(7); + } + + messagesBuilder.appendBytesRef(new BytesRef("connected to 1.1.3")); + messagesBuilder.appendBytesRef(new BytesRef("connection error")); + messagesBuilder.appendBytesRef(new BytesRef("connection error")); + messagesBuilder.appendBytesRef(new BytesRef("connected to 1.1.4")); + idsBuilder.appendInt(42); + idsBuilder.appendInt(7); + idsBuilder.appendInt(42); + idsBuilder.appendInt(7); + + if (withNull) { + messagesBuilder.appendNull(); + idsBuilder.appendInt(43); + } + return new Block[] { messagesBuilder.build(), idsBuilder.build() }; + } + }; + LocalSourceOperator.BlockSupplier input2 = () -> { + try ( + BytesRefBlock.Builder messagesBuilder = driverContext.blockFactory().newBytesRefBlockBuilder(10); + IntBlock.Builder idsBuilder = driverContext.blockFactory().newIntBlockBuilder(10) + ) { + messagesBuilder.appendBytesRef(new BytesRef("connected to 2.1.1")); + messagesBuilder.appendBytesRef(new BytesRef("connected to 2.1.2")); + messagesBuilder.appendBytesRef(new BytesRef("disconnected")); + messagesBuilder.appendBytesRef(new BytesRef("connection error")); + idsBuilder.appendInt(111); + idsBuilder.appendInt(7); + idsBuilder.appendInt(7); + idsBuilder.appendInt(42); + if (withNull) { + messagesBuilder.appendNull(); + idsBuilder.appendNull(); + } + return new Block[] { messagesBuilder.build(), idsBuilder.build() }; + } + }; + + List intermediateOutput = new ArrayList<>(); + + Driver driver = new Driver( + driverContext, + new LocalSourceOperator(input1), + List.of( + new HashAggregationOperator.HashAggregationOperatorFactory( + groupSpecs, + AggregatorMode.INITIAL, + List.of(new ValuesBytesRefAggregatorFunctionSupplier(List.of(0)).groupingAggregatorFactory(AggregatorMode.INITIAL)), + 16 * 1024, + analysisRegistry + ).get(driverContext) + ), + new PageConsumerOperator(intermediateOutput::add), + () -> {} + ); + runDriver(driver); + + driver = new Driver( + driverContext, + new LocalSourceOperator(input2), + List.of( + new HashAggregationOperator.HashAggregationOperatorFactory( + groupSpecs, + AggregatorMode.INITIAL, + List.of(new ValuesBytesRefAggregatorFunctionSupplier(List.of(0)).groupingAggregatorFactory(AggregatorMode.INITIAL)), + 16 * 1024, + analysisRegistry + ).get(driverContext) + ), + new PageConsumerOperator(intermediateOutput::add), + () -> {} + ); + runDriver(driver); + + List finalOutput = new ArrayList<>(); + + driver = new Driver( + driverContext, + new CannedSourceOperator(intermediateOutput.iterator()), + List.of( + new HashAggregationOperator.HashAggregationOperatorFactory( + groupSpecs, + AggregatorMode.FINAL, + List.of(new ValuesBytesRefAggregatorFunctionSupplier(List.of(2)).groupingAggregatorFactory(AggregatorMode.FINAL)), + 16 * 1024, + analysisRegistry + ).get(driverContext) + ), + new PageConsumerOperator(finalOutput::add), + () -> {} + ); + runDriver(driver); + + assertThat(finalOutput, hasSize(1)); + assertThat(finalOutput.get(0).getBlockCount(), equalTo(3)); + BytesRefBlock outputMessages = finalOutput.get(0).getBlock(0); + IntBlock outputIds = finalOutput.get(0).getBlock(1); + BytesRefBlock outputValues = finalOutput.get(0).getBlock(2); + assertThat(outputIds.getPositionCount(), 
equalTo(outputMessages.getPositionCount())); + assertThat(outputValues.getPositionCount(), equalTo(outputMessages.getPositionCount())); + Map>> result = new HashMap<>(); + for (int i = 0; i < outputMessages.getPositionCount(); i++) { + BytesRef messageBytesRef = ((BytesRef) BlockUtils.toJavaObject(outputMessages, i)); + String message = messageBytesRef == null ? null : messageBytesRef.utf8ToString(); + result.computeIfAbsent(message, key -> new HashMap<>()); + + Integer id = (Integer) BlockUtils.toJavaObject(outputIds, i); + result.get(message).computeIfAbsent(id, key -> new HashSet<>()); + + Object values = BlockUtils.toJavaObject(outputValues, i); + if (values == null) { + result.get(message).get(id).add(null); + } else { + if ((values instanceof List) == false) { + values = List.of(values); + } + for (Object valueObject : (List) values) { + BytesRef value = (BytesRef) valueObject; + result.get(message).get(id).add(value.utf8ToString()); + } + } + } + Releasables.close(() -> Iterators.map(finalOutput.iterator(), (Page p) -> p::releaseBlocks)); + + Map>> expectedResult = Map.of( + ".*?connected.+?to.*?", + Map.of( + 7, + Set.of("connected to 1.1.1", "connected to 1.1.2", "connected to 1.1.4", "connected to 2.1.2"), + 42, + Set.of("connected to 1.1.3"), + 111, + Set.of("connected to 2.1.1") + ), + ".*?connection.+?error.*?", + Map.of(7, Set.of("connection error"), 42, Set.of("connection error")), + ".*?disconnected.*?", + Map.of(7, Set.of("disconnected")) + ); + if (withNull) { + expectedResult = new HashMap<>(expectedResult); + expectedResult.put(null, new HashMap<>()); + expectedResult.get(null).put(null, new HashSet<>()); + expectedResult.get(null).get(null).add(null); + expectedResult.get(null).put(43, new HashSet<>()); + expectedResult.get(null).get(43).add(null); + } + assertThat(result, equalTo(expectedResult)); + } +} diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java index 81070b3155f2e..1120a69cc5166 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java @@ -21,7 +21,7 @@ import java.util.List; import static org.elasticsearch.xpack.esql.CsvTestUtils.isEnabled; -import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V4; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V5; import static org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.ASYNC; public class MixedClusterEsqlSpecIT extends EsqlSpecTestCase { @@ -96,7 +96,7 @@ protected boolean supportsInferenceTestService() { @Override protected boolean supportsIndexModeLookup() throws IOException { - return hasCapabilities(List.of(JOIN_LOOKUP_V4.capabilityName())); + return hasCapabilities(List.of(JOIN_LOOKUP_V5.capabilityName())); } @Override diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java index 2ec75683ab149..5c7f981c93a97 100644 --- 
a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java @@ -48,7 +48,7 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.classpathResources; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS_V2; -import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V4; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V5; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_PLANNING_V1; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.METADATA_FIELDS_REMOTE_TEST; import static org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.SYNC; @@ -124,7 +124,7 @@ protected void shouldSkipTest(String testName) throws IOException { assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(INLINESTATS.capabilityName())); assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(INLINESTATS_V2.capabilityName())); assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_PLANNING_V1.capabilityName())); - assumeFalse("LOOKUP JOIN not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_LOOKUP_V4.capabilityName())); + assumeFalse("LOOKUP JOIN not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_LOOKUP_V5.capabilityName())); } private TestFeatureService remoteFeaturesService() throws IOException { @@ -283,8 +283,8 @@ protected boolean supportsInferenceTestService() { @Override protected boolean supportsIndexModeLookup() throws IOException { - // CCS does not yet support JOIN_LOOKUP_V4 and clusters falsely report they have this capability - // return hasCapabilities(List.of(JOIN_LOOKUP_V4.capabilityName())); + // CCS does not yet support JOIN_LOOKUP_V5 and clusters falsely report they have this capability + // return hasCapabilities(List.of(JOIN_LOOKUP_V5.capabilityName())); return false; } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/categorize.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/categorize.csv-spec index 4ce43961a7077..5ad62dd7a21a8 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/categorize.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/categorize.csv-spec @@ -60,6 +60,19 @@ COUNT():long | VALUES(str):keyword | category:keyword 1 | [a, b, c] | .*?disconnected.*? ; +limit before stats +required_capability: categorize_v5 + +FROM sample_data | SORT message | LIMIT 4 + | STATS count=COUNT() BY category=CATEGORIZE(message) + | SORT category +; + +count:long | category:keyword + 3 | .*?Connected.+?to.*? + 1 | .*?Connection.+?error.*? +; + skips stopwords required_capability: categorize_v5 @@ -615,3 +628,159 @@ COUNT():long | x:keyword 3 | [.*?Connection.+?error.*?,.*?Connection.+?error.*?] 1 | [.*?Disconnected.*?,.*?Disconnected.*?] ; + +multiple groupings with categorize and ip +required_capability: categorize_multiple_groupings + +FROM sample_data + | STATS count=COUNT() BY category=CATEGORIZE(message), client_ip + | SORT category, client_ip +; + +count:long | category:keyword | client_ip:ip + 1 | .*?Connected.+?to.*? | 172.21.2.113 + 1 | .*?Connected.+?to.*? 
| 172.21.2.162 + 1 | .*?Connected.+?to.*? | 172.21.3.15 + 3 | .*?Connection.+?error.*? | 172.21.3.15 + 1 | .*?Disconnected.*? | 172.21.0.5 +; + +multiple groupings with categorize and bucketed timestamp +required_capability: categorize_multiple_groupings + +FROM sample_data + | STATS count=COUNT() BY category=CATEGORIZE(message), timestamp=BUCKET(@timestamp, 1 HOUR) + | SORT category, timestamp +; + +count:long | category:keyword | timestamp:datetime + 2 | .*?Connected.+?to.*? | 2023-10-23T12:00:00.000Z + 1 | .*?Connected.+?to.*? | 2023-10-23T13:00:00.000Z + 3 | .*?Connection.+?error.*? | 2023-10-23T13:00:00.000Z + 1 | .*?Disconnected.*? | 2023-10-23T13:00:00.000Z +; + + +multiple groupings with categorize and limit before stats +required_capability: categorize_multiple_groupings + +FROM sample_data | SORT message | LIMIT 5 + | STATS count=COUNT() BY category=CATEGORIZE(message), client_ip + | SORT category, client_ip +; + +count:long | category:keyword | client_ip:ip + 1 | .*?Connected.+?to.*? | 172.21.2.113 + 1 | .*?Connected.+?to.*? | 172.21.2.162 + 1 | .*?Connected.+?to.*? | 172.21.3.15 + 2 | .*?Connection.+?error.*? | 172.21.3.15 +; + +multiple groupings with categorize and nulls +required_capability: categorize_multiple_groupings + +FROM employees + | STATS SUM(languages) BY category=CATEGORIZE(job_positions), gender + | SORT category DESC, gender ASC + | LIMIT 5 +; + +SUM(languages):long | category:keyword | gender:keyword + 11 | null | F + 16 | null | M + 14 | .*?Tech.+?Lead.*? | F + 23 | .*?Tech.+?Lead.*? | M + 9 | .*?Tech.+?Lead.*? | null +; + +multiple groupings with categorize and a field that's always null +required_capability: categorize_multiple_groupings + +FROM sample_data + | EVAL nullfield = null + | STATS count=COUNT() BY category=CATEGORIZE(nullfield), client_ip + | SORT client_ip +; + +count:long | category:keyword | client_ip:ip + 1 | null | 172.21.0.5 + 1 | null | 172.21.2.113 + 1 | null | 172.21.2.162 + 4 | null | 172.21.3.15 +; + +multiple groupings with categorize and the same text field +required_capability: categorize_multiple_groupings + +FROM sample_data + | STATS count=COUNT() BY category=CATEGORIZE(message), message + | SORT message +; + +count:long | category:keyword | message:keyword + 1 | .*?Connected.+?to.*? | Connected to 10.1.0.1 + 1 | .*?Connected.+?to.*? | Connected to 10.1.0.2 + 1 | .*?Connected.+?to.*? | Connected to 10.1.0.3 + 3 | .*?Connection.+?error.*? | Connection error + 1 | .*?Disconnected.*? | Disconnected +; + +multiple additional complex groupings with categorize +required_capability: categorize_multiple_groupings + +FROM sample_data + | STATS count=COUNT(), duration=SUM(event_duration) BY category=CATEGORIZE(message), SUBSTRING(message, 1, 7), ip_part=TO_LONG(SUBSTRING(TO_STRING(client_ip), 8, 1)), hour=BUCKET(@timestamp, 1 HOUR) + | SORT ip_part, category +; + +count:long | duration:long | category:keyword | SUBSTRING(message, 1, 7):keyword | ip_part:long | hour:datetime + 1 | 1232382 | .*?Disconnected.*? | Disconn | 0 | 2023-10-23T13:00:00.000Z + 2 | 6215122 | .*?Connected.+?to.*? | Connect | 2 | 2023-10-23T12:00:00.000Z + 1 | 1756467 | .*?Connected.+?to.*? | Connect | 3 | 2023-10-23T13:00:00.000Z + 3 | 14027356 | .*?Connection.+?error.*? 
| Connect | 3 | 2023-10-23T13:00:00.000Z +; + +multiple groupings with categorize and some constants including null +required_capability: categorize_multiple_groupings + +FROM sample_data + | STATS count=MV_COUNT(VALUES(message)) BY category=CATEGORIZE(message), null, constant="constant" + | SORT category +; + +count:integer | category:keyword | null:null | constant:keyword + 3 | .*?Connected.+?to.*? | null | constant + 1 | .*?Connection.+?error.*? | null | constant + 1 | .*?Disconnected.*? | null | constant +; + +multiple groupings with categorize and aggregation filters +required_capability: categorize_multiple_groupings + +FROM employees + | STATS lang_low=AVG(languages) WHERE salary<=50000, lang_high=AVG(languages) WHERE salary>50000 BY category=CATEGORIZE(job_positions), gender + | SORT category, gender + | LIMIT 5 +; + +lang_low:double | lang_high:double | category:keyword | gender:keyword + 2.0 | 5.0 | .*?Accountant.*? | F + 3.0 | 2.5 | .*?Accountant.*? | M + 5.0 | 2.0 | .*?Accountant.*? | null + 3.0 | 3.25 | .*?Architect.*? | F + 3.75 | null | .*?Architect.*? | M +; + +multiple groupings with categorize on null row +required_capability: categorize_multiple_groupings + +ROW message = null, str = ["a", "b", "c"] + | STATS COUNT(), VALUES(str) BY category=CATEGORIZE(message), str + | SORT str +; + +COUNT():long | VALUES(str):keyword | category:keyword | str:keyword + 1 | [a, b, c] | null | a + 1 | [a, b, c] | null | b + 1 | [a, b, c] | null | c +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec index b01e12fa4f470..12e333c0ed9f2 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec @@ -5,7 +5,7 @@ //TODO: this sometimes returns null instead of the looked up value (likely related to the execution order) basicOnTheDataNode -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM employees | EVAL language_code = languages @@ -22,7 +22,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; basicRow -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 ROW language_code = 1 | LOOKUP JOIN languages_lookup ON language_code @@ -33,7 +33,7 @@ language_code:integer | language_name:keyword ; basicOnTheCoordinator -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM employees | SORT emp_no @@ -50,7 +50,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; subsequentEvalOnTheDataNode -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM employees | EVAL language_code = languages @@ -68,7 +68,7 @@ emp_no:integer | language_code:integer | language_name:keyword | language_code_x ; subsequentEvalOnTheCoordinator -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM employees | SORT emp_no @@ -85,8 +85,25 @@ emp_no:integer | language_code:integer | language_name:keyword | language_code_x 10003 | 4 | german | 8 ; +sortEvalBeforeLookup +required_capability: join_lookup_v5 + +FROM employees +| SORT emp_no +| EVAL language_code = (emp_no % 10) + 1 +| LOOKUP JOIN languages_lookup ON language_code +| KEEP emp_no, language_code, language_name +| LIMIT 3 +; + +emp_no:integer | language_code:integer | language_name:keyword +10001 | 2 | French +10002 | 3 | Spanish +10003 | 4 | German +; + lookupIPFromRow -required_capability: 
join_lookup_v4 +required_capability: join_lookup_v5 ROW left = "left", client_ip = "172.21.0.5", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -97,7 +114,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromRowWithShadowing -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -108,7 +125,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromRowWithShadowingKeep -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -121,7 +138,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromRowWithShadowingKeepReordered -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -134,7 +151,7 @@ right | Development | 172.21.0.5 ; lookupIPFromIndex -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -153,7 +170,7 @@ ignoreOrder:true ; lookupIPFromIndexKeep -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -173,7 +190,7 @@ ignoreOrder:true ; lookupIPFromIndexStats -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -189,7 +206,7 @@ count:long | env:keyword ; lookupIPFromIndexStatsKeep -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -206,7 +223,7 @@ count:long | env:keyword ; lookupMessageFromRow -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 ROW left = "left", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -217,7 +234,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromRowWithShadowing -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 ROW left = "left", message = "Connected to 10.1.0.1", type = "unknown", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -228,7 +245,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromRowWithShadowingKeep -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 ROW left = "left", message = "Connected to 10.1.0.1", type = "unknown", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -240,7 +257,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromIndex -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -258,7 +275,7 @@ ignoreOrder:true ; lookupMessageFromIndexKeep -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -277,7 +294,7 @@ ignoreOrder:true ; lookupMessageFromIndexKeepReordered -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -296,7 +313,7 @@ Success | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 ; lookupMessageFromIndexStats -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM sample_data | LOOKUP JOIN 
message_types_lookup ON message @@ -311,7 +328,7 @@ count:long | type:keyword ; lookupMessageFromIndexStatsKeep -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM sample_data | LOOKUP JOIN message_types_lookup ON message diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/scoring.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/scoring.csv-spec index d4c7b8c59fdbc..cb38204a71ab0 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/scoring.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/scoring.csv-spec @@ -283,3 +283,33 @@ book_no:keyword | c_score:double 7350 | 2.0 7140 | 3.0 ; + +QstrScoreManipulation +required_capability: metadata_score +required_capability: qstr_function + +from books metadata _score +| where qstr("title:rings") +| eval _score = _score + 1 +| keep book_no, title, _score +| limit 2; + +book_no:keyword | title:text | _score:double +4023 | A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings | 2.6404519081115723 +2714 | Return of the King Being the Third Part of The Lord of the Rings | 2.9239964485168457 +; + +QstrScoreOverride +required_capability: metadata_score +required_capability: qstr_function + +from books metadata _score +| where qstr("title:rings") +| eval _score = "foobar" +| keep book_no, title, _score +| limit 2; + +book_no:keyword | title:text | _score:keyword +4023 | A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings | foobar +2714 | Return of the King Being the Third Part of The Lord of the Rings | foobar +; diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEnrichBasedCrossClusterTestCase.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEnrichBasedCrossClusterTestCase.java new file mode 100644 index 0000000000000..66ac32b33cd4d --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEnrichBasedCrossClusterTestCase.java @@ -0,0 +1,290 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.esql.action;
+
+import org.elasticsearch.action.ActionType;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.client.internal.Client;
+import org.elasticsearch.client.internal.node.NodeClient;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.CollectionUtils;
+import org.elasticsearch.core.Tuple;
+import org.elasticsearch.ingest.common.IngestCommonPlugin;
+import org.elasticsearch.injection.guice.Inject;
+import org.elasticsearch.license.LicenseService;
+import org.elasticsearch.license.XPackLicenseState;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.protocol.xpack.XPackInfoRequest;
+import org.elasticsearch.protocol.xpack.XPackInfoResponse;
+import org.elasticsearch.reindex.ReindexPlugin;
+import org.elasticsearch.test.AbstractMultiClustersTestCase;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin;
+import org.elasticsearch.xpack.core.XPackSettings;
+import org.elasticsearch.xpack.core.action.TransportXPackInfoAction;
+import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction;
+import org.elasticsearch.xpack.core.action.XPackInfoFeatureResponse;
+import org.elasticsearch.xpack.core.enrich.EnrichPolicy;
+import org.elasticsearch.xpack.core.enrich.action.DeleteEnrichPolicyAction;
+import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyAction;
+import org.elasticsearch.xpack.core.enrich.action.PutEnrichPolicyAction;
+import org.elasticsearch.xpack.enrich.EnrichPlugin;
+import org.elasticsearch.xpack.esql.EsqlTestUtils;
+import org.elasticsearch.xpack.esql.plan.logical.Enrich;
+import org.junit.After;
+import org.junit.Before;
+
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.containsString;
+
+public abstract class AbstractEnrichBasedCrossClusterTestCase extends AbstractMultiClustersTestCase {
+
+    public static String REMOTE_CLUSTER_1 = "c1";
+    public static String REMOTE_CLUSTER_2 = "c2";
+
+    /**
+     * Subclasses should override this if they do not want enrich policies wiped after each test method run.
+     */
+    protected boolean tolerateErrorsWhenWipingEnrichPolicies() {
+        return false;
+    }
+
+    @Override
+    protected List<String> remoteClusterAlias() {
+        return List.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2);
+    }
+
+    protected Collection<String> allClusters() {
+        return CollectionUtils.appendToCopy(remoteClusterAlias(), LOCAL_CLUSTER);
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins(String clusterAlias) {
+        List<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins(clusterAlias));
+        plugins.add(LocalStateEnrich.class);
+        plugins.add(IngestCommonPlugin.class);
+        plugins.add(ReindexPlugin.class);
+        return plugins;
+    }
+
+    @Override
+    protected Settings nodeSettings() {
+        return Settings.builder().put(super.nodeSettings()).put(XPackSettings.SECURITY_ENABLED.getKey(), false).build();
+    }
+
+    static final EnrichPolicy hostPolicy = new EnrichPolicy("match", null, List.of("hosts"), "ip", List.of("ip", "os"));
+    static final EnrichPolicy vendorPolicy = new EnrichPolicy("match", null, List.of("vendors"), "os", List.of("os", "vendor"));
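+    // The two policies chain: hostPolicy resolves an event's ip to an os, and vendorPolicy resolves that os to a vendor.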
"vendor")); + + @Before + public void setupHostsEnrich() { + // the hosts policy are identical on every node + Map allHosts = Map.of( + "192.168.1.2", + "Windows", + "192.168.1.3", + "MacOS", + "192.168.1.4", + "Linux", + "192.168.1.5", + "Android", + "192.168.1.6", + "iOS", + "192.168.1.7", + "Windows", + "192.168.1.8", + "MacOS", + "192.168.1.9", + "Linux", + "192.168.1.10", + "Linux", + "192.168.1.11", + "Windows" + ); + for (String cluster : allClusters()) { + Client client = client(cluster); + client.admin().indices().prepareCreate("hosts").setMapping("ip", "type=ip", "os", "type=keyword").get(); + for (Map.Entry h : allHosts.entrySet()) { + client.prepareIndex("hosts").setSource("ip", h.getKey(), "os", h.getValue()).get(); + } + client.admin().indices().prepareRefresh("hosts").get(); + client.execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "hosts", hostPolicy)) + .actionGet(); + client.execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "hosts")) + .actionGet(); + assertAcked(client.admin().indices().prepareDelete("hosts")); + } + } + + @Before + public void setupVendorPolicy() { + var localVendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Samsung", "Linux", "Redhat"); + var c1Vendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Google", "Linux", "Suse"); + var c2Vendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Sony", "Linux", "Ubuntu"); + var vendors = Map.of(LOCAL_CLUSTER, localVendors, REMOTE_CLUSTER_1, c1Vendors, REMOTE_CLUSTER_2, c2Vendors); + for (Map.Entry> e : vendors.entrySet()) { + Client client = client(e.getKey()); + client.admin().indices().prepareCreate("vendors").setMapping("os", "type=keyword", "vendor", "type=keyword").get(); + for (Map.Entry v : e.getValue().entrySet()) { + client.prepareIndex("vendors").setSource("os", v.getKey(), "vendor", v.getValue()).get(); + } + client.admin().indices().prepareRefresh("vendors").get(); + client.execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "vendors", vendorPolicy)) + .actionGet(); + client.execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "vendors")) + .actionGet(); + assertAcked(client.admin().indices().prepareDelete("vendors")); + } + } + + @Before + public void setupEventsIndices() { + record Event(long timestamp, String user, String host) { + + } + List e0 = List.of( + new Event(1, "matthew", "192.168.1.3"), + new Event(2, "simon", "192.168.1.5"), + new Event(3, "park", "192.168.1.2"), + new Event(4, "andrew", "192.168.1.7"), + new Event(5, "simon", "192.168.1.20"), + new Event(6, "kevin", "192.168.1.2"), + new Event(7, "akio", "192.168.1.5"), + new Event(8, "luke", "192.168.1.2"), + new Event(9, "jack", "192.168.1.4") + ); + List e1 = List.of( + new Event(1, "andres", "192.168.1.2"), + new Event(2, "sergio", "192.168.1.6"), + new Event(3, "kylian", "192.168.1.8"), + new Event(4, "andrew", "192.168.1.9"), + new Event(5, "jack", "192.168.1.3"), + new Event(6, "kevin", "192.168.1.4"), + new Event(7, "akio", "192.168.1.7"), + new Event(8, "kevin", "192.168.1.21"), + new Event(9, "andres", "192.168.1.8") + ); + List e2 = List.of( + new Event(1, "park", "192.168.1.25"), + new Event(2, "akio", "192.168.1.5"), + new Event(3, "park", "192.168.1.2"), + new Event(4, "kevin", "192.168.1.3") + ); + for (var c : 
+        for (var c : Map.of(LOCAL_CLUSTER, e0, REMOTE_CLUSTER_1, e1, REMOTE_CLUSTER_2, e2).entrySet()) {
+            Client client = client(c.getKey());
+            client.admin()
+                .indices()
+                .prepareCreate("events")
+                .setMapping("timestamp", "type=long", "user", "type=keyword", "host", "type=ip")
+                .get();
+            for (var e : c.getValue()) {
+                client.prepareIndex("events").setSource("timestamp", e.timestamp, "user", e.user, "host", e.host).get();
+            }
+            client.admin().indices().prepareRefresh("events").get();
+        }
+    }
+
+    @After
+    public void wipeEnrichPolicies() {
+        for (String cluster : allClusters()) {
+            cluster(cluster).wipe(Set.of());
+            for (String policy : List.of("hosts", "vendors")) {
+                if (tolerateErrorsWhenWipingEnrichPolicies()) {
+                    try {
+                        client(cluster).execute(
+                            DeleteEnrichPolicyAction.INSTANCE,
+                            new DeleteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, policy)
+                        );
+                    } catch (Exception e) {
+                        assertThat(e.getMessage(), containsString("Cluster is already closed"));
+                    }
+                } else {
+                    client(cluster).execute(
+                        DeleteEnrichPolicyAction.INSTANCE,
+                        new DeleteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, policy)
+                    );
+                }
+            }
+        }
+    }
+
+    static String enrichHosts(Enrich.Mode mode) {
+        return EsqlTestUtils.randomEnrichCommand("hosts", mode, hostPolicy.getMatchField(), hostPolicy.getEnrichFields());
+    }
+
+    static String enrichVendors(Enrich.Mode mode) {
+        return EsqlTestUtils.randomEnrichCommand("vendors", mode, vendorPolicy.getMatchField(), vendorPolicy.getEnrichFields());
+    }
+
+    protected EsqlQueryResponse runQuery(String query, Boolean ccsMetadataInResponse) {
+        EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest();
+        request.query(query);
+        request.pragmas(AbstractEsqlIntegTestCase.randomPragmas());
+        if (randomBoolean()) {
+            request.profile(true);
+        }
+        if (ccsMetadataInResponse != null) {
+            request.includeCCSMetadata(ccsMetadataInResponse);
+        }
+        return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS);
+    }
+
+    public static Tuple<Boolean, Boolean> randomIncludeCCSMetadata() {
+        return switch (randomIntBetween(1, 3)) {
+            case 1 -> new Tuple<>(Boolean.TRUE, Boolean.TRUE);
+            case 2 -> new Tuple<>(Boolean.FALSE, Boolean.FALSE);
+            case 3 -> new Tuple<>(null, Boolean.FALSE);
+            default -> throw new AssertionError("should not get here");
+        };
+    }
+
+    public static class LocalStateEnrich extends LocalStateCompositeXPackPlugin {
+        public LocalStateEnrich(final Settings settings, final Path configPath) throws Exception {
+            super(settings, configPath);
+
+            plugins.add(new EnrichPlugin(settings) {
+                @Override
+                protected XPackLicenseState getLicenseState() {
+                    // delegate to the enclosing composite plugin's license state; calling this.getLicenseState() here would recurse forever
+                    return LocalStateEnrich.this.getLicenseState();
+                }
+            });
+        }
+
+        public static class EnrichTransportXPackInfoAction extends TransportXPackInfoAction {
+            @Inject
+            public EnrichTransportXPackInfoAction(
+                TransportService transportService,
+                ActionFilters actionFilters,
+                LicenseService licenseService,
+                NodeClient client
+            ) {
+                super(transportService, actionFilters, licenseService, client);
+            }
+
+            @Override
+            protected List<ActionType<XPackInfoFeatureResponse>> infoActions() {
+                return Collections.singletonList(XPackInfoFeatureAction.ENRICH);
+            }
+        }
+
+        @Override
+        protected Class<? extends TransportAction<XPackInfoRequest, XPackInfoResponse>> getInfoAction() {
+            return EnrichTransportXPackInfoAction.class;
+        }
+    }
+}
diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java
index
440582dcfbb45..ea78ee2e3cfbd 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java @@ -35,7 +35,6 @@ import org.elasticsearch.xpack.core.async.DeleteAsyncResultRequest; import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction; -import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.junit.Before; import java.io.IOException; @@ -78,7 +77,7 @@ protected Map skipUnavailableForRemoteClusters() { @Override protected Collection> nodePlugins(String clusterAlias) { List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); - plugins.add(EsqlPlugin.class); + plugins.add(EsqlPluginWithEnterpriseOrTrialLicense.class); plugins.add(EsqlAsyncActionIT.LocalStateEsqlAsync.class); // allows the async_search DELETE action plugins.add(InternalExchangePlugin.class); plugins.add(PauseFieldPlugin.class); diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterEnrichUnavailableClustersIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterEnrichUnavailableClustersIT.java index d142752d0c408..09ad97b08f357 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterEnrichUnavailableClustersIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterEnrichUnavailableClustersIT.java @@ -8,36 +8,21 @@ package org.elasticsearch.xpack.esql.action; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.Tuple; -import org.elasticsearch.ingest.common.IngestCommonPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.reindex.ReindexPlugin; -import org.elasticsearch.test.AbstractMultiClustersTestCase; import org.elasticsearch.transport.RemoteClusterAware; -import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyAction; -import org.elasticsearch.xpack.core.enrich.action.PutEnrichPolicyAction; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; -import org.junit.Before; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Locale; -import java.util.Map; import java.util.Set; -import java.util.concurrent.TimeUnit; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; -import static org.elasticsearch.xpack.esql.action.CrossClustersEnrichIT.enrichHosts; -import static org.elasticsearch.xpack.esql.action.CrossClustersEnrichIT.enrichVendors; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -47,151 +32,26 @@ * This IT test is the dual of CrossClustersEnrichIT, which tests "happy path" * and this one tests unavailable cluster scenarios using (most of) the same tests. 
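+ * Shared setup (enrich policies and events indices) now lives in AbstractEnrichBasedCrossClusterTestCase.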
*/ -public class CrossClusterEnrichUnavailableClustersIT extends AbstractMultiClustersTestCase { - - public static String REMOTE_CLUSTER_1 = "c1"; - public static String REMOTE_CLUSTER_2 = "c2"; - - @Override - protected Collection remoteClusterAlias() { - return List.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2); - } +public class CrossClusterEnrichUnavailableClustersIT extends AbstractEnrichBasedCrossClusterTestCase { @Override protected boolean reuseClusters() { return false; } - private Collection allClusters() { - return CollectionUtils.appendToCopy(remoteClusterAlias(), LOCAL_CLUSTER); + @Override + protected boolean tolerateErrorsWhenWipingEnrichPolicies() { + // attempt to wipe will fail since some clusters are already closed + return true; } @Override protected Collection> nodePlugins(String clusterAlias) { List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); - plugins.add(EsqlPlugin.class); - plugins.add(CrossClustersEnrichIT.LocalStateEnrich.class); - plugins.add(IngestCommonPlugin.class); - plugins.add(ReindexPlugin.class); + plugins.add(EsqlPluginWithEnterpriseOrTrialLicense.class); return plugins; } - @Override - protected Settings nodeSettings() { - return Settings.builder().put(super.nodeSettings()).put(XPackSettings.SECURITY_ENABLED.getKey(), false).build(); - } - - @Before - public void setupHostsEnrich() { - // the hosts policy are identical on every node - Map allHosts = Map.of( - "192.168.1.2", - "Windows", - "192.168.1.3", - "MacOS", - "192.168.1.4", - "Linux", - "192.168.1.5", - "Android", - "192.168.1.6", - "iOS", - "192.168.1.7", - "Windows", - "192.168.1.8", - "MacOS", - "192.168.1.9", - "Linux", - "192.168.1.10", - "Linux", - "192.168.1.11", - "Windows" - ); - for (String cluster : allClusters()) { - Client client = client(cluster); - client.admin().indices().prepareCreate("hosts").setMapping("ip", "type=ip", "os", "type=keyword").get(); - for (Map.Entry h : allHosts.entrySet()) { - client.prepareIndex("hosts").setSource("ip", h.getKey(), "os", h.getValue()).get(); - } - client.admin().indices().prepareRefresh("hosts").get(); - client.execute( - PutEnrichPolicyAction.INSTANCE, - new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "hosts", CrossClustersEnrichIT.hostPolicy) - ).actionGet(); - client.execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "hosts")) - .actionGet(); - assertAcked(client.admin().indices().prepareDelete("hosts")); - } - } - - @Before - public void setupVendorPolicy() { - var localVendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Samsung", "Linux", "Redhat"); - var c1Vendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Google", "Linux", "Suse"); - var c2Vendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Sony", "Linux", "Ubuntu"); - var vendors = Map.of(LOCAL_CLUSTER, localVendors, "c1", c1Vendors, "c2", c2Vendors); - for (Map.Entry> e : vendors.entrySet()) { - Client client = client(e.getKey()); - client.admin().indices().prepareCreate("vendors").setMapping("os", "type=keyword", "vendor", "type=keyword").get(); - for (Map.Entry v : e.getValue().entrySet()) { - client.prepareIndex("vendors").setSource("os", v.getKey(), "vendor", v.getValue()).get(); - } - client.admin().indices().prepareRefresh("vendors").get(); - client.execute( - PutEnrichPolicyAction.INSTANCE, - new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "vendors", CrossClustersEnrichIT.vendorPolicy) - 
).actionGet(); - client.execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "vendors")) - .actionGet(); - assertAcked(client.admin().indices().prepareDelete("vendors")); - } - } - - @Before - public void setupEventsIndices() { - record Event(long timestamp, String user, String host) {} - - List e0 = List.of( - new Event(1, "matthew", "192.168.1.3"), - new Event(2, "simon", "192.168.1.5"), - new Event(3, "park", "192.168.1.2"), - new Event(4, "andrew", "192.168.1.7"), - new Event(5, "simon", "192.168.1.20"), - new Event(6, "kevin", "192.168.1.2"), - new Event(7, "akio", "192.168.1.5"), - new Event(8, "luke", "192.168.1.2"), - new Event(9, "jack", "192.168.1.4") - ); - List e1 = List.of( - new Event(1, "andres", "192.168.1.2"), - new Event(2, "sergio", "192.168.1.6"), - new Event(3, "kylian", "192.168.1.8"), - new Event(4, "andrew", "192.168.1.9"), - new Event(5, "jack", "192.168.1.3"), - new Event(6, "kevin", "192.168.1.4"), - new Event(7, "akio", "192.168.1.7"), - new Event(8, "kevin", "192.168.1.21"), - new Event(9, "andres", "192.168.1.8") - ); - List e2 = List.of( - new Event(1, "park", "192.168.1.25"), - new Event(2, "akio", "192.168.1.5"), - new Event(3, "park", "192.168.1.2"), - new Event(4, "kevin", "192.168.1.3") - ); - for (var c : Map.of(LOCAL_CLUSTER, e0, "c1", e1, "c2", e2).entrySet()) { - Client client = client(c.getKey()); - client.admin() - .indices() - .prepareCreate("events") - .setMapping("timestamp", "type=long", "user", "type=keyword", "host", "type=ip") - .get(); - for (var e : c.getValue()) { - client.prepareIndex("events").setSource("timestamp", e.timestamp, "user", e.user, "host", e.host).get(); - } - client.admin().indices().prepareRefresh("events").get(); - } - } - public void testEnrichWithHostsPolicyAndDisconnectedRemotesWithSkipUnavailableTrue() throws IOException { setSkipUnavailable(REMOTE_CLUSTER_1, true); setSkipUnavailable(REMOTE_CLUSTER_2, true); @@ -645,19 +505,6 @@ public void testEnrichRemoteWithVendor() throws IOException { } } - protected EsqlQueryResponse runQuery(String query, Boolean ccsMetadataInResponse) { - EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest(); - request.query(query); - request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); - if (randomBoolean()) { - request.profile(true); - } - if (ccsMetadataInResponse != null) { - request.includeCCSMetadata(ccsMetadataInResponse); - } - return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS); - } - private static void assertCCSExecutionInfoDetails(EsqlExecutionInfo executionInfo) { assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); assertTrue(executionInfo.isCrossClusterSearch()); diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterQueryUnavailableRemotesIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterQueryUnavailableRemotesIT.java index 0f1aa8541fdd9..3607e080bae90 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterQueryUnavailableRemotesIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterQueryUnavailableRemotesIT.java @@ -18,7 +18,6 @@ import org.elasticsearch.test.AbstractMultiClustersTestCase; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.xpack.esql.core.type.DataType; -import 
org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import java.io.IOException; import java.util.ArrayList; @@ -54,8 +53,8 @@ protected boolean reuseClusters() { @Override protected Collection> nodePlugins(String clusterAlias) { List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); - plugins.add(EsqlPlugin.class); - plugins.add(org.elasticsearch.xpack.esql.action.CrossClustersQueryIT.InternalExchangePlugin.class); + plugins.add(EsqlPluginWithEnterpriseOrTrialLicense.class); + plugins.add(CrossClustersQueryIT.InternalExchangePlugin.class); return plugins; } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java index 0910e820c118a..68bfc60202365 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java @@ -33,7 +33,6 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.esql.plugin.ComputeService; -import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.junit.Before; import java.util.ArrayList; @@ -62,7 +61,7 @@ protected Collection remoteClusterAlias() { @Override protected Collection> nodePlugins(String clusterAlias) { List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); - plugins.add(EsqlPlugin.class); + plugins.add(EsqlPluginWithEnterpriseOrTrialLicense.class); plugins.add(InternalExchangePlugin.class); plugins.add(PauseFieldPlugin.class); return plugins; diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java index e8e9f45694e9c..4e6be6cc2bf74 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java @@ -7,218 +7,34 @@ package org.elasticsearch.xpack.esql.action; -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.TransportAction; -import org.elasticsearch.client.internal.Client; -import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.Tuple; -import org.elasticsearch.ingest.common.IngestCommonPlugin; -import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.license.LicenseService; -import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.protocol.xpack.XPackInfoRequest; -import org.elasticsearch.protocol.xpack.XPackInfoResponse; -import org.elasticsearch.reindex.ReindexPlugin; -import org.elasticsearch.test.AbstractMultiClustersTestCase; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.action.TransportXPackInfoAction; -import 
org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; -import org.elasticsearch.xpack.core.action.XPackInfoFeatureResponse; -import org.elasticsearch.xpack.core.enrich.EnrichPolicy; -import org.elasticsearch.xpack.core.enrich.action.DeleteEnrichPolicyAction; -import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyAction; -import org.elasticsearch.xpack.core.enrich.action.PutEnrichPolicyAction; -import org.elasticsearch.xpack.enrich.EnrichPlugin; -import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; -import org.junit.After; -import org.junit.Before; -import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Locale; -import java.util.Map; import java.util.Set; -import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -public class CrossClustersEnrichIT extends AbstractMultiClustersTestCase { - - @Override - protected Collection remoteClusterAlias() { - return List.of("c1", "c2"); - } - - protected Collection allClusters() { - return CollectionUtils.appendToCopy(remoteClusterAlias(), LOCAL_CLUSTER); - } +public class CrossClustersEnrichIT extends AbstractEnrichBasedCrossClusterTestCase { @Override protected Collection> nodePlugins(String clusterAlias) { List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); - plugins.add(EsqlPlugin.class); - plugins.add(LocalStateEnrich.class); - plugins.add(IngestCommonPlugin.class); - plugins.add(ReindexPlugin.class); + plugins.add(EsqlPluginWithEnterpriseOrTrialLicense.class); return plugins; } - @Override - protected Settings nodeSettings() { - return Settings.builder().put(super.nodeSettings()).put(XPackSettings.SECURITY_ENABLED.getKey(), false).build(); - } - - static final EnrichPolicy hostPolicy = new EnrichPolicy("match", null, List.of("hosts"), "ip", List.of("ip", "os")); - static final EnrichPolicy vendorPolicy = new EnrichPolicy("match", null, List.of("vendors"), "os", List.of("os", "vendor")); - - @Before - public void setupHostsEnrich() { - // the hosts policy are identical on every node - Map allHosts = Map.of( - "192.168.1.2", - "Windows", - "192.168.1.3", - "MacOS", - "192.168.1.4", - "Linux", - "192.168.1.5", - "Android", - "192.168.1.6", - "iOS", - "192.168.1.7", - "Windows", - "192.168.1.8", - "MacOS", - "192.168.1.9", - "Linux", - "192.168.1.10", - "Linux", - "192.168.1.11", - "Windows" - ); - for (String cluster : allClusters()) { - Client client = client(cluster); - client.admin().indices().prepareCreate("hosts").setMapping("ip", "type=ip", "os", "type=keyword").get(); - for (Map.Entry h : allHosts.entrySet()) { - client.prepareIndex("hosts").setSource("ip", h.getKey(), "os", h.getValue()).get(); - } - client.admin().indices().prepareRefresh("hosts").get(); - client.execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "hosts", hostPolicy)) - .actionGet(); - client.execute(ExecuteEnrichPolicyAction.INSTANCE, new 
ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "hosts")) - .actionGet(); - assertAcked(client.admin().indices().prepareDelete("hosts")); - } - } - - @Before - public void setupVendorPolicy() { - var localVendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Samsung", "Linux", "Redhat"); - var c1Vendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Google", "Linux", "Suse"); - var c2Vendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Sony", "Linux", "Ubuntu"); - var vendors = Map.of(LOCAL_CLUSTER, localVendors, "c1", c1Vendors, "c2", c2Vendors); - for (Map.Entry> e : vendors.entrySet()) { - Client client = client(e.getKey()); - client.admin().indices().prepareCreate("vendors").setMapping("os", "type=keyword", "vendor", "type=keyword").get(); - for (Map.Entry v : e.getValue().entrySet()) { - client.prepareIndex("vendors").setSource("os", v.getKey(), "vendor", v.getValue()).get(); - } - client.admin().indices().prepareRefresh("vendors").get(); - client.execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "vendors", vendorPolicy)) - .actionGet(); - client.execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "vendors")) - .actionGet(); - assertAcked(client.admin().indices().prepareDelete("vendors")); - } - } - - @Before - public void setupEventsIndices() { - record Event(long timestamp, String user, String host) { - - } - List e0 = List.of( - new Event(1, "matthew", "192.168.1.3"), - new Event(2, "simon", "192.168.1.5"), - new Event(3, "park", "192.168.1.2"), - new Event(4, "andrew", "192.168.1.7"), - new Event(5, "simon", "192.168.1.20"), - new Event(6, "kevin", "192.168.1.2"), - new Event(7, "akio", "192.168.1.5"), - new Event(8, "luke", "192.168.1.2"), - new Event(9, "jack", "192.168.1.4") - ); - List e1 = List.of( - new Event(1, "andres", "192.168.1.2"), - new Event(2, "sergio", "192.168.1.6"), - new Event(3, "kylian", "192.168.1.8"), - new Event(4, "andrew", "192.168.1.9"), - new Event(5, "jack", "192.168.1.3"), - new Event(6, "kevin", "192.168.1.4"), - new Event(7, "akio", "192.168.1.7"), - new Event(8, "kevin", "192.168.1.21"), - new Event(9, "andres", "192.168.1.8") - ); - List e2 = List.of( - new Event(1, "park", "192.168.1.25"), - new Event(2, "akio", "192.168.1.5"), - new Event(3, "park", "192.168.1.2"), - new Event(4, "kevin", "192.168.1.3") - ); - for (var c : Map.of(LOCAL_CLUSTER, e0, "c1", e1, "c2", e2).entrySet()) { - Client client = client(c.getKey()); - client.admin() - .indices() - .prepareCreate("events") - .setMapping("timestamp", "type=long", "user", "type=keyword", "host", "type=ip") - .get(); - for (var e : c.getValue()) { - client.prepareIndex("events").setSource("timestamp", e.timestamp, "user", e.user, "host", e.host).get(); - } - client.admin().indices().prepareRefresh("events").get(); - } - } - - @After - public void wipeEnrichPolicies() { - for (String cluster : allClusters()) { - cluster(cluster).wipe(Set.of()); - for (String policy : List.of("hosts", "vendors")) { - client(cluster).execute( - DeleteEnrichPolicyAction.INSTANCE, - new DeleteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, policy) - ); - } - } - } - - static String enrichHosts(Enrich.Mode mode) { - return EsqlTestUtils.randomEnrichCommand("hosts", mode, hostPolicy.getMatchField(), hostPolicy.getEnrichFields()); - } - - static String enrichVendors(Enrich.Mode mode) { - return 
EsqlTestUtils.randomEnrichCommand("vendors", mode, vendorPolicy.getMatchField(), vendorPolicy.getEnrichFields()); - } - public void testWithHostsPolicy() { for (var mode : Enrich.Mode.values()) { String query = "FROM events | eval ip= TO_STR(host) | " + enrichHosts(mode) + " | stats c = COUNT(*) by os | SORT os"; @@ -606,19 +422,6 @@ public void testEnrichCoordinatorThenEnrichRemote() { ); } - protected EsqlQueryResponse runQuery(String query, Boolean ccsMetadataInResponse) { - EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest(); - request.query(query); - request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); - if (randomBoolean()) { - request.profile(true); - } - if (ccsMetadataInResponse != null) { - request.includeCCSMetadata(ccsMetadataInResponse); - } - return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS); - } - private static void assertCCSExecutionInfoDetails(EsqlExecutionInfo executionInfo) { assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); assertTrue(executionInfo.isCrossClusterSearch()); @@ -637,49 +440,4 @@ private static void assertCCSExecutionInfoDetails(EsqlExecutionInfo executionInf assertThat(cluster.getFailedShards(), equalTo(0)); } } - - public static Tuple randomIncludeCCSMetadata() { - return switch (randomIntBetween(1, 3)) { - case 1 -> new Tuple<>(Boolean.TRUE, Boolean.TRUE); - case 2 -> new Tuple<>(Boolean.FALSE, Boolean.FALSE); - case 3 -> new Tuple<>(null, Boolean.FALSE); - default -> throw new AssertionError("should not get here"); - }; - } - - public static class LocalStateEnrich extends LocalStateCompositeXPackPlugin { - - public LocalStateEnrich(final Settings settings, final Path configPath) throws Exception { - super(settings, configPath); - - plugins.add(new EnrichPlugin(settings) { - @Override - protected XPackLicenseState getLicenseState() { - return this.getLicenseState(); - } - }); - } - - public static class EnrichTransportXPackInfoAction extends TransportXPackInfoAction { - @Inject - public EnrichTransportXPackInfoAction( - TransportService transportService, - ActionFilters actionFilters, - LicenseService licenseService, - NodeClient client - ) { - super(transportService, actionFilters, licenseService, client); - } - - @Override - protected List> infoActions() { - return Collections.singletonList(XPackInfoFeatureAction.ENRICH); - } - } - - @Override - protected Class> getInfoAction() { - return EnrichTransportXPackInfoAction.class; - } - } } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueriesWithInvalidLicenseIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueriesWithInvalidLicenseIT.java new file mode 100644 index 0000000000000..1ed42b696d65e --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueriesWithInvalidLicenseIT.java @@ -0,0 +1,203 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.xpack.esql.plan.logical.Enrich; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.Set; + +import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; + +public class CrossClustersQueriesWithInvalidLicenseIT extends AbstractEnrichBasedCrossClusterTestCase { + + private static final String LICENSE_ERROR_MESSAGE = "A valid Enterprise license is required to run ES|QL cross-cluster searches."; + + @Override + protected Collection> nodePlugins(String clusterAlias) { + List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); + plugins.add(EsqlPluginWithNonEnterpriseOrExpiredLicense.class); // key plugin for the test + return plugins; + } + + public void testBasicCrossClusterQuery() { + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + ElasticsearchStatusException e = expectThrows( + ElasticsearchStatusException.class, + () -> runQuery("FROM *,*:* | LIMIT 5", requestIncludeMeta) + ); + assertThat(e.getMessage(), containsString(LICENSE_ERROR_MESSAGE)); + } + + public void testMetadataCrossClusterQuery() { + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + ElasticsearchStatusException e = expectThrows( + ElasticsearchStatusException.class, + () -> runQuery("FROM events,*:* METADATA _index | SORT _index", requestIncludeMeta) + ); + assertThat(e.getMessage(), containsString(LICENSE_ERROR_MESSAGE)); + } + + public void testQueryAgainstNonMatchingClusterWildcardPattern() { + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + + // since this wildcarded expression does not resolve to a valid remote cluster, it is not considered + // a cross-cluster search and thus should not throw a license error + String q = "FROM xremote*:events"; + { + String limit1 = q + " | STATS count(*)"; + try (EsqlQueryResponse resp = runQuery(limit1, requestIncludeMeta)) { + assertThat(resp.columns().size(), equalTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(false)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + } + + String limit0 = q + " | LIMIT 0"; + try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) { + assertThat(resp.columns().size(), equalTo(1)); + assertThat(getValuesList(resp).size(), equalTo(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(false)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + } + } + } + + public void testCCSWithLimit0() { + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + + // local only query does not need a valid Enterprise or Trial license + try (EsqlQueryResponse resp = runQuery("FROM 
events | LIMIT 0", requestIncludeMeta)) { + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertNotNull(executionInfo); + assertThat(executionInfo.isCrossClusterSearch(), is(false)); + assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); + } + + // cross-cluster searches should fail with license error + String q = randomFrom("FROM events,c1:* | LIMIT 0", "FROM c1:* | LIMIT 0"); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getMessage(), containsString(LICENSE_ERROR_MESSAGE)); + } + + public void testSearchesWhereNonExistentClusterIsSpecified() { + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + + // this one query should be allowed since x* does not resolve to any known remote cluster + try (EsqlQueryResponse resp = runQuery("FROM events,x*:no_such_index* | STATS count(*)", requestIncludeMeta)) { + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + List> values = getValuesList(resp); + assertThat(values, hasSize(1)); + + assertNotNull(executionInfo); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(LOCAL_CLUSTER))); + assertThat(executionInfo.isCrossClusterSearch(), is(false)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + // since this not a CCS, only the overall took time in the EsqlExecutionInfo matters + assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); + } + + ElasticsearchStatusException e = expectThrows( + ElasticsearchStatusException.class, + () -> runQuery("FROM events,no_such_cluster:no_such_index* | STATS count(*)", requestIncludeMeta) + ); + // with a valid license this would throw "no such remote cluster" exception, but without a valid license, should get a license error + assertThat(e.getMessage(), containsString(LICENSE_ERROR_MESSAGE)); + } + + public void testEnrichWithHostsPolicy() { + // local-only queries do not need an Enterprise or Trial license + for (var mode : Enrich.Mode.values()) { + String query = "FROM events | eval ip= TO_STR(host) | " + enrichHosts(mode) + " | stats c = COUNT(*) by os | SORT os"; + try (EsqlQueryResponse resp = runQuery(query, null)) { + List> rows = getValuesList(resp); + assertThat( + rows, + equalTo( + List.of( + List.of(2L, "Android"), + List.of(1L, "Linux"), + List.of(1L, "MacOS"), + List.of(4L, "Windows"), + Arrays.asList(1L, (String) null) + ) + ) + ); + assertFalse(resp.getExecutionInfo().isCrossClusterSearch()); + } + } + + // cross-cluster query should fail due to not having valid Enterprise or Trial license + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + + for (var mode : Enrich.Mode.values()) { + String query = "FROM *:events | eval ip= TO_STR(host) | " + enrichHosts(mode) + " | stats c = COUNT(*) by os | SORT os"; + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> runQuery(query, requestIncludeMeta)); + assertThat(e.getMessage(), containsString("A valid Enterprise license is required to run ES|QL cross-cluster searches.")); + } + + for (var mode : Enrich.Mode.values()) { + String query = "FROM *:events,events | eval ip= TO_STR(host) | " + enrichHosts(mode) + " | stats c = COUNT(*) by os | SORT os"; + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> 
runQuery(query, requestIncludeMeta)); + assertThat(e.getMessage(), containsString("A valid Enterprise license is required to run ES|QL cross-cluster searches.")); + } + } + + public void testAggThenEnrichRemote() { + String query = String.format(Locale.ROOT, """ + FROM *:events,events + | eval ip= TO_STR(host) + | %s + | stats c = COUNT(*) by os + | %s + | sort vendor + """, enrichHosts(Enrich.Mode.ANY), enrichVendors(Enrich.Mode.REMOTE)); + var error = expectThrows(ElasticsearchStatusException.class, () -> runQuery(query, randomBoolean()).close()); + // with a valid license this would fail with "ENRICH with remote policy can't be executed after STATS", so ensure here + // that the license error is detected first and returned rather than a VerificationException + assertThat(error.getMessage(), containsString(LICENSE_ERROR_MESSAGE)); + } + + public void testEnrichCoordinatorThenEnrichRemote() { + String query = String.format(Locale.ROOT, """ + FROM *:events,events + | eval ip= TO_STR(host) + | %s + | %s + | sort vendor + """, enrichHosts(Enrich.Mode.COORDINATOR), enrichVendors(Enrich.Mode.REMOTE)); + var error = expectThrows(ElasticsearchStatusException.class, () -> runQuery(query, randomBoolean()).close()); + assertThat( + error.getMessage(), + // with a valid license the error is "ENRICH with remote policy can't be executed after another ENRICH with coordinator policy", + // so ensure here that the license error is detected first and returned rather than a VerificationException + containsString(LICENSE_ERROR_MESSAGE) + ); + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java index 596c70e57ccd6..64cb7f9fe6dd0 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java @@ -32,7 +32,6 @@ import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.esql.VerificationException; -import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import java.io.IOException; @@ -73,13 +72,13 @@ protected Collection remoteClusterAlias() { @Override protected Map skipUnavailableForRemoteClusters() { - return Map.of(REMOTE_CLUSTER_1, randomBoolean()); + return Map.of(REMOTE_CLUSTER_1, randomBoolean(), REMOTE_CLUSTER_2, randomBoolean()); } @Override protected Collection> nodePlugins(String clusterAlias) { List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); - plugins.add(EsqlPlugin.class); + plugins.add(EsqlPluginWithEnterpriseOrTrialLicense.class); plugins.add(InternalExchangePlugin.class); return plugins; } @@ -184,7 +183,7 @@ public void testSuccessfulPathways() { } public void testSearchesAgainstNonMatchingIndicesWithLocalOnly() { - Map testClusterInfo = setupClusters(2); + Map testClusterInfo = setupTwoClusters(); String localIndex = (String) testClusterInfo.get("local.index"); { @@ -905,7 +904,7 @@ public void testSearchesWhereNonExistentClusterIsSpecifiedWithWildcards() { // cluster-foo* matches nothing and so should not be present in the EsqlExecutionInfo try ( EsqlQueryResponse resp = runQuery( - "from logs-*,no_such_index*,cluster-a:no_such_index*,cluster-foo*:* | stats sum (v)", + "FROM 
logs-*,no_such_index*,cluster-a:no_such_index*,cluster-foo*:* | STATS sum (v)",
                 requestIncludeMeta
             )
         ) {
@@ -1009,7 +1008,7 @@ public void testMetadataIndex() {
         try (
             EsqlQueryResponse resp = runQuery(
-                "FROM logs*,*:logs* METADATA _index | stats sum(v) by _index | sort _index",
+                Strings.format("FROM logs*,%s:logs* METADATA _index | stats sum(v) by _index | sort _index", REMOTE_CLUSTER_1),
                 requestIncludeMeta
             )
         ) {
@@ -1091,7 +1090,7 @@ public void testProfile() {
         final int remoteOnlyProfiles;
         {
             EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest();
-            request.query("FROM *:logs* | stats sum(v)");
+            request.query("FROM c*:logs* | stats sum(v)");
             request.pragmas(pragmas);
             request.profile(true);
             try (EsqlQueryResponse resp = runQuery(request)) {
@@ -1124,7 +1123,7 @@
         final int allProfiles;
         {
             EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest();
-            request.query("FROM logs*,*:logs* | stats total = sum(v)");
+            request.query("FROM logs*,c*:logs* | stats total = sum(v)");
             request.pragmas(pragmas);
             request.profile(true);
             try (EsqlQueryResponse resp = runQuery(request)) {
@@ -1169,7 +1168,7 @@ public void testWarnings() throws Exception {
         int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards");
         EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest();
-        request.query("FROM logs*,*:logs* | EVAL ip = to_ip(id) | STATS total = sum(v) by ip | LIMIT 10");
+        request.query("FROM logs*,c*:logs* | EVAL ip = to_ip(id) | STATS total = sum(v) by ip | LIMIT 10");
         InternalTestCluster cluster = cluster(LOCAL_CLUSTER);
         String node = randomFrom(cluster.getNodeNames());
         CountDownLatch latch = new CountDownLatch(1);
diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlPluginWithEnterpriseOrTrialLicense.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlPluginWithEnterpriseOrTrialLicense.java
new file mode 100644
index 0000000000000..34d09fc541572
--- /dev/null
+++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlPluginWithEnterpriseOrTrialLicense.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.action;
+
+import org.elasticsearch.license.License;
+import org.elasticsearch.license.XPackLicenseState;
+import org.elasticsearch.license.internal.XPackLicenseStatus;
+import org.elasticsearch.xpack.esql.plugin.EsqlPlugin;
+
+import static org.elasticsearch.test.ESTestCase.randomFrom;
+
+/**
+ * In IT tests, use this instead of the EsqlPlugin in order to use ES|QL features
+ * that require an Enterprise (or Trial) license.
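+ * The reported license state is a randomly chosen active ENTERPRISE or active TRIAL license, so repeated runs cover both modes.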
+ */
+public class EsqlPluginWithEnterpriseOrTrialLicense extends EsqlPlugin {
+    protected XPackLicenseState getLicenseState() {
+        License.OperationMode operationMode = randomFrom(License.OperationMode.ENTERPRISE, License.OperationMode.TRIAL);
+        return new XPackLicenseState(() -> System.currentTimeMillis(), new XPackLicenseStatus(operationMode, true, "Test license expired"));
+    }
+}
diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlPluginWithNonEnterpriseOrExpiredLicense.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlPluginWithNonEnterpriseOrExpiredLicense.java
new file mode 100644
index 0000000000000..46c3f3f6204cd
--- /dev/null
+++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlPluginWithNonEnterpriseOrExpiredLicense.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.action;
+
+import org.elasticsearch.license.License;
+import org.elasticsearch.license.XPackLicenseState;
+import org.elasticsearch.license.internal.XPackLicenseStatus;
+import org.elasticsearch.xpack.esql.plugin.EsqlPlugin;
+
+import static org.elasticsearch.test.ESTestCase.randomBoolean;
+import static org.elasticsearch.test.ESTestCase.randomFrom;
+
+/**
+ * In IT tests, use this instead of the EsqlPlugin in order to test ES|QL features
+ * using either:
+ * - an active (non-expired) basic, standard, missing, gold, or platinum Elasticsearch license, or
+ * - an expired enterprise or trial license
+ */
+public class EsqlPluginWithNonEnterpriseOrExpiredLicense extends EsqlPlugin {
+    protected XPackLicenseState getLicenseState() {
+        License.OperationMode operationMode;
+        boolean active;
+        if (randomBoolean()) {
+            operationMode = randomFrom(
+                License.OperationMode.PLATINUM,
+                License.OperationMode.GOLD,
+                License.OperationMode.BASIC,
+                License.OperationMode.MISSING,
+                License.OperationMode.STANDARD
+            );
+            active = true;
+        } else {
+            operationMode = randomFrom(License.OperationMode.ENTERPRISE, License.OperationMode.TRIAL);
+            active = false; // expired
+        }
+
+        return new XPackLicenseState(
+            () -> System.currentTimeMillis(),
+            new XPackLicenseStatus(operationMode, active, "Test license expired")
+        );
+    }
+}
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java
index 8619c0461ac35..f64c2c2cdbcd4 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java
@@ -407,6 +407,10 @@ public enum Cap {
      */
     CATEGORIZE_V5,
+    /**
+     * Support for multiple groupings in "CATEGORIZE".
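+     * For example: STATS COUNT() BY CATEGORIZE(message), client_ip (CATEGORIZE must be the first grouping expression).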
+ */ + CATEGORIZE_MULTIPLE_GROUPINGS, /** * QSTR function */ @@ -523,7 +527,7 @@ public enum Cap { /** * LOOKUP JOIN */ - JOIN_LOOKUP_V4(Build.current().isSnapshot()), + JOIN_LOOKUP_V5(Build.current().isSnapshot()), /** * Fix for https://github.com/elastic/elasticsearch/issues/117054 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index ecfe1aa7f9169..f01cc265e330b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -18,7 +18,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; -import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; import org.elasticsearch.xpack.esql.core.expression.function.Function; @@ -208,7 +207,6 @@ else if (p instanceof Lookup lookup) { checkJoin(p, failures); }); checkRemoteEnrich(plan, failures); - checkMetadataScoreNameReserved(plan, failures); if (failures.isEmpty()) { checkLicense(plan, licenseState, failures); @@ -222,13 +220,6 @@ else if (p instanceof Lookup lookup) { return failures; } - private static void checkMetadataScoreNameReserved(LogicalPlan p, Set failures) { - // _score can only be set as metadata attribute - if (p.inputSet().stream().anyMatch(a -> MetadataAttribute.SCORE.equals(a.name()) && (a instanceof MetadataAttribute) == false)) { - failures.add(fail(p, "`" + MetadataAttribute.SCORE + "` is a reserved METADATA attribute")); - } - } - private void checkSort(LogicalPlan p, Set failures) { if (p instanceof OrderBy ob) { ob.order().forEach(o -> { @@ -325,11 +316,15 @@ private static void checkAggregate(LogicalPlan p, Set failures) { private static void checkCategorizeGrouping(Aggregate agg, Set failures) { // Forbid CATEGORIZE grouping function with other groupings if (agg.groupings().size() > 1) { - agg.groupings().forEach(g -> { + agg.groupings().subList(1, agg.groupings().size()).forEach(g -> { g.forEachDown( Categorize.class, categorize -> failures.add( - fail(categorize, "cannot use CATEGORIZE grouping function [{}] with multiple groupings", categorize.sourceText()) + fail( + categorize, + "CATEGORIZE grouping function [{}] can only be in the first grouping expression", + categorize.sourceText() + ) ) ); }); @@ -382,6 +377,18 @@ private static void checkCategorizeGrouping(Aggregate agg, Set failures ); } }))); + agg.aggregates().forEach(a -> a.forEachDown(FilteredExpression.class, fe -> fe.filter().forEachDown(Attribute.class, attribute -> { + var categorize = categorizeByAttribute.get(attribute); + if (categorize != null) { + failures.add( + fail( + attribute, + "cannot reference CATEGORIZE grouping function [{}] within an aggregation filter", + attribute.sourceText() + ) + ); + } + }))); } private static void checkRateAggregates(Expression expr, int nestedLevel, Set failures) { @@ -421,7 +428,8 @@ private static void checkInvalidNamedExpressionUsage( Expression filter = fe.filter(); failures.add(fail(filter, "WHERE clause allowed only for aggregate functions, none found in [{}]", fe.sourceText())); } - Expression f = fe.filter(); // check the filter has to be a boolean 
term, similar as checkFilterConditionType + Expression f = fe.filter(); + // check the filter has to be a boolean term, similar to checkFilterConditionType if (f.dataType() != NULL && f.dataType() != BOOLEAN) { failures.add(fail(f, "Condition expression needs to be boolean, found [{}]", f.dataType())); } @@ -432,9 +440,10 @@ private static void checkInvalidNamedExpressionUsage( fail(af, "cannot use aggregate function [{}] in aggregate WHERE clause [{}]", af.sourceText(), fe.sourceText()) ); } - // check the bucketing function against the group + // check the grouping function against the group else if (c instanceof GroupingFunction gf) { - if (Expressions.anyMatch(groups, ex -> ex instanceof Alias a && a.child().semanticEquals(gf)) == false) { + if (c instanceof Categorize + || Expressions.anyMatch(groups, ex -> ex instanceof Alias a && a.child().semanticEquals(gf)) == false) { failures.add(fail(gf, "can only use grouping function [{}] as part of the BY clause", gf.sourceText())); } } @@ -596,6 +605,10 @@ private void gatherMetrics(LogicalPlan plan, BitSet b) { functions.forEach(f -> metrics.incFunctionMetric(f)); } + public XPackLicenseState licenseState() { + return licenseState; + } + /** * Limit QL's comparisons to types we support. This should agree with * {@link EsqlBinaryComparison}'s checkCompatibility method diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Categorize.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Categorize.java index ded913a78bdf1..a100dd64915f1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Categorize.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Categorize.java @@ -95,7 +95,8 @@ public boolean foldable() { @Override public Nullability nullable() { - // Both nulls and empty strings result in null values + // Null strings and strings that don't produce tokens after analysis lead to null values. + // This includes empty strings, only whitespace, (hexa)decimal numbers and stopwords. return Nullability.TRUE; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java index ee51a6f391a65..d3fc9e15e2e04 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java @@ -25,6 +25,9 @@ public class EsIndex implements Writeable { private final Map mapping; private final Map indexNameWithModes; + /** + * Intended for tests. Returns an index with an empty index mode map.
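To illustrate the two constructors, a short sketch that assumes a `mapping` map is in scope; the mode-aware form mirrors the AnalyzerTestUtils change later in this diff:

    // Test-only convenience: no index modes recorded.
    EsIndex plain = new EsIndex("test", mapping);
    // Mode-aware variant, needed once LOOKUP resolution cares about the index mode.
    EsIndex lookup = new EsIndex("languages_lookup", mapping, Map.of("languages_lookup", IndexMode.LOOKUP));
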
+ */ public EsIndex(String name, Map mapping) { this(name, mapping, Map.of()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java index 48bafd8eef00e..1eaade043658b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java @@ -57,7 +57,7 @@ protected List> batches() { } protected List> rules(boolean optimizeForEsSource) { - List> esSourceRules = new ArrayList<>(4); + List> esSourceRules = new ArrayList<>(6); esSourceRules.add(new ReplaceSourceAttributes()); if (optimizeForEsSource) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java index 5e91425296822..dce828dbf192d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java @@ -14,7 +14,6 @@ public final class LogicalVerifier { - private static final PlanConsistencyChecker DEPENDENCY_CHECK = new PlanConsistencyChecker<>(); public static final LogicalVerifier INSTANCE = new LogicalVerifier(); private LogicalVerifier() {} @@ -25,7 +24,7 @@ public Failures verify(LogicalPlan plan) { Failures dependencyFailures = new Failures(); plan.forEachUp(p -> { - DEPENDENCY_CHECK.checkPlan(p, dependencyFailures); + PlanConsistencyChecker.checkPlan(p, dependencyFailures); if (failures.hasFailures() == false) { p.forEachExpression(ex -> { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java index 8bd8aba01fd21..4ec90fc1ed50a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java @@ -8,9 +8,12 @@ package org.elasticsearch.xpack.esql.optimizer; import org.elasticsearch.xpack.esql.common.Failure; +import org.elasticsearch.xpack.esql.common.Failures; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.optimizer.rules.PlanConsistencyChecker; +import org.elasticsearch.xpack.esql.plan.logical.Enrich; +import org.elasticsearch.xpack.esql.plan.physical.EnrichExec; import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; @@ -24,17 +27,21 @@ public final class PhysicalVerifier { public static final PhysicalVerifier INSTANCE = new PhysicalVerifier(); - private static final PlanConsistencyChecker DEPENDENCY_CHECK = new PlanConsistencyChecker<>(); private PhysicalVerifier() {} /** Verifies the physical plan. 
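A call-site sketch for the singleton verifier below (error wiring elided; the Failure import is assumed):

    // Collect consistency and field-extraction failures for an optimized physical plan.
    Collection<Failure> failures = PhysicalVerifier.INSTANCE.verify(plan);
    // Callers are expected to fail the query when the collection is non-empty.
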
*/ public Collection verify(PhysicalPlan plan) { Set failures = new LinkedHashSet<>(); + Failures depFailures = new Failures(); + + // AwaitsFix https://github.com/elastic/elasticsearch/issues/118531 + var enriches = plan.collectFirstChildren(EnrichExec.class::isInstance); + if (enriches.isEmpty() == false && ((EnrichExec) enriches.get(0)).mode() == Enrich.Mode.REMOTE) { + return failures; + } plan.forEachDown(p -> { - // FIXME: re-enable - // DEPENDENCY_CHECK.checkPlan(p, failures); if (p instanceof FieldExtractExec fieldExtractExec) { Attribute sourceAttribute = fieldExtractExec.sourceAttribute(); if (sourceAttribute == null) { @@ -48,8 +55,13 @@ public Collection verify(PhysicalPlan plan) { ); } } + PlanConsistencyChecker.checkPlan(p, depFailures); }); + if (depFailures.hasFailures()) { + throw new IllegalStateException(depFailures.toString()); + } + return failures; } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PlanConsistencyChecker.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PlanConsistencyChecker.java index 30de8945a4c20..d5bd110e8df74 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PlanConsistencyChecker.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PlanConsistencyChecker.java @@ -12,27 +12,42 @@ import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.plan.QueryPlan; +import org.elasticsearch.xpack.esql.plan.logical.BinaryPlan; +import org.elasticsearch.xpack.esql.plan.physical.BinaryExec; import java.util.HashSet; import java.util.Set; import static org.elasticsearch.xpack.esql.common.Failure.fail; -public class PlanConsistencyChecker
<P extends QueryPlan<P>
> { +public class PlanConsistencyChecker { /** * Check whether a single {@link QueryPlan} produces no duplicate attributes and its children provide all of its required * {@link QueryPlan#references() references}. Otherwise, add * {@link org.elasticsearch.xpack.esql.common.Failure Failure}s to the {@link Failures} object. */ - public void checkPlan(P p, Failures failures) { - AttributeSet refs = p.references(); - AttributeSet input = p.inputSet(); - AttributeSet missing = refs.subtract(input); - // TODO: for Joins, we should probably check if the required fields from the left child are actually in the left child, not - // just any child (and analogously for the right child). - if (missing.isEmpty() == false) { - failures.add(fail(p, "Plan [{}] optimized incorrectly due to missing references {}", p.nodeString(), missing)); + public static void checkPlan(QueryPlan p, Failures failures) { + if (p instanceof BinaryPlan binaryPlan) { + checkMissingBinary( + p, + binaryPlan.leftReferences(), + binaryPlan.left().outputSet(), + binaryPlan.rightReferences(), + binaryPlan.right().outputSet(), + failures + ); + } else if (p instanceof BinaryExec binaryExec) { + checkMissingBinary( + p, + binaryExec.leftReferences(), + binaryExec.left().outputSet(), + binaryExec.rightReferences(), + binaryExec.right().outputSet(), + failures + ); + } else { + checkMissing(p, p.references(), p.inputSet(), "missing references", failures); } Set outputAttributeNames = new HashSet<>(); @@ -45,4 +60,29 @@ public void checkPlan(P p, Failures failures) { } } } + + private static void checkMissingBinary( + QueryPlan plan, + AttributeSet leftReferences, + AttributeSet leftInput, + AttributeSet rightReferences, + AttributeSet rightInput, + Failures failures + ) { + checkMissing(plan, leftReferences, leftInput, "missing references from left hand side", failures); + checkMissing(plan, rightReferences, rightInput, "missing references from right hand side", failures); + } + + private static void checkMissing( + QueryPlan plan, + AttributeSet references, + AttributeSet input, + String detailErrorMessage, + Failures failures + ) { + AttributeSet missing = references.subtract(input); + if (missing.isEmpty() == false) { + failures.add(fail(plan, "Plan [{}] optimized incorrectly due to {} {}", plan.nodeString(), detailErrorMessage, missing)); + } + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimits.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimits.java index fb9d3f7e2f91e..1cacebdf27cd2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimits.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimits.java @@ -19,7 +19,6 @@ import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.plan.logical.join.Join; import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; public final class PushDownAndCombineLimits extends OptimizerRules.OptimizerRule { @@ -63,8 +62,10 @@ public LogicalPlan rule(Limit limit) { } } } else if (limit.child() instanceof Join join) { - if (join.config().type() == JoinTypes.LEFT && join.right() instanceof LocalRelation) { - // This is a hash join from something like a lookup. + if (join.config().type() == JoinTypes.LEFT) { + // NOTE! 
This is only correct because our LEFT JOINs preserve the number of rows from the left hand side. + // This deviates from SQL semantics. In SQL, multiple matches on the right hand side lead to multiple rows in the output. + // For us, multiple matches on the right hand side are collected into multi-values. return join.replaceChildren(limit.replaceChild(join.left()), join.right()); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java index ed8851b64c27e..61b1554fb71bc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java @@ -11,7 +11,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; -import org.elasticsearch.xpack.esql.expression.function.grouping.Categorize; import org.elasticsearch.xpack.esql.optimizer.rules.physical.ProjectAwayColumns; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; @@ -22,7 +21,6 @@ import java.util.ArrayList; import java.util.LinkedHashSet; -import java.util.LinkedList; import java.util.List; import java.util.Set; @@ -54,18 +52,9 @@ public PhysicalPlan apply(PhysicalPlan plan) { * it loads the field lazily. If we have more than one field we need to * make sure the fields are loaded for the standard hash aggregator. */ - if (p instanceof AggregateExec agg && agg.groupings().size() == 1) { - // CATEGORIZE requires the standard hash aggregator as well. 
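The widened pushdown above leans on ES|QL's LOOKUP JOIN semantics: exactly one output row per left-hand row, with multiple right-hand matches folded into multi-values. A sketch of the effect, using the request idiom from the tests earlier in this diff (index and field names illustrative):

    // Logical plan before the rule:   Limit[10] -> Join[LEFT] -> { left, right }
    // After PushDownAndCombineLimits: Join[LEFT] -> { Limit[10] -> left, right }
    // Safe only because the LEFT JOIN cannot change the left-hand row count.
    EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest();
    request.query("FROM employees | RENAME languages AS language_code | LOOKUP JOIN languages_lookup ON language_code | LIMIT 10");
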
- if (agg.groupings().get(0).anyMatch(e -> e instanceof Categorize) == false) { - var leaves = new LinkedList<>(); - // TODO: this seems out of place - agg.aggregates() - .stream() - .filter(a -> agg.groupings().contains(a) == false) - .forEach(a -> leaves.addAll(a.collectLeaves())); - var remove = agg.groupings().stream().filter(g -> leaves.contains(g) == false).toList(); - missing.removeAll(Expressions.references(remove)); - } + if (p instanceof AggregateExec agg) { + var ordinalAttributes = agg.ordinalAttributes(); + missing.removeAll(Expressions.references(ordinalAttributes)); } // add extractor diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/BinaryPlan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/BinaryPlan.java index 91cd7f7a15840..dbd22dd297f88 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/BinaryPlan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/BinaryPlan.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.esql.plan.logical; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.tree.Source; import java.util.Arrays; @@ -30,6 +31,10 @@ public LogicalPlan right() { return right; } + public abstract AttributeSet leftReferences(); + + public abstract AttributeSet rightReferences(); + @Override public final BinaryPlan replaceChildren(List newChildren) { return replaceChildren(newChildren.get(0), newChildren.get(1)); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java index 6af29fb23b3bb..a2c159e506880 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeSet; +import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -97,6 +98,16 @@ public List output() { return lazyOutput; } + @Override + public AttributeSet leftReferences() { + return Expressions.references(config().leftFields()); + } + + @Override + public AttributeSet rightReferences() { + return Expressions.references(config().rightFields()); + } + public List rightOutputFields() { AttributeSet leftInputs = left().outputSet(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExec.java index 891d03c571b27..3c2d49567813c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExec.java @@ -18,10 +18,13 @@ import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.grouping.Categorize; import 
 org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; import java.util.List; import java.util.Objects; @@ -181,7 +184,27 @@ public List output() { @Override protected AttributeSet computeReferences() { - return mode.isInputPartial() ? new AttributeSet(intermediateAttributes) : Aggregate.computeReferences(aggregates, groupings); + return mode.isInputPartial() + ? new AttributeSet(intermediateAttributes) + : Aggregate.computeReferences(aggregates, groupings).subtract(new AttributeSet(ordinalAttributes())); + } + + /** Returns the attributes that can be loaded from ordinals -- no explicit extraction is needed */ + public List ordinalAttributes() { + List ordinalAttributes = new ArrayList<>(groupings.size()); + // Ordinals can be leveraged just for a single grouping. If there are multiple groupings, fields need to be loaded for the + // hash aggregator. + // CATEGORIZE requires the standard hash aggregator as well. + if (groupings().size() == 1 && groupings.get(0).anyMatch(e -> e instanceof Categorize) == false) { + var leaves = new HashSet<>(); + aggregates.stream().filter(a -> groupings.contains(a) == false).forEach(a -> leaves.addAll(a.collectLeaves())); + groupings.forEach(g -> { + if (leaves.contains(g) == false) { + ordinalAttributes.add((Attribute) g); + } + }); + } + return ordinalAttributes; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/BinaryExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/BinaryExec.java index 6f200bad17a72..9a1b76205b595 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/BinaryExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/BinaryExec.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.plan.physical; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.tree.Source; import java.io.IOException; @@ -40,6 +41,10 @@ public PhysicalPlan right() { return right; } + public abstract AttributeSet leftReferences(); + + public abstract AttributeSet rightReferences(); + @Override public void writeTo(StreamOutput out) throws IOException { Source.EMPTY.writeTo(out); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeExec.java index 5530b3ea54d3d..d1d834b71047a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeExec.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; @@ -72,6 +73,12 @@ public boolean inBetweenAggs() { return inBetweenAggs; } + @Override + protected AttributeSet computeReferences() { + // ExchangeExec does no input referencing, it only outputs all synthetic attributes,
"sourced" from remote exchanges. + return AttributeSet.EMPTY; + } + @Override public UnaryExec replaceChild(PhysicalPlan newChild) { return new ExchangeExec(source(), output, inBetweenAggs, newChild); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExec.java index 35c6e4846bd88..ec996c5c84064 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExec.java @@ -89,12 +89,7 @@ public static Attribute extractSourceAttributesFrom(PhysicalPlan plan) { @Override protected AttributeSet computeReferences() { - AttributeSet required = new AttributeSet(docValuesAttributes); - - required.add(sourceAttribute); - required.addAll(attributesToExtract); - - return required; + return sourceAttribute != null ? new AttributeSet(sourceAttribute) : AttributeSet.EMPTY; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java index 5ae3702993fcb..362c83bf76213 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java @@ -119,6 +119,16 @@ protected AttributeSet computeReferences() { return Expressions.references(leftFields); } + @Override + public AttributeSet leftReferences() { + return Expressions.references(leftFields); + } + + @Override + public AttributeSet rightReferences() { + return Expressions.references(rightFields); + } + @Override public HashJoinExec replaceChildren(PhysicalPlan left, PhysicalPlan right) { return new HashJoinExec(source(), left, right, matchFields, leftFields, rightFields, output); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LookupJoinExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LookupJoinExec.java index 8b1cc047309e7..2aff38993aa98 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LookupJoinExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LookupJoinExec.java @@ -93,9 +93,9 @@ public List addedFields() { public List output() { if (lazyOutput == null) { lazyOutput = new ArrayList<>(left().output()); - for (Attribute attr : addedFields) { - lazyOutput.add(attr); - } + var addedFieldsNames = addedFields.stream().map(Attribute::name).toList(); + lazyOutput.removeIf(a -> addedFieldsNames.contains(a.name())); + lazyOutput.addAll(addedFields); } return lazyOutput; } @@ -119,6 +119,21 @@ protected AttributeSet computeReferences() { return Expressions.references(leftFields); } + @Override + public AttributeSet leftReferences() { + return Expressions.references(leftFields); + } + + @Override + public AttributeSet rightReferences() { + // TODO: currently it's hard coded that we add all fields from the lookup index. But the output we "officially" get from the right + // hand side is inconsistent: + // - After logical optimization, there's a FragmentExec with an EsRelation on the right hand side with all the fields. 
+ // - After local physical optimization, there's just an EsQueryExec here, with no fields other than _doc mentioned and we don't + // insert field extractions in the plan, either. + return AttributeSet.EMPTY; + } + @Override public LookupJoinExec replaceChildren(PhysicalPlan left, PhysicalPlan right) { return new LookupJoinExec(source(), left, right, leftFields, rightFields, addedFields); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java index 35aba7665ec87..57ba1c8016feb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java @@ -297,9 +297,9 @@ private void aggregatesToFactory( // coordinator/exchange phase else if (mode == AggregatorMode.FINAL || mode == AggregatorMode.INTERMEDIATE) { if (grouping) { - sourceAttr = aggregateMapper.mapGrouping(aggregateFunction); + sourceAttr = aggregateMapper.mapGrouping(ne); } else { - sourceAttr = aggregateMapper.mapNonGrouping(aggregateFunction); + sourceAttr = aggregateMapper.mapNonGrouping(ne); } } else { throw new EsqlIllegalArgumentException("illegal aggregation mode"); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java index 41a6a17a50dcb..138165bd4f0bb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java @@ -13,6 +13,7 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeMap; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; @@ -91,7 +92,7 @@ final class AggregateMapper { private record AggDef(Class aggClazz, String type, String extra, boolean grouping) {} /** Map of AggDef types to intermediate named expressions. 
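The substantive change in this file is that intermediate state names become alias-qualified through Attribute.rawTemporaryName; a sketch of the resulting names, which the updated optimizer tests further down assert:

    // For `STATS c = COUNT(salary)`, COUNT's intermediate state is (count, seen).
    String countName = Attribute.rawTemporaryName("c", "count"); // "$$c$count"
    String seenName = Attribute.rawTemporaryName("c", "seen");   // "$$c$seen"
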
*/ - private static final Map> mapper = AGG_FUNCTIONS.stream() + private static final Map> MAPPER = AGG_FUNCTIONS.stream() .flatMap(AggregateMapper::typeAndNames) .flatMap(AggregateMapper::groupingAndNonGrouping) .collect(Collectors.toUnmodifiableMap(aggDef -> aggDef, AggregateMapper::lookupIntermediateState)); @@ -103,50 +104,57 @@ private record AggDef(Class aggClazz, String type, String extra, boolean grou cache = new HashMap<>(); } - public List mapNonGrouping(List aggregates) { + public List mapNonGrouping(List aggregates) { return doMapping(aggregates, false); } - public List mapNonGrouping(Expression aggregate) { + public List mapNonGrouping(NamedExpression aggregate) { return map(aggregate, false).toList(); } - public List mapGrouping(List aggregates) { + public List mapGrouping(List aggregates) { return doMapping(aggregates, true); } - private List doMapping(List aggregates, boolean grouping) { + private List doMapping(List aggregates, boolean grouping) { AttributeMap attrToExpressions = new AttributeMap<>(); - aggregates.stream().flatMap(agg -> map(agg, grouping)).forEach(ne -> attrToExpressions.put(ne.toAttribute(), ne)); + aggregates.stream().flatMap(ne -> map(ne, grouping)).forEach(ne -> attrToExpressions.put(ne.toAttribute(), ne)); return attrToExpressions.values().stream().toList(); } - public List mapGrouping(Expression aggregate) { + public List mapGrouping(NamedExpression aggregate) { return map(aggregate, true).toList(); } - private Stream map(Expression aggregate, boolean grouping) { - return cache.computeIfAbsent(Alias.unwrap(aggregate), aggKey -> computeEntryForAgg(aggKey, grouping)).stream(); + private Stream map(NamedExpression ne, boolean grouping) { + return cache.computeIfAbsent(Alias.unwrap(ne), aggKey -> computeEntryForAgg(ne.name(), aggKey, grouping)).stream(); } - private static List computeEntryForAgg(Expression aggregate, boolean grouping) { - var aggDef = aggDefOrNull(aggregate, grouping); - if (aggDef != null) { - var is = getNonNull(aggDef); - var exp = isToNE(is).toList(); - return exp; + private static List computeEntryForAgg(String aggAlias, Expression aggregate, boolean grouping) { + if (aggregate instanceof AggregateFunction aggregateFunction) { + return entryForAgg(aggAlias, aggregateFunction, grouping); } if (aggregate instanceof FieldAttribute || aggregate instanceof MetadataAttribute || aggregate instanceof ReferenceAttribute) { - // This condition is a little pedantic, but do we expected other expressions here? if so, then add them + // This condition is a little pedantic, but do we expect other expressions here? if so, then add them return List.of(); - } else { - throw new EsqlIllegalArgumentException("unknown agg: " + aggregate.getClass() + ": " + aggregate); } + throw new EsqlIllegalArgumentException("unknown agg: " + aggregate.getClass() + ": " + aggregate); + } + + private static List entryForAgg(String aggAlias, AggregateFunction aggregateFunction, boolean grouping) { + var aggDef = new AggDef( + aggregateFunction.getClass(), + dataTypeToString(aggregateFunction.field().dataType(), aggregateFunction.getClass()), + aggregateFunction instanceof SpatialCentroid ? 
"SourceValues" : "", + grouping + ); + var is = getNonNull(aggDef); + return isToNE(is, aggAlias).toList(); } /** Gets the agg from the mapper - wrapper around map::get for more informative failure.*/ private static List getNonNull(AggDef aggDef) { - var l = mapper.get(aggDef); + var l = MAPPER.get(aggDef); if (l == null) { throw new EsqlIllegalArgumentException("Cannot find intermediate state for: " + aggDef); } @@ -199,18 +207,6 @@ private static Stream groupingAndNonGrouping(Tuple, Tuple lookupIntermediateState(AggDef aggDef) { try { @@ -257,7 +253,7 @@ private static String determinePackageName(Class clazz) { } /** Maps intermediate state description to named expressions. */ - private static Stream isToNE(List intermediateStateDescs) { + private static Stream isToNE(List intermediateStateDescs, String aggAlias) { return intermediateStateDescs.stream().map(is -> { final DataType dataType; if (Strings.isEmpty(is.dataType())) { @@ -265,7 +261,7 @@ private static Stream isToNE(List interm } else { dataType = DataType.fromEs(is.dataType()); } - return new ReferenceAttribute(Source.EMPTY, is.name(), dataType); + return new ReferenceAttribute(Source.EMPTY, Attribute.rawTemporaryName(aggAlias, is.name()), dataType); }); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlLicenseChecker.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlLicenseChecker.java new file mode 100644 index 0000000000000..0a52ee75de3b2 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlLicenseChecker.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.session; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicensedFeature; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestStatus; + +public class EsqlLicenseChecker { + + public static final LicensedFeature.Momentary CCS_FEATURE = LicensedFeature.momentary( + null, + "esql-ccs", + License.OperationMode.ENTERPRISE + ); + + /** + * Only call this method once you know the user is doing a cross-cluster query, as it will update + * the license_usage timestamp for the esql-ccs feature if the license is Enterprise (or Trial). + * @param licenseState + * @return true if the user has a license that allows ESQL CCS. + */ + public static boolean isCcsAllowed(XPackLicenseState licenseState) { + if (licenseState == null) { + return false; + } + return CCS_FEATURE.check(licenseState); + } + + /** + * @param licenseState existing license state. Need to extract info on the current installed license. + * @return ElasticsearchStatusException with an error message informing the caller what license is needed + * to run ES|QL cross-cluster searches and what license (if any) was found. + */ + public static ElasticsearchStatusException invalidLicenseForCcsException(XPackLicenseState licenseState) { + String message = "A valid Enterprise license is required to run ES|QL cross-cluster searches. 
License found: "; + if (licenseState == null) { + message += "none"; + } else { + message += licenseState.statusDescription(); + } + return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index 4f7c620bc8d12..83480f6651abf 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -298,6 +298,9 @@ public void analyzedPlan( .map(e -> new EnrichPolicyResolver.UnresolvedPolicy((String) e.policyName().fold(), e.mode())) .collect(Collectors.toSet()); final List indices = preAnalysis.indices; + + EsqlSessionCCSUtils.checkForCcsLicense(indices, indicesExpressionGrouper, verifier.licenseState()); + // TODO: make a separate call for lookup indices final Set targetClusters = enrichPolicyResolver.groupIndicesPerCluster( indices.stream().flatMap(t -> Arrays.stream(Strings.commaDelimitedListToStringArray(t.id().index()))).toArray(String[]::new) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java index 4fe2fef7e3f45..662572c466511 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java @@ -9,17 +9,24 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.indices.IndicesExpressionGrouper; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.NoSuchRemoteClusterException; import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.elasticsearch.xpack.esql.analysis.Analyzer; +import org.elasticsearch.xpack.esql.analysis.TableInfo; import org.elasticsearch.xpack.esql.index.IndexResolution; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; @@ -255,6 +262,9 @@ static boolean missingIndicesIsFatal(String clusterAlias, EsqlExecutionInfo exec } private static boolean concreteIndexRequested(String indexExpression) { + if (Strings.isNullOrBlank(indexExpression)) { + return false; + } for (String expr : indexExpression.split(",")) { if (expr.charAt(0) == '<' || expr.startsWith("-<")) { // skip date math expressions @@ -288,4 +298,37 @@ static void updateExecutionInfoAtEndOfPlanning(EsqlExecutionInfo execInfo) { } } } + + /** + * Checks the index expression for the presence of remote clusters. If found, it will ensure that the caller + * has a valid Enterprise (or Trial) license on the querying cluster. 
+ * @param indices index expression requested by user + * @param indicesGrouper grouper of index expressions by cluster alias + * @param licenseState license state on the querying cluster + * @throws org.elasticsearch.ElasticsearchStatusException if the license is not valid (or present) for ES|QL CCS search. + */ + public static void checkForCcsLicense( + List indices, + IndicesExpressionGrouper indicesGrouper, + XPackLicenseState licenseState + ) { + for (TableInfo tableInfo : indices) { + Map groupedIndices; + try { + groupedIndices = indicesGrouper.groupIndices(IndicesOptions.DEFAULT, tableInfo.id().index()); + } catch (NoSuchRemoteClusterException e) { + if (EsqlLicenseChecker.isCcsAllowed(licenseState)) { + throw e; + } else { + throw EsqlLicenseChecker.invalidLicenseForCcsException(licenseState); + } + } + // check if it is a cross-cluster query + if (groupedIndices.size() > 1 || groupedIndices.containsKey(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY) == false) { + if (EsqlLicenseChecker.isCcsAllowed(licenseState) == false) { + throw EsqlLicenseChecker.invalidLicenseForCcsException(licenseState); + } + } + } + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 5330ddf95a752..b54baddc88b28 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -260,7 +260,7 @@ public final void test() throws Throwable { ); assumeFalse( "lookup join disabled for csv tests", - testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.JOIN_LOOKUP_V4.capabilityName()) + testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.JOIN_LOOKUP_V5.capabilityName()) ); assumeFalse( "can't use TERM function in csv tests", diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java index 4e89a09db9ed4..5e79e40b7e938 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.analysis; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.elasticsearch.xpack.esql.enrich.ResolvedEnrichPolicy; @@ -104,6 +105,11 @@ public static LogicalPlan analyze(String query, String mapping, QueryParams para return analyzer.analyze(plan); } + public static IndexResolution loadMapping(String resource, String indexName, IndexMode indexMode) { + EsIndex test = new EsIndex(indexName, EsqlTestUtils.loadMapping(resource), Map.of(indexName, indexMode)); + return IndexResolution.valid(test); + } + public static IndexResolution loadMapping(String resource, String indexName) { EsIndex test = new EsIndex(indexName, EsqlTestUtils.loadMapping(resource)); return IndexResolution.valid(test); @@ -118,7 +124,7 @@ public static IndexResolution expandedDefaultIndexResolution() { } public static IndexResolution defaultLookupResolution() { - return loadMapping("mapping-languages.json", "languages_lookup"); + return loadMapping("mapping-languages.json", "languages_lookup", IndexMode.LOOKUP); } public static EnrichResolution defaultEnrichResolution() { diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index dbe2c5f463f50..2f192936ba86c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -2146,7 +2146,7 @@ public void testLookupMatchTypeWrong() { } public void testLookupJoinUnknownIndex() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V4.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V5.isEnabled()); String errorMessage = "Unknown index [foobar]"; IndexResolution missingLookupIndex = IndexResolution.invalid(errorMessage); @@ -2175,7 +2175,7 @@ public void testLookupJoinUnknownIndex() { } public void testLookupJoinUnknownField() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V4.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V5.isEnabled()); String query = "FROM test | LOOKUP JOIN languages_lookup ON last_name"; String errorMessage = "1:45: Unknown column [last_name] in right side of join"; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 92cac30f1bb20..4b916106165fb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.action.EsqlCapabilities; -import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; @@ -22,7 +21,6 @@ import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.parser.QueryParam; import org.elasticsearch.xpack.esql.parser.QueryParams; -import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import java.util.ArrayList; import java.util.LinkedHashMap; @@ -1805,29 +1803,6 @@ public void testToDatePeriodToTimeDurationWithInvalidType() { ); } - public void testNonMetadataScore() { - assumeTrue("'METADATA _score' is disabled", EsqlCapabilities.Cap.METADATA_SCORE.isEnabled()); - assertEquals("1:12: `_score` is a reserved METADATA attribute", error("from foo | eval _score = 10")); - - assertEquals( - "1:48: `_score` is a reserved METADATA attribute", - error("from foo metadata _score | where qstr(\"bar\") | eval _score = _score + 1") - ); - } - - public void testScoreRenaming() { - assumeTrue("'METADATA _score' is disabled", EsqlCapabilities.Cap.METADATA_SCORE.isEnabled()); - assertEquals("1:33: `_score` is a reserved METADATA attribute", error("from foo METADATA _id, _score | rename _id as _score")); - - assertTrue(passes("from foo metadata _score | rename _score as foo").stream().anyMatch(a -> a.name().equals("foo"))); - } - - private List passes(String query) { - LogicalPlan logicalPlan = defaultAnalyzer.analyze(parser.createStatement(query)); - assertTrue(logicalPlan.resolved()); - return logicalPlan.output(); - } - public 
void testIntervalAsString() { // DateTrunc for (String interval : List.of("1 minu", "1 dy", "1.5 minutes", "0.5 days", "minutes 1", "day 5")) { @@ -1894,38 +1869,35 @@ public void testIntervalAsString() { ); } - public void testCategorizeSingleGrouping() { - assumeTrue("requires Categorize capability", EsqlCapabilities.Cap.CATEGORIZE_V5.isEnabled()); - - query("from test | STATS COUNT(*) BY CATEGORIZE(first_name)"); - query("from test | STATS COUNT(*) BY cat = CATEGORIZE(first_name)"); + public void testCategorizeOnlyFirstGrouping() { + query("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name)"); + query("FROM test | STATS COUNT(*) BY cat = CATEGORIZE(first_name)"); + query("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name), emp_no"); + query("FROM test | STATS COUNT(*) BY a = CATEGORIZE(first_name), b = emp_no"); assertEquals( - "1:31: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings", - error("from test | STATS COUNT(*) BY CATEGORIZE(first_name), emp_no") + "1:39: CATEGORIZE grouping function [CATEGORIZE(first_name)] can only be in the first grouping expression", + error("FROM test | STATS COUNT(*) BY emp_no, CATEGORIZE(first_name)") ); assertEquals( - "1:39: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings", - error("FROM test | STATS COUNT(*) BY emp_no, CATEGORIZE(first_name)") + "1:55: CATEGORIZE grouping function [CATEGORIZE(last_name)] can only be in the first grouping expression", + error("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name), CATEGORIZE(last_name)") ); assertEquals( - "1:35: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings", - error("FROM test | STATS COUNT(*) BY a = CATEGORIZE(first_name), b = emp_no") + "1:55: CATEGORIZE grouping function [CATEGORIZE(first_name)] can only be in the first grouping expression", + error("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name), CATEGORIZE(first_name)") ); assertEquals( - "1:31: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings\n" - + "line 1:55: cannot use CATEGORIZE grouping function [CATEGORIZE(last_name)] with multiple groupings", - error("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name), CATEGORIZE(last_name)") + "1:63: CATEGORIZE grouping function [CATEGORIZE(last_name)] can only be in the first grouping expression", + error("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name), emp_no, CATEGORIZE(last_name)") ); assertEquals( - "1:31: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings", - error("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name), CATEGORIZE(first_name)") + "1:63: CATEGORIZE grouping function [CATEGORIZE(first_name)] can only be in the first grouping expression", + error("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name), emp_no, CATEGORIZE(first_name)") ); } public void testCategorizeNestedGrouping() { - assumeTrue("requires Categorize capability", EsqlCapabilities.Cap.CATEGORIZE_V5.isEnabled()); - query("from test | STATS COUNT(*) BY CATEGORIZE(LENGTH(first_name)::string)"); assertEquals( @@ -1939,8 +1911,6 @@ public void testCategorizeNestedGrouping() { } public void testCategorizeWithinAggregations() { - assumeTrue("requires Categorize capability", EsqlCapabilities.Cap.CATEGORIZE_V5.isEnabled()); - query("from test | STATS MV_COUNT(cat), COUNT(*) BY cat = CATEGORIZE(first_name)"); query("from test | STATS MV_COUNT(CATEGORIZE(first_name)), COUNT(*) BY cat = CATEGORIZE(first_name)"); 
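For a concrete picture of the newly allowed multi-grouping shape, a sketch using the request API from the integration tests; categories and counts are invented:

    EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest();
    request.query("FROM logs | STATS c = COUNT(*) BY CATEGORIZE(message), host");
    // Illustrative result: rows bucketed by category first, then host.
    //   c | category                 | host
    //   3 | .*?Connected.+?to.*?     | web-1
    //   2 | .*?Connection.+?error.*? | web-2
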
query("from test | STATS MV_COUNT(CATEGORIZE(first_name)), COUNT(*) BY CATEGORIZE(first_name)"); @@ -1968,6 +1938,24 @@ public void testCategorizeWithinAggregations() { ); } + public void testCategorizeWithFilteredAggregations() { + query("FROM test | STATS COUNT(*) WHERE first_name == \"John\" BY CATEGORIZE(last_name)"); + query("FROM test | STATS COUNT(*) WHERE last_name == \"Doe\" BY CATEGORIZE(last_name)"); + + assertEquals( + "1:34: can only use grouping function [CATEGORIZE(first_name)] as part of the BY clause", + error("FROM test | STATS COUNT(*) WHERE CATEGORIZE(first_name) == \"John\" BY CATEGORIZE(last_name)") + ); + assertEquals( + "1:34: can only use grouping function [CATEGORIZE(last_name)] as part of the BY clause", + error("FROM test | STATS COUNT(*) WHERE CATEGORIZE(last_name) == \"Doe\" BY CATEGORIZE(last_name)") + ); + assertEquals( + "1:34: cannot reference CATEGORIZE grouping function [category] within an aggregation filter", + error("FROM test | STATS COUNT(*) WHERE category == \"Doe\" BY category = CATEGORIZE(last_name)") + ); + } + public void testSortByAggregate() { assertEquals("1:18: Aggregate functions are not allowed in SORT [COUNT]", error("ROW a = 1 | SORT count(*)")); assertEquals("1:28: Aggregate functions are not allowed in SORT [COUNT]", error("ROW a = 1 | SORT to_string(count(*))")); @@ -1976,7 +1964,7 @@ public void testSortByAggregate() { } public void testLookupJoinDataTypeMismatch() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V4.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V5.isEnabled()); query("FROM test | EVAL language_code = languages | LOOKUP JOIN languages_lookup ON language_code"); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java index c01668d0e6c48..2d3ba1be7a643 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java @@ -250,7 +250,7 @@ public void testCountFieldWithEval() { var esStatsQuery = as(exg.child(), EsStatsQueryExec.class); assertThat(esStatsQuery.limit(), is(nullValue())); - assertThat(Expressions.names(esStatsQuery.output()), contains("count", "seen")); + assertThat(Expressions.names(esStatsQuery.output()), contains("$$c$count", "$$c$seen")); var stat = as(esStatsQuery.stats().get(0), Stat.class); assertThat(stat.query(), is(QueryBuilders.existsQuery("salary"))); } @@ -271,7 +271,7 @@ public void testCountOneFieldWithFilter() { var exchange = as(agg.child(), ExchangeExec.class); var esStatsQuery = as(exchange.child(), EsStatsQueryExec.class); assertThat(esStatsQuery.limit(), is(nullValue())); - assertThat(Expressions.names(esStatsQuery.output()), contains("count", "seen")); + assertThat(Expressions.names(esStatsQuery.output()), contains("$$c$count", "$$c$seen")); var stat = as(esStatsQuery.stats().get(0), Stat.class); Source source = new Source(2, 8, "salary > 1000"); var exists = QueryBuilders.existsQuery("salary"); @@ -381,7 +381,7 @@ public void testAnotherCountAllWithFilter() { var exchange = as(agg.child(), ExchangeExec.class); var esStatsQuery = as(exchange.child(), EsStatsQueryExec.class); assertThat(esStatsQuery.limit(), is(nullValue())); - 
assertThat(Expressions.names(esStatsQuery.output()), contains("count", "seen")); + assertThat(Expressions.names(esStatsQuery.output()), contains("$$c$count", "$$c$seen")); var source = ((SingleValueQuery.Builder) esStatsQuery.query()).source(); var expected = wrapWithSingleQuery(query, QueryBuilders.rangeQuery("emp_no").gt(10010), "emp_no", source); assertThat(expected.toString(), is(esStatsQuery.query().toString())); @@ -992,7 +992,7 @@ public boolean exists(String field) { var exchange = as(agg.child(), ExchangeExec.class); assertThat(exchange.inBetweenAggs(), is(true)); var localSource = as(exchange.child(), LocalSourceExec.class); - assertThat(Expressions.names(localSource.output()), contains("count", "seen")); + assertThat(Expressions.names(localSource.output()), contains("$$c$count", "$$c$seen")); } /** @@ -1147,7 +1147,7 @@ public void testIsNotNull_TextField_Pushdown_WithCount() { var exg = as(agg.child(), ExchangeExec.class); var esStatsQuery = as(exg.child(), EsStatsQueryExec.class); assertThat(esStatsQuery.limit(), is(nullValue())); - assertThat(Expressions.names(esStatsQuery.output()), contains("count", "seen")); + assertThat(Expressions.names(esStatsQuery.output()), contains("$$c$count", "$$c$seen")); var stat = as(esStatsQuery.stats().get(0), Stat.class); assertThat(stat.query(), is(QueryBuilders.existsQuery("job"))); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 737bb2eb23a6f..fd2f4333cc5f4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -20,7 +20,6 @@ import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.VerificationException; -import org.elasticsearch.xpack.esql.action.EsqlCapabilities; import org.elasticsearch.xpack.esql.analysis.Analyzer; import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils; @@ -41,6 +40,7 @@ import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison; +import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.util.Holder; @@ -113,7 +113,9 @@ import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.plan.logical.join.InlineJoin; import org.elasticsearch.xpack.esql.plan.logical.join.Join; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinConfig; import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes; +import org.elasticsearch.xpack.esql.plan.logical.join.LookupJoin; import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; @@ -139,6 +141,7 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.TWO; import static org.elasticsearch.xpack.esql.EsqlTestUtils.as; import static 
org.elasticsearch.xpack.esql.EsqlTestUtils.emptySource; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.fieldAttribute; import static org.elasticsearch.xpack.esql.EsqlTestUtils.getFieldAttribute; import static org.elasticsearch.xpack.esql.EsqlTestUtils.loadMapping; import static org.elasticsearch.xpack.esql.EsqlTestUtils.localSource; @@ -146,6 +149,7 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; import static org.elasticsearch.xpack.esql.analysis.Analyzer.NO_FIELDS; import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.analyze; +import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.defaultLookupResolution; import static org.elasticsearch.xpack.esql.core.expression.Literal.NULL; import static org.elasticsearch.xpack.esql.core.tree.Source.EMPTY; import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; @@ -218,7 +222,13 @@ public static void init() { EsIndex test = new EsIndex("test", mapping, Map.of("test", IndexMode.STANDARD)); IndexResolution getIndexResult = IndexResolution.valid(test); analyzer = new Analyzer( - new AnalyzerContext(EsqlTestUtils.TEST_CFG, new EsqlFunctionRegistry(), getIndexResult, enrichResolution), + new AnalyzerContext( + EsqlTestUtils.TEST_CFG, + new EsqlFunctionRegistry(), + getIndexResult, + defaultLookupResolution(), + enrichResolution + ), TEST_VERIFIER ); @@ -1212,8 +1222,6 @@ public void testCombineProjectionWithAggregationFirstAndAliasedGroupingUsedInAgg * \_EsRelation[test][_meta_field{f}#23, emp_no{f}#17, first_name{f}#18, ..] */ public void testCombineProjectionWithCategorizeGrouping() { - assumeTrue("requires Categorize capability", EsqlCapabilities.Cap.CATEGORIZE_V5.isEnabled()); - var plan = plan(""" from test | eval k = first_name, k1 = k @@ -1294,6 +1302,26 @@ public void testCombineLimits() { ); } + public void testPushdownLimitsPastLeftJoin() { + var leftChild = emptySource(); + var rightChild = new LocalRelation(Source.EMPTY, List.of(fieldAttribute()), LocalSupplier.EMPTY); + assertNotEquals(leftChild, rightChild); + + var joinConfig = new JoinConfig(JoinTypes.LEFT, List.of(), List.of(), List.of()); + var join = switch (randomIntBetween(0, 2)) { + case 0 -> new Join(EMPTY, leftChild, rightChild, joinConfig); + case 1 -> new LookupJoin(EMPTY, leftChild, rightChild, joinConfig); + case 2 -> new InlineJoin(EMPTY, leftChild, rightChild, joinConfig); + default -> throw new IllegalArgumentException(); + }; + + var limit = new Limit(EMPTY, L(10), join); + + var optimizedPlan = new PushDownAndCombineLimits().rule(limit); + + assertEquals(join.replaceChildren(limit.replaceChild(join.left()), join.right()), optimizedPlan); + } + public void testMultipleCombineLimits() { var numberOfLimits = randomIntBetween(3, 10); var minimum = randomIntBetween(10, 99); @@ -3949,8 +3977,6 @@ public void testNestedExpressionsInGroups() { * \_EsRelation[test][_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, ge..] 
*/ public void testNestedExpressionsInGroupsWithCategorize() { - assumeTrue("requires Categorize capability", EsqlCapabilities.Cap.CATEGORIZE_V5.isEnabled()); - var plan = optimizedPlan(""" from test | stats c = count(salary) by CATEGORIZE(CONCAT(first_name, "abc")) @@ -4877,6 +4903,26 @@ public void testPlanSanityCheck() throws Exception { assertThat(e.getMessage(), containsString(" optimized incorrectly due to missing references [salary")); } + public void testPlanSanityCheckWithBinaryPlans() throws Exception { + var plan = optimizedPlan(""" + FROM test + | RENAME languages AS language_code + | LOOKUP JOIN languages_lookup ON language_code + """); + + var project = as(plan, Project.class); + var limit = as(project.child(), Limit.class); + var join = as(limit.child(), Join.class); + + var joinWithInvalidLeftPlan = join.replaceChildren(join.right(), join.right()); + IllegalStateException e = expectThrows(IllegalStateException.class, () -> logicalOptimizer.optimize(joinWithInvalidLeftPlan)); + assertThat(e.getMessage(), containsString(" optimized incorrectly due to missing references from left hand side [language_code")); + + var joinWithInvalidRightPlan = join.replaceChildren(join.left(), join.left()); + e = expectThrows(IllegalStateException.class, () -> logicalOptimizer.optimize(joinWithInvalidRightPlan)); + assertThat(e.getMessage(), containsString(" optimized incorrectly due to missing references from right hand side [language_code")); + } + // https://github.com/elastic/elasticsearch/issues/104995 public void testNoWrongIsNotNullPruning() { var plan = optimizedPlan(""" diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 317aa3ab6f5e9..d43e41aed6a0e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.compute.aggregation.AggregatorMode; import org.elasticsearch.core.Tuple; import org.elasticsearch.geometry.Circle; import org.elasticsearch.geometry.Polygon; @@ -114,6 +115,7 @@ import org.elasticsearch.xpack.esql.plan.physical.HashJoinExec; import org.elasticsearch.xpack.esql.plan.physical.LimitExec; import org.elasticsearch.xpack.esql.plan.physical.LocalSourceExec; +import org.elasticsearch.xpack.esql.plan.physical.LookupJoinExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.plan.physical.ProjectExec; import org.elasticsearch.xpack.esql.plan.physical.TopNExec; @@ -127,6 +129,7 @@ import org.elasticsearch.xpack.esql.stats.SearchStats; import org.junit.Before; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; @@ -153,6 +156,7 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; import static org.elasticsearch.xpack.esql.SerializationTestUtils.assertSerialization; import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.analyze; +import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.defaultLookupResolution; import static org.elasticsearch.xpack.esql.core.expression.Expressions.name; 
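The new sanity-check tests on both the logical and the physical side pin down the same invariant: whatever attributes a binary plan requires from one side must be produced by that side's child, and the failure message names the offending side. A compact sketch of such a per-side rule, assuming the `leftReferences()`/`rightReferences()` accessors the tests refer to and an `AttributeSet.subtract` set-difference helper:

    // Sketch of a per-side missing-references check for a binary plan node.
    static void checkBinaryPlan(BinaryPlan plan) {
        checkSide("left", plan.leftReferences(), plan.left().outputSet());
        checkSide("right", plan.rightReferences(), plan.right().outputSet());
    }

    static void checkSide(String side, AttributeSet required, AttributeSet provided) {
        AttributeSet missing = required.subtract(provided);
        if (missing.isEmpty() == false) {
            throw new IllegalStateException(
                "Plan optimized incorrectly due to missing references from " + side + " hand side " + missing
            );
        }
    }

Swapping a child out, as the tests do with `join.replaceChildren(join.right(), join.right())`, makes the corresponding side's references unresolvable and trips exactly this check.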
import static org.elasticsearch.xpack.esql.core.expression.Expressions.names; import static org.elasticsearch.xpack.esql.core.expression.function.scalar.FunctionTestUtils.l; @@ -279,16 +283,30 @@ TestDataSource makeTestDataSource( String indexName, String mappingFileName, EsqlFunctionRegistry functionRegistry, + IndexResolution lookupResolution, EnrichResolution enrichResolution, SearchStats stats ) { Map mapping = loadMapping(mappingFileName); EsIndex index = new EsIndex(indexName, mapping, Map.of("test", IndexMode.STANDARD)); IndexResolution getIndexResult = IndexResolution.valid(index); - Analyzer analyzer = new Analyzer(new AnalyzerContext(config, functionRegistry, getIndexResult, enrichResolution), TEST_VERIFIER); + Analyzer analyzer = new Analyzer( + new AnalyzerContext(config, functionRegistry, getIndexResult, lookupResolution, enrichResolution), + TEST_VERIFIER + ); return new TestDataSource(mapping, index, analyzer, stats); } + TestDataSource makeTestDataSource( + String indexName, + String mappingFileName, + EsqlFunctionRegistry functionRegistry, + EnrichResolution enrichResolution, + SearchStats stats + ) { + return makeTestDataSource(indexName, mappingFileName, functionRegistry, defaultLookupResolution(), enrichResolution, stats); + } + TestDataSource makeTestDataSource( String indexName, String mappingFileName, @@ -2286,6 +2304,91 @@ public void testFieldExtractWithoutSourceAttributes() { ); } + public void testVerifierOnMissingReferences() { + var plan = physicalPlan(""" + from test + | stats s = sum(salary) by emp_no + | where emp_no > 10 + """); + + plan = plan.transformUp( + AggregateExec.class, + a -> new AggregateExec( + a.source(), + a.child(), + a.groupings(), + List.of(), // remove the aggs (and thus the groupings) entirely + a.getMode(), + a.intermediateAttributes(), + a.estimatedRowSize() + ) + ); + final var finalPlan = plan; + var e = expectThrows(IllegalStateException.class, () -> physicalPlanOptimizer.verify(finalPlan)); + assertThat(e.getMessage(), containsString(" > 10[INTEGER]]] optimized incorrectly due to missing references [emp_no{f}#")); + } + + public void testVerifierOnMissingReferencesWithBinaryPlans() throws Exception { + // Do not assert serialization: + // This will have a LookupJoinExec, which is not serializable because it doesn't leave the coordinator. + var plan = physicalPlan(""" + FROM test + | RENAME languages AS language_code + | SORT language_code + | LOOKUP JOIN languages_lookup ON language_code + """, testData, false); + + var planWithInvalidJoinLeftSide = plan.transformUp(LookupJoinExec.class, join -> join.replaceChildren(join.right(), join.right())); + + var e = expectThrows(IllegalStateException.class, () -> physicalPlanOptimizer.verify(planWithInvalidJoinLeftSide)); + assertThat(e.getMessage(), containsString(" optimized incorrectly due to missing references from left hand side [language_code")); + + var planWithInvalidJoinRightSide = plan.transformUp( + LookupJoinExec.class, + // LookupJoinExec.rightReferences() is currently EMPTY (hack); use a HashJoinExec instead. 
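+ // Why this substitution works (illustrative summary; only names from the
+ // surrounding test): HashJoinExec's rightFields() are genuine attribute
+ // references, unlike LookupJoinExec's empty rightReferences(). Reusing
+ // join.left() as the right child therefore leaves those references
+ // unresolvable against the right-hand output -- roughly,
+ // rightFields not-contained-in output(right child) -- which is exactly the
+ // condition the verifier must reject with an IllegalStateException.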
+ join -> new HashJoinExec( + join.source(), + join.left(), + join.left(), + join.leftFields(), + join.leftFields(), + join.rightFields(), + join.output() + ) + ); + + e = expectThrows(IllegalStateException.class, () -> physicalPlanOptimizer.verify(planWithInvalidJoinRightSide)); + assertThat(e.getMessage(), containsString(" optimized incorrectly due to missing references from right hand side [language_code")); + } + + public void testVerifierOnDuplicateOutputAttributes() { + var plan = physicalPlan(""" + from test + | stats s = sum(salary) by emp_no + | where emp_no > 10 + """); + + plan = plan.transformUp(AggregateExec.class, a -> { + List intermediates = new ArrayList<>(a.intermediateAttributes()); + intermediates.add(intermediates.get(0)); + return new AggregateExec( + a.source(), + a.child(), + a.groupings(), + a.aggregates(), + AggregatorMode.INTERMEDIATE, // FINAL would deduplicate aggregates() + intermediates, + a.estimatedRowSize() + ); + }); + final var finalPlan = plan; + var e = expectThrows(IllegalStateException.class, () -> physicalPlanOptimizer.verify(finalPlan)); + assertThat( + e.getMessage(), + containsString("Plan [LimitExec[1000[INTEGER]]] optimized incorrectly due to duplicate output attribute emp_no{f}#") + ); + } + public void testProjectAwayColumns() { var rule = new ProjectAwayColumns(); @@ -2557,7 +2660,7 @@ public boolean exists(String field) { var exchange = asRemoteExchange(aggregate.child()); var localSourceExec = as(exchange.child(), LocalSourceExec.class); - assertThat(Expressions.names(localSourceExec.output()), contains("languages", "min", "seen")); + assertThat(Expressions.names(localSourceExec.output()), contains("languages", "$$m$min", "$$m$seen")); } /** @@ -2593,9 +2696,9 @@ public void testPartialAggFoldingOutput() { var limit = as(optimized, LimitExec.class); var agg = as(limit.child(), AggregateExec.class); var exchange = as(agg.child(), ExchangeExec.class); - assertThat(Expressions.names(exchange.output()), contains("count", "seen")); + assertThat(Expressions.names(exchange.output()), contains("$$c$count", "$$c$seen")); var source = as(exchange.child(), LocalSourceExec.class); - assertThat(Expressions.names(source.output()), contains("count", "seen")); + assertThat(Expressions.names(source.output()), contains("$$c$count", "$$c$seen")); } /** @@ -2627,7 +2730,7 @@ public void testGlobalAggFoldingOutput() { var aggFinal = as(limit.child(), AggregateExec.class); var aggPartial = as(aggFinal.child(), AggregateExec.class); // The partial aggregation's output is determined via AbstractPhysicalOperationProviders.intermediateAttributes() - assertThat(Expressions.names(aggPartial.output()), contains("count", "seen")); + assertThat(Expressions.names(aggPartial.output()), contains("$$c$count", "$$c$seen")); limit = as(aggPartial.child(), LimitExec.class); var exchange = as(limit.child(), ExchangeExec.class); var project = as(exchange.child(), ProjectExec.class); @@ -2665,9 +2768,15 @@ public void testPartialAggFoldingOutputForSyntheticAgg() { var aggFinal = as(limit.child(), AggregateExec.class); assertThat(aggFinal.output(), hasSize(2)); var exchange = as(aggFinal.child(), ExchangeExec.class); - assertThat(Expressions.names(exchange.output()), contains("sum", "seen", "count", "seen")); + assertThat( + Expressions.names(exchange.output()), + contains("$$SUM$a$0$sum", "$$SUM$a$0$seen", "$$COUNT$a$1$count", "$$COUNT$a$1$seen") + ); var source = as(exchange.child(), LocalSourceExec.class); - assertThat(Expressions.names(source.output()), contains("sum", "seen", 
"count", "seen")); + assertThat( + Expressions.names(source.output()), + contains("$$SUM$a$0$sum", "$$SUM$a$0$seen", "$$COUNT$a$1$count", "$$COUNT$a$1$seen") + ); } /** @@ -6706,11 +6815,17 @@ private PhysicalPlan physicalPlan(String query) { } private PhysicalPlan physicalPlan(String query, TestDataSource dataSource) { + return physicalPlan(query, dataSource, true); + } + + private PhysicalPlan physicalPlan(String query, TestDataSource dataSource, boolean assertSerialization) { var logical = logicalOptimizer.optimize(dataSource.analyzer.analyze(parser.createStatement(query))); // System.out.println("Logical\n" + logical); var physical = mapper.map(logical); // System.out.println(physical); - assertSerialization(physical); + if (assertSerialization) { + assertSerialization(physical); + } return physical; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java index 60b632c443f8e..1000c05282fdb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java @@ -8,10 +8,18 @@ package org.elasticsearch.xpack.esql.session; import org.apache.lucene.index.CorruptIndexException; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; import org.elasticsearch.index.IndexMode; +import org.elasticsearch.indices.IndicesExpressionGrouper; +import org.elasticsearch.license.License; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.license.internal.XPackLicenseStatus; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.NoSeedNodeLeftException; @@ -20,9 +28,11 @@ import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; +import org.elasticsearch.xpack.esql.analysis.TableInfo; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.index.EsIndex; import org.elasticsearch.xpack.esql.index.IndexResolution; +import org.elasticsearch.xpack.esql.plan.TableIdentifier; import org.elasticsearch.xpack.esql.type.EsFieldTests; import java.util.ArrayList; @@ -32,8 +42,12 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.LongSupplier; import java.util.function.Predicate; +import java.util.stream.Collectors; +import static org.elasticsearch.xpack.esql.core.tree.Source.EMPTY; +import static org.elasticsearch.xpack.esql.session.EsqlSessionCCSUtils.checkForCcsLicense; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -627,4 +641,148 @@ public void testMissingIndicesIsFatal() { } } + + public void testCheckForCcsLicense() { + final TestIndicesExpressionGrouper indicesGrouper = new TestIndicesExpressionGrouper(); + + // this seems to be used only for tracking usage of features, 
not for checking if a license is expired + final LongSupplier currTime = () -> System.currentTimeMillis(); + + XPackLicenseState enterpriseLicenseValid = new XPackLicenseState(currTime, activeLicenseStatus(License.OperationMode.ENTERPRISE)); + XPackLicenseState trialLicenseValid = new XPackLicenseState(currTime, activeLicenseStatus(License.OperationMode.TRIAL)); + XPackLicenseState platinumLicenseValid = new XPackLicenseState(currTime, activeLicenseStatus(License.OperationMode.PLATINUM)); + XPackLicenseState goldLicenseValid = new XPackLicenseState(currTime, activeLicenseStatus(License.OperationMode.GOLD)); + XPackLicenseState basicLicenseValid = new XPackLicenseState(currTime, activeLicenseStatus(License.OperationMode.BASIC)); + XPackLicenseState standardLicenseValid = new XPackLicenseState(currTime, activeLicenseStatus(License.OperationMode.STANDARD)); + XPackLicenseState missingLicense = new XPackLicenseState(currTime, activeLicenseStatus(License.OperationMode.MISSING)); + XPackLicenseState nullLicense = null; + + final XPackLicenseStatus enterpriseStatus = inactiveLicenseStatus(License.OperationMode.ENTERPRISE); + XPackLicenseState enterpriseLicenseInactive = new XPackLicenseState(currTime, enterpriseStatus); + XPackLicenseState trialLicenseInactive = new XPackLicenseState(currTime, inactiveLicenseStatus(License.OperationMode.TRIAL)); + XPackLicenseState platinumLicenseInactive = new XPackLicenseState(currTime, inactiveLicenseStatus(License.OperationMode.PLATINUM)); + XPackLicenseState goldLicenseInactive = new XPackLicenseState(currTime, inactiveLicenseStatus(License.OperationMode.GOLD)); + XPackLicenseState basicLicenseInactive = new XPackLicenseState(currTime, inactiveLicenseStatus(License.OperationMode.BASIC)); + XPackLicenseState standardLicenseInactive = new XPackLicenseState(currTime, inactiveLicenseStatus(License.OperationMode.STANDARD)); + XPackLicenseState missingLicenseInactive = new XPackLicenseState(currTime, inactiveLicenseStatus(License.OperationMode.MISSING)); + + // local only search does not require an enterprise license + { + List indices = new ArrayList<>(); + indices.add(new TableInfo(new TableIdentifier(EMPTY, null, randomFrom("idx", "idx1,idx2*")))); + + checkForCcsLicense(indices, indicesGrouper, enterpriseLicenseValid); + checkForCcsLicense(indices, indicesGrouper, platinumLicenseValid); + checkForCcsLicense(indices, indicesGrouper, goldLicenseValid); + checkForCcsLicense(indices, indicesGrouper, trialLicenseValid); + checkForCcsLicense(indices, indicesGrouper, basicLicenseValid); + checkForCcsLicense(indices, indicesGrouper, standardLicenseValid); + checkForCcsLicense(indices, indicesGrouper, missingLicense); + checkForCcsLicense(indices, indicesGrouper, nullLicense); + + checkForCcsLicense(indices, indicesGrouper, enterpriseLicenseInactive); + checkForCcsLicense(indices, indicesGrouper, platinumLicenseInactive); + checkForCcsLicense(indices, indicesGrouper, goldLicenseInactive); + checkForCcsLicense(indices, indicesGrouper, trialLicenseInactive); + checkForCcsLicense(indices, indicesGrouper, basicLicenseInactive); + checkForCcsLicense(indices, indicesGrouper, standardLicenseInactive); + checkForCcsLicense(indices, indicesGrouper, missingLicenseInactive); + } + + // cross-cluster search requires a valid (active, non-expired) enterprise license OR a valid trial license + { + List indices = new ArrayList<>(); + final String indexExprWithRemotes = randomFrom("remote:idx", "idx1,remote:idx2*,remote:logs,c*:idx4"); + if (randomBoolean()) { + indices.add(new 
TableInfo(new TableIdentifier(EMPTY, null, indexExprWithRemotes))); + } else { + indices.add(new TableInfo(new TableIdentifier(EMPTY, null, randomFrom("idx", "idx1,idx2*")))); + indices.add(new TableInfo(new TableIdentifier(EMPTY, null, indexExprWithRemotes))); + } + + // licenses that work + checkForCcsLicense(indices, indicesGrouper, enterpriseLicenseValid); + checkForCcsLicense(indices, indicesGrouper, trialLicenseValid); + + // all others fail --- + + // active non-expired non-Enterprise non-Trial licenses + assertLicenseCheckFails(indices, indicesGrouper, platinumLicenseValid, "active platinum license"); + assertLicenseCheckFails(indices, indicesGrouper, goldLicenseValid, "active gold license"); + assertLicenseCheckFails(indices, indicesGrouper, basicLicenseValid, "active basic license"); + assertLicenseCheckFails(indices, indicesGrouper, standardLicenseValid, "active standard license"); + assertLicenseCheckFails(indices, indicesGrouper, missingLicense, "active missing license"); + assertLicenseCheckFails(indices, indicesGrouper, nullLicense, "none"); + + // inactive/expired licenses + assertLicenseCheckFails(indices, indicesGrouper, enterpriseLicenseInactive, "expired enterprise license"); + assertLicenseCheckFails(indices, indicesGrouper, trialLicenseInactive, "expired trial license"); + assertLicenseCheckFails(indices, indicesGrouper, platinumLicenseInactive, "expired platinum license"); + assertLicenseCheckFails(indices, indicesGrouper, goldLicenseInactive, "expired gold license"); + assertLicenseCheckFails(indices, indicesGrouper, basicLicenseInactive, "expired basic license"); + assertLicenseCheckFails(indices, indicesGrouper, standardLicenseInactive, "expired standard license"); + assertLicenseCheckFails(indices, indicesGrouper, missingLicenseInactive, "expired missing license"); + } + } + + private XPackLicenseStatus activeLicenseStatus(License.OperationMode operationMode) { + return new XPackLicenseStatus(operationMode, true, null); + } + + private XPackLicenseStatus inactiveLicenseStatus(License.OperationMode operationMode) { + return new XPackLicenseStatus(operationMode, false, "License Expired 123"); + } + + private void assertLicenseCheckFails( + List indices, + TestIndicesExpressionGrouper indicesGrouper, + XPackLicenseState licenseState, + String expectedErrorMessageSuffix + ) { + ElasticsearchStatusException e = expectThrows( + ElasticsearchStatusException.class, + () -> checkForCcsLicense(indices, indicesGrouper, licenseState) + ); + assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat( + e.getMessage(), + equalTo( + "A valid Enterprise license is required to run ES|QL cross-cluster searches. 
License found: " + expectedErrorMessageSuffix + ) + ); + } + + static class TestIndicesExpressionGrouper implements IndicesExpressionGrouper { + @Override + public Map groupIndices(IndicesOptions indicesOptions, String[] indexExpressions) { + final Map originalIndicesMap = new HashMap<>(); + final String localKey = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + + for (String expr : indexExpressions) { + assertFalse(Strings.isNullOrBlank(expr)); + String[] split = expr.split(":", 2); + assertTrue("Bad index expression: " + expr, split.length < 3); + String clusterAlias; + String indexExpr; + if (split.length == 1) { + clusterAlias = localKey; + indexExpr = expr; + } else { + clusterAlias = split[0]; + indexExpr = split[1]; + + } + OriginalIndices currIndices = originalIndicesMap.get(clusterAlias); + if (currIndices == null) { + originalIndicesMap.put(clusterAlias, new OriginalIndices(new String[] { indexExpr }, indicesOptions)); + } else { + List indicesList = Arrays.stream(currIndices.indices()).collect(Collectors.toList()); + indicesList.add(indexExpr); + originalIndicesMap.put(clusterAlias, new OriginalIndices(indicesList.toArray(new String[0]), indicesOptions)); + } + } + return originalIndicesMap; + } + } + } diff --git a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ilm/CCRIndexLifecycleIT.java b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ilm/CCRIndexLifecycleIT.java index 5168cd11eb172..a5d966873dda1 100644 --- a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ilm/CCRIndexLifecycleIT.java +++ b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ilm/CCRIndexLifecycleIT.java @@ -38,7 +38,6 @@ import java.util.Optional; import java.util.concurrent.TimeUnit; -import static java.util.Collections.singletonMap; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.ilm.ShrinkIndexNameSupplier.SHRUNKEN_INDEX_PREFIX; import static org.hamcrest.Matchers.equalTo; @@ -762,8 +761,8 @@ private void assertDocumentExists(RestClient client, String index, String id) th } private void createNewSingletonPolicy(String policyName, String phaseName, LifecycleAction action, TimeValue after) throws IOException { - Phase phase = new Phase(phaseName, after, singletonMap(action.getWriteableName(), action)); - LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policyName, singletonMap(phase.getName(), phase)); + Phase phase = new Phase(phaseName, after, Map.of(action.getWriteableName(), action)); + LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policyName, Map.of(phase.getName(), phase)); XContentBuilder builder = jsonBuilder(); lifecyclePolicy.toXContent(builder, null); final StringEntity entity = new StringEntity("{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON); diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/MigrateToDataTiersIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/MigrateToDataTiersIT.java index 60e71b095039e..811d07a436677 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/MigrateToDataTiersIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/MigrateToDataTiersIT.java @@ -46,7 +46,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static java.util.Collections.singletonMap; import static 
org.elasticsearch.xpack.TimeSeriesRestDriver.createIndexWithSettings; import static org.elasticsearch.xpack.TimeSeriesRestDriver.createNewSingletonPolicy; import static org.elasticsearch.xpack.TimeSeriesRestDriver.createPolicy; @@ -101,11 +100,11 @@ public void testMigrateToDataTiersAction() throws Exception { Map warmActions = new HashMap<>(); warmActions.put(SetPriorityAction.NAME, new SetPriorityAction(50)); warmActions.put(ForceMergeAction.NAME, new ForceMergeAction(1, null)); - warmActions.put(AllocateAction.NAME, new AllocateAction(null, null, singletonMap("data", "warm"), null, null)); + warmActions.put(AllocateAction.NAME, new AllocateAction(null, null, Map.of("data", "warm"), null, null)); warmActions.put(ShrinkAction.NAME, new ShrinkAction(1, null, false)); Map coldActions = new HashMap<>(); coldActions.put(SetPriorityAction.NAME, new SetPriorityAction(0)); - coldActions.put(AllocateAction.NAME, new AllocateAction(0, null, null, null, singletonMap("data", "cold"))); + coldActions.put(AllocateAction.NAME, new AllocateAction(0, null, null, null, Map.of("data", "cold"))); createPolicy( client(), @@ -114,7 +113,7 @@ public void testMigrateToDataTiersAction() throws Exception { new Phase("warm", TimeValue.ZERO, warmActions), new Phase("cold", TimeValue.timeValueDays(100), coldActions), null, - new Phase("delete", TimeValue.ZERO, singletonMap(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE)) + new Phase("delete", TimeValue.ZERO, Map.of(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE)) ); createIndexWithSettings( @@ -377,11 +376,11 @@ public void testMigrationDryRun() throws Exception { Map warmActions = new HashMap<>(); warmActions.put(SetPriorityAction.NAME, new SetPriorityAction(50)); warmActions.put(ForceMergeAction.NAME, new ForceMergeAction(1, null)); - warmActions.put(AllocateAction.NAME, new AllocateAction(null, null, singletonMap("data", "warm"), null, null)); + warmActions.put(AllocateAction.NAME, new AllocateAction(null, null, Map.of("data", "warm"), null, null)); warmActions.put(ShrinkAction.NAME, new ShrinkAction(1, null, false)); Map coldActions = new HashMap<>(); coldActions.put(SetPriorityAction.NAME, new SetPriorityAction(0)); - coldActions.put(AllocateAction.NAME, new AllocateAction(0, null, null, null, singletonMap("data", "cold"))); + coldActions.put(AllocateAction.NAME, new AllocateAction(0, null, null, null, Map.of("data", "cold"))); createPolicy( client(), @@ -390,7 +389,7 @@ public void testMigrationDryRun() throws Exception { new Phase("warm", TimeValue.ZERO, warmActions), new Phase("cold", TimeValue.timeValueDays(100), coldActions), null, - new Phase("delete", TimeValue.ZERO, singletonMap(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE)) + new Phase("delete", TimeValue.ZERO, Map.of(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE)) ); createIndexWithSettings( diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/TimeSeriesRestDriver.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/TimeSeriesRestDriver.java index 3949139db033b..a1c7ebc2d8b2c 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/TimeSeriesRestDriver.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/TimeSeriesRestDriver.java @@ -41,7 +41,6 @@ import java.io.IOException; import java.io.InputStream; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -50,7 +49,6 @@ import 
java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import static java.util.Collections.singletonMap; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.test.ESTestCase.randomAlphaOfLengthBetween; import static org.elasticsearch.test.ESTestCase.randomBoolean; @@ -154,8 +152,8 @@ public static void createNewSingletonPolicy( LifecycleAction action, TimeValue after ) throws IOException { - Phase phase = new Phase(phaseName, after, singletonMap(action.getWriteableName(), action)); - LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policyName, singletonMap(phase.getName(), phase)); + Phase phase = new Phase(phaseName, after, Map.of(action.getWriteableName(), action)); + LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policyName, Map.of(phase.getName(), phase)); XContentBuilder builder = jsonBuilder(); lifecyclePolicy.toXContent(builder, null); final StringEntity entity = new StringEntity("{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON); @@ -202,7 +200,7 @@ public static void createFullPolicy(RestClient client, String policyName, TimeVa new AllocateAction( 1, null, - singletonMap("_name", "javaRestTest-0,javaRestTest-1," + "javaRestTest-2," + "javaRestTest-3"), + Map.of("_name", "javaRestTest-0,javaRestTest-1," + "javaRestTest-2," + "javaRestTest-3"), null, null ) @@ -215,7 +213,7 @@ public static void createFullPolicy(RestClient client, String policyName, TimeVa new AllocateAction( 0, null, - singletonMap("_name", "javaRestTest-0,javaRestTest-1," + "javaRestTest-2," + "javaRestTest-3"), + Map.of("_name", "javaRestTest-0,javaRestTest-1," + "javaRestTest-2," + "javaRestTest-3"), null, null ) @@ -224,7 +222,7 @@ public static void createFullPolicy(RestClient client, String policyName, TimeVa phases.put("hot", new Phase("hot", hotTime, hotActions)); phases.put("warm", new Phase("warm", TimeValue.ZERO, warmActions)); phases.put("cold", new Phase("cold", TimeValue.ZERO, coldActions)); - phases.put("delete", new Phase("delete", TimeValue.ZERO, singletonMap(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE))); + phases.put("delete", new Phase("delete", TimeValue.ZERO, Map.of(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE))); LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policyName, phases); // PUT policy XContentBuilder builder = jsonBuilder(); @@ -300,7 +298,7 @@ public static Map getOnlyIndexSettings(RestClient client, String Map responseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); Map indexSettings = (Map) responseMap.get(index); if (indexSettings == null) { - return Collections.emptyMap(); + return Map.of(); } return (Map) indexSettings.get("settings"); } diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/ChangePolicyForIndexIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/ChangePolicyForIndexIT.java index 7f75b010346ad..370e00785e843 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/ChangePolicyForIndexIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/ChangePolicyForIndexIT.java @@ -32,7 +32,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static java.util.Collections.singletonMap; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static 
org.elasticsearch.xpack.TimeSeriesRestDriver.createIndexWithSettings; import static org.elasticsearch.xpack.TimeSeriesRestDriver.createNewSingletonPolicy; @@ -67,7 +66,7 @@ public void testChangePolicyForIndex() throws Exception { new Phase( "hot", TimeValue.ZERO, - singletonMap(RolloverAction.NAME, new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)) + Map.of(RolloverAction.NAME, new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)) ) ); phases1.put( @@ -75,7 +74,7 @@ public void testChangePolicyForIndex() throws Exception { new Phase( "warm", TimeValue.ZERO, - singletonMap(AllocateAction.NAME, new AllocateAction(1, null, singletonMap("_name", "foobarbaz"), null, null)) + Map.of(AllocateAction.NAME, new AllocateAction(1, null, Map.of("_name", "foobarbaz"), null, null)) ) ); LifecyclePolicy lifecyclePolicy1 = new LifecyclePolicy("policy_1", phases1); @@ -85,7 +84,7 @@ public void testChangePolicyForIndex() throws Exception { new Phase( "hot", TimeValue.ZERO, - singletonMap(RolloverAction.NAME, new RolloverAction(null, null, null, 1000L, null, null, null, null, null, null)) + Map.of(RolloverAction.NAME, new RolloverAction(null, null, null, 1000L, null, null, null, null, null, null)) ) ); phases2.put( @@ -93,15 +92,9 @@ public void testChangePolicyForIndex() throws Exception { new Phase( "warm", TimeValue.ZERO, - singletonMap( + Map.of( AllocateAction.NAME, - new AllocateAction( - 1, - null, - singletonMap("_name", "javaRestTest-0,javaRestTest-1,javaRestTest-2,javaRestTest-3"), - null, - null - ) + new AllocateAction(1, null, Map.of("_name", "javaRestTest-0,javaRestTest-1,javaRestTest-2,javaRestTest-3"), null, null) ) ) ); diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java index 2b722a6555a08..4c53d711ffdef 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java @@ -58,7 +58,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static java.util.Collections.singletonMap; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.TimeSeriesRestDriver.createFullPolicy; import static org.elasticsearch.xpack.TimeSeriesRestDriver.createIndexWithSettings; @@ -219,7 +218,7 @@ public void testAllocateOnlyAllocation() throws Exception { Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) ); String allocateNodeName = "javaRestTest-0,javaRestTest-1,javaRestTest-2,javaRestTest-3"; - AllocateAction allocateAction = new AllocateAction(null, null, singletonMap("_name", allocateNodeName), null, null); + AllocateAction allocateAction = new AllocateAction(null, null, Map.of("_name", allocateNodeName), null, null); String endPhase = randomFrom("warm", "cold"); createNewSingletonPolicy(client(), policy, endPhase, allocateAction); updatePolicy(client(), index, policy); @@ -978,7 +977,7 @@ public void testHaltAtEndOfPhase() throws Exception { hotActions.put(SetPriorityAction.NAME, new SetPriorityAction(100)); Map phases = new HashMap<>(); phases.put("hot", new Phase("hot", TimeValue.ZERO, hotActions)); - phases.put("delete", new Phase("delete", 
TimeValue.ZERO, singletonMap(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE))); + phases.put("delete", new Phase("delete", TimeValue.ZERO, Map.of(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE))); LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, phases); // PUT policy XContentBuilder builder = jsonBuilder(); @@ -1004,7 +1003,7 @@ public void testDeleteActionDoesntDeleteSearchableSnapshot() throws Exception { phases.put("cold", new Phase("cold", TimeValue.ZERO, coldActions)); phases.put( "delete", - new Phase("delete", TimeValue.timeValueMillis(10000), singletonMap(DeleteAction.NAME, DeleteAction.NO_SNAPSHOT_DELETE)) + new Phase("delete", TimeValue.timeValueMillis(10000), Map.of(DeleteAction.NAME, DeleteAction.NO_SNAPSHOT_DELETE)) ); LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, phases); // PUT policy diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/SearchableSnapshotActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/SearchableSnapshotActionIT.java index fefeaa95319ed..61fea054b7293 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/SearchableSnapshotActionIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/SearchableSnapshotActionIT.java @@ -47,7 +47,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static java.util.Collections.singletonMap; import static org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.TimeSeriesRestDriver.createComposableTemplate; @@ -185,7 +184,7 @@ public void testDeleteActionDeletesSearchableSnapshot() throws Exception { Map coldActions = Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo)); Map phases = new HashMap<>(); phases.put("cold", new Phase("cold", TimeValue.ZERO, coldActions)); - phases.put("delete", new Phase("delete", TimeValue.timeValueMillis(10000), singletonMap(DeleteAction.NAME, WITH_SNAPSHOT_DELETE))); + phases.put("delete", new Phase("delete", TimeValue.timeValueMillis(10000), Map.of(DeleteAction.NAME, WITH_SNAPSHOT_DELETE))); LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, phases); // PUT policy XContentBuilder builder = jsonBuilder(); @@ -455,7 +454,7 @@ public void testIdenticalSearchableSnapshotActionIsNoop() throws Exception { new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), null ); @@ -516,12 +515,12 @@ public void testConvertingSearchableSnapshotFromFullToPartial() throws Exception new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), new Phase( "frozen", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), null ); @@ -586,7 +585,7 @@ public void testResumingSearchableSnapshotFromFullToPartial() throws 
Exception { new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), null, null @@ -600,12 +599,12 @@ public void testResumingSearchableSnapshotFromFullToPartial() throws Exception { new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), new Phase( "frozen", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), null ); @@ -664,14 +663,14 @@ public void testResumingSearchableSnapshotFromFullToPartial() throws Exception { new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), new Phase( "frozen", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), - new Phase("delete", TimeValue.ZERO, singletonMap(DeleteAction.NAME, WITH_SNAPSHOT_DELETE)) + new Phase("delete", TimeValue.ZERO, Map.of(DeleteAction.NAME, WITH_SNAPSHOT_DELETE)) ); assertBusy(() -> { logger.info("--> waiting for [{}] to be deleted...", partiallyMountedIndexName); @@ -695,7 +694,7 @@ public void testResumingSearchableSnapshotFromPartialToFull() throws Exception { new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), null, null @@ -710,12 +709,12 @@ public void testResumingSearchableSnapshotFromPartialToFull() throws Exception { new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), new Phase( "frozen", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), null ); @@ -775,10 +774,10 @@ public void testResumingSearchableSnapshotFromPartialToFull() throws Exception { new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), null, - new Phase("delete", TimeValue.ZERO, singletonMap(DeleteAction.NAME, WITH_SNAPSHOT_DELETE)) + new Phase("delete", TimeValue.ZERO, Map.of(DeleteAction.NAME, WITH_SNAPSHOT_DELETE)) ); assertBusy(() -> { logger.info("--> waiting for [{}] to be deleted...", restoredPartiallyMountedIndexName); @@ -803,12 +802,12 @@ public void testSecondSearchableSnapshotUsingDifferentRepoThrows() throws Except new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new 
SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), new Phase( "frozen", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(secondRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(secondRepo, randomBoolean())) ), null ) @@ -934,12 +933,12 @@ public void testSearchableSnapshotTotalShardsPerNode() throws Exception { new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), new Phase( "frozen", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean(), totalShardsPerNode)) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean(), totalShardsPerNode)) ), null ); diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/ShrinkActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/ShrinkActionIT.java index d2f2dbbd0c9fb..2fecf3c617ccd 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/ShrinkActionIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/ShrinkActionIT.java @@ -39,7 +39,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static java.util.Collections.singletonMap; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.TimeSeriesRestDriver.createIndexWithSettings; @@ -286,7 +285,7 @@ public void testSetSingleNodeAllocationRetriesUntilItSucceeds() throws Exception TimeValue.ZERO, Map.of(migrateAction.getWriteableName(), migrateAction, shrinkAction.getWriteableName(), shrinkAction) ); - LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, singletonMap(phase.getName(), phase)); + LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of(phase.getName(), phase)); XContentBuilder builder = jsonBuilder(); lifecyclePolicy.toXContent(builder, null); final StringEntity entity = new StringEntity("{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON); diff --git a/x-pack/plugin/ilm/qa/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java b/x-pack/plugin/ilm/qa/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java index 9460500177616..12dede7067b03 100644 --- a/x-pack/plugin/ilm/qa/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java +++ b/x-pack/plugin/ilm/qa/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java @@ -45,7 +45,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static java.util.Collections.singletonMap; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -225,8 +224,8 @@ public void testWhenUserLimitedByOnlyAliasOfIndexCanWriteToIndexWhichWasRolledov } private void createNewSingletonPolicy(RestClient client, String policy, String 
phaseName, LifecycleAction action) throws IOException { - Phase phase = new Phase(phaseName, TimeValue.ZERO, singletonMap(action.getWriteableName(), action)); - LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, singletonMap(phase.getName(), phase)); + Phase phase = new Phase(phaseName, TimeValue.ZERO, Map.of(action.getWriteableName(), action)); + LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of(phase.getName(), phase)); XContentBuilder builder = jsonBuilder(); lifecyclePolicy.toXContent(builder, null); final StringEntity entity = new StringEntity("{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON); diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ClusterStateWaitThresholdBreachTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ClusterStateWaitThresholdBreachTests.java index 55daa8104c12a..f25028824b56e 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ClusterStateWaitThresholdBreachTests.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ClusterStateWaitThresholdBreachTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; import org.junit.Before; -import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Locale; @@ -65,7 +64,7 @@ public void refreshDataStreamAndPolicy() { @Override protected Collection> nodePlugins() { - return Arrays.asList(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class, Ccr.class); + return List.of(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class, Ccr.class); } @Override diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataTiersMigrationsTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataTiersMigrationsTests.java index 7a0e00e5c4147..6d409bf474cfc 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataTiersMigrationsTests.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataTiersMigrationsTests.java @@ -30,9 +30,8 @@ import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; import org.junit.Before; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -57,7 +56,7 @@ public void refreshDataStreamAndPolicy() { @Override protected Collection> nodePlugins() { - return Arrays.asList(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class); + return List.of(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class); } @Override @@ -100,9 +99,9 @@ public void testIndexDataTierMigration() throws Exception { logger.info("starting a cold data node"); internalCluster().startNode(coldNode(Settings.EMPTY)); - Phase hotPhase = new Phase("hot", TimeValue.ZERO, Collections.emptyMap()); - Phase warmPhase = new Phase("warm", TimeValue.ZERO, Collections.emptyMap()); - Phase coldPhase = new Phase("cold", TimeValue.ZERO, Collections.emptyMap()); + Phase hotPhase = new Phase("hot", TimeValue.ZERO, Map.of()); + Phase warmPhase = new Phase("warm", TimeValue.ZERO, Map.of()); + Phase coldPhase = new Phase("cold", TimeValue.ZERO, Map.of()); LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of("hot", hotPhase, "warm", warmPhase, "cold", coldPhase)); PutLifecycleRequest 
putLifecycleRequest = new PutLifecycleRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, lifecyclePolicy); assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get()); @@ -161,9 +160,9 @@ public void testUserOptsOutOfTierMigration() throws Exception { logger.info("starting a cold data node"); internalCluster().startNode(coldNode(Settings.EMPTY)); - Phase hotPhase = new Phase("hot", TimeValue.ZERO, Collections.emptyMap()); - Phase warmPhase = new Phase("warm", TimeValue.ZERO, Collections.emptyMap()); - Phase coldPhase = new Phase("cold", TimeValue.ZERO, Collections.emptyMap()); + Phase hotPhase = new Phase("hot", TimeValue.ZERO, Map.of()); + Phase warmPhase = new Phase("warm", TimeValue.ZERO, Map.of()); + Phase coldPhase = new Phase("cold", TimeValue.ZERO, Map.of()); LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of("hot", hotPhase, "warm", warmPhase, "cold", coldPhase)); PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, lifecyclePolicy); assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get()); diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeIT.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeIT.java index b443c769407c5..2c4c1c9e20bb6 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeIT.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeIT.java @@ -33,10 +33,9 @@ import org.elasticsearch.xpack.core.ilm.action.ILMActions; import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -49,7 +48,7 @@ public class ILMMultiNodeIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(LocalStateCompositeXPackPlugin.class, DataStreamsPlugin.class, IndexLifecycle.class, Ccr.class); + return List.of(LocalStateCompositeXPackPlugin.class, DataStreamsPlugin.class, IndexLifecycle.class, Ccr.class); } @Override @@ -69,9 +68,9 @@ public void testShrinkOnTiers() throws Exception { ensureGreen(); RolloverAction rolloverAction = new RolloverAction(null, null, null, 1L, null, null, null, null, null, null); - Phase hotPhase = new Phase("hot", TimeValue.ZERO, Collections.singletonMap(rolloverAction.getWriteableName(), rolloverAction)); + Phase hotPhase = new Phase("hot", TimeValue.ZERO, Map.of(rolloverAction.getWriteableName(), rolloverAction)); ShrinkAction shrinkAction = new ShrinkAction(1, null, false); - Phase warmPhase = new Phase("warm", TimeValue.ZERO, Collections.singletonMap(shrinkAction.getWriteableName(), shrinkAction)); + Phase warmPhase = new Phase("warm", TimeValue.ZERO, Map.of(shrinkAction.getWriteableName(), shrinkAction)); Map phases = new HashMap<>(); phases.put(hotPhase.getName(), hotPhase); phases.put(warmPhase.getName(), warmPhase); @@ -89,7 +88,7 @@ public void testShrinkOnTiers() throws Exception { ); ComposableIndexTemplate template = ComposableIndexTemplate.builder() - .indexPatterns(Collections.singletonList(index)) + .indexPatterns(List.of(index)) .template(t) .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build(); @@ -121,12 +120,12 @@ public void testShrinkOnTiers() throws Exception { } public void startHotOnlyNode() { 
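+ // Note on the Collections.singletonMap / Collections.emptyMap -> Map.of and
+ // Arrays.asList -> List.of migration applied throughout these ILM tests: the
+ // factories are drop-in replacements at these call sites, with one behavioral
+ // difference worth remembering -- the Map.of/List.of family rejects nulls
+ // eagerly, e.g.
+ //
+ //   Collections.singletonMap("k", null);  // permitted
+ //   Map.of("k", null);                    // throws NullPointerException
+ //
+ // The migrated call sites here pass no null keys, values, or elements, so the
+ // swap is safe; singletonMap and Map.of both return immutable maps, while
+ // Arrays.asList is fixed-size but mutable via set() and List.of is fully
+ // immutable -- none of these tests mutate the lists.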
- Settings nodeSettings = Settings.builder().putList("node.roles", Arrays.asList("master", "data_hot", "ingest")).build(); + Settings nodeSettings = Settings.builder().putList("node.roles", List.of("master", "data_hot", "ingest")).build(); internalCluster().startNode(nodeSettings); } public void startWarmOnlyNode() { - Settings nodeSettings = Settings.builder().putList("node.roles", Arrays.asList("master", "data_warm", "ingest")).build(); + Settings nodeSettings = Settings.builder().putList("node.roles", List.of("master", "data_warm", "ingest")).build(); internalCluster().startNode(nodeSettings); } } diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeWithCCRDisabledIT.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeWithCCRDisabledIT.java index e02dd5fe45676..b91a309a23ae5 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeWithCCRDisabledIT.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeWithCCRDisabledIT.java @@ -34,10 +34,9 @@ import org.elasticsearch.xpack.core.ilm.action.ILMActions; import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -50,7 +49,7 @@ public class ILMMultiNodeWithCCRDisabledIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(LocalStateCompositeXPackPlugin.class, DataStreamsPlugin.class, IndexLifecycle.class, Ccr.class); + return List.of(LocalStateCompositeXPackPlugin.class, DataStreamsPlugin.class, IndexLifecycle.class, Ccr.class); } @Override @@ -75,7 +74,7 @@ public void testShrinkOnTiers() throws Exception { actions.put(shrinkAction.getWriteableName(), shrinkAction); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - LifecyclePolicy lifecyclePolicy = new LifecyclePolicy("shrink-policy", Collections.singletonMap(hotPhase.getName(), hotPhase)); + LifecyclePolicy lifecyclePolicy = new LifecyclePolicy("shrink-policy", Map.of(hotPhase.getName(), hotPhase)); client().execute(ILMActions.PUT, new PutLifecycleRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, lifecyclePolicy)).get(); Template t = new Template( @@ -89,7 +88,7 @@ public void testShrinkOnTiers() throws Exception { ); ComposableIndexTemplate template = ComposableIndexTemplate.builder() - .indexPatterns(Collections.singletonList(index)) + .indexPatterns(List.of(index)) .template(t) .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build(); @@ -121,12 +120,12 @@ public void testShrinkOnTiers() throws Exception { } public void startHotOnlyNode() { - Settings nodeSettings = Settings.builder().putList("node.roles", Arrays.asList("master", "data_hot", "ingest")).build(); + Settings nodeSettings = Settings.builder().putList("node.roles", List.of("master", "data_hot", "ingest")).build(); internalCluster().startNode(nodeSettings); } public void startWarmOnlyNode() { - Settings nodeSettings = Settings.builder().putList("node.roles", Arrays.asList("master", "data_warm", "ingest")).build(); + Settings nodeSettings = Settings.builder().putList("node.roles", List.of("master", "data_warm", "ingest")).build(); internalCluster().startNode(nodeSettings); } } diff --git 
diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java
index d06a9f9cc19b1..644f88dc533b9 100644
--- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java
+++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java
@@ -56,9 +56,7 @@
 import java.io.IOException;
 import java.time.Instant;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -112,7 +110,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class, TestILMPlugin.class);
+        return List.of(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class, TestILMPlugin.class);
     }

     @Before
@@ -128,9 +126,9 @@ public void init() {
         Step.StepKey compKey = new Step.StepKey("mock", "complete", "complete");
         steps.add(new ObservableClusterStateWaitStep(key, compKey));
         steps.add(new PhaseCompleteStep(compKey, null));
-        Map<String, LifecycleAction> actions = Collections.singletonMap(ObservableAction.NAME, OBSERVABLE_ACTION);
+        Map<String, LifecycleAction> actions = Map.of(ObservableAction.NAME, OBSERVABLE_ACTION);
         mockPhase = new Phase("mock", TimeValue.timeValueSeconds(0), actions);
-        Map<String, Phase> phases = Collections.singletonMap("mock", mockPhase);
+        Map<String, Phase> phases = Map.of("mock", mockPhase);
         lifecyclePolicy = newLockableLifecyclePolicy("test", phases);
     }

@@ -311,7 +309,7 @@ public void testExplainExecution() throws Exception {
         updateIndexSettings(Settings.builder().put("index.lifecycle.test.complete", true), "test");

         {
-            Phase phase = new Phase("mock", TimeValue.ZERO, Collections.singletonMap("TEST_ACTION", OBSERVABLE_ACTION));
+            Phase phase = new Phase("mock", TimeValue.ZERO, Map.of("TEST_ACTION", OBSERVABLE_ACTION));
             PhaseExecutionInfo expectedExecutionInfo = new PhaseExecutionInfo(lifecyclePolicy.getName(), phase, 1L, actualModifiedDate);
             assertBusy(() -> {
                 IndexLifecycleExplainResponse indexResponse = executeExplainRequestAndGetTestIndexResponse("test");
@@ -526,12 +524,12 @@ public List<Setting<?>> getSettings() {
             Setting.Property.Dynamic,
             Setting.Property.IndexScope
         );
-        return Collections.singletonList(COMPLETE_SETTING);
+        return List.of(COMPLETE_SETTING);
     }

     @Override
     public List<NamedXContentRegistry.Entry> getNamedXContent() {
-        return Arrays.asList(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ObservableAction.NAME), (p) -> {
+        return List.of(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ObservableAction.NAME), (p) -> {
             MockAction.parse(p);
             return OBSERVABLE_ACTION;
         }));
@@ -539,7 +537,7 @@ public List<NamedXContentRegistry.Entry> getNamedXContent() {
     @Override
     public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
-        return Arrays.asList(
+        return List.of(
             new NamedWriteableRegistry.Entry(LifecycleType.class, LockableLifecycleType.TYPE, (in) -> LockableLifecycleType.INSTANCE),
             new NamedWriteableRegistry.Entry(LifecycleAction.class, ObservableAction.NAME, ObservableAction::readObservableAction),
             new NamedWriteableRegistry.Entry(
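Editor's note (illustration only, not part of the patch): Collections.singletonMap and Map.of produce content-equal immutable maps, so the swaps in this test class are observable only if a null key or value sneaks in. A quick JDK-only sketch with hypothetical values:

import java.util.Collections;
import java.util.Map;

class SingletonVsMapOf {
    public static void main(String[] args) {
        Map<String, String> legacy = Collections.singletonMap("mock", "phase");
        Map<String, String> modern = Map.of("mock", "phase");
        System.out.println(legacy.equals(modern)); // true: Map equality is by entries

        Map<String, String> nullValue = Collections.singletonMap("mock", null); // accepted
        // Map.of("mock", null); // NullPointerException: Map.of is null-hostile
        System.out.println(nullValue.containsKey("mock"));
    }
}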
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java
index 9efe46402428c..a36b74d9932d9 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java
@@ -251,7 +251,7 @@ static List<String> migrateIlmPolicies(
     ) {
         IndexLifecycleMetadata currentLifecycleMetadata = currentState.metadata().custom(IndexLifecycleMetadata.TYPE);
         if (currentLifecycleMetadata == null) {
-            return Collections.emptyList();
+            return List.of();
         }

         List<String> migratedPolicies = new ArrayList<>();
@@ -827,7 +827,6 @@ public MigratedEntities(
             this.migratedPolicies = Collections.unmodifiableList(migratedPolicies);
             this.migratedTemplates = migratedTemplates;
         }
-
     }

     /**
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorService.java
index 42d1955f0d453..c5d367804db42 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorService.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorService.java
@@ -41,7 +41,6 @@
 import org.elasticsearch.xpack.core.ilm.WaitForRolloverReadyStep;

 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -219,8 +218,8 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources
                 GREEN,
                 "No Index Lifecycle Management policies configured",
                 createDetails(verbose, ilmMetadata, currentMode),
-                Collections.emptyList(),
-                Collections.emptyList()
+                List.of(),
+                List.of()
             );
         } else if (currentMode != OperationMode.RUNNING) {
             return createIndicator(
@@ -238,8 +237,8 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources
                 GREEN,
                 "Index Lifecycle Management is running",
                 createDetails(verbose, ilmMetadata, currentMode),
-                Collections.emptyList(),
-                Collections.emptyList()
+                List.of(),
+                List.of()
             );
         } else {
             return createIndicator(
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java
index e59bde7253051..71d61caa5fe31 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java
@@ -54,7 +54,6 @@
 import java.io.Closeable;
 import java.time.Clock;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.Map;
 import java.util.Set;
 import java.util.function.LongSupplier;
@@ -500,7 +499,7 @@ static Set<String> indicesOnShuttingDownNodesInDangerousStep(ClusterState state,
             SingleNodeShutdownMetadata.Type.REPLACE
         );
         if (shutdownNodes.isEmpty()) {
-            return Collections.emptySet();
+            return Set.of();
         }

         Set<String> indicesPreventingShutdown = state.metadata()
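Editor's note (illustration only, not part of the patch): in the production classes above, Collections.emptyList/emptySet and List.of()/Set.of() are drop-in replacements; both are immutable and content-equal. A small sketch:

import java.util.Collections;
import java.util.List;
import java.util.Set;

class EmptyFactories {
    public static void main(String[] args) {
        System.out.println(Collections.emptyList().equals(List.of())); // true
        System.out.println(Collections.emptySet().equals(Set.of()));   // true
        // Current JDKs reuse one shared empty instance per factory (an
        // implementation detail, not a spec guarantee), so no allocation is added:
        System.out.println(List.of() == List.of()); // true on current JDKs
    }
}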
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportGetLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportGetLifecycleAction.java
index f4598727d6123..5fa0f881559fb 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportGetLifecycleAction.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportGetLifecycleAction.java
@@ -32,7 +32,6 @@
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -71,7 +70,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, ActionListener<Response> listener) {
         IndexLifecycleMetadata metadata = clusterService.state().metadata().custom(IndexLifecycleMetadata.TYPE);
         if (metadata == null) {
             if (request.getPolicyNames().length == 0) {
-                listener.onResponse(new Response(Collections.emptyList()));
+                listener.onResponse(new Response(List.of()));
             } else {
                 listener.onFailure(
                     new ResourceNotFoundException("Lifecycle policy not found: {}", Arrays.toString(request.getPolicyNames()))
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryItem.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryItem.java
index 977887a0487f3..efd54e05cb153 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryItem.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryItem.java
@@ -18,7 +18,7 @@
 import org.elasticsearch.xcontent.json.JsonXContent;

 import java.io.IOException;
-import java.util.Collections;
+import java.util.Map;
 import java.util.Objects;

 import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE;
@@ -110,7 +110,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
     }

     private static String exceptionToString(Exception exception) {
-        Params stacktraceParams = new MapParams(Collections.singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false"));
+        Params stacktraceParams = new MapParams(Map.of(REST_EXCEPTION_SKIP_STACK_TRACE, "false"));
         String exceptionString;
         try (XContentBuilder causeXContentBuilder = JsonXContent.contentBuilder()) {
             causeXContentBuilder.startObject();
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java
index 570c2f5231acf..2ee133b6292bd 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java
@@ -48,7 +48,6 @@
 import java.io.ByteArrayInputStream;
 import java.nio.charset.StandardCharsets;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;

@@ -118,10 +117,7 @@ public void testMigrateIlmPolicyForIndexWithoutILMMetadata() {
             Metadata.builder()
                 .putCustom(
                     IndexLifecycleMetadata.TYPE,
-                    new IndexLifecycleMetadata(
-                        Collections.singletonMap(policyMetadata.getName(), policyMetadata),
-                        OperationMode.STOPPED
-                    )
+                    new IndexLifecycleMetadata(Map.of(policyMetadata.getName(), policyMetadata), OperationMode.STOPPED)
                 )
                 .put(IndexMetadata.builder(indexName).settings(getBaseIndexSettings()))
                 .build()
@@ -176,7 +172,7 @@ public void testMigrateIlmPolicyForPhaseWithDeactivatedMigrateAction() {
         );
         LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(
             policy,
-            Collections.emptyMap(),
+            Map.of(),
             randomNonNegativeLong(),
             randomNonNegativeLong()
         );
@@ -186,10 +182,7 @@
             Metadata.builder()
                 .putCustom(
                     IndexLifecycleMetadata.TYPE,
-                    new IndexLifecycleMetadata(
-                        Collections.singletonMap(policyMetadata.getName(), policyMetadata),
-                        OperationMode.STOPPED
-                    )
+                    new IndexLifecycleMetadata(Map.of(policyMetadata.getName(), policyMetadata), OperationMode.STOPPED)
                 )
                 .put(IndexMetadata.builder(indexName).settings(getBaseIndexSettings()))
                 .build()
@@ -245,10 +238,7 @@ public void testMigrateIlmPolicyRefreshesCachedPhase() {
             Metadata.builder()
                 .putCustom(
                     IndexLifecycleMetadata.TYPE,
-                    new IndexLifecycleMetadata(
-                        Collections.singletonMap(policyMetadata.getName(), policyMetadata),
-                        OperationMode.STOPPED
-                    )
+                    new IndexLifecycleMetadata(Map.of(policyMetadata.getName(), policyMetadata), OperationMode.STOPPED)
                 )
                 .put(indexMetadata)
                 .build()
@@ -302,10 +292,7 @@ public void testMigrateIlmPolicyRefreshesCachedPhase() {
                 .putCustom(
                     IndexLifecycleMetadata.TYPE,
                     new IndexLifecycleMetadata(
-                        Collections.singletonMap(
-                            policyMetadataWithTotalShardsPerNode.getName(),
-                            policyMetadataWithTotalShardsPerNode
-                        ),
+                        Map.of(policyMetadataWithTotalShardsPerNode.getName(), policyMetadataWithTotalShardsPerNode),
                         OperationMode.STOPPED
                     )
                 )
@@ -352,10 +339,7 @@ public void testMigrateIlmPolicyRefreshesCachedPhase() {
             Metadata.builder()
                 .putCustom(
                     IndexLifecycleMetadata.TYPE,
-                    new IndexLifecycleMetadata(
-                        Collections.singletonMap(policyMetadata.getName(), policyMetadata),
-                        OperationMode.STOPPED
-                    )
+                    new IndexLifecycleMetadata(Map.of(policyMetadata.getName(), policyMetadata), OperationMode.STOPPED)
                 )
                 .put(indexMetadata)
                 .build()
@@ -406,10 +390,7 @@ public void testMigrateIlmPolicyRefreshesCachedPhase() {
             Metadata.builder()
                 .putCustom(
                     IndexLifecycleMetadata.TYPE,
-                    new IndexLifecycleMetadata(
-                        Collections.singletonMap(policyMetadata.getName(), policyMetadata),
-                        OperationMode.STOPPED
-                    )
+                    new IndexLifecycleMetadata(Map.of(policyMetadata.getName(), policyMetadata), OperationMode.STOPPED)
                 )
                 .put(indexMetadata)
                 .build()
@@ -456,10 +437,7 @@ public void testMigrateIlmPolicyRefreshesCachedPhase() {
             Metadata.builder()
                 .putCustom(
                     IndexLifecycleMetadata.TYPE,
-                    new IndexLifecycleMetadata(
-                        Collections.singletonMap(policyMetadata.getName(), policyMetadata),
-                        OperationMode.STOPPED
-                    )
+                    new IndexLifecycleMetadata(Map.of(policyMetadata.getName(), policyMetadata), OperationMode.STOPPED)
                 )
                 .put(indexMetadata)
                 .build()
@@ -1008,7 +986,7 @@ public void testMigrateToDataTiersRouting() {
         );
         LifecyclePolicyMetadata policyWithDataAttribute = new LifecyclePolicyMetadata(
             policyToMigrate,
-            Collections.emptyMap(),
+            Map.of(),
             randomNonNegativeLong(),
             randomNonNegativeLong()
         );
@@ -1026,7 +1004,7 @@ public void testMigrateToDataTiersRouting() {
         );
         LifecyclePolicyMetadata policyWithOtherAttribute = new LifecyclePolicyMetadata(
             shouldntBeMigratedPolicy,
-            Collections.emptyMap(),
+            Map.of(),
             randomNonNegativeLong(),
             randomNonNegativeLong()
         );
@@ -1215,7 +1193,7 @@ public void testDryRunDoesntRequireILMStopped() {
     public void testMigrationDoesNotRemoveComposableTemplates() {
         ComposableIndexTemplate composableIndexTemplate = ComposableIndexTemplate.builder()
-            .indexPatterns(Collections.singletonList("*"))
+            .indexPatterns(List.of("*"))
             .template(new Template(Settings.builder().put(DATA_ROUTING_REQUIRE_SETTING, "hot").build(), null, null))
             .build();
@@ -1285,7 +1263,7 @@ private LifecyclePolicyMetadata getWarmColdPolicyMeta(
                 new Phase("cold", TimeValue.ZERO, Map.of(coldAllocateAction.getWriteableName(), coldAllocateAction))
             )
         );
-        return new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong());
+        return new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong());
     }

     public void testMigrateLegacyIndexTemplates() {
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTaskTests.java
index b3146e81d08fc..06d11bff069fd 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTaskTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTaskTests.java
@@ -42,9 +42,8 @@
 import org.mockito.Mockito;

 import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;

 import static org.elasticsearch.cluster.metadata.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY;
@@ -91,42 +90,33 @@ public void prepareState() throws IOException {
         Phase mixedPhase = new Phase(
             "first_phase",
             TimeValue.ZERO,
-            Collections.singletonMap(MockAction.NAME, new MockAction(Arrays.asList(firstStep, secondStep, thirdStep)))
+            Map.of(MockAction.NAME, new MockAction(List.of(firstStep, secondStep, thirdStep)))
         );
         Phase allClusterPhase = new Phase(
             "first_phase",
             TimeValue.ZERO,
-            Collections.singletonMap(MockAction.NAME, new MockAction(Arrays.asList(firstStep, allClusterSecondStep)))
+            Map.of(MockAction.NAME, new MockAction(List.of(firstStep, allClusterSecondStep)))
         );
         Phase invalidPhase = new Phase(
             "invalid_phase",
             TimeValue.ZERO,
-            Collections.singletonMap(
-                MockAction.NAME,
-                new MockAction(Arrays.asList(new MockClusterStateActionStep(firstStepKey, invalidStepKey)))
-            )
-        );
-        LifecyclePolicy mixedPolicy = newTestLifecyclePolicy(mixedPolicyName, Collections.singletonMap(mixedPhase.getName(), mixedPhase));
-        LifecyclePolicy allClusterPolicy = newTestLifecyclePolicy(
-            allClusterPolicyName,
-            Collections.singletonMap(allClusterPhase.getName(), allClusterPhase)
-        );
-        LifecyclePolicy invalidPolicy = newTestLifecyclePolicy(
-            invalidPolicyName,
-            Collections.singletonMap(invalidPhase.getName(), invalidPhase)
+            Map.of(MockAction.NAME, new MockAction(List.of(new MockClusterStateActionStep(firstStepKey, invalidStepKey))))
         );
+        LifecyclePolicy mixedPolicy = newTestLifecyclePolicy(mixedPolicyName, Map.of(mixedPhase.getName(), mixedPhase));
+        LifecyclePolicy allClusterPolicy = newTestLifecyclePolicy(allClusterPolicyName, Map.of(allClusterPhase.getName(), allClusterPhase));
+        LifecyclePolicy invalidPolicy = newTestLifecyclePolicy(invalidPolicyName, Map.of(invalidPhase.getName(), invalidPhase));
         Map<String, LifecyclePolicyMetadata> policyMap = new HashMap<>();
         policyMap.put(
             mixedPolicyName,
-            new LifecyclePolicyMetadata(mixedPolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
+            new LifecyclePolicyMetadata(mixedPolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())
         );
         policyMap.put(
             allClusterPolicyName,
-            new LifecyclePolicyMetadata(allClusterPolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
+            new LifecyclePolicyMetadata(allClusterPolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())
         );
         policyMap.put(
             invalidPolicyName,
-            new LifecyclePolicyMetadata(invalidPolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
+            new LifecyclePolicyMetadata(invalidPolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())
         );
         policyStepsRegistry = new PolicyStepsRegistry(NamedXContentRegistry.EMPTY, client, null);
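Editor's note (illustration only, not part of the patch): the tests above keep HashMap where entries are added one at a time, and that split is deliberate. Map.of only suits small, fixed, duplicate-free entry sets, as this JDK-only sketch shows:

import java.util.HashMap;
import java.util.Map;

class MapOfConstraints {
    public static void main(String[] args) {
        Map<String, Integer> mutable = new HashMap<>();
        mutable.put("hot", 1);
        mutable.put("hot", 2); // fine: last write wins
        // Map.of("hot", 1, "hot", 2); // IllegalArgumentException: duplicate key

        // Map.of has fixed-arity overloads up to 10 entries; past that, use Map.ofEntries:
        Map<String, Integer> larger = Map.ofEntries(Map.entry("hot", 1), Map.entry("warm", 2), Map.entry("cold", 3));
        System.out.println(mutable + " " + larger);
    }
}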
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorServiceTests.java
index 9e2a67caac253..7a37aaba96c18 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorServiceTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorServiceTests.java
@@ -36,7 +36,6 @@
 import org.elasticsearch.xpack.core.ilm.LifecycleSettings;

 import java.io.IOException;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -177,7 +176,7 @@ public void testIsYellowWhenNotRunningAndPoliciesConfigured() {
             YELLOW,
             "Index Lifecycle Management is not running",
             new SimpleHealthIndicatorDetails(Map.of("ilm_status", status, "policies", 1, "stagnating_indices", 0)),
-            Collections.singletonList(
+            List.of(
                 new HealthIndicatorImpact(
                     NAME,
                     IlmHealthIndicatorService.AUTOMATION_DISABLED_IMPACT_ID,
@@ -251,7 +250,7 @@ public void testSkippingFieldsWhenVerboseIsFalse() {
             YELLOW,
             "Index Lifecycle Management is not running",
             HealthIndicatorDetails.EMPTY,
-            Collections.singletonList(
+            List.of(
                 new HealthIndicatorImpact(
                     NAME,
                     IlmHealthIndicatorService.AUTOMATION_DISABLED_IMPACT_ID,
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInfoTransportActionTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInfoTransportActionTests.java
index d81faf6a398d7..4e8d7440eb773 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInfoTransportActionTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInfoTransportActionTests.java
@@ -33,7 +33,6 @@
 import org.mockito.Mockito;

 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -75,18 +74,18 @@ public void testUsageStats() throws Exception {
         indexPolicies.put("index_3", policy1Name);
         indexPolicies.put("index_4", policy1Name);
         indexPolicies.put("index_5", policy3Name);
-        LifecyclePolicy policy1 = new LifecyclePolicy(policy1Name, Collections.emptyMap());
+        LifecyclePolicy policy1 = new LifecyclePolicy(policy1Name, Map.of());
         policies.add(policy1);
-        PolicyStats policy1Stats = new PolicyStats(Collections.emptyMap(), 4);
+        PolicyStats policy1Stats = new PolicyStats(Map.of(), 4);

         Map<String, Phase> phases1 = new HashMap<>();
         LifecyclePolicy policy2 = new LifecyclePolicy(policy2Name, phases1);
         policies.add(policy2);
-        PolicyStats policy2Stats = new PolicyStats(Collections.emptyMap(), 0);
+        PolicyStats policy2Stats = new PolicyStats(Map.of(), 0);

-        LifecyclePolicy policy3 = new LifecyclePolicy(policy3Name, Collections.emptyMap());
+        LifecyclePolicy policy3 = new LifecyclePolicy(policy3Name, Map.of());
         policies.add(policy3);
-        PolicyStats policy3Stats = new PolicyStats(Collections.emptyMap(), 1);
+        PolicyStats policy3Stats = new PolicyStats(Map.of(), 1);

         ClusterState clusterState = buildClusterState(policies, indexPolicies);
         Mockito.when(clusterService.state()).thenReturn(clusterState);
@@ -110,7 +109,7 @@ public void testUsageStats() throws Exception {
     private ClusterState buildClusterState(List<LifecyclePolicy> lifecyclePolicies, Map<String, String> indexPolicies) {
         Map<String, LifecyclePolicyMetadata> lifecyclePolicyMetadatasMap = lifecyclePolicies.stream()
-            .map(p -> new LifecyclePolicyMetadata(p, Collections.emptyMap(), 1, 0L))
+            .map(p -> new LifecyclePolicyMetadata(p, Map.of(), 1, 0L))
             .collect(Collectors.toMap(LifecyclePolicyMetadata::getName, Function.identity()));

         IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(lifecyclePolicyMetadatasMap, OperationMode.RUNNING);
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleMetadataTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleMetadataTests.java
index e757488c2690e..ece83fe6bc437 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleMetadataTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleMetadataTests.java
@@ -44,8 +44,6 @@
 import org.elasticsearch.xpack.core.ilm.WaitForSnapshotAction;

 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.SortedMap;
@@ -63,10 +61,7 @@ protected IndexLifecycleMetadata createTestInstance() {
         Map<String, LifecyclePolicyMetadata> policies = Maps.newMapWithExpectedSize(numPolicies);
         for (int i = 0; i < numPolicies; i++) {
             LifecyclePolicy policy = randomTimeseriesLifecyclePolicy(randomAlphaOfLength(4) + i);
-            policies.put(
-                policy.getName(),
-                new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
-            );
+            policies.put(policy.getName(), new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()));
         }
         return new IndexLifecycleMetadata(policies, randomFrom(OperationMode.values()));
     }
@@ -84,7 +79,7 @@ protected Reader<Metadata.Custom> instanceReader() {
     @Override
     protected NamedWriteableRegistry getNamedWriteableRegistry() {
         return new NamedWriteableRegistry(
-            Arrays.asList(
+            List.of(
                 new NamedWriteableRegistry.Entry(
                     LifecycleType.class,
                     TimeseriesLifecycleType.TYPE,
@@ -111,7 +106,7 @@ protected NamedXContentRegistry xContentRegistry() {
         List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables());
         entries.addAll(
-            Arrays.asList(
+            List.of(
                 new NamedXContentRegistry.Entry(
                     LifecycleType.class,
                     new ParseField(TimeseriesLifecycleType.TYPE),
@@ -155,7 +150,7 @@ protected Metadata.Custom mutateInstance(Custom instance) {
                 policyName,
                 new LifecyclePolicyMetadata(
                     randomTimeseriesLifecyclePolicy(policyName),
-                    Collections.emptyMap(),
+                    Map.of(),
                     randomNonNegativeLong(),
                     randomNonNegativeLong()
                 )
@@ -192,9 +187,9 @@ public static IndexLifecycleMetadata createTestInstance(int numPolicies, OperationMode mode) {
             Map<String, Phase> phases = Maps.newMapWithExpectedSize(numberPhases);
             for (int j = 0; j < numberPhases; j++) {
                 TimeValue after = randomTimeValue(0, 1_000_000_000, TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS);
-                Map<String, LifecycleAction> actions = Collections.emptyMap();
+                Map<String, LifecycleAction> actions = Map.of();
                 if (randomBoolean()) {
-                    actions = Collections.singletonMap(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE);
+                    actions = Map.of(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE);
                 }
                 String phaseName = randomAlphaOfLength(10);
                 phases.put(phaseName, new Phase(phaseName, after, actions));
@@ -204,7 +199,7 @@ public static IndexLifecycleMetadata createTestInstance(int numPolicies, OperationMode mode) {
                 policyName,
                 new LifecyclePolicyMetadata(
                     newTestLifecyclePolicy(policyName, phases),
-                    Collections.emptyMap(),
+                    Map.of(),
                     randomNonNegativeLong(),
                     randomNonNegativeLong()
                 )
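Editor's note (illustration only, not part of the patch): one subtlety worth keeping in mind when converting test fixtures like the phase maps above, Map.of makes no iteration-order promise (current JDKs even randomize it between runs), so assertions should compare contents, never order:

import java.util.LinkedHashMap;
import java.util.Map;

class MapIterationOrder {
    public static void main(String[] args) {
        Map<String, Integer> unordered = Map.of("hot", 1, "warm", 2, "cold", 3);
        System.out.println(unordered.keySet()); // order may differ from run to run

        // When order matters, an explicitly ordered map is still the right tool:
        Map<String, Integer> ordered = new LinkedHashMap<>();
        ordered.put("hot", 1);
        ordered.put("warm", 2);
        ordered.put("cold", 3);
        System.out.println(ordered.keySet()); // always [hot, warm, cold]
    }
}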
b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java
index 8a4859fcd8b77..374f10b604f18 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java
@@ -73,8 +73,6 @@
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -89,7 +87,6 @@
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.BiFunction;

-import static java.util.stream.Collectors.toList;
 import static org.elasticsearch.cluster.metadata.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.awaitLatch;
 import static org.elasticsearch.xpack.core.ilm.LifecycleSettings.LIFECYCLE_HISTORY_INDEX_ENABLED_SETTING;
@@ -248,7 +245,7 @@ public void testRunPolicyErrorStepOnRetryableFailedStep() {
         List<Step> waitForRolloverStepList = action.toSteps(client, phaseName, null)
             .stream()
             .filter(s -> s.getKey().name().equals(WaitForRolloverReadyStep.NAME))
-            .collect(toList());
+            .toList();
         assertThat(waitForRolloverStepList.size(), is(1));
         Step waitForRolloverStep = waitForRolloverStepList.get(0);
         StepKey stepKey = waitForRolloverStep.getKey();
@@ -288,7 +285,7 @@ public void testRunStateChangePolicyWithNoNextStep() throws Exception {
             .build();
         ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
         DiscoveryNode node = clusterService.localNode();
-        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING);
+        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING);
         ClusterState state = ClusterState.builder(new ClusterName("cluster"))
             .metadata(Metadata.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, ilm))
             .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId()))
@@ -317,7 +314,7 @@ public void testRunStateChangePolicyWithNextStep() throws Exception {
         StepKey nextStepKey = new StepKey("phase", "action", "next_cluster_state_action_step");
         MockClusterStateActionStep step = new MockClusterStateActionStep(stepKey, nextStepKey);
         MockClusterStateActionStep nextStep = new MockClusterStateActionStep(nextStepKey, null);
-        MockPolicyStepsRegistry stepRegistry = createMultiStepPolicyStepRegistry(policyName, Arrays.asList(step, nextStep));
+        MockPolicyStepsRegistry stepRegistry = createMultiStepPolicyStepRegistry(policyName, List.of(step, nextStep));
         stepRegistry.setResolver((i, k) -> {
             if (stepKey.equals(k)) {
                 return step;
@@ -340,7 +337,7 @@ public void testRunStateChangePolicyWithNextStep() throws Exception {
             .build();
         ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
         DiscoveryNode node = clusterService.localNode();
-        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING);
+        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING);
         ClusterState state = ClusterState.builder(new ClusterName("cluster"))
             .metadata(Metadata.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, ilm))
             .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId()))
@@ -427,7 +424,7 @@ public void doTestRunPolicyWithFailureToReadPolicy(boolean asyncAction, boolean
             .build();
         ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
         DiscoveryNode node = clusterService.localNode();
-        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING);
+        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING);
         ClusterState state = ClusterState.builder(new ClusterName("cluster"))
             .metadata(Metadata.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, ilm))
             .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId()))
@@ -476,7 +473,7 @@ public void testRunAsyncActionDoesNotRun() {
             .build();
         ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
         DiscoveryNode node = clusterService.localNode();
-        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING);
+        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING);
         ClusterState state = ClusterState.builder(new ClusterName("cluster"))
             .metadata(Metadata.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, ilm))
             .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId()))
@@ -503,7 +500,7 @@ public void testRunStateChangePolicyWithAsyncActionNextStep() throws Exception {
         StepKey nextStepKey = new StepKey("phase", "action", "async_action_step");
         MockClusterStateActionStep step = new MockClusterStateActionStep(stepKey, nextStepKey);
         MockAsyncActionStep nextStep = new MockAsyncActionStep(nextStepKey, null);
-        MockPolicyStepsRegistry stepRegistry = createMultiStepPolicyStepRegistry(policyName, Arrays.asList(step, nextStep));
+        MockPolicyStepsRegistry stepRegistry = createMultiStepPolicyStepRegistry(policyName, List.of(step, nextStep));
         stepRegistry.setResolver((i, k) -> {
             if (stepKey.equals(k)) {
                 return step;
@@ -526,7 +523,7 @@ public void testRunStateChangePolicyWithAsyncActionNextStep() throws Exception {
             .build();
         ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
         DiscoveryNode node = clusterService.localNode();
-        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING);
+        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING);
         ClusterState state = ClusterState.builder(new ClusterName("cluster"))
             .metadata(Metadata.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, ilm))
             .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId()))
@@ -603,7 +600,7 @@ public void testRunPeriodicStep() throws Exception {
             .build();
         ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
         DiscoveryNode node = clusterService.localNode();
-        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING);
+        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING);
         ClusterState state = ClusterState.builder(new ClusterName("cluster"))
             .metadata(Metadata.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, ilm))
             .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId()))
@@ -785,7 +782,7 @@ public void testGetCurrentStep() {
         Client client = mock(Client.class);
         when(client.settings()).thenReturn(Settings.EMPTY);
         LifecyclePolicy policy = LifecyclePolicyTests.randomTimeseriesLifecyclePolicyWithAllPhases(policyName);
-        LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Collections.emptyMap(), 1, randomNonNegativeLong());
+        LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Map.of(), 1, randomNonNegativeLong());
         String phaseName = randomFrom(policy.getPhases().keySet());
         Phase phase = policy.getPhases().get(phaseName);
         PhaseExecutionInfo pei = new PhaseExecutionInfo(policy.getName(), phase, 1, randomNonNegativeLong());
@@ -824,7 +821,7 @@ public void testIsReadyToTransition() {
         StepKey stepKey = new StepKey("phase", MockAction.NAME, MockAction.NAME);
         MockAsyncActionStep step = new MockAsyncActionStep(stepKey, null);
         SortedMap<String, LifecyclePolicyMetadata> lifecyclePolicyMap = new TreeMap<>(
-            Collections.singletonMap(
+            Map.of(
                 policyName,
                 new LifecyclePolicyMetadata(
                     createPolicy(policyName, null, step.getKey()),
@@ -834,9 +831,9 @@ public void testIsReadyToTransition() {
                 )
             )
         );
-        Map<String, Step> firstStepMap = Collections.singletonMap(policyName, step);
-        Map<StepKey, Step> policySteps = Collections.singletonMap(step.getKey(), step);
-        Map<String, Map<StepKey, Step>> stepMap = Collections.singletonMap(policyName, policySteps);
+        Map<String, Step> firstStepMap = Map.of(policyName, step);
+        Map<StepKey, Step> policySteps = Map.of(step.getKey(), step);
+        Map<String, Map<StepKey, Step>> stepMap = Map.of(policyName, policySteps);
         PolicyStepsRegistry policyStepsRegistry = new PolicyStepsRegistry(
             lifecyclePolicyMap,
             firstStepMap,
@@ -897,7 +894,7 @@ private static LifecyclePolicy createPolicy(String policyName, StepKey safeStep, StepKey unsafeStep) {
         assert unsafeStep == null || safeStep.phase().equals(unsafeStep.phase()) == false
             : "safe and unsafe actions must be in different phases";
         Map<String, LifecycleAction> actions = new HashMap<>();
-        List<Step> steps = Collections.singletonList(new MockStep(safeStep, null));
+        List<Step> steps = List.of(new MockStep(safeStep, null));
         MockAction safeAction = new MockAction(steps, true);
         actions.put(safeAction.getWriteableName(), safeAction);
         Phase phase = new Phase(safeStep.phase(), TimeValue.timeValueMillis(0), actions);
@@ -906,7 +903,7 @@ private static LifecyclePolicy createPolicy(String policyName, StepKey safeStep, StepKey unsafeStep) {
         if (unsafeStep != null) {
             assert MockAction.NAME.equals(unsafeStep.action()) : "The unsafe action needs to be MockAction.NAME";
             Map<String, LifecycleAction> actions = new HashMap<>();
-            List<Step> steps = Collections.singletonList(new MockStep(unsafeStep, null));
+            List<Step> steps = List.of(new MockStep(unsafeStep, null));
             MockAction unsafeAction = new MockAction(steps, false);
             actions.put(unsafeAction.getWriteableName(), unsafeAction);
             Phase phase = new Phase(unsafeStep.phase(), TimeValue.timeValueMillis(0), actions);
@@ -1233,7 +1230,7 @@ public Step getStep(IndexMetadata indexMetadata, StepKey stepKey) {
     }

     public static MockPolicyStepsRegistry createOneStepPolicyStepRegistry(String policyName, Step step) {
-        return createMultiStepPolicyStepRegistry(policyName, Collections.singletonList(step));
+        return createMultiStepPolicyStepRegistry(policyName, List.of(step));
     }

     public static MockPolicyStepsRegistry createMultiStepPolicyStepRegistry(String policyName, List<Step> steps) {
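Editor's note (illustration only, not part of the patch): IndexLifecycleRunnerTests above also swaps .collect(toList()) for Stream.toList(), which is not a pure rename. Stream.toList (Java 16+) returns an unmodifiable list, while Collectors.toList happens to yield a mutable ArrayList today without guaranteeing it:

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class StreamToListNote {
    public static void main(String[] args) {
        List<String> collected = Stream.of("hot", "warm").collect(Collectors.toList());
        collected.add("cold"); // works on current JDKs: the collector returns an ArrayList

        List<String> viaToList = Stream.of("hot", "warm").toList();
        // viaToList.add("cold"); // UnsupportedOperationException: unmodifiable result
        System.out.println(collected + " " + viaToList);
    }
}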
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java
index eceb81542377a..b77e643bc2853 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java
@@ -58,9 +58,9 @@
 import java.time.Clock;
 import java.time.Instant;
 import java.time.ZoneId;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.UUID;
@@ -114,7 +114,7 @@ public void prepareServices() {
         }).when(executorService).execute(any());
         Settings settings = Settings.builder().put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, "1s").build();
         when(clusterService.getClusterSettings()).thenReturn(
-            new ClusterSettings(settings, Collections.singleton(LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING))
+            new ClusterSettings(settings, Set.of(LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING))
         );
         when(clusterService.lifecycleState()).thenReturn(State.STARTED);
@@ -154,14 +154,11 @@ public void testStoppedModeSkip() {
             randomStepKey(),
             randomStepKey()
         );
-        MockAction mockAction = new MockAction(Collections.singletonList(mockStep));
-        Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction));
-        LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase));
+        MockAction mockAction = new MockAction(List.of(mockStep));
+        Phase phase = new Phase("phase", TimeValue.ZERO, Map.of("action", mockAction));
+        LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Map.of(phase.getName(), phase));
         SortedMap<String, LifecyclePolicyMetadata> policyMap = new TreeMap<>();
-        policyMap.put(
-            policyName,
-            new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
-        );
+        policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()));
         Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
         IndexMetadata indexMetadata = IndexMetadata.builder(index.getName())
             .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_NAME, policyName))
@@ -191,14 +188,11 @@ public void testRequestedStopOnShrink() {
             mockShrinkStep,
             randomStepKey()
         );
-        MockAction mockAction = new MockAction(Collections.singletonList(mockStep));
-        Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction));
-        LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase));
+        MockAction mockAction = new MockAction(List.of(mockStep));
+        Phase phase = new Phase("phase", TimeValue.ZERO, Map.of("action", mockAction));
+        LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Map.of(phase.getName(), phase));
         SortedMap<String, LifecyclePolicyMetadata> policyMap = new TreeMap<>();
-        policyMap.put(
-            policyName,
-            new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
-        );
+        policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()));
         Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
         LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
         lifecycleState.setPhase(mockShrinkStep.phase());
@@ -250,14 +244,11 @@ private void verifyCanStopWithStep(String stoppableStep) {
             mockShrinkStep,
             randomStepKey()
         );
-        MockAction mockAction = new MockAction(Collections.singletonList(mockStep));
-        Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction));
-        LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase));
+        MockAction mockAction = new MockAction(List.of(mockStep));
+        Phase phase = new Phase("phase", TimeValue.ZERO, Map.of("action", mockAction));
+        LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Map.of(phase.getName(), phase));
         SortedMap<String, LifecyclePolicyMetadata> policyMap = new TreeMap<>();
-        policyMap.put(
-            policyName,
-            new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
-        );
+        policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()));
         Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
         LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
         lifecycleState.setPhase(mockShrinkStep.phase());
@@ -301,14 +292,11 @@ public void testRequestedStopOnSafeAction() {
             currentStepKey,
             randomStepKey()
         );
-        MockAction mockAction = new MockAction(Collections.singletonList(mockStep));
-        Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction));
-        LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase));
+        MockAction mockAction = new MockAction(List.of(mockStep));
+        Phase phase = new Phase("phase", TimeValue.ZERO, Map.of("action", mockAction));
+        LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Map.of(phase.getName(), phase));
         SortedMap<String, LifecyclePolicyMetadata> policyMap = new TreeMap<>();
-        policyMap.put(
-            policyName,
-            new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
-        );
+        policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()));
         Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
         LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
         lifecycleState.setPhase(currentStepKey.phase());
@@ -370,9 +358,9 @@ public void doTestExceptionStillProcessesOtherIndices(boolean useOnMaster) {
         } else {
             i1mockStep = new IndexLifecycleRunnerTests.MockClusterStateActionStep(i1currentStepKey, randomStepKey());
         }
-        MockAction i1mockAction = new MockAction(Collections.singletonList(i1mockStep));
-        Phase i1phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", i1mockAction));
-        LifecyclePolicy i1policy = newTestLifecyclePolicy(policy1, Collections.singletonMap(i1phase.getName(), i1phase));
+        MockAction i1mockAction = new MockAction(List.of(i1mockStep));
+        Phase i1phase = new Phase("phase", TimeValue.ZERO, Map.of("action", i1mockAction));
+        LifecyclePolicy i1policy = newTestLifecyclePolicy(policy1, Map.of(i1phase.getName(), i1phase));
         Index index1 = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
         LifecycleExecutionState.Builder i1lifecycleState = LifecycleExecutionState.builder();
         i1lifecycleState.setPhase(i1currentStepKey.phase());
@@ -387,9 +375,9 @@ public void doTestExceptionStillProcessesOtherIndices(boolean useOnMaster) {
         } else {
             i2mockStep = new IndexLifecycleRunnerTests.MockClusterStateActionStep(i2currentStepKey, randomStepKey());
         }
-        MockAction mockAction = new MockAction(Collections.singletonList(i2mockStep));
-        Phase i2phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction));
-        LifecyclePolicy i2policy = newTestLifecyclePolicy(policy1, Collections.singletonMap(i2phase.getName(), i1phase));
+        MockAction mockAction = new MockAction(List.of(i2mockStep));
Map.of("action", mockAction)); + LifecyclePolicy i2policy = newTestLifecyclePolicy(policy1, Map.of(i2phase.getName(), i1phase)); Index index2 = new Index( randomValueOtherThan(index1.getName(), () -> randomAlphaOfLengthBetween(1, 20)), randomAlphaOfLengthBetween(1, 20) @@ -422,14 +410,8 @@ public void doTestExceptionStillProcessesOtherIndices(boolean useOnMaster) { } SortedMap policyMap = new TreeMap<>(); - policyMap.put( - policy1, - new LifecyclePolicyMetadata(i1policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()) - ); - policyMap.put( - policy2, - new LifecyclePolicyMetadata(i2policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()) - ); + policyMap.put(policy1, new LifecyclePolicyMetadata(i1policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())); + policyMap.put(policy2, new LifecyclePolicyMetadata(i2policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())); IndexMetadata i1indexMetadata = IndexMetadata.builder(index1.getName()) .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_NAME, policy1)) @@ -533,14 +515,8 @@ public void testIndicesOnShuttingDownNodesInDangerousStep() { SingleNodeShutdownMetadata.Type.REPLACE )) { ClusterState state = ClusterState.builder(ClusterName.DEFAULT).build(); - assertThat( - IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "regular_node"), - equalTo(Collections.emptySet()) - ); - assertThat( - IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "shutdown_node"), - equalTo(Collections.emptySet()) - ); + assertThat(IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "regular_node"), equalTo(Set.of())); + assertThat(IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "shutdown_node"), equalTo(Set.of())); IndexMetadata nonDangerousIndex = IndexMetadata.builder("no_danger") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_NAME, "mypolicy")) @@ -583,7 +559,7 @@ public void testIndicesOnShuttingDownNodesInDangerousStep() { Map indices = Map.of("no_danger", nonDangerousIndex, "danger", dangerousIndex); Metadata metadata = Metadata.builder() - .putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING)) + .putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING)) .indices(indices) .persistentSettings(settings(IndexVersion.current()).build()) .build(); @@ -612,14 +588,8 @@ public void testIndicesOnShuttingDownNodesInDangerousStep() { .build(); // No danger yet, because no node is shutting down - assertThat( - IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "regular_node"), - equalTo(Collections.emptySet()) - ); - assertThat( - IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "shutdown_node"), - equalTo(Collections.emptySet()) - ); + assertThat(IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "regular_node"), equalTo(Set.of())); + assertThat(IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "shutdown_node"), equalTo(Set.of())); state = ClusterState.builder(state) .metadata( @@ -627,7 +597,7 @@ public void testIndicesOnShuttingDownNodesInDangerousStep() { .putCustom( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( - Collections.singletonMap( + Map.of( "shutdown_node", SingleNodeShutdownMetadata.builder() .setNodeId("shutdown_node") @@ -642,15 +612,12 @@ public void 
                 )
                 .build();

-            assertThat(
-                IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "regular_node"),
-                equalTo(Collections.emptySet())
-            );
+            assertThat(IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "regular_node"), equalTo(Set.of()));
             // No danger, because this is a "RESTART" type shutdown
             assertThat(
                 "restart type shutdowns are not considered dangerous",
                 IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "shutdown_node"),
-                equalTo(Collections.emptySet())
+                equalTo(Set.of())
             );

             final String targetNodeName = type == SingleNodeShutdownMetadata.Type.REPLACE ? randomAlphaOfLengthBetween(10, 20) : null;
@@ -661,7 +628,7 @@ public void testIndicesOnShuttingDownNodesInDangerousStep() {
                     .putCustom(
                         NodesShutdownMetadata.TYPE,
                         new NodesShutdownMetadata(
-                            Collections.singletonMap(
+                            Map.of(
                                 "shutdown_node",
                                 SingleNodeShutdownMetadata.builder()
                                     .setNodeId("shutdown_node")
@@ -679,10 +646,7 @@ public void testIndicesOnShuttingDownNodesInDangerousStep() {
                 .build();

             // The dangerous index should be calculated as being in danger now
-            assertThat(
-                IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "shutdown_node"),
-                equalTo(Collections.singleton("danger"))
-            );
+            assertThat(IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "shutdown_node"), equalTo(Set.of("danger")));
         }
     }
 }
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java
index 49aa0a65a5704..a1f51f1fae90f 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java
@@ -48,7 +48,6 @@
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -80,8 +79,8 @@ public void testMoveClusterStateToNextStep() {
             .stream()
             .findFirst()
             .orElseThrow(() -> new AssertionError("expected next phase to be present"));
-        List<LifecyclePolicyMetadata> policyMetadatas = Collections.singletonList(
-            new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
+        List<LifecyclePolicyMetadata> policyMetadatas = List.of(
+            new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())
         );
         Step.StepKey currentStep = new Step.StepKey("current_phase", "current_action", "current_step");
         Step.StepKey nextStep = new Step.StepKey(nextPhase.getName(), "next_action", "next_step");
@@ -128,8 +127,8 @@ public void testMoveClusterStateToNextStepSamePhase() {
             p -> p.getPhases().isEmpty(),
             () -> LifecyclePolicyTests.randomTestLifecyclePolicy("policy")
         );
-        List<LifecyclePolicyMetadata> policyMetadatas = Collections.singletonList(
-            new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
+        List<LifecyclePolicyMetadata> policyMetadatas = List.of(
+            new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())
         );
         Step.StepKey currentStep = new Step.StepKey("current_phase", "current_action", "current_step");
         Step.StepKey nextStep = new Step.StepKey("current_phase", "next_action", "next_step");
@@ -179,8 +178,8 @@ public void testMoveClusterStateToNextStepSameAction() {
             p -> p.getPhases().isEmpty(),
             () -> LifecyclePolicyTests.randomTestLifecyclePolicy("policy")
         );
-        List<LifecyclePolicyMetadata> policyMetadatas = Collections.singletonList(
-            new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
+        List<LifecyclePolicyMetadata> policyMetadatas = List.of(
+            new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())
         );
         Step.StepKey currentStep = new Step.StepKey("current_phase", "current_action", "current_step");
         Step.StepKey nextStep = new Step.StepKey("current_phase", "current_action", "next_step");
@@ -236,8 +235,8 @@ public void testSuccessfulValidatedMoveClusterStateToNextStep() {
             .stream()
             .findFirst()
             .orElseThrow(() -> new AssertionError("expected next phase to be present"));
-        List<LifecyclePolicyMetadata> policyMetadatas = Collections.singletonList(
-            new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
+        List<LifecyclePolicyMetadata> policyMetadatas = List.of(
+            new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())
         );
         Step.StepKey currentStepKey = new Step.StepKey("current_phase", "current_action", "current_step");
         Step.StepKey nextStepKey = new Step.StepKey(nextPhase.getName(), "next_action", "next_step");
@@ -279,7 +278,7 @@ public void testValidatedMoveClusterStateToNextStepWithoutPolicy() {
         lifecycleState.setAction(currentStepKey.action());
         lifecycleState.setStep(currentStepKey.name());

-        ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList());
+        ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), List.of());
         Index index = clusterState.metadata().index(indexName).getIndex();
         IllegalArgumentException exception = expectThrows(
             IllegalArgumentException.class,
@@ -303,7 +302,7 @@ public void testValidatedMoveClusterStateToNextStepInvalidNextStep() {
         lifecycleState.setAction(currentStepKey.action());
         lifecycleState.setStep(currentStepKey.name());

-        ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList());
+        ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), List.of());
         Index index = clusterState.metadata().index(indexName).getIndex();
         IllegalArgumentException exception = expectThrows(
             IllegalArgumentException.class,
@@ -325,7 +324,7 @@ public void testMoveClusterStateToErrorStep() throws IOException {
         lifecycleState.setPhase(currentStep.phase());
         lifecycleState.setAction(currentStep.action());
         lifecycleState.setStep(currentStep.name());
-        ClusterState clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), Collections.emptyList());
+        ClusterState clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), List.of());
         Index index = clusterState.metadata().index(indexName).getIndex();

         ClusterState newClusterState = IndexLifecycleTransition.moveClusterStateToErrorStep(
@@ -359,7 +358,7 @@ public void testAddStepInfoToClusterState() throws IOException {
         lifecycleState.setPhase(currentStep.phase());
         lifecycleState.setAction(currentStep.action());
         lifecycleState.setStep(currentStep.name());
-        ClusterState clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), Collections.emptyList());
+        ClusterState clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), List.of());
         Index index = clusterState.metadata().index(indexName).getIndex();
         ClusterState newClusterState = IndexLifecycleTransition.addStepInfoToClusterState(index, clusterState, stepInfo);
         assertClusterStateStepInfo(clusterState, index, currentStep, newClusterState, stepInfo);
@@ -378,9 +377,7 @@ public void testRemovePolicyForIndex() {
         lifecycleState.setAction(currentStep.action());
         lifecycleState.setStep(currentStep.name());
         List<LifecyclePolicyMetadata> policyMetadatas = new ArrayList<>();
-        policyMetadatas.add(
-            new LifecyclePolicyMetadata(oldPolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
-        );
+        policyMetadatas.add(new LifecyclePolicyMetadata(oldPolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()));
         ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas);
         Index index = clusterState.metadata().index(indexName).getIndex();
         Index[] indices = new Index[] { index };
@@ -399,7 +396,7 @@ public void testRemovePolicyForIndexNoCurrentPolicy() {
             indexName,
             indexSettingsBuilder,
             LifecycleExecutionState.builder().build(),
-            Collections.emptyList()
+            List.of()
         );
         Index index = clusterState.metadata().index(indexName).getIndex();
         Index[] indices = new Index[] { index };
@@ -414,7 +411,7 @@ public void testRemovePolicyForIndexNoCurrentPolicy() {
     public void testRemovePolicyForIndexIndexDoesntExist() {
         String indexName = randomAlphaOfLength(10);
         String oldPolicyName = "old_policy";
-        LifecyclePolicy oldPolicy = newTestLifecyclePolicy(oldPolicyName, Collections.emptyMap());
+        LifecyclePolicy oldPolicy = newTestLifecyclePolicy(oldPolicyName, Map.of());
         Step.StepKey currentStep = AbstractStepTestCase.randomStepKey();
         Settings.Builder indexSettingsBuilder = Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, oldPolicyName);
         LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
@@ -422,9 +419,7 @@ public void testRemovePolicyForIndexIndexDoesntExist() {
         lifecycleState.setAction(currentStep.action());
         lifecycleState.setStep(currentStep.name());
         List<LifecyclePolicyMetadata> policyMetadatas = new ArrayList<>();
-        policyMetadatas.add(
-            new LifecyclePolicyMetadata(oldPolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
-        );
+        policyMetadatas.add(new LifecyclePolicyMetadata(oldPolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()));
         ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas);
         Index index = new Index("doesnt_exist", "im_not_here");
         Index[] indices = new Index[] { index };
@@ -448,9 +443,7 @@ public void testRemovePolicyForIndexIndexInUnsafe() {
         lifecycleState.setAction(currentStep.action());
         lifecycleState.setStep(currentStep.name());
         List<LifecyclePolicyMetadata> policyMetadatas = new ArrayList<>();
-        policyMetadatas.add(
-            new LifecyclePolicyMetadata(oldPolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
-        );
+        policyMetadatas.add(new LifecyclePolicyMetadata(oldPolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()));
         ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas);
         Index index = clusterState.metadata().index(indexName).getIndex();
         Index[] indices = new Index[] { index };
@@ -475,9 +468,7 @@ public void testRemovePolicyWithIndexingComplete() {
         lifecycleState.setAction(currentStep.action());
         lifecycleState.setStep(currentStep.name());
         List<LifecyclePolicyMetadata> policyMetadatas = new ArrayList<>();
-        policyMetadatas.add(
-            new LifecyclePolicyMetadata(oldPolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
-        );
+        policyMetadatas.add(new LifecyclePolicyMetadata(oldPolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()));
         ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas);
         Index index = clusterState.metadata().index(indexName).getIndex();
         Index[] indices = new Index[] { index };
@@ -756,7 +747,7 @@ public void testMoveClusterStateToFailedStep() {
         LifecyclePolicy policy = createPolicy(policyName, failedStepKey, null);
         LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(
             policy,
-            Collections.emptyMap(),
+            Map.of(),
             randomNonNegativeLong(),
             randomNonNegativeLong()
         );
@@ -771,12 +762,7 @@ public void testMoveClusterStateToFailedStep() {
         lifecycleState.setStep(errorStepKey.name());
         lifecycleState.setStepTime(now);
         lifecycleState.setFailedStep(failedStepKey.name());
-        ClusterState clusterState = buildClusterState(
-            indexName,
-            indexSettingsBuilder,
-            lifecycleState.build(),
-            Collections.singletonList(policyMetadata)
-        );
+        ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), List.of(policyMetadata));
         Index index = clusterState.metadata().index(indexName).getIndex();
         ClusterState nextClusterState = IndexLifecycleTransition.moveClusterStateToPreviouslyFailedStep(
             clusterState,
@@ -802,7 +788,7 @@ public void testMoveClusterStateToFailedStepWithUnknownStep() {
         LifecyclePolicy policy = createPolicy(policyName, failedStepKey, null);
         LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(
             policy,
-            Collections.emptyMap(),
+            Map.of(),
             randomNonNegativeLong(),
             randomNonNegativeLong()
         );
@@ -817,12 +803,7 @@ public void testMoveClusterStateToFailedStepWithUnknownStep() {
         lifecycleState.setStep(errorStepKey.name());
         lifecycleState.setStepTime(now);
         lifecycleState.setFailedStep(failedStepKey.name());
-        ClusterState clusterState = buildClusterState(
-            indexName,
-            indexSettingsBuilder,
-            lifecycleState.build(),
-            Collections.singletonList(policyMetadata)
-        );
+        ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), List.of(policyMetadata));
         IllegalArgumentException exception = expectThrows(
             IllegalArgumentException.class,
             () -> IndexLifecycleTransition.moveClusterStateToPreviouslyFailedStep(clusterState, indexName, () -> now, policyRegistry, false)
@@ -840,7 +821,7 @@ public void testMoveClusterStateToFailedStepIndexNotFound() {
             existingIndexName,
             Settings.builder(),
             LifecycleExecutionState.builder().build(),
-            Collections.emptyList()
+            List.of()
         );
         IllegalArgumentException exception = expectThrows(
             IllegalArgumentException.class,
@@ -863,7 +844,7 @@ public void testMoveClusterStateToFailedStepInvalidPolicySetting() {
         lifecycleState.setAction(errorStepKey.action());
         lifecycleState.setStep(errorStepKey.name());
         lifecycleState.setFailedStep(failedStepKey.name());
-        ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList());
+        ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), List.of());
         IllegalArgumentException exception = expectThrows(
             IllegalArgumentException.class,
             () -> IndexLifecycleTransition.moveClusterStateToPreviouslyFailedStep(clusterState, indexName, () -> now, policyRegistry, false)
@@ -883,7 +864,7 @@ public void testMoveClusterStateToFailedNotOnError() {
         lifecycleState.setPhase(failedStepKey.phase());
         lifecycleState.setAction(failedStepKey.action());
lifecycleState.setStep(failedStepKey.name()); - ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), List.of()); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, () -> IndexLifecycleTransition.moveClusterStateToPreviouslyFailedStep(clusterState, indexName, () -> now, policyRegistry, false) @@ -906,7 +887,7 @@ public void testMoveClusterStateToPreviouslyFailedStepAsAutomaticRetryAndSetsPre LifecyclePolicy policy = createPolicy(policyName, failedStepKey, null); LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata( policy, - Collections.emptyMap(), + Map.of(), randomNonNegativeLong(), randomNonNegativeLong() ); @@ -923,12 +904,7 @@ public void testMoveClusterStateToPreviouslyFailedStepAsAutomaticRetryAndSetsPre lifecycleState.setFailedStep(failedStepKey.name()); String initialStepInfo = randomAlphaOfLengthBetween(10, 50); lifecycleState.setStepInfo(initialStepInfo); - ClusterState clusterState = buildClusterState( - indexName, - indexSettingsBuilder, - lifecycleState.build(), - Collections.singletonList(policyMetadata) - ); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), List.of(policyMetadata)); Index index = clusterState.metadata().index(indexName).getIndex(); ClusterState nextClusterState = IndexLifecycleTransition.moveClusterStateToPreviouslyFailedStep( clusterState, @@ -976,13 +952,11 @@ public void testMoveToFailedStepDoesntRefreshCachedPhaseWhenUnsafe() { Map actions = new HashMap<>(); actions.put("set_priority", new SetPriorityAction(100)); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = Map.of("hot", hotPhase); LifecyclePolicy currentPolicy = new LifecyclePolicy("my-policy", phases); List policyMetadatas = new ArrayList<>(); - policyMetadatas.add( - new LifecyclePolicyMetadata(currentPolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()) - ); + policyMetadatas.add(new LifecyclePolicyMetadata(currentPolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())); Step.StepKey errorStepKey = new Step.StepKey("hot", RolloverAction.NAME, ErrorStep.NAME); PolicyStepsRegistry stepsRegistry = createOneStepPolicyStepRegistry("my-policy", new ErrorStep(errorStepKey)); @@ -1040,9 +1014,9 @@ public void testRefreshPhaseJson() throws IOException { actions.put("rollover", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)); actions.put("set_priority", new SetPriorityAction(100)); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = Map.of("hot", hotPhase); LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); - LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(newPolicy, Collections.emptyMap(), 2L, 2L); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(newPolicy, Map.of(), 2L, 2L); ClusterState existingState = ClusterState.builder(ClusterState.EMPTY_STATE) .metadata(Metadata.builder(Metadata.EMPTY_METADATA).put(meta, false).build()) @@ -1185,7 +1159,7 @@ public void testMoveStateToNextActionAndUpdateCachedPhase() { actions.put("rollover", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)); 
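The `moveClusterStateToPreviouslyFailedStep` tests above all pin down the same precondition: an ILM retry is only valid when the index currently sits on the ERROR step and a failed step has been recorded. A hypothetical distillation of that guard, written only to make the tested invariant explicit (names and messages are made up; this is not the actual `IndexLifecycleTransition` code):

```java
// Hypothetical sketch of the invariant exercised by the tests above.
final class RetryGuard {
    static String stepToRetry(String currentStepName, String failedStepName, String indexName) {
        if ("ERROR".equals(currentStepName) == false) {
            // mirrors testMoveClusterStateToFailedNotOnError
            throw new IllegalArgumentException(
                "cannot retry step for index [" + indexName + "] that is not on the ERROR step"
            );
        }
        if (failedStepName == null) {
            // mirrors the missing-step / invalid-policy failure cases
            throw new IllegalArgumentException("no failed step is recorded for index [" + indexName + "]");
        }
        return failedStepName; // the step the index is moved back to
    }

    public static void main(String[] args) {
        System.out.println(stepToRetry("ERROR", "check-rollover-ready", "my-index"));
    }
}
```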
actions.put("set_priority", new SetPriorityAction(100)); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = Map.of("hot", hotPhase); LifecyclePolicy currentPolicy = new LifecyclePolicy("my-policy", phases); { @@ -1195,10 +1169,10 @@ public void testMoveStateToNextActionAndUpdateCachedPhase() { Map actionsWithoutRollover = new HashMap<>(); actionsWithoutRollover.put("set_priority", new SetPriorityAction(100)); Phase hotPhaseNoRollover = new Phase("hot", TimeValue.ZERO, actionsWithoutRollover); - Map phasesNoRollover = Collections.singletonMap("hot", hotPhaseNoRollover); + Map phasesNoRollover = Map.of("hot", hotPhaseNoRollover); LifecyclePolicyMetadata updatedPolicyMetadata = new LifecyclePolicyMetadata( new LifecyclePolicy("my-policy", phasesNoRollover), - Collections.emptyMap(), + Map.of(), 2L, 2L ); @@ -1233,10 +1207,10 @@ public void testMoveStateToNextActionAndUpdateCachedPhase() { Map actionsWitoutSetPriority = new HashMap<>(); actionsWitoutSetPriority.put("rollover", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)); Phase hotPhaseNoSetPriority = new Phase("hot", TimeValue.ZERO, actionsWitoutSetPriority); - Map phasesWithoutSetPriority = Collections.singletonMap("hot", hotPhaseNoSetPriority); + Map phasesWithoutSetPriority = Map.of("hot", hotPhaseNoSetPriority); LifecyclePolicyMetadata updatedPolicyMetadata = new LifecyclePolicyMetadata( new LifecyclePolicy("my-policy", phasesWithoutSetPriority), - Collections.emptyMap(), + Map.of(), 2L, 2L ); @@ -1275,7 +1249,7 @@ private static LifecyclePolicy createPolicy(String policyName, Step.StepKey safe assert unsafeStep == null || safeStep.phase().equals(unsafeStep.phase()) == false : "safe and unsafe actions must be in different phases"; Map actions = new HashMap<>(); - List steps = Collections.singletonList(new MockStep(safeStep, null)); + List steps = List.of(new MockStep(safeStep, null)); MockAction safeAction = new MockAction(steps, true); actions.put(safeAction.getWriteableName(), safeAction); Phase phase = new Phase(safeStep.phase(), TimeValue.timeValueMillis(0), actions); @@ -1284,7 +1258,7 @@ private static LifecyclePolicy createPolicy(String policyName, Step.StepKey safe if (unsafeStep != null) { assert MockAction.NAME.equals(unsafeStep.action()) : "The unsafe action needs to be MockAction.NAME"; Map actions = new HashMap<>(); - List steps = Collections.singletonList(new MockStep(unsafeStep, null)); + List steps = List.of(new MockStep(unsafeStep, null)); MockAction unsafeAction = new MockAction(steps, false); actions.put(unsafeAction.getWriteableName(), unsafeAction); Phase phase = new Phase(unsafeStep.phase(), TimeValue.timeValueMillis(0), actions); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTaskTests.java index eee3fe3ce53c2..81688ec1503cd 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTaskTests.java @@ -28,7 +28,7 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import org.junit.Before; -import java.util.Collections; +import java.util.Map; import static org.elasticsearch.cluster.metadata.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; import static org.hamcrest.Matchers.containsString; @@ -53,10 +53,7 @@ public void 
setupClusterState() { .build(); index = indexMetadata.getIndex(); IndexLifecycleMetadata ilmMeta = new IndexLifecycleMetadata( - Collections.singletonMap( - policy, - new LifecyclePolicyMetadata(lifecyclePolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()) - ), + Map.of(policy, new LifecyclePolicyMetadata(lifecyclePolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())), OperationMode.RUNNING ); Metadata metadata = Metadata.builder() diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToNextStepUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToNextStepUpdateTaskTests.java index f9a8d4a2ab486..554e9a48c625e 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToNextStepUpdateTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToNextStepUpdateTaskTests.java @@ -29,7 +29,6 @@ import org.junit.Before; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; @@ -67,10 +66,7 @@ public void setupClusterState() { index = indexMetadata.getIndex(); lifecyclePolicy = LifecyclePolicyTests.randomTestLifecyclePolicy(policy); IndexLifecycleMetadata ilmMeta = new IndexLifecycleMetadata( - Collections.singletonMap( - policy, - new LifecyclePolicyMetadata(lifecyclePolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()) - ), + Map.of(policy, new LifecyclePolicyMetadata(lifecyclePolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())), OperationMode.RUNNING ); Metadata metadata = Metadata.builder() @@ -95,7 +91,7 @@ public void testExecuteSuccessfullyMoved() throws Exception { AlwaysExistingStepRegistry stepRegistry = new AlwaysExistingStepRegistry(client); stepRegistry.update( new IndexLifecycleMetadata( - Map.of(policy, new LifecyclePolicyMetadata(lifecyclePolicy, Collections.emptyMap(), 2L, 2L)), + Map.of(policy, new LifecyclePolicyMetadata(lifecyclePolicy, Map.of(), 2L, 2L)), OperationMode.RUNNING ) ); @@ -169,7 +165,7 @@ public void testExecuteSuccessfulMoveWithInvalidNextStep() throws Exception { AlwaysExistingStepRegistry stepRegistry = new AlwaysExistingStepRegistry(client); stepRegistry.update( new IndexLifecycleMetadata( - Map.of(policy, new LifecyclePolicyMetadata(lifecyclePolicy, Collections.emptyMap(), 2L, 2L)), + Map.of(policy, new LifecyclePolicyMetadata(lifecyclePolicy, Map.of(), 2L, 2L)), OperationMode.RUNNING ) ); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistryTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistryTests.java index 36d537a57382c..f61267d40a513 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistryTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistryTests.java @@ -46,7 +46,6 @@ import org.elasticsearch.xpack.core.ilm.Step; import org.mockito.Mockito; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -76,7 +75,7 @@ private IndexMetadata emptyMetadata(Index index) { public void testGetFirstStep() { String policyName = randomAlphaOfLengthBetween(2, 10); Step expectedFirstStep = new MockStep(MOCK_STEP_KEY, null); - Map firstStepMap = Collections.singletonMap(policyName, expectedFirstStep); + Map firstStepMap = Map.of(policyName, expectedFirstStep); PolicyStepsRegistry registry = new 
PolicyStepsRegistry(null, firstStepMap, null, NamedXContentRegistry.EMPTY, null, null); Step actualFirstStep = registry.getFirstStep(policyName); assertThat(actualFirstStep, sameInstance(expectedFirstStep)); @@ -85,7 +84,7 @@ public void testGetFirstStep() { public void testGetFirstStepUnknownPolicy() { String policyName = randomAlphaOfLengthBetween(2, 10); Step expectedFirstStep = new MockStep(MOCK_STEP_KEY, null); - Map firstStepMap = Collections.singletonMap(policyName, expectedFirstStep); + Map firstStepMap = Map.of(policyName, expectedFirstStep); PolicyStepsRegistry registry = new PolicyStepsRegistry(null, firstStepMap, null, NamedXContentRegistry.EMPTY, null, null); Step actualFirstStep = registry.getFirstStep(policyName + "unknown"); assertNull(actualFirstStep); @@ -95,7 +94,7 @@ public void testGetStep() { Client client = mock(Client.class); Mockito.when(client.settings()).thenReturn(Settings.EMPTY); LifecyclePolicy policy = LifecyclePolicyTests.randomTimeseriesLifecyclePolicyWithAllPhases("policy"); - LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Collections.emptyMap(), 1, randomNonNegativeLong()); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Map.of(), 1, randomNonNegativeLong()); String phaseName = randomFrom(policy.getPhases().keySet()); Phase phase = policy.getPhases().get(phaseName); PhaseExecutionInfo pei = new PhaseExecutionInfo(policy.getName(), phase, 1, randomNonNegativeLong()); @@ -119,7 +118,7 @@ public void testGetStepErrorStep() { Step.StepKey errorStepKey = new Step.StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), ErrorStep.NAME); Step expectedStep = new ErrorStep(errorStepKey); Index index = new Index("test", "uuid"); - Map> indexSteps = Collections.singletonMap(index, Collections.singletonList(expectedStep)); + Map> indexSteps = Map.of(index, List.of(expectedStep)); PolicyStepsRegistry registry = new PolicyStepsRegistry(null, null, null, NamedXContentRegistry.EMPTY, null, null); Step actualStep = registry.getStep(emptyMetadata(index), errorStepKey); assertThat(actualStep, equalTo(expectedStep)); @@ -143,7 +142,7 @@ public void testGetStepForIndexWithNoPhaseGetsInitializationStep() { Client client = mock(Client.class); Mockito.when(client.settings()).thenReturn(Settings.EMPTY); LifecyclePolicy policy = LifecyclePolicyTests.randomTimeseriesLifecyclePolicy("policy"); - LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Collections.emptyMap(), 1, randomNonNegativeLong()); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Map.of(), 1, randomNonNegativeLong()); IndexMetadata indexMetadata = IndexMetadata.builder("test") .settings(indexSettings(IndexVersion.current(), 1, 0).put(LifecycleSettings.LIFECYCLE_NAME, "policy").build()) .build(); @@ -158,7 +157,7 @@ public void testGetStepUnknownStepKey() { Client client = mock(Client.class); Mockito.when(client.settings()).thenReturn(Settings.EMPTY); LifecyclePolicy policy = LifecyclePolicyTests.randomTimeseriesLifecyclePolicyWithAllPhases("policy"); - LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Collections.emptyMap(), 1, randomNonNegativeLong()); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Map.of(), 1, randomNonNegativeLong()); String phaseName = randomFrom(policy.getPhases().keySet()); Phase phase = policy.getPhases().get(phaseName); PhaseExecutionInfo pei = new PhaseExecutionInfo(policy.getName(), phase, 1, 
randomNonNegativeLong()); @@ -193,7 +192,7 @@ public void testUpdateFromNothingToSomethingToNothing() throws Exception { headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); } - Map policyMap = Collections.singletonMap( + Map policyMap = Map.of( newPolicy.getName(), new LifecyclePolicyMetadata(newPolicy, headers, randomNonNegativeLong(), randomNonNegativeLong()) ); @@ -271,7 +270,7 @@ public void testUpdateFromNothingToSomethingToNothing() throws Exception { assertThat(registry.getStepMap(), equalTo(registryStepMap)); // remove policy - lifecycleMetadata = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); + lifecycleMetadata = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING); currentState = ClusterState.builder(currentState) .metadata(Metadata.builder(metadata).putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata)) .build(); @@ -291,7 +290,7 @@ public void testUpdateChangedPolicy() { headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); } - Map policyMap = Collections.singletonMap( + Map policyMap = Map.of( newPolicy.getName(), new LifecyclePolicyMetadata(newPolicy, headers, randomNonNegativeLong(), randomNonNegativeLong()) ); @@ -316,10 +315,7 @@ public void testUpdateChangedPolicy() { // swap out policy newPolicy = LifecyclePolicyTests.randomTestLifecyclePolicy(policyName); lifecycleMetadata = new IndexLifecycleMetadata( - Collections.singletonMap( - policyName, - new LifecyclePolicyMetadata(newPolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()) - ), + Map.of(policyName, new LifecyclePolicyMetadata(newPolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())), OperationMode.RUNNING ); currentState = ClusterState.builder(currentState) @@ -356,7 +352,7 @@ public void testUpdatePolicyButNoPhaseChangeIndexStepsDontChange() throws Except headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); } - Map policyMap = Collections.singletonMap( + Map policyMap = Map.of( newPolicy.getName(), new LifecyclePolicyMetadata(newPolicy, headers, randomNonNegativeLong(), randomNonNegativeLong()) ); @@ -411,7 +407,7 @@ public void testUpdatePolicyButNoPhaseChangeIndexStepsDontChange() throws Except assertThat(((ShrinkStep) gotStep).getNumberOfShards(), equalTo(1)); // Update the policy with the new policy, but keep the phase the same - policyMap = Collections.singletonMap( + policyMap = Map.of( updatedPolicy.getName(), new LifecyclePolicyMetadata(updatedPolicy, headers, randomNonNegativeLong(), randomNonNegativeLong()) ); @@ -457,7 +453,7 @@ public void testGetStepMultithreaded() throws Exception { .build(); SortedMap metas = new TreeMap<>(); - metas.put("policy", new LifecyclePolicyMetadata(policy, Collections.emptyMap(), 1, randomNonNegativeLong())); + metas.put("policy", new LifecyclePolicyMetadata(policy, Map.of(), 1, randomNonNegativeLong())); IndexLifecycleMetadata meta = new IndexLifecycleMetadata(metas, OperationMode.RUNNING); PolicyStepsRegistry registry = new PolicyStepsRegistry(REGISTRY, client, null); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/StagnatingIndicesFinderTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/StagnatingIndicesFinderTests.java index be2d449353242..95412f92b6156 100644 --- 
a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/StagnatingIndicesFinderTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/StagnatingIndicesFinderTests.java @@ -28,7 +28,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.LongSupplier; -import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; @@ -61,7 +60,7 @@ public void testStagnatingIndicesFinder() { assertEquals(expectedMaxTimeOnStep, maxTimeOnStep); assertEquals(expectedMaxRetriesPerStep, maxRetriesPerStep); return rc; - }).collect(Collectors.toList()); + }).toList(); // Per the evaluator, the timeSupplier _must_ be called only twice when(mockedTimeSupplier.getAsLong()).thenReturn(instant, instant); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportStopILMActionTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportStopILMActionTests.java index 8c0fede4c11dc..bd0d63ebb0f3d 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportStopILMActionTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportStopILMActionTests.java @@ -24,7 +24,8 @@ import org.elasticsearch.xpack.core.ilm.action.ILMActions; import org.mockito.ArgumentMatcher; -import static java.util.Collections.emptyMap; +import java.util.Map; + import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; @@ -50,7 +51,7 @@ public void testStopILMClusterStatePriorityIsImmediate() { ILMActions.STOP.name(), "description", new TaskId(randomLong() + ":" + randomLong()), - emptyMap() + Map.of() ); StopILMRequest request = new StopILMRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); transportStopILMAction.masterOperation(task, request, ClusterState.EMPTY_STATE, ActionListener.noop()); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java index 513945a0a8635..a7a6004c0ebb2 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java @@ -10,6 +10,7 @@ import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; +import org.elasticsearch.xpack.inference.queries.SemanticMatchQueryRewriteInterceptor; import org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder; import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder; @@ -41,7 +42,8 @@ public Set getTestFeatures() { SemanticTextFieldMapper.SEMANTIC_TEXT_DELETE_FIX, SemanticTextFieldMapper.SEMANTIC_TEXT_ZERO_SIZE_FIX, SemanticTextFieldMapper.SEMANTIC_TEXT_ALWAYS_EMIT_INFERENCE_ID_FIX, - SEMANTIC_TEXT_HIGHLIGHTER + SEMANTIC_TEXT_HIGHLIGHTER, + SemanticMatchQueryRewriteInterceptor.SEMANTIC_MATCH_QUERY_REWRITE_INTERCEPTION_SUPPORTED ); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index 8d5acbad26658..b032fcda8b0f4 100644 --- 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -33,6 +33,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.plugins.SystemIndexPlugin; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.search.fetch.subphase.highlight.Highlighter; @@ -70,6 +71,7 @@ import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.mapper.OffsetSourceFieldMapper; import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; +import org.elasticsearch.xpack.inference.queries.SemanticMatchQueryRewriteInterceptor; import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder; import org.elasticsearch.xpack.inference.rank.random.RandomRankBuilder; import org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder; @@ -385,6 +387,11 @@ public List> getQueries() { return List.of(new QuerySpec<>(SemanticQueryBuilder.NAME, SemanticQueryBuilder::new, SemanticQueryBuilder::fromXContent)); } + @Override + public List getQueryRewriteInterceptors() { + return List.of(new SemanticMatchQueryRewriteInterceptor()); + } + @Override public List> getRetrievers() { return List.of( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticMatchQueryRewriteInterceptor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticMatchQueryRewriteInterceptor.java new file mode 100644 index 0000000000000..a4a8123935c3e --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticMatchQueryRewriteInterceptor.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.queries; + +import org.elasticsearch.action.ResolvedIndices; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.InferenceFieldMetadata; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.mapper.IndexFieldMapper; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +public class SemanticMatchQueryRewriteInterceptor implements QueryRewriteInterceptor { + + public static final NodeFeature SEMANTIC_MATCH_QUERY_REWRITE_INTERCEPTION_SUPPORTED = new NodeFeature( + "search.semantic_match_query_rewrite_interception_supported" + ); + + public SemanticMatchQueryRewriteInterceptor() {} + + @Override + public QueryBuilder interceptAndRewrite(QueryRewriteContext context, QueryBuilder queryBuilder) { + assert (queryBuilder instanceof MatchQueryBuilder); + MatchQueryBuilder matchQueryBuilder = (MatchQueryBuilder) queryBuilder; + QueryBuilder rewritten = queryBuilder; + ResolvedIndices resolvedIndices = context.getResolvedIndices(); + if (resolvedIndices != null) { + Collection indexMetadataCollection = resolvedIndices.getConcreteLocalIndicesMetadata().values(); + List inferenceIndices = new ArrayList<>(); + List nonInferenceIndices = new ArrayList<>(); + for (IndexMetadata indexMetadata : indexMetadataCollection) { + String indexName = indexMetadata.getIndex().getName(); + InferenceFieldMetadata inferenceFieldMetadata = indexMetadata.getInferenceFields().get(matchQueryBuilder.fieldName()); + if (inferenceFieldMetadata != null) { + inferenceIndices.add(indexName); + } else { + nonInferenceIndices.add(indexName); + } + } + + if (inferenceIndices.isEmpty()) { + return rewritten; + } else if (nonInferenceIndices.isEmpty() == false) { + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + for (String inferenceIndexName : inferenceIndices) { + // Add a separate clause for each semantic query, because they may be using different inference endpoints + // TODO - consolidate this to a single clause once the semantic query supports multiple inference endpoints + boolQueryBuilder.should( + createSemanticSubQuery(inferenceIndexName, matchQueryBuilder.fieldName(), (String) matchQueryBuilder.value()) + ); + } + boolQueryBuilder.should(createMatchSubQuery(nonInferenceIndices, matchQueryBuilder)); + rewritten = boolQueryBuilder; + } else { + rewritten = new SemanticQueryBuilder(matchQueryBuilder.fieldName(), (String) matchQueryBuilder.value(), false); + } + } + + return rewritten; + + } + + @Override + public String getQueryName() { + return MatchQueryBuilder.NAME; + } + + private QueryBuilder createSemanticSubQuery(String indexName, String fieldName, String value) { + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + boolQueryBuilder.must(new SemanticQueryBuilder(fieldName, value, true)); + boolQueryBuilder.filter(new TermQueryBuilder(IndexFieldMapper.NAME, indexName)); + return boolQueryBuilder; + } + + private QueryBuilder createMatchSubQuery(List indices, MatchQueryBuilder matchQueryBuilder) { + BoolQueryBuilder boolQueryBuilder = 
new BoolQueryBuilder(); + boolQueryBuilder.must(matchQueryBuilder); + boolQueryBuilder.filter(new TermsQueryBuilder(IndexFieldMapper.NAME, indices)); + return boolQueryBuilder; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java index 501b6e6c2bfe2..30094ff7dbdfc 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java @@ -45,6 +45,7 @@ import java.util.Objects; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -53,16 +54,18 @@ public class SemanticQueryBuilder extends AbstractQueryBuilder PARSER = new ConstructingObjectParser<>( NAME, false, - args -> new SemanticQueryBuilder((String) args[0], (String) args[1]) + args -> new SemanticQueryBuilder((String) args[0], (String) args[1], (Boolean) args[2]) ); static { PARSER.declareString(constructorArg(), FIELD_FIELD); PARSER.declareString(constructorArg(), QUERY_FIELD); + PARSER.declareBoolean(optionalConstructorArg(), LENIENT_FIELD); declareStandardFields(PARSER); } @@ -71,8 +74,13 @@ public class SemanticQueryBuilder extends AbstractQueryBuilder inferenceResultsSupplier; private final InferenceResults inferenceResults; private final boolean noInferenceResults; + private final Boolean lenient; public SemanticQueryBuilder(String fieldName, String query) { + this(fieldName, query, null); + } + + public SemanticQueryBuilder(String fieldName, String query, Boolean lenient) { if (fieldName == null) { throw new IllegalArgumentException("[" + NAME + "] requires a " + FIELD_FIELD.getPreferredName() + " value"); } @@ -84,6 +92,7 @@ public SemanticQueryBuilder(String fieldName, String query) { this.inferenceResults = null; this.inferenceResultsSupplier = null; this.noInferenceResults = false; + this.lenient = lenient; } public SemanticQueryBuilder(StreamInput in) throws IOException { @@ -93,6 +102,11 @@ public SemanticQueryBuilder(StreamInput in) throws IOException { this.inferenceResults = in.readOptionalNamedWriteable(InferenceResults.class); this.noInferenceResults = in.readBoolean(); this.inferenceResultsSupplier = null; + if (in.getTransportVersion().onOrAfter(TransportVersions.SEMANTIC_QUERY_LENIENT)) { + this.lenient = in.readOptionalBoolean(); + } else { + this.lenient = null; + } } @Override @@ -104,6 +118,9 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeString(query); out.writeOptionalNamedWriteable(inferenceResults); out.writeBoolean(noInferenceResults); + if (out.getTransportVersion().onOrAfter(TransportVersions.SEMANTIC_QUERY_LENIENT)) { + out.writeOptionalBoolean(lenient); + } } private SemanticQueryBuilder( @@ -119,6 +136,7 @@ private SemanticQueryBuilder( this.inferenceResultsSupplier = inferenceResultsSupplier; this.inferenceResults = inferenceResults; this.noInferenceResults = noInferenceResults; + this.lenient = other.lenient; } @Override @@ -140,6 +158,9 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep builder.startObject(NAME); 
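The new `lenient` flag above follows the standard wire-BWC pattern: the field is only serialized when both nodes are on `TransportVersions.SEMANTIC_QUERY_LENIENT` or later, and a reader talking to an older node defaults it to `null`. A minimal self-contained sketch of the idea, assuming plain `DataOutputStream`/`DataInputStream` as stand-ins for Elasticsearch's `StreamOutput`/`StreamInput` and an invented version constant:

```java
import java.io.*;

public class OptionalFieldBwcDemo {
    // assumed placeholder for TransportVersions.SEMANTIC_QUERY_LENIENT
    static final int SEMANTIC_QUERY_LENIENT = 8_800_000;

    // Write the optional flag only when the remote node understands it.
    static void writeLenient(DataOutputStream out, int remoteVersion, Boolean lenient) throws IOException {
        if (remoteVersion >= SEMANTIC_QUERY_LENIENT) {
            out.writeBoolean(lenient != null); // presence marker, like writeOptionalBoolean
            if (lenient != null) {
                out.writeBoolean(lenient);
            }
        }
        // older remotes never see the field, keeping the wire format compatible
    }

    static Boolean readLenient(DataInputStream in, int remoteVersion) throws IOException {
        if (remoteVersion >= SEMANTIC_QUERY_LENIENT) {
            return in.readBoolean() ? in.readBoolean() : null;
        }
        return null; // default when the sender predates the field
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        writeLenient(new DataOutputStream(buf), SEMANTIC_QUERY_LENIENT, true);
        Boolean roundTripped = readLenient(
            new DataInputStream(new ByteArrayInputStream(buf.toByteArray())),
            SEMANTIC_QUERY_LENIENT
        );
        System.out.println(roundTripped); // true
    }
}
```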
builder.field(FIELD_FIELD.getPreferredName(), fieldName); builder.field(QUERY_FIELD.getPreferredName(), query); + if (lenient != null) { + builder.field(LENIENT_FIELD.getPreferredName(), lenient); + } boostAndQueryNameToXContent(builder); builder.endObject(); } @@ -167,6 +188,8 @@ private QueryBuilder doRewriteBuildSemanticQuery(SearchExecutionContext searchEx } return semanticTextFieldType.semanticQuery(inferenceResults, searchExecutionContext.requestSize(), boost(), queryName()); + } else if (lenient != null && lenient) { + return new MatchNoneQueryBuilder(); } else { throw new IllegalArgumentException( "Field [" + fieldName + "] of type [" + fieldType.typeName() + "] does not support " + NAME + " queries" diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java index 2416aeb62ff33..c68a629b999c5 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java @@ -102,7 +102,7 @@ public void testFilterNoop() throws Exception { new BulkItemRequest[0] ); request.setInferenceFieldMap( - Map.of("foo", new InferenceFieldMetadata("foo", "bar", generateRandomStringArray(5, 10, false, false))) + Map.of("foo", new InferenceFieldMetadata("foo", "bar", "baz", generateRandomStringArray(5, 10, false, false))) ); filter.apply(task, TransportShardBulkAction.ACTION_NAME, request, actionListener, actionFilterChain); awaitLatch(chainExecuted, 10, TimeUnit.SECONDS); diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml index c2704a4c22914..3d3790d879ef1 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml @@ -101,7 +101,7 @@ setup: index: test-sparse-index id: doc_1 body: - inference_field: ["inference test", "another inference test"] + inference_field: [ "inference test", "another inference test" ] non_inference_field: "non inference test" refresh: true @@ -132,7 +132,7 @@ setup: index: test-sparse-index id: doc_1 body: - inference_field: [40, 49.678] + inference_field: [ 40, 49.678 ] refresh: true - do: @@ -229,7 +229,7 @@ setup: index: test-dense-index id: doc_1 body: - inference_field: ["inference test", "another inference test"] + inference_field: [ "inference test", "another inference test" ] non_inference_field: "non inference test" refresh: true @@ -260,7 +260,7 @@ setup: index: test-dense-index id: doc_1 body: - inference_field: [45.1, 100] + inference_field: [ 45.1, 100 ] refresh: true - do: @@ -387,7 +387,7 @@ setup: index: test-dense-index id: doc_1 body: - inference_field: ["inference test", "another inference test"] + inference_field: [ "inference test", "another inference test" ] non_inference_field: "non inference test" refresh: true @@ -418,7 +418,7 @@ setup: index: test-sparse-index id: doc_1 body: - inference_field: ["inference test", "another inference test"] + inference_field: [ "inference test", "another inference test" ] 
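For reference while reading the YAML tests that follow: when a single `match` query spans both `semantic_text` and plain `text` indices, the interceptor added earlier in this diff rewrites it into a bool query with one `should` clause per inference index (each pinned by a `_index` filter, since endpoints may differ per index) plus one lexical clause covering the remaining indices. An illustrative reconstruction of that shape using the builders from the diff (index and field names are hypothetical; compiling it requires the Elasticsearch and inference-plugin artifacts on the classpath):

```java
import org.elasticsearch.index.mapper.IndexFieldMapper;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.MatchQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.index.query.TermsQueryBuilder;
import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder;

import java.util.List;

class RewriteShapeExample {
    // Mirrors the mixed-index output of SemanticMatchQueryRewriteInterceptor.
    static QueryBuilder mixedIndexRewrite() {
        return new BoolQueryBuilder()
            // one clause per semantic_text index, pinned by an _index filter
            .should(
                new BoolQueryBuilder().must(new SemanticQueryBuilder("inference_field", "inference test", true))
                    .filter(new TermQueryBuilder(IndexFieldMapper.NAME, "test-sparse-index"))
            )
            // a single lexical clause covering all plain text indices at once
            .should(
                new BoolQueryBuilder().must(new MatchQueryBuilder("inference_field", "inference test"))
                    .filter(new TermsQueryBuilder(IndexFieldMapper.NAME, List.of("test-text-only-index")))
            );
    }
}
```

Passing `lenient=true` in the semantic clause is what lets the same query string target mixed mappings without tripping the "does not support semantic queries" error tested below.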
non_inference_field: "non inference test" refresh: true @@ -440,7 +440,7 @@ setup: - match: { hits.hits.0._id: "doc_1" } - close_to: { hits.hits.0._score: { value: 3.783733e19, error: 1e13 } } - length: { hits.hits.0._source.inference_field.inference.chunks: 2 } - - match: { hits.hits.0.matched_queries: ["i-like-naming-my-queries"] } + - match: { hits.hits.0.matched_queries: [ "i-like-naming-my-queries" ] } --- "Query an index alias": @@ -452,7 +452,7 @@ setup: index: test-sparse-index id: doc_1 body: - inference_field: ["inference test", "another inference test"] + inference_field: [ "inference test", "another inference test" ] non_inference_field: "non inference test" refresh: true @@ -503,6 +503,48 @@ setup: - match: { error.root_cause.0.type: "illegal_argument_exception" } - match: { error.root_cause.0.reason: "Field [non_inference_field] of type [text] does not support semantic queries" } +--- +"Query the wrong field type with lenient: true": + - requires: + cluster_features: "search.semantic_match_query_rewrite_interception_supported" + reason: lenient introduced in 8.18.0 + + - do: + index: + index: test-sparse-index + id: doc_1 + body: + inference_field: "inference test" + non_inference_field: "non inference test" + refresh: true + + - do: + catch: bad_request + search: + index: test-sparse-index + body: + query: + semantic: + field: "non_inference_field" + query: "inference test" + + - match: { error.type: "search_phase_execution_exception" } + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Field [non_inference_field] of type [text] does not support semantic queries" } + + - do: + search: + index: test-sparse-index + body: + query: + semantic: + field: "non_inference_field" + query: "inference test" + lenient: true + + - match: { hits.total.value: 0 } + + --- "Query a missing field": - do: @@ -783,7 +825,7 @@ setup: index: test-dense-index id: doc_1 body: - inference_field: ["inference test", "another inference test"] + inference_field: [ "inference test", "another inference test" ] non_inference_field: "non inference test" refresh: true @@ -844,11 +886,11 @@ setup: "Query a field that uses the default ELSER 2 endpoint": - requires: reason: "default ELSER 2 inference ID is enabled via a capability" - test_runner_features: [capabilities] + test_runner_features: [ capabilities ] capabilities: - method: GET path: /_inference - capabilities: [default_elser_2] + capabilities: [ default_elser_2 ] - do: indices.create: diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/45_semantic_text_match.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/45_semantic_text_match.yml new file mode 100644 index 0000000000000..cdbf73d31a272 --- /dev/null +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/45_semantic_text_match.yml @@ -0,0 +1,284 @@ +setup: + - requires: + cluster_features: "search.semantic_match_query_rewrite_interception_supported" + reason: semantic_text match support introduced in 8.18.0 + + - do: + inference.put: + task_type: sparse_embedding + inference_id: sparse-inference-id + body: > + { + "service": "test_service", + "service_settings": { + "model": "my_model", + "api_key": "abc64" + }, + "task_settings": { + } + } + + - do: + inference.put: + task_type: sparse_embedding + inference_id: sparse-inference-id-2 + body: > + { + "service": "test_service", + "service_settings": { + "model": "my_model", + "api_key": 
"abc64" + }, + "task_settings": { + } + } + + - do: + inference.put: + task_type: text_embedding + inference_id: dense-inference-id + body: > + { + "service": "text_embedding_test_service", + "service_settings": { + "model": "my_model", + "dimensions": 10, + "api_key": "abc64", + "similarity": "COSINE" + }, + "task_settings": { + } + } + + - do: + indices.create: + index: test-sparse-index + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: sparse-inference-id + non_inference_field: + type: text + + - do: + indices.create: + index: test-dense-index + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: dense-inference-id + non_inference_field: + type: text + + - do: + indices.create: + index: test-text-only-index + body: + mappings: + properties: + inference_field: + type: text + non_inference_field: + type: text + +--- +"Query using a sparse embedding model": + - skip: + features: [ "headers", "close_to" ] + + - do: + index: + index: test-sparse-index + id: doc_1 + body: + inference_field: [ "inference test", "another inference test" ] + non_inference_field: "non inference test" + refresh: true + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-sparse-index + body: + query: + match: + inference_field: + query: "inference test" + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + +--- +"Query using a dense embedding model": + - skip: + features: [ "headers", "close_to" ] + + - do: + index: + index: test-dense-index + id: doc_1 + body: + inference_field: [ "inference test", "another inference test" ] + non_inference_field: "non inference test" + refresh: true + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-dense-index + body: + query: + match: + inference_field: + query: "inference test" + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + +--- +"Query an index alias": + - skip: + features: [ "headers", "close_to" ] + + - do: + index: + index: test-sparse-index + id: doc_1 + body: + inference_field: [ "inference test", "another inference test" ] + non_inference_field: "non inference test" + refresh: true + + - do: + indices.put_alias: + index: test-sparse-index + name: my-alias + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: my-alias + body: + query: + match: + inference_field: + query: "inference test" + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + +--- +"Query indices with both semantic_text and regular text content": + + - do: + index: + index: test-sparse-index + id: doc_1 + body: + inference_field: [ "inference test", "another inference test" ] + non_inference_field: "non inference test" + refresh: true + + - do: + index: + index: test-text-only-index + id: doc_2 + body: + inference_field: [ "inference test", "not an inference field" ] + non_inference_field: "non inference test" + refresh: true + + - do: + search: + index: + - test-sparse-index + - test-text-only-index + body: + query: + match: + inference_field: + query: "inference test" + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "doc_1" } + - match: { 
hits.hits.1._id: "doc_2" } + + # Test querying multiple indices that either use the same inference ID or combine semantic_text with lexical search + - do: + indices.create: + index: test-sparse-index-2 + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: sparse-inference-id + non_inference_field: + type: text + + - do: + index: + index: test-sparse-index-2 + id: doc_3 + body: + inference_field: "another inference test" + refresh: true + + - do: + search: + index: + - test-sparse-index* + - test-text-only-index + body: + query: + match: + inference_field: + query: "inference test" + + - match: { hits.total.value: 3 } + - match: { hits.hits.0._id: "doc_1" } + - match: { hits.hits.1._id: "doc_3" } + - match: { hits.hits.2._id: "doc_2" } + +--- +"Query a field that has no indexed inference results": + - skip: + features: [ "headers" ] + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-sparse-index + body: + query: + match: + inference_field: + query: "inference test" + + - match: { hits.total.value: 0 } + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-dense-index + body: + query: + match: + inference_field: + query: "inference test" + + - match: { hits.total.value: 0 } diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java index 08fda90f9fd73..8fe87b043c78b 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java @@ -195,7 +195,7 @@ public void testOpenJobWithOldSnapshot() { assertThat( ex.getMessage(), containsString( - "[open-job-with-old-model-snapshot] job model snapshot [snap_1] has min version before [7.0.0], " + "[open-job-with-old-model-snapshot] job model snapshot [snap_1] has min version before [8.3.0], " + "please revert to a newer model snapshot or reset the job" ) ); diff --git a/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderTests.java b/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderTests.java index d20f0f88aeb16..bdd6d73ec0fbf 100644 --- a/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderTests.java +++ b/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderTests.java @@ -54,7 +54,9 @@ public void testRetrieverExtractionErrors() throws IOException { IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true) - .rewrite(new QueryRewriteContext(parserConfig(), null, null, null, new PointInTimeBuilder(new BytesArray("pitid")))) + .rewrite( + new QueryRewriteContext(parserConfig(), null, null, null, new PointInTimeBuilder(new BytesArray("pitid")), null) + ) ); assertEquals("[search_after] cannot be used in children of compound retrievers", iae.getMessage()); } @@ -70,7 +72,9 @@ public void testRetrieverExtractionErrors() throws IOException { IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, 
() -> ssb.parseXContent(parser, true, nf -> true) - .rewrite(new QueryRewriteContext(parserConfig(), null, null, null, new PointInTimeBuilder(new BytesArray("pitid")))) + .rewrite( + new QueryRewriteContext(parserConfig(), null, null, null, new PointInTimeBuilder(new BytesArray("pitid")), null) + ) ); assertEquals("[terminate_after] cannot be used in children of compound retrievers", iae.getMessage()); } diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java index 09449f81121fd..d6bad85161fd9 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.remotecluster; +import org.apache.http.client.methods.HttpGet; import org.elasticsearch.Build; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -22,6 +23,7 @@ import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.junit.RunnableTestRuleAdapter; +import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.After; @@ -34,6 +36,7 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Base64; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; @@ -51,6 +54,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.not; public class RemoteClusterSecurityEsqlIT extends AbstractRemoteClusterSecurityTestCase { private static final AtomicReference> API_KEY_MAP_REF = new AtomicReference<>(); @@ -342,6 +346,14 @@ public void testCrossClusterQuery() throws Exception { configureRemoteCluster(); populateData(); + Map esqlCcsLicenseFeatureUsage = fetchEsqlCcsFeatureUsageFromNode(client()); + + Object ccsLastUsedTimestampAtStartOfTest = null; + if (esqlCcsLicenseFeatureUsage.isEmpty() == false) { + // some test runs will have a usage value already, so capture that to compare at end of test + ccsLastUsedTimestampAtStartOfTest = esqlCcsLicenseFeatureUsage.get("last_used"); + } + // query remote cluster only Request request = esqlRequest(""" FROM my_remote_cluster:employees @@ -385,6 +397,15 @@ public void testCrossClusterQuery() throws Exception { | LIMIT 2 | KEEP emp_id, department""")); assertRemoteOnlyAgainst2IndexResults(response); + + // check that the esql-ccs license feature is now present and that the last_used field has been updated + esqlCcsLicenseFeatureUsage = fetchEsqlCcsFeatureUsageFromNode(client()); + assertThat(esqlCcsLicenseFeatureUsage.size(), equalTo(5)); + Object lastUsed = esqlCcsLicenseFeatureUsage.get("last_used"); + assertNotNull("lastUsed should not be null", lastUsed); + if (ccsLastUsedTimestampAtStartOfTest != null) { + assertThat(lastUsed.toString(), not(equalTo(ccsLastUsedTimestampAtStartOfTest.toString()))); + } } @SuppressWarnings("unchecked") @@ -1660,4 +1681,18 @@ void 
assertExpectedClustersForMissingIndicesTests(Map responseMa assertThat((int) shards.get("failed"), is(0)); } } + + private static Map fetchEsqlCcsFeatureUsageFromNode(RestClient client) throws IOException { + Request request = new Request(HttpGet.METHOD_NAME, "_license/feature_usage"); + request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", basicAuthHeaderValue(USER, PASS))); + Response response = client.performRequest(request); + ObjectPath path = ObjectPath.createFromResponse(response); + List> features = path.evaluate("features"); + for (var feature : features) { + if ("esql-ccs".equals(feature.get("name"))) { + return feature; + } + } + return Collections.emptyMap(); + } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java index 337d3c5820c07..24586e5f36337 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java @@ -169,7 +169,14 @@ private void handleScriptException(ScriptException scriptException, boolean unat * @param numFailureRetries the number of configured retries */ private void handleBulkIndexingException(BulkIndexingException bulkIndexingException, boolean unattended, int numFailureRetries) { - if (unattended == false && bulkIndexingException.isIrrecoverable()) { + if (bulkIndexingException.getCause() instanceof ClusterBlockException) { + retryWithoutIncrementingFailureCount( + bulkIndexingException, + bulkIndexingException.getDetailedMessage(), + unattended, + numFailureRetries + ); + } else if (unattended == false && bulkIndexingException.isIrrecoverable()) { String message = TransformMessages.getMessage( TransformMessages.LOG_TRANSFORM_PIVOT_IRRECOVERABLE_BULK_INDEXING_ERROR, bulkIndexingException.getDetailedMessage() @@ -232,12 +239,46 @@ private void retry(Throwable unwrappedException, String message, boolean unatten && unwrappedException.getClass().equals(context.getLastFailure().getClass()); final int failureCount = context.incrementAndGetFailureCount(unwrappedException); - if (unattended == false && numFailureRetries != -1 && failureCount > numFailureRetries) { fail(unwrappedException, "task encountered more than " + numFailureRetries + " failures; latest failure: " + message); return; } + logRetry(unwrappedException, message, unattended, numFailureRetries, failureCount, repeatedFailure); + } + + /** + * Terminate failure handling without incrementing the retries used + *
<p> + * This is used when there is an ongoing recoverable issue and we want to retain + * retries for any issues that may occur after the issue is resolved + * + * @param unwrappedException The exception caught + * @param message error message to log/audit + * @param unattended whether the transform runs in unattended mode + * @param numFailureRetries the number of configured retries + */ + private void retryWithoutIncrementingFailureCount( + Throwable unwrappedException, + String message, + boolean unattended, + int numFailureRetries + ) { + // group failures to decide whether to report it below + final boolean repeatedFailure = context.getLastFailure() != null + && unwrappedException.getClass().equals(context.getLastFailure().getClass()); + + logRetry(unwrappedException, message, unattended, numFailureRetries, context.getFailureCount(), repeatedFailure); + } + + private void logRetry( + Throwable unwrappedException, + String message, + boolean unattended, + int numFailureRetries, + int failureCount, + boolean repeatedFailure + ) { // Since our schedule fires again very quickly after failures it is possible to run into the same failure numerous // times in a row, very quickly. We do not want to spam the audit log with repeated failures, so only record the first one // and if the number of retries is about to exceed diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java index 84c8d4e140408..3894ff3043ccd 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState; import org.elasticsearch.xpack.transform.notifications.MockTransformAuditor; +import java.util.List; import java.util.Map; import java.util.Set; @@ -63,9 +64,121 @@ public int getFailureCountChangedCounter() { } } - public void testUnattended() { + public void testHandleIndexerFailure_CircuitBreakingExceptionNewPageSizeLessThanMinimumPageSize() { + var e = new CircuitBreakingException(randomAlphaOfLength(10), 1, 0, randomFrom(CircuitBreaker.Durability.values())); + assertRetryIfUnattendedOtherwiseFail(e); + } + + public void testHandleIndexerFailure_CircuitBreakingExceptionNewPageSizeNotLessThanMinimumPageSize() { + var e = new CircuitBreakingException(randomAlphaOfLength(10), 1, 1, randomFrom(CircuitBreaker.Durability.values())); + + List.of(true, false).forEach((unattended) -> { assertNoFailureAndContextPageSizeSet(e, unattended, 365); }); + } + + public void testHandleIndexerFailure_ScriptException() { + var e = new ScriptException( + randomAlphaOfLength(10), + new ArithmeticException(randomAlphaOfLength(10)), + singletonList(randomAlphaOfLength(10)), + randomAlphaOfLength(10), + randomAlphaOfLength(10) + ); + assertRetryIfUnattendedOtherwiseFail(e); + } + + public void testHandleIndexerFailure_BulkIndexExceptionWrappingClusterBlockException() { + final BulkIndexingException bulkIndexingException = new BulkIndexingException( + randomAlphaOfLength(10), + new ClusterBlockException(Map.of("test-index", Set.of(MetadataIndexStateService.INDEX_CLOSED_BLOCK))), + randomBoolean() + ); + + List.of(true, false).forEach((unattended) -> {
assertRetryFailureCountNotIncremented(bulkIndexingException, unattended); }); + } + + public void testHandleIndexerFailure_IrrecoverableBulkIndexException() { + final BulkIndexingException e = new BulkIndexingException( + randomAlphaOfLength(10), + new ElasticsearchStatusException(randomAlphaOfLength(10), RestStatus.INTERNAL_SERVER_ERROR), + true + ); + assertRetryIfUnattendedOtherwiseFail(e); + } + + public void testHandleIndexerFailure_RecoverableBulkIndexException() { + final BulkIndexingException bulkIndexingException = new BulkIndexingException( + randomAlphaOfLength(10), + new ElasticsearchStatusException(randomAlphaOfLength(10), RestStatus.INTERNAL_SERVER_ERROR), + false + ); + + List.of(true, false).forEach((unattended) -> { assertRetry(bulkIndexingException, unattended); }); + } + + public void testHandleIndexerFailure_ClusterBlockException() { + List.of(true, false).forEach((unattended) -> { + assertRetry( + new ClusterBlockException(Map.of(randomAlphaOfLength(10), Set.of(MetadataIndexStateService.INDEX_CLOSED_BLOCK))), + unattended + ); + }); + } + + public void testHandleIndexerFailure_SearchPhaseExecutionExceptionWithNoShardSearchFailures() { + List.of(true, false).forEach((unattended) -> { + assertRetry( + new SearchPhaseExecutionException(randomAlphaOfLength(10), randomAlphaOfLength(10), ShardSearchFailure.EMPTY_ARRAY), + unattended + ); + }); + } + + public void testHandleIndexerFailure_SearchPhaseExecutionExceptionWithShardSearchFailures() { + List.of(true, false).forEach((unattended) -> { + assertRetry( + new SearchPhaseExecutionException( + randomAlphaOfLength(10), + randomAlphaOfLength(10), + new ShardSearchFailure[] { new ShardSearchFailure(new Exception()) } + ), + unattended + ); + }); + } + + public void testHandleIndexerFailure_RecoverableElasticsearchException() { + List.of(true, false).forEach((unattended) -> { + assertRetry(new ElasticsearchStatusException(randomAlphaOfLength(10), RestStatus.INTERNAL_SERVER_ERROR), unattended); + }); + } + + public void testHandleIndexerFailure_IrrecoverableElasticsearchException() { + var e = new ElasticsearchStatusException(randomAlphaOfLength(10), RestStatus.NOT_FOUND); + assertRetryIfUnattendedOtherwiseFail(e); + } + + public void testHandleIndexerFailure_IllegalArgumentException() { + var e = new IllegalArgumentException(randomAlphaOfLength(10)); + assertRetryIfUnattendedOtherwiseFail(e); + } + + public void testHandleIndexerFailure_UnexpectedException() { + List.of(true, false).forEach((unattended) -> { assertRetry(new Exception(), unattended); }); + } + + private void assertRetryIfUnattendedOtherwiseFail(Exception e) { + List.of(true, false).forEach((unattended) -> { + if (unattended) { + assertRetry(e, unattended); + } else { + assertFailure(e); + } + }); + } + + private void assertRetry(Exception e, boolean unattended) { String transformId = randomAlphaOfLength(10); - SettingsConfig settings = new SettingsConfig.Builder().setUnattended(true).build(); + SettingsConfig settings = new SettingsConfig.Builder().setNumFailureRetries(2).setUnattended(unattended).build(); MockTransformAuditor auditor = MockTransformAuditor.createMockAuditor(); MockTransformContextListener contextListener = new MockTransformContextListener(); @@ -74,51 +187,33 @@ public void testUnattended() { TransformFailureHandler handler = new TransformFailureHandler(auditor, context, transformId); - handler.handleIndexerFailure( - new SearchPhaseExecutionException( - "query", - "Partial shards failure", - new ShardSearchFailure[] { - new 
ShardSearchFailure(new CircuitBreakingException("to much memory", 110, 100, CircuitBreaker.Durability.TRANSIENT)) } - ), - settings - ); + assertNoFailure(handler, e, contextListener, settings, true); + assertNoFailure(handler, e, contextListener, settings, true); + if (unattended) { + assertNoFailure(handler, e, contextListener, settings, true); + } else { + // fail after max retry attempts reached + assertFailure(handler, e, contextListener, settings, true); + } + } - // CBE isn't a failure, but it only affects page size(which we don't test here) - assertFalse(contextListener.getFailed()); - assertEquals(0, contextListener.getFailureCountChangedCounter()); + private void assertRetryFailureCountNotIncremented(Exception e, boolean unattended) { + String transformId = randomAlphaOfLength(10); + SettingsConfig settings = new SettingsConfig.Builder().setNumFailureRetries(2).setUnattended(unattended).build(); - assertNoFailure( - handler, - new SearchPhaseExecutionException( - "query", - "Partial shards failure", - new ShardSearchFailure[] { - new ShardSearchFailure( - new ScriptException( - "runtime error", - new ArithmeticException("/ by zero"), - singletonList("stack"), - "test", - "painless" - ) - ) } - ), - contextListener, - settings - ); - assertNoFailure( - handler, - new ElasticsearchStatusException("something really bad happened", RestStatus.INTERNAL_SERVER_ERROR), - contextListener, - settings - ); - assertNoFailure(handler, new IllegalArgumentException("expected apples not oranges"), contextListener, settings); - assertNoFailure(handler, new RuntimeException("the s*** hit the fan"), contextListener, settings); - assertNoFailure(handler, new NullPointerException("NPE"), contextListener, settings); + MockTransformAuditor auditor = MockTransformAuditor.createMockAuditor(); + MockTransformContextListener contextListener = new MockTransformContextListener(); + TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, contextListener); + context.setPageSize(500); + + TransformFailureHandler handler = new TransformFailureHandler(auditor, context, transformId); + + assertNoFailure(handler, e, contextListener, settings, false); + assertNoFailure(handler, e, contextListener, settings, false); + assertNoFailure(handler, e, contextListener, settings, false); } - public void testClusterBlock() { + private void assertFailure(Exception e) { String transformId = randomAlphaOfLength(10); SettingsConfig settings = new SettingsConfig.Builder().setNumFailureRetries(2).build(); @@ -129,32 +224,50 @@ public void testClusterBlock() { TransformFailureHandler handler = new TransformFailureHandler(auditor, context, transformId); - final ClusterBlockException clusterBlock = new ClusterBlockException( - Map.of("test-index", Set.of(MetadataIndexStateService.INDEX_CLOSED_BLOCK)) - ); + assertFailure(handler, e, contextListener, settings, false); + } - handler.handleIndexerFailure(clusterBlock, settings); - assertFalse(contextListener.getFailed()); - assertEquals(1, contextListener.getFailureCountChangedCounter()); + private void assertNoFailure( + TransformFailureHandler handler, + Exception e, + MockTransformContextListener mockTransformContextListener, + SettingsConfig settings, + boolean failureCountIncremented + ) { + handler.handleIndexerFailure(e, settings); + assertFalse(mockTransformContextListener.getFailed()); + assertEquals(failureCountIncremented ? 
1 : 0, mockTransformContextListener.getFailureCountChangedCounter()); + mockTransformContextListener.reset(); + } - handler.handleIndexerFailure(clusterBlock, settings); - assertFalse(contextListener.getFailed()); - assertEquals(2, contextListener.getFailureCountChangedCounter()); + private void assertNoFailureAndContextPageSizeSet(Exception e, boolean unattended, int newPageSize) { + String transformId = randomAlphaOfLength(10); + SettingsConfig settings = new SettingsConfig.Builder().setNumFailureRetries(2).setUnattended(unattended).build(); - handler.handleIndexerFailure(clusterBlock, settings); - assertTrue(contextListener.getFailed()); - assertEquals(3, contextListener.getFailureCountChangedCounter()); + MockTransformAuditor auditor = MockTransformAuditor.createMockAuditor(); + MockTransformContextListener contextListener = new MockTransformContextListener(); + TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, contextListener); + context.setPageSize(500); + + TransformFailureHandler handler = new TransformFailureHandler(auditor, context, transformId); + + handler.handleIndexerFailure(e, settings); + assertFalse(contextListener.getFailed()); + assertEquals(0, contextListener.getFailureCountChangedCounter()); + assertEquals(newPageSize, context.getPageSize()); + contextListener.reset(); } - private void assertNoFailure( + private void assertFailure( TransformFailureHandler handler, Exception e, MockTransformContextListener mockTransformContextListener, - SettingsConfig settings + SettingsConfig settings, + boolean failureCountChanged ) { handler.handleIndexerFailure(e, settings); - assertFalse(mockTransformContextListener.getFailed()); - assertEquals(1, mockTransformContextListener.getFailureCountChangedCounter()); + assertTrue(mockTransformContextListener.getFailed()); + assertEquals(failureCountChanged ? 
1 : 0, mockTransformContextListener.getFailureCountChangedCounter()); mockTransformContextListener.reset(); } diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/80_esql.yml b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/80_esql.yml index 4c0bbfd7ec139..1b435c551fbe9 100644 --- a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/80_esql.yml +++ b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/80_esql.yml @@ -86,11 +86,12 @@ teardown: ignore: 404 --- -"Index data and search on the mixed cluster": +"ES|QL cross-cluster query fails with basic license": - skip: features: allowed_warnings - do: + catch: bad_request allowed_warnings: - "Line 1:21: Square brackets '[]' need to be removed in FROM METADATA declaration" headers: { Authorization: "Basic am9lOnMza3JpdC1wYXNzd29yZA==" } @@ -98,23 +99,11 @@ teardown: body: query: 'FROM *:esql*,esql_* | STATS total = sum(cost) by tag | SORT tag | LIMIT 10' - - match: {columns.0.name: "total"} - - match: {columns.0.type: "long"} - - match: {columns.1.name: "tag"} - - match: {columns.1.type: "keyword"} - - - match: {values.0.0: 2200} - - match: {values.0.1: "computer"} - - match: {values.1.0: 170} - - match: {values.1.1: "headphone"} - - match: {values.2.0: 2100 } - - match: {values.2.1: "laptop" } - - match: {values.3.0: 1000 } - - match: {values.3.1: "monitor" } - - match: {values.4.0: 550 } - - match: {values.4.1: "tablet" } + - match: { error.type: "status_exception" } + - match: { error.reason: "A valid Enterprise license is required to run ES|QL cross-cluster searches. License found: active basic license" } - do: + catch: bad_request allowed_warnings: - "Line 1:21: Square brackets '[]' need to be removed in FROM METADATA declaration" headers: { Authorization: "Basic am9lOnMza3JpdC1wYXNzd29yZA==" } @@ -128,28 +117,11 @@ teardown: lte: "2023-01-03" format: "yyyy-MM-dd" - - match: {columns.0.name: "_index"} - - match: {columns.0.type: "keyword"} - - match: {columns.1.name: "tag"} - - match: {columns.1.type: "keyword"} - - match: {columns.2.name: "cost" } - - match: {columns.2.type: "long" } - - - match: {values.0.0: "esql_local"} - - match: {values.0.1: "monitor"} - - match: {values.0.2: 250 } - - match: {values.1.0: "my_remote_cluster:esql_index" } - - match: {values.1.1: "tablet"} - - match: {values.1.2: 450 } - - match: {values.2.0: "my_remote_cluster:esql_index" } - - match: {values.2.1: "computer" } - - match: {values.2.2: 1200 } - - match: {values.3.0: "esql_local"} - - match: {values.3.1: "laptop" } - - match: {values.3.2: 2100 } + - match: { error.type: "status_exception" } + - match: { error.reason: "A valid Enterprise license is required to run ES|QL cross-cluster searches. 
License found: active basic license" } --- -"Enrich across clusters": +"ES|QL enrich query across clusters fails with basic license": - requires: cluster_features: ["gte_v8.13.0"] reason: "Enrich across clusters available in 8.13 or later" @@ -194,27 +166,14 @@ teardown: index: suggestions - do: + catch: bad_request headers: { Authorization: "Basic am9lOnMza3JpdC1wYXNzd29yZA==" } esql.query: body: query: 'FROM *:esql*,esql_* | STATS total = sum(cost) by tag | SORT total DESC | LIMIT 3 | ENRICH suggestions | KEEP tag, total, phrase' - - match: {columns.0.name: "tag"} - - match: {columns.0.type: "keyword"} - - match: {columns.1.name: "total" } - - match: {columns.1.type: "long" } - - match: {columns.2.name: "phrase" } - - match: {columns.2.type: "keyword" } - - - match: {values.0.0: "computer"} - - match: {values.0.1: 2200} - - match: {values.0.2: "best desktop for programming"} - - match: {values.1.0: "laptop"} - - match: {values.1.1: 2100 } - - match: {values.1.2: "the best battery life laptop"} - - match: {values.2.0: "monitor" } - - match: {values.2.1: 1000 } - - match: {values.2.2: "4k or 5k or 6K monitor?" } + - match: { error.type: "status_exception" } + - match: { error.reason: "A valid Enterprise license is required to run ES|QL cross-cluster searches. License found: active basic license" } - do: enrich.delete_policy:
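
To make the failure-handling semantics in the transform changes above easier to follow: the handler treats recoverable interruptions (a cluster block from a closed index, or a bulk-indexing error wrapping one) differently from genuine failures. The former are retried without touching the failure count, so the retry budget survives for whatever goes wrong after the block clears; the latter consume the budget until `numFailureRetries` is exhausted, except for unattended transforms, which keep retrying. The sketch below is a minimal, hypothetical model of that policy and of the tests' expectations; it is not the actual `TransformFailureHandler` API, and every name in it is illustrative.

```java
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative sketch only -- not the real TransformFailureHandler.
public class RetryBudgetSketch {
    private final AtomicInteger failureCount = new AtomicInteger();
    private Class<?> lastFailureClass;

    /** Returns true if the indexer should retry, false if it should fail permanently. */
    boolean handleFailure(Throwable t, boolean recoverable, int numFailureRetries, boolean unattended) {
        // group repeated failures of the same type so that only the first one is logged,
        // mirroring the repeatedFailure check in the diff above
        boolean repeatedFailure = t.getClass().equals(lastFailureClass);
        lastFailureClass = t.getClass();

        if (recoverable) {
            // e.g. a cluster block from a closed index: retry without incrementing the
            // failure count, preserving the retry budget for later, unrelated failures
            if (repeatedFailure == false) {
                System.out.println("transient issue, retrying without consuming a retry: " + t);
            }
            return true;
        }

        int failures = failureCount.incrementAndGet();
        if (unattended) {
            return true; // unattended transforms never fail permanently on retryable errors
        }
        return failures <= numFailureRetries;
    }

    public static void main(String[] args) {
        RetryBudgetSketch sketch = new RetryBudgetSketch();
        // recoverable failures never consume the retry budget
        for (int i = 0; i < 5; i++) {
            System.out.println("recoverable -> retry=" + sketch.handleFailure(new IllegalStateException("index closed"), true, 2, false));
        }
        // hard failures consume it: with numFailureRetries=2 the third attempt fails permanently,
        // matching the two-retries-then-failure shape asserted by assertRetry above
        for (int i = 0; i < 3; i++) {
            System.out.println("hard failure -> retry=" + sketch.handleFailure(new RuntimeException("boom"), false, 2, false));
        }
    }
}
```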
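
The YAML rewrite above turns the two cross-cluster scenarios into negative tests: with only a basic license, any ES|QL query that addresses a remote cluster must now fail fast with a `status_exception` instead of returning rows. For readers who want to reproduce the check against a live cluster rather than through the REST test harness, a plain HTTP call to the ES|QL `_query` endpoint works; the cluster address below is a placeholder, the credentials are the same test user as in the YAML, and the request mirrors the first YAML query.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Hypothetical manual probe of the ES|QL cross-cluster license gate; the
// endpoint address and index patterns are placeholders, not part of this PR.
public class EsqlCcsLicenseProbe {
    public static void main(String[] args) throws Exception {
        String body = "{\"query\": \"FROM *:esql*,esql_* | STATS total = sum(cost) by tag | LIMIT 10\"}";
        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:9200/_query"))                 // placeholder cluster address
            .header("Content-Type", "application/json")
            .header("Authorization", "Basic am9lOnMza3JpdC1wYXNzd29yZA==")   // same test user as the YAML above
            .POST(HttpRequest.BodyPublishers.ofString(body))
            .build();

        HttpResponse<String> response = HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());

        // On a basic license this should print 400 and a body whose error.type is
        // "status_exception", with the Enterprise-license reason asserted in the YAML tests.
        System.out.println(response.statusCode());
        System.out.println(response.body());
    }
}
```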