diff --git a/.ci/matrix-build-javas.yml b/.ci/matrix-build-javas.yml index debb074fce1cb..795e2e81f5f92 100644 --- a/.ci/matrix-build-javas.yml +++ b/.ci/matrix-build-javas.yml @@ -7,3 +7,4 @@ ES_BUILD_JAVA: - java11 + - openjdk12 diff --git a/.ci/matrix-runtime-javas.yml b/.ci/matrix-runtime-javas.yml index cb7757aba6079..0cea9c939e149 100644 --- a/.ci/matrix-runtime-javas.yml +++ b/.ci/matrix-runtime-javas.yml @@ -9,5 +9,6 @@ ES_RUNTIME_JAVA: - java8 - java8fips - java11 + - openjdk12 - zulu8 - zulu11 diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index b82e57bbcfd95..c2741ed5819f4 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -293,18 +293,7 @@ class BuildPlugin implements Plugin { it.standardOutput = dockerVersionOutput }) final String dockerVersion = dockerVersionOutput.toString().trim() - final Matcher matcher = dockerVersion =~ /Docker version (\d+\.\d+)\.\d+(?:-ce)?, build [0-9a-f]{7}/ - assert matcher.matches() : dockerVersion - final dockerMajorMinorVersion = matcher.group(1) - final String[] majorMinor = dockerMajorMinorVersion.split("\\.") - if (Integer.parseInt(majorMinor[0]) < 17 - || (Integer.parseInt(majorMinor[0]) == 17 && Integer.parseInt(majorMinor[1]) < 5)) { - final String message = String.format( - Locale.ROOT, - "building Docker images requires Docker version 17.05+ due to use of multi-stage builds yet was [%s]", - dockerVersion) - throwDockerRequiredException(message) - } + checkDockerVersionRecent(dockerVersion) final ByteArrayOutputStream dockerImagesErrorOutput = new ByteArrayOutputStream() // the Docker binary executes, check that we can execute a privileged command @@ -339,6 +328,21 @@ class BuildPlugin implements Plugin { } } + protected static void checkDockerVersionRecent(String dockerVersion) { + final Matcher matcher = dockerVersion =~ /Docker version (\d+\.\d+)\.\d+(?:-ce)?, build [0-9a-f]{7,40}/ + assert matcher.matches(): dockerVersion + final dockerMajorMinorVersion = matcher.group(1) + final String[] majorMinor = dockerMajorMinorVersion.split("\\.") + if (Integer.parseInt(majorMinor[0]) < 17 + || (Integer.parseInt(majorMinor[0]) == 17 && Integer.parseInt(majorMinor[1]) < 5)) { + final String message = String.format( + Locale.ROOT, + "building Docker images requires Docker version 17.05+ due to use of multi-stage builds yet was [%s]", + dockerVersion) + throwDockerRequiredException(message) + } + } + private static void throwDockerRequiredException(final String message) { throw new GradleException( message + "\nyou can address this by attending to the reported issue, " diff --git a/buildSrc/src/test/groovy/org/elasticsearch/gradle/BuildPluginTests.java b/buildSrc/src/test/groovy/org/elasticsearch/gradle/BuildPluginTests.java new file mode 100644 index 0000000000000..90af9a2401ace --- /dev/null +++ b/buildSrc/src/test/groovy/org/elasticsearch/gradle/BuildPluginTests.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle; + +import org.elasticsearch.gradle.test.GradleUnitTestCase; +import org.gradle.api.GradleException; +import org.junit.Test; + + +public class BuildPluginTests extends GradleUnitTestCase { + + public void testPassingDockerVersions() { + BuildPlugin.checkDockerVersionRecent("Docker version 18.06.1-ce, build e68fc7a215d7"); + BuildPlugin.checkDockerVersionRecent("Docker version 17.05.0, build e68fc7a"); + BuildPlugin.checkDockerVersionRecent("Docker version 17.05.1, build e68fc7a"); + } + + @Test(expected = GradleException.class) + public void testFailingDockerVersions() { + BuildPlugin.checkDockerVersionRecent("Docker version 17.04.0, build e68fc7a"); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index c7a54a9ac32fe..63620bd987712 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -299,8 +299,16 @@ static Request multiGet(MultiGetRequest multiGetRequest) throws IOException { static Request index(IndexRequest indexRequest) { String method = Strings.hasLength(indexRequest.id()) ? HttpPut.METHOD_NAME : HttpPost.METHOD_NAME; - boolean isCreate = (indexRequest.opType() == DocWriteRequest.OpType.CREATE); - String endpoint = endpoint(indexRequest.index(), indexRequest.type(), indexRequest.id(), isCreate ? "_create" : null); + + String endpoint; + if (indexRequest.opType() == DocWriteRequest.OpType.CREATE) { + endpoint = indexRequest.type().equals(MapperService.SINGLE_MAPPING_NAME) + ? endpoint(indexRequest.index(), "_create", indexRequest.id()) + : endpoint(indexRequest.index(), indexRequest.type(), indexRequest.id(), "_create"); + } else { + endpoint = endpoint(indexRequest.index(), indexRequest.type(), indexRequest.id()); + } + Request request = new Request(method, endpoint); Params parameters = new Params(request); @@ -471,7 +479,7 @@ static Request count(CountRequest countRequest) throws IOException { } static Request explain(ExplainRequest explainRequest) throws IOException { - String endpoint = explainRequest.isTypeless() + String endpoint = explainRequest.type().equals(MapperService.SINGLE_MAPPING_NAME) ? 
endpoint(explainRequest.index(), "_explain", explainRequest.id()) : endpoint(explainRequest.index(), explainRequest.type(), explainRequest.id(), "_explain"); Request request = new Request(HttpGet.METHOD_NAME, endpoint); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 372b8caf1cb95..698f7557c1306 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -661,7 +661,7 @@ public void testIndex() throws IOException { Request request = RequestConverters.index(indexRequest); if (indexRequest.opType() == DocWriteRequest.OpType.CREATE) { - assertEquals("/" + index + "/_doc/" + id + "/_create", request.getEndpoint()); + assertEquals("/" + index + "/_create/" + id, request.getEndpoint()); } else if (id != null) { assertEquals("/" + index + "/_doc/" + id, request.getEndpoint()); } else { @@ -1685,17 +1685,17 @@ public void testEndpointBuilder() { assertEquals("/a/b", endpointBuilder.build()); } { - EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPart("b").addPathPartAsIs("_create"); - assertEquals("/a/b/_create", endpointBuilder.build()); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPart("b").addPathPartAsIs("_endpoint"); + assertEquals("/a/b/_endpoint", endpointBuilder.build()); } { - EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a", "b", "c").addPathPartAsIs("_create"); - assertEquals("/a/b/c/_create", endpointBuilder.build()); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a", "b", "c").addPathPartAsIs("_endpoint"); + assertEquals("/a/b/c/_endpoint", endpointBuilder.build()); } { - EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPartAsIs("_create"); - assertEquals("/a/_create", endpointBuilder.build()); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPartAsIs("_endpoint"); + assertEquals("/a/_endpoint", endpointBuilder.build()); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java index 87b2f97173215..c8220e9cc0c05 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -1318,7 +1318,6 @@ public void onFailure(Exception e) { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/36362") public void testInvalidateToken() throws Exception { RestHighLevelClient client = highLevelClient(); diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index cd8aaeb510a49..8a446bf037a12 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,4 +1,4 @@ -:version: 7.0.0-alpha1 +:version: 7.0.0-alpha2 :major-version: 7.x :lucene_version: 8.0.0 :lucene_version_path: 8_0_0 diff --git a/docs/java-rest/high-level/search/search.asciidoc b/docs/java-rest/high-level/search/search.asciidoc index dc717655e66a1..1c454ce6ba14e 100644 --- a/docs/java-rest/high-level/search/search.asciidoc +++ b/docs/java-rest/high-level/search/search.asciidoc @@ -460,4 +460,5 @@ 
include-tagged::{doc-tests-file}[{api}-request-profiling-aggs] <4> Retrieve the time in millis spent executing the Lucene collector <5> Retrieve the profile results for the sub-aggregations (if any) -The Rest API documentation contains more information about {ref}/_profiling_aggregations.html[Profiling Aggregations] +The Rest API documentation contains more information about +{ref}/search-profile-aggregations.html[Profiling aggregations]. diff --git a/docs/plugins/discovery.asciidoc b/docs/plugins/discovery.asciidoc index 46b61146b128d..926acead09ea1 100644 --- a/docs/plugins/discovery.asciidoc +++ b/docs/plugins/discovery.asciidoc @@ -1,8 +1,8 @@ [[discovery]] == Discovery Plugins -Discovery plugins extend Elasticsearch by adding new discovery mechanisms that -can be used instead of {ref}/modules-discovery-zen.html[Zen Discovery]. +Discovery plugins extend Elasticsearch by adding new hosts providers that can be +used to extend the {ref}/modules-discovery.html[cluster formation module]. [float] ==== Core discovery plugins @@ -11,22 +11,24 @@ The core discovery plugins are: <>:: -The EC2 discovery plugin uses the https://github.com/aws/aws-sdk-java[AWS API] for unicast discovery. +The EC2 discovery plugin uses the https://github.com/aws/aws-sdk-java[AWS API] +for unicast discovery. <>:: -The Azure Classic discovery plugin uses the Azure Classic API for unicast discovery. +The Azure Classic discovery plugin uses the Azure Classic API for unicast +discovery. <>:: -The Google Compute Engine discovery plugin uses the GCE API for unicast discovery. +The Google Compute Engine discovery plugin uses the GCE API for unicast +discovery. [float] ==== Community contributed discovery plugins A number of discovery plugins have been contributed by our community: -* https://github.com/shikhar/eskka[eskka Discovery Plugin] (by Shikhar Bhushan) * https://github.com/fabric8io/elasticsearch-cloud-kubernetes[Kubernetes Discovery Plugin] (by Jimmi Dyson, http://fabric8.io[fabric8]) include::discovery-ec2.asciidoc[] diff --git a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc index ce843f332c62a..f64fb7e91d665 100644 --- a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ccr-delete-auto-follow-pattern]] -=== Delete Auto-Follow Pattern API +=== Delete auto-follow pattern API ++++ -Delete Auto-Follow Pattern +Delete auto-follow pattern ++++ beta[] diff --git a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc index 19eb2b928ae07..3db92ce6222b0 100644 --- a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ccr-get-auto-follow-pattern]] -=== Get Auto-Follow Pattern API +=== Get auto-follow pattern API ++++ -Get Auto-Follow Pattern +Get auto-follow pattern ++++ beta[] diff --git a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc index 0b81fa57a402a..f1a4a974602cb 100644 --- a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] 
[testenv="platinum"] [[ccr-put-auto-follow-pattern]] -=== Create Auto-Follow Pattern API +=== Create auto-follow pattern API ++++ -Create Auto-Follow Pattern +Create auto-follow pattern ++++ beta[] diff --git a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc index a2f9a22881d24..755bf63f0183f 100644 --- a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ccr-get-follow-stats]] -=== Get Follower Stats API +=== Get follower stats API ++++ -Get Follower Stats +Get follower stats ++++ beta[] diff --git a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc index e330701aaf397..0d56ee76bd9b9 100644 --- a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ccr-post-pause-follow]] -=== Pause Follower API +=== Pause follower API ++++ -Pause Follower +Pause follower ++++ beta[] diff --git a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc index 55da1b0cbd4ca..e8b4cd50f27e7 100644 --- a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ccr-post-resume-follow]] -=== Resume Follower API +=== Resume follower API ++++ -Resume Follower +Resume follower ++++ beta[] diff --git a/docs/reference/ccr/apis/follow/put-follow.asciidoc b/docs/reference/ccr/apis/follow/put-follow.asciidoc index 6386a2b09a5fc..3f6156c1e6820 100644 --- a/docs/reference/ccr/apis/follow/put-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/put-follow.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ccr-put-follow]] -=== Create Follower API +=== Create follower API ++++ -Create Follower +Create follower ++++ beta[] diff --git a/docs/reference/ccr/apis/get-ccr-stats.asciidoc b/docs/reference/ccr/apis/get-ccr-stats.asciidoc index d849a99c459d4..f47e49ee82674 100644 --- a/docs/reference/ccr/apis/get-ccr-stats.asciidoc +++ b/docs/reference/ccr/apis/get-ccr-stats.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ccr-get-stats]] -=== Get Cross-Cluster Replication Stats API +=== Get cross-cluster replication stats API ++++ -Get CCR Stats +Get CCR stats ++++ beta[] diff --git a/docs/reference/ccr/getting-started.asciidoc b/docs/reference/ccr/getting-started.asciidoc index 85116cd6d4ab0..4ee189793de9c 100644 --- a/docs/reference/ccr/getting-started.asciidoc +++ b/docs/reference/ccr/getting-started.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="platinum"] [[ccr-getting-started]] -== Getting Started with {ccr} +== Getting started with {ccr} beta[] diff --git a/docs/reference/commands/certgen.asciidoc b/docs/reference/commands/certgen.asciidoc index 956a4637ed31f..2ca489b3a60a9 100644 --- a/docs/reference/commands/certgen.asciidoc +++ b/docs/reference/commands/certgen.asciidoc @@ -51,7 +51,8 @@ keys for each instance. If you chose to generate a CA, which is the default behavior, the certificate and private key are included in the output file. If you chose to generate CSRs, you should provide them to your commercial or organization-specific certificate authority to obtain signed certificates. 
The -signed certificates must be in PEM format to work with {security}. +signed certificates must be in PEM format to work with the {stack} +{security-features}. [float] === Parameters diff --git a/docs/reference/commands/certutil.asciidoc b/docs/reference/commands/certutil.asciidoc index 4b04f95445ef6..06e9dc53bd9b6 100644 --- a/docs/reference/commands/certutil.asciidoc +++ b/docs/reference/commands/certutil.asciidoc @@ -93,7 +93,8 @@ the command produces a zip file containing the generated certificates and keys. The `csr` mode generates certificate signing requests (CSRs) that you can send to a trusted certificate authority to obtain signed certificates. The signed -certificates must be in PEM or PKCS#12 format to work with {security}. +certificates must be in PEM or PKCS#12 format to work with {es} +{security-features}. By default, the command produces a single CSR for a single instance. diff --git a/docs/reference/commands/setup-passwords.asciidoc b/docs/reference/commands/setup-passwords.asciidoc index e2d4dfdc13d3d..6e6d3dd75ed21 100644 --- a/docs/reference/commands/setup-passwords.asciidoc +++ b/docs/reference/commands/setup-passwords.asciidoc @@ -19,8 +19,8 @@ bin/elasticsearch-setup-passwords auto|interactive [float] === Description -This command is intended for use only during the initial configuration of -{xpack}. It uses the +This command is intended for use only during the initial configuration of the +{es} {security-features}. It uses the {stack-ov}/built-in-users.html#bootstrap-elastic-passwords[`elastic` bootstrap password] to run user management API requests. After you set a password for the `elastic` user, the bootstrap password is no longer active and you cannot use this command. @@ -36,7 +36,7 @@ location, ensure that the *ES_PATH_CONF* environment variable returns the correct path before you run the `elasticsearch-setup-passwords` command. You can override settings in your `elasticsearch.yml` file by using the `-E` command option. For more information about debugging connection failures, see -{xpack-ref}/trb-security-setup.html[`elasticsearch-setup-passwords` command fails due to connection failure]. +{stack-ov}/trb-security-setup.html[`elasticsearch-setup-passwords` command fails due to connection failure]. 
[float] === Parameters diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index 15d78e961ab04..70769b5b67237 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -189,7 +189,7 @@ Another option to specify `create` is to use the following uri: [source,js] -------------------------------------------------- -PUT twitter/_doc/1/_create +PUT twitter/_create/1 { "user" : "kimchy", "post_date" : "2009-11-15T14:12:12", diff --git a/docs/reference/ilm/apis/delete-lifecycle.asciidoc b/docs/reference/ilm/apis/delete-lifecycle.asciidoc index 2427169c5e797..0b9fce20d10d0 100644 --- a/docs/reference/ilm/apis/delete-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/delete-lifecycle.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="basic"] [[ilm-delete-lifecycle]] -=== Delete Lifecycle Policy API +=== Delete lifecycle policy API ++++ -Delete Policy +Delete policy ++++ beta[] diff --git a/docs/reference/ilm/apis/explain.asciidoc b/docs/reference/ilm/apis/explain.asciidoc index 3854981794999..1c8f5c9d861f1 100644 --- a/docs/reference/ilm/apis/explain.asciidoc +++ b/docs/reference/ilm/apis/explain.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="basic"] [[ilm-explain-lifecycle]] -=== Explain Lifecycle API +=== Explain lifecycle API ++++ -Explain Lifecycle +Explain lifecycle ++++ beta[] @@ -170,7 +170,7 @@ entered this phase <3> The date the loaded policy was last modified <4> The epoch time when the loaded policy was last modified -If {ILM} is waiting for a step to complete, the response includes status +If {ilm-init} is waiting for a step to complete, the response includes status information for the step that's being performed on the index. [source,js] diff --git a/docs/reference/ilm/apis/get-lifecycle.asciidoc b/docs/reference/ilm/apis/get-lifecycle.asciidoc index 9603da9cd64b3..161e82b091b3e 100644 --- a/docs/reference/ilm/apis/get-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/get-lifecycle.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="basic"] [[ilm-get-lifecycle]] -=== Get Lifecycle Policy API +=== Get lifecycle policy API ++++ -Get Policy +Get policy ++++ beta[] diff --git a/docs/reference/ilm/apis/get-status.asciidoc b/docs/reference/ilm/apis/get-status.asciidoc index 59557e8fbeee2..4dceb18a3611a 100644 --- a/docs/reference/ilm/apis/get-status.asciidoc +++ b/docs/reference/ilm/apis/get-status.asciidoc @@ -1,14 +1,14 @@ [role="xpack"] [testenv="basic"] [[ilm-get-status]] -=== Get {ILM} Status API +=== Get {ilm} status API ++++ -Get {ILM} Status +Get {ilm} status ++++ beta[] -Retrieves the current {ilm} status. +Retrieves the current {ilm} ({ilm-init}) status. ==== Request @@ -16,9 +16,9 @@ Retrieves the current {ilm} status. ==== Description -Returns the status of the {ILM} plugin. The `operation_mode` field in the +Returns the status of the {ilm-init} plugin. The `operation_mode` field in the response shows one of three states: `STARTED`, `STOPPING`, -or `STOPPED`. You can change the status of the {ILM} plugin with the +or `STOPPED`. You can change the status of the {ilm-init} plugin with the <> and <> APIs. ==== Request Parameters @@ -32,7 +32,7 @@ For more information, see {stack-ov}/security-privileges.html[Security Privilege ==== Examples -The following example gets the {ILM} plugin status. +The following example gets the {ilm-init} plugin status. 
[source,js] -------------------------------------------------- diff --git a/docs/reference/ilm/apis/ilm-api.asciidoc b/docs/reference/ilm/apis/ilm-api.asciidoc index 62e708be0ff5c..dcc3d1962cb64 100644 --- a/docs/reference/ilm/apis/ilm-api.asciidoc +++ b/docs/reference/ilm/apis/ilm-api.asciidoc @@ -1,5 +1,5 @@ [[index-lifecycle-management-api]] -== Index Lifecycle Management API +== {ilm-cap} API beta[] @@ -7,24 +7,24 @@ You can use the following APIs to manage policies on indices. [float] [[ilm-api-policy-endpoint]] -=== Policy Management APIs +=== Policy management APIs -* <> -* <> -* <> +* <> +* <> +* <> [float] [[ilm-api-index-endpoint]] -=== Index Management APIs +=== Index management APIs -* <> -* <> +* <> +* <> [float] [[ilm-api-management-endpoint]] -=== Operation Management APIs +=== Operation management APIs -* <> +* <> * <> * <> * <> diff --git a/docs/reference/ilm/apis/move-to-step.asciidoc b/docs/reference/ilm/apis/move-to-step.asciidoc index ca1c825772a80..6d648f5270209 100644 --- a/docs/reference/ilm/apis/move-to-step.asciidoc +++ b/docs/reference/ilm/apis/move-to-step.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="basic"] [[ilm-move-to-step]] -=== Move to Lifecycle Step API +=== Move to lifecycle step API ++++ -Move to Step +Move to step ++++ beta[] diff --git a/docs/reference/ilm/apis/put-lifecycle.asciidoc b/docs/reference/ilm/apis/put-lifecycle.asciidoc index 3f15e6d2c32b5..83facc6f42b01 100644 --- a/docs/reference/ilm/apis/put-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/put-lifecycle.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="basic"] [[ilm-put-lifecycle]] -=== Create Lifecycle Policy API +=== Create lifecycle policy API ++++ -Create Policy +Create policy ++++ beta[] @@ -35,7 +35,7 @@ include::{docdir}/rest-api/timeoutparms.asciidoc[] You must have the `manage_ilm` cluster privilege to use this API. You must also have the `manage` index privilege on all indices being managed by `policy`. -All operations executed by {Ilm} for a policy are executed as the user that +All operations executed by {ilm} for a policy are executed as the user that put the latest version of a policy. For more information, see {stack-ov}/security-privileges.html[Security Privileges]. diff --git a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc index e0e9bf3280db8..b8b44c3d0fe28 100644 --- a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc +++ b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="basic"] [[ilm-remove-policy]] -=== Remove Policy from Index API +=== Remove policy from index API ++++ -Remove Policy +Remove policy ++++ beta[] diff --git a/docs/reference/ilm/apis/retry-policy.asciidoc b/docs/reference/ilm/apis/retry-policy.asciidoc index d2dbd964aa526..2d4944a7ac40d 100644 --- a/docs/reference/ilm/apis/retry-policy.asciidoc +++ b/docs/reference/ilm/apis/retry-policy.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="basic"] [[ilm-retry-policy]] -=== Retry Policy Execution API +=== Retry policy execution API ++++ -Retry Policy +Retry policy ++++ beta[] diff --git a/docs/reference/ilm/apis/start.asciidoc b/docs/reference/ilm/apis/start.asciidoc index a739ebb71da12..97b7985c65428 100644 --- a/docs/reference/ilm/apis/start.asciidoc +++ b/docs/reference/ilm/apis/start.asciidoc @@ -1,14 +1,14 @@ [role="xpack"] [testenv="basic"] [[ilm-start]] -=== Start {ILM} API +=== Start {ilm} API ++++ -Start {ILM} +Start {ilm} ++++ beta[] -Start the {ILM} plugin. 
+Start the {ilm} ({ilm-init}) plugin. ==== Request @@ -16,9 +16,9 @@ Start the {ILM} plugin. ==== Description -Starts the {ILM} plugin if it is currently stopped. {ILM} is started -automatically when the cluster is formed. Restarting {ILM} is only -necessary if it has been stopped using the <>. +Starts the {ilm-init} plugin if it is currently stopped. {ilm-init} is started +automatically when the cluster is formed. Restarting {ilm-init} is only +necessary if it has been stopped using the <>. ==== Request Parameters @@ -27,7 +27,7 @@ include::{docdir}/rest-api/timeoutparms.asciidoc[] ==== Authorization You must have the `manage_ilm` cluster privilege to use this API. -For more information, see {stack-ov}/security-privileges.html[Security Privileges]. +For more information, see {stack-ov}/security-privileges.html[Security privileges]. ==== Examples diff --git a/docs/reference/ilm/apis/stop.asciidoc b/docs/reference/ilm/apis/stop.asciidoc index a33b945d53717..b2a1dba411a16 100644 --- a/docs/reference/ilm/apis/stop.asciidoc +++ b/docs/reference/ilm/apis/stop.asciidoc @@ -1,14 +1,14 @@ [role="xpack"] [testenv="basic"] [[ilm-stop]] -=== Stop {ILM} API +=== Stop {ilm} API ++++ -Stop {ILM} +Stop {ilm} ++++ beta[] -Stop the {ILM} plugin. +Stop the {ilm} ({ilm-init}) plugin. ==== Request @@ -16,14 +16,14 @@ Stop the {ILM} plugin. ==== Description -Halts all lifecycle management operations and stops the {ILM} plugin. This is -useful when you are performing maintenance on the cluster and need to prevent -{ILM} from performing any actions on your indices. +Halts all lifecycle management operations and stops the {ilm-init} plugin. This +is useful when you are performing maintenance on the cluster and need to prevent +{ilm-init} from performing any actions on your indices. The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. Use the <> API to see -if {ILM} is running. +if {ilm-init} is running. ==== Request Parameters diff --git a/docs/reference/ilm/error-handling.asciidoc b/docs/reference/ilm/error-handling.asciidoc index 07e018422c2d6..639c2fbaddd6d 100644 --- a/docs/reference/ilm/error-handling.asciidoc +++ b/docs/reference/ilm/error-handling.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="basic"] [[index-lifecycle-error-handling]] -== Index Lifecycle Error Handling +== Index lifecycle error handling beta[] diff --git a/docs/reference/ilm/getting-started-ilm.asciidoc b/docs/reference/ilm/getting-started-ilm.asciidoc index 401883d6a7c11..63193544bfff5 100644 --- a/docs/reference/ilm/getting-started-ilm.asciidoc +++ b/docs/reference/ilm/getting-started-ilm.asciidoc @@ -5,8 +5,8 @@ beta[] -Let's jump into {ILM} by working through a hands-on scenario. -This section will leverage many new concepts unique to {ILM} that +Let's jump into {ilm} ({ilm-init}) by working through a hands-on scenario. +This section will leverage many new concepts unique to {ilm-init} that you may not be familiar with. The following sections will explore these in more details. @@ -21,7 +21,7 @@ after 90 days. beta[] -There are many new features introduced by {ILM}, but we will only focus on +There are many new features introduced by {ilm-init}, but we will only focus on a few that are needed for our example. For starters, we will use the <> API to define our first policy. 
Lifecycle policies are defined in JSON and include specific @@ -99,7 +99,7 @@ PUT _template/datastream_template <3> alias to use for the rollover action, required since a rollover action is defined in the policy. -The above index template introduces a few new settings specific to {ILM}. +The above index template introduces a few new settings specific to {ilm-init}. The first being `index.lifecycle.name`. This setting will configure the "datastream_policy" to the index applying this template. This means that all newly created indices prefixed "datastream-" will be managed by @@ -148,7 +148,7 @@ beta[] Now that we have an index managed by our policy, how do we tell what is going on? Which phase are we in? Is something broken? This section will go over a few APIs and their responses to help us inspect our indices with respect -to {ILM}. +to {ilm-init}. With the help of the <>, we can know things like which phase we're in and when we entered that phase. The API @@ -162,7 +162,7 @@ GET datastream-*/_ilm/explain // CONSOLE // TEST[continued] -The above request will retrieve {ILM} execution information for all our +The above request will retrieve {ilm-init} execution information for all our managed indices. @@ -214,7 +214,7 @@ You can read about the full details of this response in the <>. For now, let's focus on how the response details which phase, action, and step we're in. We are in the "hot" phase, and "rollover" action. Rollover will continue to be called -by {ILM} until its conditions are met and it rolls over the index. +by {ilm-init} until its conditions are met and it rolls over the index. Afterwards, the original index will stay in the hot phase until 90 more days pass and it is deleted in the delete phase. As time goes on, new indices will be created and deleted. @@ -226,7 +226,7 @@ that same alias. -That's it! We have our first use-case managed by {ILM}. +That's it! We have our first use-case managed by {ilm-init}. To learn more about all our APIs, check out <>. diff --git a/docs/reference/ilm/ilm-and-snapshots.asciidoc b/docs/reference/ilm/ilm-and-snapshots.asciidoc index 847bf6337888f..45028b4f1f85c 100644 --- a/docs/reference/ilm/ilm-and-snapshots.asciidoc +++ b/docs/reference/ilm/ilm-and-snapshots.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="basic"] [[index-lifecycle-and-snapshots]] -== Restoring Snapshots of Managed Indices +== Restoring snapshots of managed indices beta[] diff --git a/docs/reference/ilm/index.asciidoc b/docs/reference/ilm/index.asciidoc index 6721abdd473e3..aaf7e2eede54e 100644 --- a/docs/reference/ilm/index.asciidoc +++ b/docs/reference/ilm/index.asciidoc @@ -3,17 +3,14 @@ [[index-lifecycle-management]] = Managing the index lifecycle -:ilm: index lifecycle management -:Ilm: Index lifecycle management -:ILM: ILM [partintro] -- beta[] -The <> enable you to automate how you -want to manage your indices over time. Rather than simply performing management -actions on your indices on a set schedule, you can base actions on other factors -such as shard size and performance requirements. +The <> enable you to +automate how you want to manage your indices over time. Rather than simply +performing management actions on your indices on a set schedule, you can base +actions on other factors such as shard size and performance requirements. You control how indices are handled as they age by attaching a lifecycle policy to the index template used to create them. 
You can update diff --git a/docs/reference/ilm/policy-definitions.asciidoc b/docs/reference/ilm/policy-definitions.asciidoc index 9b674689e7a08..2e2aababad647 100644 --- a/docs/reference/ilm/policy-definitions.asciidoc +++ b/docs/reference/ilm/policy-definitions.asciidoc @@ -2,7 +2,7 @@ beta[] [role="xpack"] [testenv="basic"] [[ilm-policy-definition]] -== Policy Phases and Actions +== Policy phases and actions beta[] @@ -71,8 +71,8 @@ index is rolled over, then `min_age` is the time elapsed from the time the index is rolled over. The intention here is to execute following phases and actions relative to when data was written last to a rolled over index. -The previous phase's actions must complete before {ILM} will check `min_age` and -transition into the next phase. +The previous phase's actions must complete before {ilm} will check `min_age` +and transition into the next phase. === Phase Execution @@ -80,8 +80,8 @@ beta[] The current phase definition, of an index's policy being executed, is stored in the index's metadata. The phase and its actions are compiled into a series -of discrete steps that are executed sequentially. Since some {ILM} actions are -more complex and involve multiple operations against an index, each of these +of discrete steps that are executed sequentially. Since some {ilm-init} actions +are more complex and involve multiple operations against an index, each of these operations are done in isolation in a unit called a "step". The <> exposes this information to us to see which step our index is either to execute next, or is currently diff --git a/docs/reference/ilm/set-up-lifecycle-policy.asciidoc b/docs/reference/ilm/set-up-lifecycle-policy.asciidoc index 99fa155f9c31c..1ab8d4399d5e7 100644 --- a/docs/reference/ilm/set-up-lifecycle-policy.asciidoc +++ b/docs/reference/ilm/set-up-lifecycle-policy.asciidoc @@ -6,9 +6,9 @@ beta[] In order for an index to use an {ilm} policy to manage its lifecycle we must -first define a lifecycle policy for it to use. The following request creates -a policy called `my_policy` in Elasticsearch which we can later use to manage -our indexes. +first define a lifecycle policy for it to use. The following request creates a +policy called `my_policy` in Elasticsearch which we can later use to manage our +indexes. [source,js] ------------------------ @@ -39,7 +39,7 @@ PUT _ilm/policy/my_policy {ilm} will manage an index using the policy defined in the `index.lifecycle.name` index setting. If this setting does not exist in the -settings for a particular index {ilm} will not manage that index. +settings for a particular index, {ilm} will not manage that index. To set the policy for an index there are two options: @@ -90,7 +90,7 @@ PUT test-000001 <1> Set this initial index to be the write index for this alias. We can now write data to the `test-alias` alias. Because we have a rollover -action defined in our policy when the index grows larger than 25GB {ilm} will +action defined in our policy, when the index grows larger than 25GB {ilm} will create a new index and roll the alias over to use the new index automatically. 
=== Apply a policy to a create index request diff --git a/docs/reference/ilm/start-stop-ilm.asciidoc b/docs/reference/ilm/start-stop-ilm.asciidoc index 516f85a0e39d2..4414c13ee0f5c 100644 --- a/docs/reference/ilm/start-stop-ilm.asciidoc +++ b/docs/reference/ilm/start-stop-ilm.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="basic"] [[start-stop-ilm]] -== Start And Stop {ilm} +== Start and stop {ilm} beta[] diff --git a/docs/reference/ilm/update-lifecycle-policy.asciidoc b/docs/reference/ilm/update-lifecycle-policy.asciidoc index c63714c5309d9..01d93b64b01d7 100644 --- a/docs/reference/ilm/update-lifecycle-policy.asciidoc +++ b/docs/reference/ilm/update-lifecycle-policy.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="basic"] [[update-lifecycle-policy]] -== Update Lifecycle Policy +== Update lifecycle policy ++++ -Update Policy +Update policy ++++ beta[] diff --git a/docs/reference/ilm/using-policies-rollover.asciidoc b/docs/reference/ilm/using-policies-rollover.asciidoc index 3cc4271e83dc8..3af6e125fcd99 100644 --- a/docs/reference/ilm/using-policies-rollover.asciidoc +++ b/docs/reference/ilm/using-policies-rollover.asciidoc @@ -129,19 +129,19 @@ the new index, enabling indexing to continue uninterrupted. beta[] -After an index has been rolled over by {ilm}, the -`index.lifecycle.indexing_complete` setting will be set to `true` on the index. -This indicates to {ilm} that this index has already been rolled over, and does -not need to be rolled over again. If you <> -from an index and set it to use another policy, this setting indicates that the -new policy should skip execution of the Rollover action. - -You can also set this setting to `true` manually if you want to indicate that -{ilm} should not roll over a particular index. This is useful if you need to -make an exception to your normal Lifecycle Policy and switching the alias to a +The `index.lifecycle.indexing_complete` setting indicates to {ilm} whether this +index has already been rolled over. If it is set to `true`, that indicates that +this index has already been rolled over and does not need to be rolled over +again. Therefore, {ilm} will skip any Rollover Action configured in the +associated lifecycle policy for this index. This is useful if you need to make +an exception to your normal Lifecycle Policy and switching the alias to a different index by hand, but do not want to remove the index from {ilm} completely. +This setting is set to `true` automatically by ILM upon the successful +completion of a Rollover Action. However, it will be removed if +<> from the index. + IMPORTANT: If `index.lifecycle.indexing_complete` is set to `true` on an index, it will not be rolled over by {ilm}, but {ilm} will verify that this index is no longer the write index for the alias specified by diff --git a/docs/reference/indices/apis/freeze.asciidoc b/docs/reference/indices/apis/freeze.asciidoc index 5ca9ecbc6b801..180c22dd62438 100644 --- a/docs/reference/indices/apis/freeze.asciidoc +++ b/docs/reference/indices/apis/freeze.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="basic"] [[freeze-index-api]] -== Freeze Index API +== Freeze index API ++++ -Freeze Index +Freeze index ++++ Freezes an index. 
diff --git a/docs/reference/indices/apis/unfreeze.asciidoc b/docs/reference/indices/apis/unfreeze.asciidoc index 4a01813463516..1350c08ac7e30 100644 --- a/docs/reference/indices/apis/unfreeze.asciidoc +++ b/docs/reference/indices/apis/unfreeze.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="basic"] [[unfreeze-index-api]] -== Unfreeze Index API +== Unfreeze index API ++++ -Unfreeze Index +Unfreeze index ++++ Unfreezes an index. diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 2f422a1f4f70c..89adbbb9b7c44 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -1324,1854 +1324,29 @@ pipeline basis. Useful to find out which pipelines are used the most or spent th Additional ingest processors can be implemented and installed as Elasticsearch {plugins}/intro.html[plugins]. See {plugins}/ingest.html[Ingest plugins] for information about the available ingest plugins. -[[append-processor]] -=== Append Processor -Appends one or more values to an existing array if the field already exists and it is an array. -Converts a scalar to an array and appends one or more values to it if the field exists and it is a scalar. -Creates an array containing the provided values if the field doesn't exist. -Accepts a single value or an array of values. - -[[append-options]] -.Append Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to be appended to. Supports <>. -| `value` | yes | - | The value to be appended. Supports <>. -include::ingest-node-common-processor.asciidoc[] -|====== - -[source,js] --------------------------------------------------- -{ - "append": { - "field": "tags", - "value": ["production", "{{app}}", "{{owner}}"] - } -} --------------------------------------------------- -// NOTCONSOLE - -[[bytes-processor]] -=== Bytes Processor -Converts a human readable byte value (e.g. 1kb) to its value in bytes (e.g. 1024). - -Supported human readable units are "b", "kb", "mb", "gb", "tb", "pb" case insensitive. An error will occur if -the field is not a supported format or resultant value exceeds 2^63. - -[[bytes-options]] -.Bytes Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to convert -| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place -| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document -include::ingest-node-common-processor.asciidoc[] -|====== - -[source,js] --------------------------------------------------- -{ - "bytes": { - "field": "file.size" - } -} --------------------------------------------------- -// NOTCONSOLE - -[[convert-processor]] -=== Convert Processor -Converts a field in the currently ingested document to a different type, such as converting a string to an integer. -If the field value is an array, all members will be converted. - -The supported types include: `integer`, `long`, `float`, `double`, `string`, `boolean`, and `auto`. - -Specifying `boolean` will set the field to true if its string value is equal to `true` (ignore case), to -false if its string value is equal to `false` (ignore case), or it will throw an exception otherwise. - -Specifying `auto` will attempt to convert the string-valued `field` into the closest non-string type. 
-For example, a field whose value is `"true"` will be converted to its respective boolean type: `true`. Do note -that float takes precedence of double in `auto`. A value of `"242.15"` will "automatically" be converted to -`242.15` of type `float`. If a provided field cannot be appropriately converted, the Convert Processor will -still process successfully and leave the field value as-is. In such a case, `target_field` will -still be updated with the unconverted field value. - -[[convert-options]] -.Convert Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field whose value is to be converted -| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place -| `type` | yes | - | The type to convert the existing value to -| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document -include::ingest-node-common-processor.asciidoc[] -|====== - -[source,js] --------------------------------------------------- -PUT _ingest/pipeline/my-pipeline-id -{ - "description": "converts the content of the id field to an integer", - "processors" : [ - { - "convert" : { - "field" : "id", - "type": "integer" - } - } - ] -} --------------------------------------------------- -// NOTCONSOLE - -[[date-processor]] -=== Date Processor - -Parses dates from fields, and then uses the date or timestamp as the timestamp for the document. -By default, the date processor adds the parsed date as a new field called `@timestamp`. You can specify a -different field by setting the `target_field` configuration parameter. Multiple date formats are supported -as part of the same date processor definition. They will be used sequentially to attempt parsing the date field, -in the same order they were defined as part of the processor definition. - -[[date-options]] -.Date options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to get the date from. -| `target_field` | no | @timestamp | The field that will hold the parsed date. -| `formats` | yes | - | An array of the expected date formats. Can be a Joda pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. -| `timezone` | no | UTC | The timezone to use when parsing the date. Supports <>. -| `locale` | no | ENGLISH | The locale to use when parsing the date, relevant when parsing month names or week days. Supports <>. -include::ingest-node-common-processor.asciidoc[] -|====== - -Here is an example that adds the parsed date to the `timestamp` field based on the `initial_date` field: - -[source,js] --------------------------------------------------- -{ - "description" : "...", - "processors" : [ - { - "date" : { - "field" : "initial_date", - "target_field" : "timestamp", - "formats" : ["dd/MM/yyyy hh:mm:ss"], - "timezone" : "Europe/Amsterdam" - } - } - ] -} --------------------------------------------------- -// NOTCONSOLE - -The `timezone` and `locale` processor parameters are templated. This means that their values can be -extracted from fields within documents. The example below shows how to extract the locale/timezone -details from existing fields, `my_timezone` and `my_locale`, in the ingested document that contain -the timezone and locale values. 
- -[source,js] --------------------------------------------------- -{ - "description" : "...", - "processors" : [ - { - "date" : { - "field" : "initial_date", - "target_field" : "timestamp", - "formats" : ["ISO8601"], - "timezone" : "{{my_timezone}}", - "locale" : "{{my_locale}}" - } - } - ] -} --------------------------------------------------- -// NOTCONSOLE - -[[date-index-name-processor]] -=== Date Index Name Processor - -The purpose of this processor is to point documents to the right time based index based -on a date or timestamp field in a document by using the <>. - -The processor sets the `_index` meta field with a date math index name expression based on the provided index name -prefix, a date or timestamp field in the documents being processed and the provided date rounding. - -First, this processor fetches the date or timestamp from a field in the document being processed. Optionally, -date formatting can be configured on how the field's value should be parsed into a date. Then this date, -the provided index name prefix and the provided date rounding get formatted into a date math index name expression. -Also here optionally date formatting can be specified on how the date should be formatted into a date math index name -expression. - -An example pipeline that points documents to a monthly index that starts with a `myindex-` prefix based on a -date in the `date1` field: - -[source,js] --------------------------------------------------- -PUT _ingest/pipeline/monthlyindex -{ - "description": "monthly date-time index naming", - "processors" : [ - { - "date_index_name" : { - "field" : "date1", - "index_name_prefix" : "myindex-", - "date_rounding" : "M" - } - } - ] -} --------------------------------------------------- -// CONSOLE - - -Using that pipeline for an index request: - -[source,js] --------------------------------------------------- -PUT /myindex/_doc/1?pipeline=monthlyindex -{ - "date1" : "2016-04-25T12:02:01.789Z" -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -[source,js] --------------------------------------------------- -{ - "_index" : "myindex-2016-04-01", - "_type" : "_doc", - "_id" : "1", - "_version" : 1, - "result" : "created", - "_shards" : { - "total" : 2, - "successful" : 1, - "failed" : 0 - }, - "_seq_no" : 55, - "_primary_term" : 1 -} --------------------------------------------------- -// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] - - -The above request will not index this document into the `myindex` index, but into the `myindex-2016-04-01` index because -it was rounded by month. This is because the date-index-name-processor overrides the `_index` property of the document. - -To see the date-math value of the index supplied in the actual index request which resulted in the above document being -indexed into `myindex-2016-04-01` we can inspect the effects of the processor using a simulate request. 
- - -[source,js] --------------------------------------------------- -POST _ingest/pipeline/_simulate -{ - "pipeline" : - { - "description": "monthly date-time index naming", - "processors" : [ - { - "date_index_name" : { - "field" : "date1", - "index_name_prefix" : "myindex-", - "date_rounding" : "M" - } - } - ] - }, - "docs": [ - { - "_source": { - "date1": "2016-04-25T12:02:01.789Z" - } - } - ] -} --------------------------------------------------- -// CONSOLE - -and the result: - -[source,js] --------------------------------------------------- -{ - "docs" : [ - { - "doc" : { - "_id" : "_id", - "_index" : "", - "_type" : "_type", - "_source" : { - "date1" : "2016-04-25T12:02:01.789Z" - }, - "_ingest" : { - "timestamp" : "2016-11-08T19:43:03.850+0000" - } - } - } - ] -} --------------------------------------------------- -// TESTRESPONSE[s/2016-11-08T19:43:03.850\+0000/$body.docs.0.doc._ingest.timestamp/] - -The above example shows that `_index` was set to ``. Elasticsearch -understands this to mean `2016-04-01` as is explained in the <> - -[[date-index-name-options]] -.Date index name options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to get the date or timestamp from. -| `index_name_prefix` | no | - | A prefix of the index name to be prepended before the printed date. Supports <>. -| `date_rounding` | yes | - | How to round the date when formatting the date into the index name. Valid values are: `y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and `s` (second). Supports <>. -| `date_formats` | no | yyyy-MM-dd'T'HH:mm:ss.SSSZ | An array of the expected date formats for parsing dates / timestamps in the document being preprocessed. Can be a Joda pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. -| `timezone` | no | UTC | The timezone to use when parsing the date and when date math index supports resolves expressions into concrete index names. -| `locale` | no | ENGLISH | The locale to use when parsing the date from the document being preprocessed, relevant when parsing month names or week days. -| `index_name_format` | no | yyyy-MM-dd | The format to be used when printing the parsed date into the index name. An valid Joda pattern is expected here. Supports <>. -include::ingest-node-common-processor.asciidoc[] -|====== - -[[dissect-processor]] -=== Dissect Processor - -Similar to the <>, dissect also extracts structured fields out of a single text field -within a document. However unlike the <>, dissect does not use -https://en.wikipedia.org/wiki/Regular_expression[Regular Expressions]. This allows dissect's syntax to be simple and for -some cases faster than the <>. - -Dissect matches a single text field against a defined pattern. 
- -For example the following pattern: -[source,txt] --------------------------------------------------- -%{clientip} %{ident} %{auth} [%{@timestamp}] \"%{verb} %{request} HTTP/%{httpversion}\" %{status} %{size} --------------------------------------------------- -will match a log line of this format: -[source,txt] --------------------------------------------------- -1.2.3.4 - - [30/Apr/1998:22:00:52 +0000] \"GET /english/venues/cities/images/montpellier/18.gif HTTP/1.0\" 200 3171 --------------------------------------------------- -and result in a document with the following fields: -[source,js] --------------------------------------------------- -"doc": { - "_index": "_index", - "_type": "_type", - "_id": "_id", - "_source": { - "request": "/english/venues/cities/images/montpellier/18.gif", - "auth": "-", - "ident": "-", - "verb": "GET", - "@timestamp": "30/Apr/1998:22:00:52 +0000", - "size": "3171", - "clientip": "1.2.3.4", - "httpversion": "1.0", - "status": "200" - } -} --------------------------------------------------- -// NOTCONSOLE - -A dissect pattern is defined by the parts of the string that will be discarded. In the example above the first part -to be discarded is a single space. Dissect finds this space, then assigns the value of `clientip` is everything up -until that space. -Later dissect matches the `[` and then `]` and then assigns `@timestamp` to everything in-between `[` and `]`. -Paying special attention the parts of the string to discard will help build successful dissect patterns. - -Successful matches require all keys in a pattern to have a value. If any of the `%{keyname}` defined in the pattern do -not have a value, then an exception is thrown and may be handled by the <> directive. -An empty key `%{}` or a <> can be used to match values, but exclude the value from -the final document. All matched values are represented as string data types. The <> -may be used to convert to expected data type. - -Dissect also supports <> that can change dissect's default -behavior. For example you can instruct dissect to ignore certain fields, append fields, skip over padding, etc. -See <> for more information. - -[[dissect-options]] -.Dissect Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to dissect -| `pattern` | yes | - | The pattern to apply to the field -| `append_separator`| no | "" (empty string) | The character(s) that separate the appended fields. -| `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document -include::ingest-node-common-processor.asciidoc[] -|====== - -[source,js] --------------------------------------------------- -{ - "dissect": { - "field": "message", - "pattern" : "%{clientip} %{ident} %{auth} [%{@timestamp}] \"%{verb} %{request} HTTP/%{httpversion}\" %{status} %{size}" - } -} --------------------------------------------------- -// NOTCONSOLE -[[dissect-key-modifiers]] -==== Dissect key modifiers -Key modifiers can change the default behavior for dissection. Key modifiers may be found on the left or right -of the `%{keyname}` always inside the `%{` and `}`. For example `%{+keyname ->}` has the append and right padding -modifiers. 
- -.Dissect Key Modifiers -[options="header"] -|====== -| Modifier | Name | Position | Example | Description | Details -| `->` | Skip right padding | (far) right | `%{keyname1->}` | Skips any repeated characters to the right | <> -| `+` | Append | left | `%{+keyname} %{+keyname}` | Appends two or more fields together | <> -| `+` with `/n` | Append with order | left and right | `%{+keyname/2} %{+keyname/1}` | Appends two or more fields together in the order specified | <> -| `?` | Named skip key | left | `%{?ignoreme}` | Skips the matched value in the output. Same behavior as `%{}`| <> -| `*` and `&` | Reference keys | left | `%{*r1} %{&r1}` | Sets the output key as value of `*` and output value of `&` | <> -|====== - -[[dissect-modifier-skip-right-padding]] -===== Right padding modifier (`->`) - -The algorithm that performs the dissection is very strict in that it requires all characters in the pattern to match -the source string. For example, the pattern `%{fookey} %{barkey}` (1 space), will match the string "foo{nbsp}bar" -(1 space), but will not match the string "foo{nbsp}{nbsp}bar" (2 spaces) since the pattern has only 1 space and the -source string has 2 spaces. - -The right padding modifier helps with this case. Adding the right padding modifier to the pattern `%{fookey->} %{barkey}`, -It will now will match "foo{nbsp}bar" (1 space) and "foo{nbsp}{nbsp}bar" (2 spaces) -and even "foo{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}bar" (10 spaces). - -Use the right padding modifier to allow for repetition of the characters after a `%{keyname->}`. - -The right padding modifier may be placed on any key with any other modifiers. It should always be the furthest right -modifier. For example: `%{+keyname/1->}` and `%{->}` - -Right padding modifier example -|====== -| *Pattern* | `%{ts->} %{level}` -| *Input* | 1998-08-10T17:15:42,466{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}WARN -| *Result* a| -* ts = 1998-08-10T17:15:42,466 -* level = WARN -|====== - -The right padding modifier may be used with an empty key to help skip unwanted data. For example, the same input string, but wrapped with brackets requires the use of an empty right padded key to achieve the same result. - -Right padding modifier with empty key example -|====== -| *Pattern* | `[%{ts}]%{->}[%{level}]` -| *Input* | [1998-08-10T17:15:42,466]{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}[WARN] -| *Result* a| -* ts = 1998-08-10T17:15:42,466 -* level = WARN -|====== - -===== Append modifier (`+`) -[[dissect-modifier-append-key]] -Dissect supports appending two or more results together for the output. -Values are appended left to right. An append separator can be specified. -In this example the append_separator is defined as a space. - -Append modifier example -|====== -| *Pattern* | `%{+name} %{+name} %{+name} %{+name}` -| *Input* | john jacob jingleheimer schmidt -| *Result* a| -* name = john jacob jingleheimer schmidt -|====== - -===== Append with order modifier (`+` and `/n`) -[[dissect-modifier-append-key-with-order]] -Dissect supports appending two or more results together for the output. -Values are appended based on the order defined (`/n`). An append separator can be specified. -In this example the append_separator is defined as a comma. 
- -Append with order modifier example -|====== -| *Pattern* | `%{+name/2} %{+name/4} %{+name/3} %{+name/1}` -| *Input* | john jacob jingleheimer schmidt -| *Result* a| -* name = schmidt,john,jingleheimer,jacob -|====== - -===== Named skip key (`?`) -[[dissect-modifier-named-skip-key]] -Dissect supports ignoring matches in the final result. This can be done with an empty key `%{}`, but for readability -it may be desired to give that empty key a name. - -Named skip key modifier example -|====== -| *Pattern* | `%{clientip} %{?ident} %{?auth} [%{@timestamp}]` -| *Input* | 1.2.3.4 - - [30/Apr/1998:22:00:52 +0000] -| *Result* a| -* ip = 1.2.3.4 -* @timestamp = 30/Apr/1998:22:00:52 +0000 -|====== - -===== Reference keys (`*` and `&`) -[[dissect-modifier-reference-keys]] -Dissect support using parsed values as the key/value pairings for the structured content. Imagine a system that -partially logs in key/value pairs. Reference keys allow you to maintain that key/value relationship. - -Reference key modifier example -|====== -| *Pattern* | `[%{ts}] [%{level}] %{*p1}:%{&p1} %{*p2}:%{&p2}` -| *Input* | [2018-08-10T17:15:42,466] [ERR] ip:1.2.3.4 error:REFUSED -| *Result* a| -* ts = 1998-08-10T17:15:42,466 -* level = ERR -* ip = 1.2.3.4 -* error = REFUSED -|====== - -[[drop-processor]] -=== Drop Processor -Drops the document without raising any errors. This is useful to prevent the document from -getting indexed based on some condition. - -[[drop-options]] -.Drop Options -[options="header"] -|====== -| Name | Required | Default | Description -include::ingest-node-common-processor.asciidoc[] -|====== - -[source,js] --------------------------------------------------- -{ - "drop": { - "if" : "ctx.network_name == 'Guest'" - } -} --------------------------------------------------- -// NOTCONSOLE - -[[dot-expand-processor]] -=== Dot Expander Processor - -Expands a field with dots into an object field. This processor allows fields -with dots in the name to be accessible by other processors in the pipeline. -Otherwise these <> can't be accessed by any processor. - -[[dot-expender-options]] -.Dot Expand Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to expand into an object field -| `path` | no | - | The field that contains the field to expand. Only required if the field to expand is part another object field, because the `field` option can only understand leaf fields. -include::ingest-node-common-processor.asciidoc[] -|====== - -[source,js] --------------------------------------------------- -{ - "dot_expander": { - "field": "foo.bar" - } -} --------------------------------------------------- -// NOTCONSOLE - -For example the dot expand processor would turn this document: - -[source,js] --------------------------------------------------- -{ - "foo.bar" : "value" -} --------------------------------------------------- -// NOTCONSOLE - -into: - -[source,js] --------------------------------------------------- -{ - "foo" : { - "bar" : "value" - } -} --------------------------------------------------- -// NOTCONSOLE - -If there is already a `bar` field nested under `foo` then -this processor merges the `foo.bar` field into it. If the field is -a scalar value then it will turn that field into an array field. 
- -For example, the following document: - -[source,js] --------------------------------------------------- -{ - "foo.bar" : "value2", - "foo" : { - "bar" : "value1" - } -} --------------------------------------------------- -// NOTCONSOLE - -is transformed by the `dot_expander` processor into: - -[source,js] --------------------------------------------------- -{ - "foo" : { - "bar" : ["value1", "value2"] - } -} --------------------------------------------------- -// NOTCONSOLE - -If any field outside of the leaf field conflicts with a pre-existing field of the same name, -then that field needs to be renamed first. - -Consider the following document: - -[source,js] --------------------------------------------------- -{ - "foo": "value1", - "foo.bar": "value2" -} --------------------------------------------------- -// NOTCONSOLE - -Then the `foo` needs to be renamed first before the `dot_expander` -processor is applied. So in order for the `foo.bar` field to properly -be expanded into the `bar` field under the `foo` field the following -pipeline should be used: - -[source,js] --------------------------------------------------- -{ - "processors" : [ - { - "rename" : { - "field" : "foo", - "target_field" : "foo.bar"" - } - }, - { - "dot_expander": { - "field": "foo.bar" - } - } - ] -} --------------------------------------------------- -// NOTCONSOLE - -The reason for this is that Ingest doesn't know how to automatically cast -a scalar field to an object field. - -[[fail-processor]] -=== Fail Processor -Raises an exception. This is useful for when -you expect a pipeline to fail and want to relay a specific message -to the requester. - -[[fail-options]] -.Fail Options -[options="header"] -|====== -| Name | Required | Default | Description -| `message` | yes | - | The error message thrown by the processor. Supports <>. -include::ingest-node-common-processor.asciidoc[] -|====== - -[source,js] --------------------------------------------------- -{ - "fail": { - "if" : "ctx.tags.contains('production') != true", - "message": "The production tag is not present, found tags: {{tags}}" - } -} --------------------------------------------------- -// NOTCONSOLE - -[[foreach-processor]] -=== Foreach Processor - -Processes elements in an array of unknown length. - -All processors can operate on elements inside an array, but if all elements of an array need to -be processed in the same way, defining a processor for each element becomes cumbersome and tricky -because it is likely that the number of elements in an array is unknown. For this reason the `foreach` -processor exists. By specifying the field holding array elements and a processor that -defines what should happen to each element, array fields can easily be preprocessed. - -A processor inside the foreach processor works in the array element context and puts that in the ingest metadata -under the `_ingest._value` key. If the array element is a json object it holds all immediate fields of that json object. -and if the nested object is a value is `_ingest._value` just holds that value. Note that if a processor prior to the -`foreach` processor used `_ingest._value` key then the specified value will not be available to the processor inside -the `foreach` processor. The `foreach` processor does restore the original value, so that value is available to processors -after the `foreach` processor. - -Note that any other field from the document are accessible and modifiable like with all other processors. 
This processor -just puts the current array element being read into `_ingest._value` ingest metadata attribute, so that it may be -pre-processed. - -If the `foreach` processor fails to process an element inside the array, and no `on_failure` processor has been specified, -then it aborts the execution and leaves the array unmodified. - -[[foreach-options]] -.Foreach Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The array field -| `processor` | yes | - | The processor to execute against each field -| `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document -include::ingest-node-common-processor.asciidoc[] -|====== - -Assume the following document: - -[source,js] --------------------------------------------------- -{ - "values" : ["foo", "bar", "baz"] -} --------------------------------------------------- -// NOTCONSOLE - -When this `foreach` processor operates on this sample document: - -[source,js] --------------------------------------------------- -{ - "foreach" : { - "field" : "values", - "processor" : { - "uppercase" : { - "field" : "_ingest._value" - } - } - } -} --------------------------------------------------- -// NOTCONSOLE - -Then the document will look like this after preprocessing: - -[source,js] --------------------------------------------------- -{ - "values" : ["FOO", "BAR", "BAZ"] -} --------------------------------------------------- -// NOTCONSOLE - -Let's take a look at another example: - -[source,js] --------------------------------------------------- -{ - "persons" : [ - { - "id" : "1", - "name" : "John Doe" - }, - { - "id" : "2", - "name" : "Jane Doe" - } - ] -} --------------------------------------------------- -// NOTCONSOLE - -In this case, the `id` field needs to be removed, -so the following `foreach` processor is used: - -[source,js] --------------------------------------------------- -{ - "foreach" : { - "field" : "persons", - "processor" : { - "remove" : { - "field" : "_ingest._value.id" - } - } - } -} --------------------------------------------------- -// NOTCONSOLE - -After preprocessing the result is: - -[source,js] --------------------------------------------------- -{ - "persons" : [ - { - "name" : "John Doe" - }, - { - "name" : "Jane Doe" - } - ] -} --------------------------------------------------- -// NOTCONSOLE - -The wrapped processor can have a `on_failure` definition. -For example, the `id` field may not exist on all person objects. -Instead of failing the index request, you can use an `on_failure` -block to send the document to the 'failure_index' index for later inspection: - -[source,js] --------------------------------------------------- -{ - "foreach" : { - "field" : "persons", - "processor" : { - "remove" : { - "field" : "_value.id", - "on_failure" : [ - { - "set" : { - "field", "_index", - "value", "failure_index" - } - } - ] - } - } - } -} --------------------------------------------------- -// NOTCONSOLE - -In this example, if the `remove` processor does fail, then -the array elements that have been processed thus far will -be updated. - -Another advanced example can be found in the {plugins}/ingest-attachment-with-arrays.html[attachment processor documentation]. - - - -[[grok-processor]] -=== Grok Processor - -Extracts structured fields out of a single text field within a document. You choose which field to -extract matched fields from, as well as the grok pattern you expect will match. 
A grok pattern is like a regular -expression that supports aliased expressions that can be reused. - -This tool is perfect for syslog logs, apache and other webserver logs, mysql logs, and in general, any log format -that is generally written for humans and not computer consumption. -This processor comes packaged with many -https://github.com/elastic/elasticsearch/blob/{branch}/libs/grok/src/main/resources/patterns[reusable patterns]. - -If you need help building patterns to match your logs, you will find the {kibana-ref}/xpack-grokdebugger.html[Grok Debugger] tool quite useful! The Grok Debugger is an {xpack} feature under the Basic License and is therefore *free to use*. The Grok Constructor at is also a useful tool. - -[[grok-basics]] -==== Grok Basics - -Grok sits on top of regular expressions, so any regular expressions are valid in grok as well. -The regular expression library is Oniguruma, and you can see the full supported regexp syntax -https://github.com/kkos/oniguruma/blob/master/doc/RE[on the Onigiruma site]. - -Grok works by leveraging this regular expression language to allow naming existing patterns and combining them into more -complex patterns that match your fields. - -The syntax for reusing a grok pattern comes in three forms: `%{SYNTAX:SEMANTIC}`, `%{SYNTAX}`, `%{SYNTAX:SEMANTIC:TYPE}`. - -The `SYNTAX` is the name of the pattern that will match your text. For example, `3.44` will be matched by the `NUMBER` -pattern and `55.3.244.1` will be matched by the `IP` pattern. The syntax is how you match. `NUMBER` and `IP` are both -patterns that are provided within the default patterns set. - -The `SEMANTIC` is the identifier you give to the piece of text being matched. For example, `3.44` could be the -duration of an event, so you could call it simply `duration`. Further, a string `55.3.244.1` might identify -the `client` making a request. - -The `TYPE` is the type you wish to cast your named field. `int`, `long`, `double`, `float` and `boolean` are supported types for coercion. - -For example, you might want to match the following text: - -[source,txt] --------------------------------------------------- -3.44 55.3.244.1 --------------------------------------------------- - -You may know that the message in the example is a number followed by an IP address. You can match this text by using the following -Grok expression. - -[source,txt] --------------------------------------------------- -%{NUMBER:duration} %{IP:client} --------------------------------------------------- - -[[using-grok]] -==== Using the Grok Processor in a Pipeline - -[[grok-options]] -.Grok Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to use for grok expression parsing -| `patterns` | yes | - | An ordered list of grok expression to match and extract named captures with. Returns on the first expression in the list that matches. -| `pattern_definitions` | no | - | A map of pattern-name and pattern tuples defining custom patterns to be used by the current processor. Patterns matching existing names will override the pre-existing definition. -| `trace_match` | no | false | when true, `_ingest._grok_match_index` will be inserted into your matched document's metadata with the index into the pattern found in `patterns` that matched. 
-| `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document -include::ingest-node-common-processor.asciidoc[] -|====== - -Here is an example of using the provided patterns to extract out and name structured fields from a string field in -a document. - -[source,js] --------------------------------------------------- -{ - "message": "55.3.244.1 GET /index.html 15824 0.043" -} --------------------------------------------------- -// NOTCONSOLE - -The pattern for this could be: - -[source,txt] --------------------------------------------------- -%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration} --------------------------------------------------- - -Here is an example pipeline for processing the above document by using Grok: - -[source,js] --------------------------------------------------- -{ - "description" : "...", - "processors": [ - { - "grok": { - "field": "message", - "patterns": ["%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}"] - } - } - ] -} --------------------------------------------------- -// NOTCONSOLE - -This pipeline will insert these named captures as new fields within the document, like so: - -[source,js] --------------------------------------------------- -{ - "message": "55.3.244.1 GET /index.html 15824 0.043", - "client": "55.3.244.1", - "method": "GET", - "request": "/index.html", - "bytes": 15824, - "duration": "0.043" -} --------------------------------------------------- -// NOTCONSOLE - -[[custom-patterns]] -==== Custom Patterns - -The Grok processor comes pre-packaged with a base set of pattern. These patterns may not always have -what you are looking for. Pattern have a very basic format. Each entry describes has a name and the pattern itself. - -You can add your own patterns to a processor definition under the `pattern_definitions` option. -Here is an example of a pipeline specifying custom pattern definitions: - -[source,js] --------------------------------------------------- -{ - "description" : "...", - "processors": [ - { - "grok": { - "field": "message", - "patterns": ["my %{FAVORITE_DOG:dog} is colored %{RGB:color}"], - "pattern_definitions" : { - "FAVORITE_DOG" : "beagle", - "RGB" : "RED|GREEN|BLUE" - } - } - } - ] -} --------------------------------------------------- -// NOTCONSOLE - -[[trace-match]] -==== Providing Multiple Match Patterns - -Sometimes one pattern is not enough to capture the potential structure of a field. Let's assume we -want to match all messages that contain your favorite pet breeds of either cats or dogs. One way to accomplish -this is to provide two distinct patterns that can be matched, instead of one really complicated expression capturing -the same `or` behavior. - -Here is an example of such a configuration executed against the simulate API: - -[source,js] --------------------------------------------------- -POST _ingest/pipeline/_simulate -{ - "pipeline": { - "description" : "parse multiple patterns", - "processors": [ - { - "grok": { - "field": "message", - "patterns": ["%{FAVORITE_DOG:pet}", "%{FAVORITE_CAT:pet}"], - "pattern_definitions" : { - "FAVORITE_DOG" : "beagle", - "FAVORITE_CAT" : "burmese" - } - } - } - ] -}, -"docs":[ - { - "_source": { - "message": "I love burmese cats!" 
- } - } - ] -} --------------------------------------------------- -// CONSOLE - -response: - -[source,js] --------------------------------------------------- -{ - "docs": [ - { - "doc": { - "_type": "_type", - "_index": "_index", - "_id": "_id", - "_source": { - "message": "I love burmese cats!", - "pet": "burmese" - }, - "_ingest": { - "timestamp": "2016-11-08T19:43:03.850+0000" - } - } - } - ] -} --------------------------------------------------- -// TESTRESPONSE[s/2016-11-08T19:43:03.850\+0000/$body.docs.0.doc._ingest.timestamp/] - -Both patterns will set the field `pet` with the appropriate match, but what if we want to trace which of our -patterns matched and populated our fields? We can do this with the `trace_match` parameter. Here is the output of -that same pipeline, but with `"trace_match": true` configured: - -//// -Hidden setup for example: -[source,js] --------------------------------------------------- -POST _ingest/pipeline/_simulate -{ - "pipeline": { - "description" : "parse multiple patterns", - "processors": [ - { - "grok": { - "field": "message", - "patterns": ["%{FAVORITE_DOG:pet}", "%{FAVORITE_CAT:pet}"], - "trace_match": true, - "pattern_definitions" : { - "FAVORITE_DOG" : "beagle", - "FAVORITE_CAT" : "burmese" - } - } - } - ] -}, -"docs":[ - { - "_source": { - "message": "I love burmese cats!" - } - } - ] -} --------------------------------------------------- -// CONSOLE -//// - -[source,js] --------------------------------------------------- -{ - "docs": [ - { - "doc": { - "_type": "_type", - "_index": "_index", - "_id": "_id", - "_source": { - "message": "I love burmese cats!", - "pet": "burmese" - }, - "_ingest": { - "_grok_match_index": "1", - "timestamp": "2016-11-08T19:43:03.850+0000" - } - } - } - ] -} --------------------------------------------------- -// TESTRESPONSE[s/2016-11-08T19:43:03.850\+0000/$body.docs.0.doc._ingest.timestamp/] - -In the above response, you can see that the index of the pattern that matched was `"1"`. This is to say that it was the -second (index starts at zero) pattern in `patterns` to match. - -This trace metadata enables debugging which of the patterns matched. This information is stored in the ingest -metadata and will not be indexed. - -[[grok-processor-rest-get]] -==== Retrieving patterns from REST endpoint - -The Grok Processor comes packaged with its own REST endpoint for retrieving which patterns the processor is packaged with. - -[source,js] --------------------------------------------------- -GET _ingest/processor/grok --------------------------------------------------- -// CONSOLE - -The above request will return a response body containing a key-value representation of the built-in patterns dictionary. - -[source,js] --------------------------------------------------- -{ - "patterns" : { - "BACULA_CAPACITY" : "%{INT}{1,3}(,%{INT}{3})*", - "PATH" : "(?:%{UNIXPATH}|%{WINPATH})", - ... -} --------------------------------------------------- -// NOTCONSOLE - -This can be useful to reference as the built-in patterns change across versions. - -[[grok-watchdog]] -==== Grok watchdog - -Grok expressions that take too long to execute are interrupted and -the grok processor then fails with an exception. 
The grok -processor has a watchdog thread that determines when evaluation of -a grok expression takes too long and is controlled by the following -settings: - -[[grok-watchdog-options]] -.Grok watchdog settings -[options="header"] -|====== -| Name | Default | Description -| `ingest.grok.watchdog.interval` | 1s | How often to check whether there are grok evaluations that take longer than the maximum allowed execution time. -| `ingest.grok.watchdog.max_execution_time` | 1s | The maximum allowed execution of a grok expression evaluation. -|====== - -[[gsub-processor]] -=== Gsub Processor -Converts a string field by applying a regular expression and a replacement. -If the field is not a string, the processor will throw an exception. - -[[gsub-options]] -.Gsub Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to apply the replacement to -| `pattern` | yes | - | The pattern to be replaced -| `replacement` | yes | - | The string to replace the matching patterns with -| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place -| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document -include::ingest-node-common-processor.asciidoc[] -|====== - -[source,js] --------------------------------------------------- -{ - "gsub": { - "field": "field1", - "pattern": "\.", - "replacement": "-" - } -} --------------------------------------------------- -// NOTCONSOLE - -[[join-processor]] -=== Join Processor -Joins each element of an array into a single string using a separator character between each element. -Throws an error when the field is not an array. - -[[join-options]] -.Join Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to be separated -| `separator` | yes | - | The separator character -| `target_field` | no | `field` | The field to assign the joined value to, by default `field` is updated in-place -include::ingest-node-common-processor.asciidoc[] -|====== - -[source,js] --------------------------------------------------- -{ - "join": { - "field": "joined_array_field", - "separator": "-" - } -} --------------------------------------------------- -// NOTCONSOLE - -[[json-processor]] -=== JSON Processor -Converts a JSON string into a structured JSON object. - -[[json-options]] -.Json Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to be parsed -| `target_field` | no | `field` | The field to insert the converted structured object into -| `add_to_root` | no | false | Flag that forces the serialized json to be injected into the top level of the document. `target_field` must not be set when this option is chosen. -include::ingest-node-common-processor.asciidoc[] -|====== - -All JSON-supported types will be parsed (null, boolean, number, array, object, string). 
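-
-The `add_to_root` option listed above is not covered by the examples below. As a minimal, illustrative sketch (the
-`labels_json` field name is hypothetical and not part of the original examples), it could be configured like this:
-
-[source,js]
---------------------------------------------------
-{
-  "json" : {
-    "field" : "labels_json",
-    "add_to_root" : true
-  }
-}
---------------------------------------------------
-// NOTCONSOLE
-
-With this configuration, the keys of the parsed object would be merged into the top level of the document rather than
-nested under a target field. Note that `target_field` must not be set when `add_to_root` is used.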
- -Suppose you provide this configuration of the `json` processor: - -[source,js] --------------------------------------------------- -{ - "json" : { - "field" : "string_source", - "target_field" : "json_target" - } -} --------------------------------------------------- -// NOTCONSOLE - -If the following document is processed: - -[source,js] --------------------------------------------------- -{ - "string_source": "{\"foo\": 2000}" -} --------------------------------------------------- -// NOTCONSOLE - -after the `json` processor operates on it, it will look like: - -[source,js] --------------------------------------------------- -{ - "string_source": "{\"foo\": 2000}", - "json_target": { - "foo": 2000 - } -} --------------------------------------------------- -// NOTCONSOLE - -If the following configuration is provided, omitting the optional `target_field` setting: -[source,js] --------------------------------------------------- -{ - "json" : { - "field" : "source_and_target" - } -} --------------------------------------------------- -// NOTCONSOLE - -then after the `json` processor operates on this document: - -[source,js] --------------------------------------------------- -{ - "source_and_target": "{\"foo\": 2000}" -} --------------------------------------------------- -// NOTCONSOLE - -it will look like: - -[source,js] --------------------------------------------------- -{ - "source_and_target": { - "foo": 2000 - } -} --------------------------------------------------- -// NOTCONSOLE - -This illustrates that, unless it is explicitly named in the processor configuration, the `target_field` -is the same field provided in the required `field` configuration. - -[[kv-processor]] -=== KV Processor -This processor helps automatically parse messages (or specific event fields) which are of the foo=bar variety. - -For example, if you have a log message which contains `ip=1.2.3.4 error=REFUSED`, you can parse those automatically by configuring: - - -[source,js] --------------------------------------------------- -{ - "kv": { - "field": "message", - "field_split": " ", - "value_split": "=" - } -} --------------------------------------------------- -// NOTCONSOLE - -[[kv-options]] -.Kv Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to be parsed -| `field_split` | yes | - | Regex pattern to use for splitting key-value pairs -| `value_split` | yes | - | Regex pattern to use for splitting the key from the value within a key-value pair -| `target_field` | no | `null` | The field to insert the extracted keys into. Defaults to the root of the document -| `include_keys` | no | `null` | List of keys to filter and insert into document. Defaults to including all keys -| `exclude_keys` | no | `null` | List of keys to exclude from document -| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document -| `prefix` | no | `null` | Prefix to be added to extracted keys -| `trim_key` | no | `null` | String of characters to trim from extracted keys -| `trim_value` | no | `null` | String of characters to trim from extracted values -| `strip_brackets` | no | `false` | If `true` strip brackets `()`, `<>`, `[]` as well as quotes `'` and `"` from extracted values -include::ingest-node-common-processor.asciidoc[] -|====== - - -[[lowercase-processor]] -=== Lowercase Processor -Converts a string to its lowercase equivalent. 
- -[[lowercase-options]] -.Lowercase Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to make lowercase -| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place -| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document -include::ingest-node-common-processor.asciidoc[] -|====== - -[source,js] --------------------------------------------------- -{ - "lowercase": { - "field": "foo" - } -} --------------------------------------------------- -// NOTCONSOLE - -[[pipeline-processor]] -=== Pipeline Processor -Executes another pipeline. - -[[pipeline-options]] -.Pipeline Options -[options="header"] -|====== -| Name | Required | Default | Description -| `name` | yes | - | The name of the pipeline to execute -include::ingest-node-common-processor.asciidoc[] -|====== - -[source,js] --------------------------------------------------- -{ - "pipeline": { - "name": "inner-pipeline" - } -} --------------------------------------------------- -// NOTCONSOLE - -An example of using this processor for nesting pipelines would be: - -Define an inner pipeline: - -[source,js] --------------------------------------------------- -PUT _ingest/pipeline/pipelineA -{ - "description" : "inner pipeline", - "processors" : [ - { - "set" : { - "field": "inner_pipeline_set", - "value": "inner" - } - } - ] -} --------------------------------------------------- -// CONSOLE - -Define another pipeline that uses the previously defined inner pipeline: - -[source,js] --------------------------------------------------- -PUT _ingest/pipeline/pipelineB -{ - "description" : "outer pipeline", - "processors" : [ - { - "pipeline" : { - "name": "pipelineA" - } - }, - { - "set" : { - "field": "outer_pipeline_set", - "value": "outer" - } - } - ] -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -Now indexing a document while applying the outer pipeline will see the inner pipeline executed -from the outer pipeline: - -[source,js] --------------------------------------------------- -PUT /myindex/_doc/1?pipeline=pipelineB -{ - "field": "value" -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -Response from the index request: - -[source,js] --------------------------------------------------- -{ - "_index": "myindex", - "_type": "_doc", - "_id": "1", - "_version": 1, - "result": "created", - "_shards": { - "total": 2, - "successful": 1, - "failed": 0 - }, - "_seq_no": 66, - "_primary_term": 1, -} --------------------------------------------------- -// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] - -Indexed document: - -[source,js] --------------------------------------------------- -{ - "field": "value", - "inner_pipeline_set": "inner", - "outer_pipeline_set": "outer" -} --------------------------------------------------- -// NOTCONSOLE - -[[remove-processor]] -=== Remove Processor -Removes existing fields. If one field doesn't exist, an exception will be thrown. - -[[remove-options]] -.Remove Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | Fields to be removed. Supports <>. 
-| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document -include::ingest-node-common-processor.asciidoc[] -|====== - -Here is an example to remove a single field: - -[source,js] --------------------------------------------------- -{ - "remove": { - "field": "user_agent" - } -} --------------------------------------------------- -// NOTCONSOLE - -To remove multiple fields, you can use the following query: - -[source,js] --------------------------------------------------- -{ - "remove": { - "field": ["user_agent", "url"] - } -} --------------------------------------------------- -// NOTCONSOLE - -[[rename-processor]] -=== Rename Processor -Renames an existing field. If the field doesn't exist or the new name is already used, an exception will be thrown. - -[[rename-options]] -.Rename Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to be renamed. Supports <>. -| `target_field` | yes | - | The new name of the field. Supports <>. -| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document -include::ingest-node-common-processor.asciidoc[] -|====== - -[source,js] --------------------------------------------------- -{ - "rename": { - "field": "provider", - "target_field": "cloud.provider" - } -} --------------------------------------------------- -// NOTCONSOLE - -[[script-processor]] -=== Script Processor - -Allows inline and stored scripts to be executed within ingest pipelines. - -See <> to learn more about writing scripts. The Script Processor -leverages caching of compiled scripts for improved performance. Since the -script specified within the processor is potentially re-compiled per document, it is important -to understand how script caching works. To learn more about -caching see <>. - -[[script-options]] -.Script Options -[options="header"] -|====== -| Name | Required | Default | Description -| `lang` | no | "painless" | The scripting language -| `id` | no | - | The stored script id to refer to -| `source` | no | - | An inline script to be executed -| `params` | no | - | Script Parameters -include::ingest-node-common-processor.asciidoc[] -|====== - -One of `id` or `source` options must be provided in order to properly reference a script to execute. - -You can access the current ingest document from within the script context by using the `ctx` variable. - -The following example sets a new field called `field_a_plus_b_times_c` to be the sum of two existing -numeric fields `field_a` and `field_b` multiplied by the parameter param_c: - -[source,js] --------------------------------------------------- -{ - "script": { - "lang": "painless", - "source": "ctx.field_a_plus_b_times_c = (ctx.field_a + ctx.field_b) * params.param_c", - "params": { - "param_c": 10 - } - } -} --------------------------------------------------- -// NOTCONSOLE - -It is possible to use the Script Processor to manipulate document metadata like `_index` and `_type` during -ingestion. 
Here is an example of an Ingest Pipeline that renames the index and type to `my_index` no matter what -was provided in the original index request: - -[source,js] --------------------------------------------------- -PUT _ingest/pipeline/my_index -{ - "description": "use index:my_index and type:_doc", - "processors": [ - { - "script": { - "source": """ - ctx._index = 'my_index'; - ctx._type = '_doc'; - """ - } - } - ] -} --------------------------------------------------- -// CONSOLE - -Using the above pipeline, we can attempt to index a document into the `any_index` index. - -[source,js] --------------------------------------------------- -PUT any_index/_doc/1?pipeline=my_index -{ - "message": "text" -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -The response from the above index request: - -[source,js] --------------------------------------------------- -{ - "_index": "my_index", - "_type": "_doc", - "_id": "1", - "_version": 1, - "result": "created", - "_shards": { - "total": 2, - "successful": 1, - "failed": 0 - }, - "_seq_no": 89, - "_primary_term": 1, -} --------------------------------------------------- -// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] - -In the above response, you can see that our document was actually indexed into `my_index` instead of -`any_index`. This type of manipulation is often convenient in pipelines that have various branches of transformation, -and depending on the progress made, indexed into different indices. - -[[set-processor]] -=== Set Processor -Sets one field and associates it with the specified value. If the field already exists, -its value will be replaced with the provided one. - -[[set-options]] -.Set Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to insert, upsert, or update. Supports <>. -| `value` | yes | - | The value to be set for the field. Supports <>. -| `override` | no | true | If processor will update fields with pre-existing non-null-valued field. When set to `false`, such fields will not be touched. -include::ingest-node-common-processor.asciidoc[] -|====== - -[source,js] --------------------------------------------------- -{ - "set": { - "field": "host.os.name", - "value": "{{os}}" - } -} --------------------------------------------------- -// NOTCONSOLE - - -[[ingest-node-set-security-user-processor]] -=== Set Security User Processor -Sets user-related details (such as `username`, `roles`, `email`, `full_name` -and `metadata` ) from the current -authenticated user to the current document by pre-processing the ingest. - -IMPORTANT: Requires an authenticated user for the index request. - -[[set-security-user-options]] -.Set Security User Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to store the user information into. -| `properties` | no | [`username`, `roles`, `email`, `full_name`, `metadata`] | Controls what user related properties are added to the `field`. 
-include::ingest-node-common-processor.asciidoc[] -|====== - -The following example adds all user details for the current authenticated user -to the `user` field for all documents that are processed by this pipeline: - -[source,js] --------------------------------------------------- -{ - "processors" : [ - { - "set_security_user": { - "field": "user" - } - } - ] -} --------------------------------------------------- -// NOTCONSOLE - - -[[split-processor]] -=== Split Processor -Splits a field into an array using a separator character. Only works on string fields. - -[[split-options]] -.Split Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to split -| `separator` | yes | - | A regex which matches the separator, eg `,` or `\s+` -| `target_field` | no | `field` | The field to assign the split value to, by default `field` is updated in-place -| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document -include::ingest-node-common-processor.asciidoc[] -|====== - -[source,js] --------------------------------------------------- -{ - "split": { - "field": "my_field", - "separator": "\\s+" <1> - } -} --------------------------------------------------- -// NOTCONSOLE -<1> Treat all consecutive whitespace characters as a single separator - -[[sort-processor]] -=== Sort Processor -Sorts the elements of an array ascending or descending. Homogeneous arrays of numbers will be sorted -numerically, while arrays of strings or heterogeneous arrays of strings + numbers will be sorted lexicographically. -Throws an error when the field is not an array. - -[[sort-options]] -.Sort Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to be sorted -| `order` | no | `"asc"` | The sort order to use. Accepts `"asc"` or `"desc"`. -| `target_field` | no | `field` | The field to assign the sorted value to, by default `field` is updated in-place -include::ingest-node-common-processor.asciidoc[] -|====== - -[source,js] --------------------------------------------------- -{ - "sort": { - "field": "array_field_to_sort", - "order": "desc" - } -} --------------------------------------------------- -// NOTCONSOLE - -[[trim-processor]] -=== Trim Processor -Trims whitespace from field. - -NOTE: This only works on leading and trailing whitespace. - -[[trim-options]] -.Trim Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The string-valued field to trim whitespace from -| `target_field` | no | `field` | The field to assign the trimmed value to, by default `field` is updated in-place -| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document -include::ingest-node-common-processor.asciidoc[] -|====== - -[source,js] --------------------------------------------------- -{ - "trim": { - "field": "foo" - } -} --------------------------------------------------- -// NOTCONSOLE - -[[uppercase-processor]] -=== Uppercase Processor -Converts a string to its uppercase equivalent. 
- -[[uppercase-options]] -.Uppercase Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to make uppercase -| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place -| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document -include::ingest-node-common-processor.asciidoc[] -|====== - -[source,js] --------------------------------------------------- -{ - "uppercase": { - "field": "foo" - } -} --------------------------------------------------- -// NOTCONSOLE - -[[urldecode-processor]] -=== URL Decode Processor -URL-decodes a string - -[[urldecode-options]] -.URL Decode Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to decode -| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place -| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document -include::ingest-node-common-processor.asciidoc[] -|====== - -[source,js] --------------------------------------------------- -{ - "urldecode": { - "field": "my_url_to_decode" - } -} --------------------------------------------------- -// NOTCONSOLE +include::processors/append.asciidoc[] +include::processors/bytes.asciidoc[] +include::processors/convert.asciidoc[] +include::processors/date.asciidoc[] +include::processors/date-index-name.asciidoc[] +include::processors/dissect.asciidoc[] +include::processors/dot-expand.asciidoc[] +include::processors/drop.asciidoc[] +include::processors/fail.asciidoc[] +include::processors/foreach.asciidoc[] +include::processors/grok.asciidoc[] +include::processors/gsub.asciidoc[] +include::processors/join.asciidoc[] +include::processors/json.asciidoc[] +include::processors/kv.asciidoc[] +include::processors/pipeline.asciidoc[] +include::processors/remove.asciidoc[] +include::processors/rename.asciidoc[] +include::processors/script.asciidoc[] +include::processors/set.asciidoc[] +include::processors/set-security-user.asciidoc[] +include::processors/split.asciidoc[] +include::processors/sort.asciidoc[] +include::processors/trim.asciidoc[] +include::processors/uppercase.asciidoc[] +include::processors/url-decode.asciidoc[] diff --git a/docs/reference/ingest/processors/append.asciidoc b/docs/reference/ingest/processors/append.asciidoc new file mode 100644 index 0000000000000..1c2dc935116ce --- /dev/null +++ b/docs/reference/ingest/processors/append.asciidoc @@ -0,0 +1,27 @@ +[[append-processor]] +=== Append Processor +Appends one or more values to an existing array if the field already exists and it is an array. +Converts a scalar to an array and appends one or more values to it if the field exists and it is a scalar. +Creates an array containing the provided values if the field doesn't exist. +Accepts a single value or an array of values. + +[[append-options]] +.Append Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to be appended to. Supports <>. +| `value` | yes | - | The value to be appended. Supports <>. 
+include::common-options.asciidoc[]
+|======
+
+[source,js]
+--------------------------------------------------
+{
+  "append": {
+    "field": "tags",
+    "value": ["production", "{{app}}", "{{owner}}"]
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
diff --git a/docs/reference/ingest/processors/bytes.asciidoc b/docs/reference/ingest/processors/bytes.asciidoc
new file mode 100644
index 0000000000000..76f054cac64c2
--- /dev/null
+++ b/docs/reference/ingest/processors/bytes.asciidoc
@@ -0,0 +1,27 @@
+[[bytes-processor]]
+=== Bytes Processor
+Converts a human readable byte value (e.g. 1kb) to its value in bytes (e.g. 1024).
+
+Supported human readable units are "b", "kb", "mb", "gb", "tb", and "pb", case insensitive. An error will occur if
+the field is not in a supported format or the resultant value exceeds 2^63.
+
+[[bytes-options]]
+.Bytes Options
+[options="header"]
+|======
+| Name | Required | Default | Description
+| `field` | yes | - | The field to convert
+| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place
+| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document
+include::common-options.asciidoc[]
+|======
+
+[source,js]
+--------------------------------------------------
+{
+  "bytes": {
+    "field": "file.size"
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
diff --git a/docs/reference/ingest/ingest-node-common-processor.asciidoc b/docs/reference/ingest/processors/common-options.asciidoc
similarity index 100%
rename from docs/reference/ingest/ingest-node-common-processor.asciidoc
rename to docs/reference/ingest/processors/common-options.asciidoc
diff --git a/docs/reference/ingest/processors/convert.asciidoc b/docs/reference/ingest/processors/convert.asciidoc
new file mode 100644
index 0000000000000..9c439a3c1a985
--- /dev/null
+++ b/docs/reference/ingest/processors/convert.asciidoc
@@ -0,0 +1,45 @@
+[[convert-processor]]
+=== Convert Processor
+Converts a field in the currently ingested document to a different type, such as converting a string to an integer.
+If the field value is an array, all members will be converted.
+
+The supported types include: `integer`, `long`, `float`, `double`, `string`, `boolean`, and `auto`.
+
+Specifying `boolean` will set the field to true if its string value is equal to `true` (ignoring case), to
+false if its string value is equal to `false` (ignoring case), or it will throw an exception otherwise.
+
+Specifying `auto` will attempt to convert the string-valued `field` into the closest non-string type.
+For example, a field whose value is `"true"` will be converted to its respective boolean type: `true`. Note
+that float takes precedence over double in `auto`. A value of `"242.15"` will "automatically" be converted to
+`242.15` of type `float`. If a provided field cannot be appropriately converted, the Convert Processor will
+still process successfully and leave the field value as-is. In such a case, `target_field` will
+still be updated with the unconverted field value.
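+
+As a minimal, illustrative sketch of the `auto` behavior described above (the `metric` field name is hypothetical and
+not part of the original examples), such a conversion could be configured as:
+
+[source,js]
+--------------------------------------------------
+{
+  "convert" : {
+    "field" : "metric",
+    "type" : "auto"
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+With this configuration, a `metric` value of `"242.15"` would become the float `242.15`, while a value that cannot be
+converted, such as `"n/a"`, would be left as-is.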
+ +[[convert-options]] +.Convert Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field whose value is to be converted +| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place +| `type` | yes | - | The type to convert the existing value to +| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::common-options.asciidoc[] +|====== + +[source,js] +-------------------------------------------------- +PUT _ingest/pipeline/my-pipeline-id +{ + "description": "converts the content of the id field to an integer", + "processors" : [ + { + "convert" : { + "field" : "id", + "type": "integer" + } + } + ] +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/ingest/processors/date-index-name.asciidoc b/docs/reference/ingest/processors/date-index-name.asciidoc new file mode 100644 index 0000000000000..fcece261bd440 --- /dev/null +++ b/docs/reference/ingest/processors/date-index-name.asciidoc @@ -0,0 +1,145 @@ +[[date-index-name-processor]] +=== Date Index Name Processor + +The purpose of this processor is to point documents to the right time based index based +on a date or timestamp field in a document by using the <>. + +The processor sets the `_index` meta field with a date math index name expression based on the provided index name +prefix, a date or timestamp field in the documents being processed and the provided date rounding. + +First, this processor fetches the date or timestamp from a field in the document being processed. Optionally, +date formatting can be configured on how the field's value should be parsed into a date. Then this date, +the provided index name prefix and the provided date rounding get formatted into a date math index name expression. +Also here optionally date formatting can be specified on how the date should be formatted into a date math index name +expression. + +An example pipeline that points documents to a monthly index that starts with a `myindex-` prefix based on a +date in the `date1` field: + +[source,js] +-------------------------------------------------- +PUT _ingest/pipeline/monthlyindex +{ + "description": "monthly date-time index naming", + "processors" : [ + { + "date_index_name" : { + "field" : "date1", + "index_name_prefix" : "myindex-", + "date_rounding" : "M" + } + } + ] +} +-------------------------------------------------- +// CONSOLE + + +Using that pipeline for an index request: + +[source,js] +-------------------------------------------------- +PUT /myindex/_doc/1?pipeline=monthlyindex +{ + "date1" : "2016-04-25T12:02:01.789Z" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[source,js] +-------------------------------------------------- +{ + "_index" : "myindex-2016-04-01", + "_type" : "_doc", + "_id" : "1", + "_version" : 1, + "result" : "created", + "_shards" : { + "total" : 2, + "successful" : 1, + "failed" : 0 + }, + "_seq_no" : 55, + "_primary_term" : 1 +} +-------------------------------------------------- +// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] + + +The above request will not index this document into the `myindex` index, but into the `myindex-2016-04-01` index because +it was rounded by month. 
This is because the date-index-name-processor overrides the `_index` property of the document. + +To see the date-math value of the index supplied in the actual index request which resulted in the above document being +indexed into `myindex-2016-04-01` we can inspect the effects of the processor using a simulate request. + + +[source,js] +-------------------------------------------------- +POST _ingest/pipeline/_simulate +{ + "pipeline" : + { + "description": "monthly date-time index naming", + "processors" : [ + { + "date_index_name" : { + "field" : "date1", + "index_name_prefix" : "myindex-", + "date_rounding" : "M" + } + } + ] + }, + "docs": [ + { + "_source": { + "date1": "2016-04-25T12:02:01.789Z" + } + } + ] +} +-------------------------------------------------- +// CONSOLE + +and the result: + +[source,js] +-------------------------------------------------- +{ + "docs" : [ + { + "doc" : { + "_id" : "_id", + "_index" : "", + "_type" : "_type", + "_source" : { + "date1" : "2016-04-25T12:02:01.789Z" + }, + "_ingest" : { + "timestamp" : "2016-11-08T19:43:03.850+0000" + } + } + } + ] +} +-------------------------------------------------- +// TESTRESPONSE[s/2016-11-08T19:43:03.850\+0000/$body.docs.0.doc._ingest.timestamp/] + +The above example shows that `_index` was set to ``. Elasticsearch +understands this to mean `2016-04-01` as is explained in the <> + +[[date-index-name-options]] +.Date index name options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to get the date or timestamp from. +| `index_name_prefix` | no | - | A prefix of the index name to be prepended before the printed date. Supports <>. +| `date_rounding` | yes | - | How to round the date when formatting the date into the index name. Valid values are: `y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and `s` (second). Supports <>. +| `date_formats` | no | yyyy-MM-dd'T'HH:mm:ss.SSSZ | An array of the expected date formats for parsing dates / timestamps in the document being preprocessed. Can be a Joda pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. +| `timezone` | no | UTC | The timezone to use when parsing the date and when date math index supports resolves expressions into concrete index names. +| `locale` | no | ENGLISH | The locale to use when parsing the date from the document being preprocessed, relevant when parsing month names or week days. +| `index_name_format` | no | yyyy-MM-dd | The format to be used when printing the parsed date into the index name. An valid Joda pattern is expected here. Supports <>. +include::common-options.asciidoc[] +|====== diff --git a/docs/reference/ingest/processors/date.asciidoc b/docs/reference/ingest/processors/date.asciidoc new file mode 100644 index 0000000000000..17cb367afadaa --- /dev/null +++ b/docs/reference/ingest/processors/date.asciidoc @@ -0,0 +1,65 @@ +[[date-processor]] +=== Date Processor + +Parses dates from fields, and then uses the date or timestamp as the timestamp for the document. +By default, the date processor adds the parsed date as a new field called `@timestamp`. You can specify a +different field by setting the `target_field` configuration parameter. Multiple date formats are supported +as part of the same date processor definition. They will be used sequentially to attempt parsing the date field, +in the same order they were defined as part of the processor definition. 
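+
+For instance, a minimal sketch of that fallback behavior (the exact list of formats here is illustrative; `initial_date`
+matches the field used in the examples below) could look like:
+
+[source,js]
+--------------------------------------------------
+{
+  "date" : {
+    "field" : "initial_date",
+    "formats" : ["ISO8601", "UNIX", "dd/MM/yyyy hh:mm:ss"]
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+Each entry in `formats` is tried in the order listed until one of them successfully parses the value of `initial_date`.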
+ +[[date-options]] +.Date options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to get the date from. +| `target_field` | no | @timestamp | The field that will hold the parsed date. +| `formats` | yes | - | An array of the expected date formats. Can be a Joda pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. +| `timezone` | no | UTC | The timezone to use when parsing the date. Supports <>. +| `locale` | no | ENGLISH | The locale to use when parsing the date, relevant when parsing month names or week days. Supports <>. +include::common-options.asciidoc[] +|====== + +Here is an example that adds the parsed date to the `timestamp` field based on the `initial_date` field: + +[source,js] +-------------------------------------------------- +{ + "description" : "...", + "processors" : [ + { + "date" : { + "field" : "initial_date", + "target_field" : "timestamp", + "formats" : ["dd/MM/yyyy hh:mm:ss"], + "timezone" : "Europe/Amsterdam" + } + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + +The `timezone` and `locale` processor parameters are templated. This means that their values can be +extracted from fields within documents. The example below shows how to extract the locale/timezone +details from existing fields, `my_timezone` and `my_locale`, in the ingested document that contain +the timezone and locale values. + +[source,js] +-------------------------------------------------- +{ + "description" : "...", + "processors" : [ + { + "date" : { + "field" : "initial_date", + "target_field" : "timestamp", + "formats" : ["ISO8601"], + "timezone" : "{{my_timezone}}", + "locale" : "{{my_locale}}" + } + } + ] +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/ingest/processors/dissect.asciidoc b/docs/reference/ingest/processors/dissect.asciidoc new file mode 100644 index 0000000000000..0bcd1a27c7437 --- /dev/null +++ b/docs/reference/ingest/processors/dissect.asciidoc @@ -0,0 +1,191 @@ +[[dissect-processor]] +=== Dissect Processor + +Similar to the <>, dissect also extracts structured fields out of a single text field +within a document. However unlike the <>, dissect does not use +https://en.wikipedia.org/wiki/Regular_expression[Regular Expressions]. This allows dissect's syntax to be simple and for +some cases faster than the <>. + +Dissect matches a single text field against a defined pattern. 
+
+For example, the following pattern:
+[source,txt]
+--------------------------------------------------
+%{clientip} %{ident} %{auth} [%{@timestamp}] \"%{verb} %{request} HTTP/%{httpversion}\" %{status} %{size}
+--------------------------------------------------
+will match a log line of this format:
+[source,txt]
+--------------------------------------------------
+1.2.3.4 - - [30/Apr/1998:22:00:52 +0000] \"GET /english/venues/cities/images/montpellier/18.gif HTTP/1.0\" 200 3171
+--------------------------------------------------
+and result in a document with the following fields:
+[source,js]
+--------------------------------------------------
+"doc": {
+  "_index": "_index",
+  "_type": "_type",
+  "_id": "_id",
+  "_source": {
+    "request": "/english/venues/cities/images/montpellier/18.gif",
+    "auth": "-",
+    "ident": "-",
+    "verb": "GET",
+    "@timestamp": "30/Apr/1998:22:00:52 +0000",
+    "size": "3171",
+    "clientip": "1.2.3.4",
+    "httpversion": "1.0",
+    "status": "200"
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+A dissect pattern is defined by the parts of the string that will be discarded. In the example above, the first part
+to be discarded is a single space. Dissect finds this space, then assigns the value of `clientip` to everything up
+until that space.
+Later dissect matches the `[` and then `]` and then assigns `@timestamp` to everything in-between `[` and `]`.
+Paying special attention to the parts of the string to discard will help build successful dissect patterns.
+
+Successful matches require all keys in a pattern to have a value. If any of the `%{keyname}` defined in the pattern do
+not have a value, then an exception is thrown and may be handled by the <> directive.
+An empty key `%{}` or a <> can be used to match values, but exclude the value from
+the final document. All matched values are represented as string data types. The <>
+may be used to convert to the expected data type.
+
+Dissect also supports <> that can change dissect's default
+behavior. For example, you can instruct dissect to ignore certain fields, append fields, skip over padding, etc.
+See <> for more information.
+
+[[dissect-options]]
+.Dissect Options
+[options="header"]
+|======
+| Name | Required | Default | Description
+| `field` | yes | - | The field to dissect
+| `pattern` | yes | - | The pattern to apply to the field
+| `append_separator`| no | "" (empty string) | The character(s) that separate the appended fields.
+| `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document
+include::common-options.asciidoc[]
+|======
+
+[source,js]
+--------------------------------------------------
+{
+  "dissect": {
+    "field": "message",
+    "pattern" : "%{clientip} %{ident} %{auth} [%{@timestamp}] \"%{verb} %{request} HTTP/%{httpversion}\" %{status} %{size}"
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
+[[dissect-key-modifiers]]
+==== Dissect key modifiers
+Key modifiers can change the default behavior for dissection. Key modifiers may be found on the left or right
+of the `%{keyname}`, always inside the `%{` and `}`. For example `%{+keyname ->}` has the append and right padding
+modifiers.
+ +.Dissect Key Modifiers +[options="header"] +|====== +| Modifier | Name | Position | Example | Description | Details +| `->` | Skip right padding | (far) right | `%{keyname1->}` | Skips any repeated characters to the right | <> +| `+` | Append | left | `%{+keyname} %{+keyname}` | Appends two or more fields together | <> +| `+` with `/n` | Append with order | left and right | `%{+keyname/2} %{+keyname/1}` | Appends two or more fields together in the order specified | <> +| `?` | Named skip key | left | `%{?ignoreme}` | Skips the matched value in the output. Same behavior as `%{}`| <> +| `*` and `&` | Reference keys | left | `%{*r1} %{&r1}` | Sets the output key as value of `*` and output value of `&` | <> +|====== + +[[dissect-modifier-skip-right-padding]] +===== Right padding modifier (`->`) + +The algorithm that performs the dissection is very strict in that it requires all characters in the pattern to match +the source string. For example, the pattern `%{fookey} %{barkey}` (1 space), will match the string "foo{nbsp}bar" +(1 space), but will not match the string "foo{nbsp}{nbsp}bar" (2 spaces) since the pattern has only 1 space and the +source string has 2 spaces. + +The right padding modifier helps with this case. Adding the right padding modifier to the pattern `%{fookey->} %{barkey}`, +It will now will match "foo{nbsp}bar" (1 space) and "foo{nbsp}{nbsp}bar" (2 spaces) +and even "foo{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}bar" (10 spaces). + +Use the right padding modifier to allow for repetition of the characters after a `%{keyname->}`. + +The right padding modifier may be placed on any key with any other modifiers. It should always be the furthest right +modifier. For example: `%{+keyname/1->}` and `%{->}` + +Right padding modifier example +|====== +| *Pattern* | `%{ts->} %{level}` +| *Input* | 1998-08-10T17:15:42,466{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}WARN +| *Result* a| +* ts = 1998-08-10T17:15:42,466 +* level = WARN +|====== + +The right padding modifier may be used with an empty key to help skip unwanted data. For example, the same input string, but wrapped with brackets requires the use of an empty right padded key to achieve the same result. + +Right padding modifier with empty key example +|====== +| *Pattern* | `[%{ts}]%{->}[%{level}]` +| *Input* | [1998-08-10T17:15:42,466]{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}[WARN] +| *Result* a| +* ts = 1998-08-10T17:15:42,466 +* level = WARN +|====== + +===== Append modifier (`+`) +[[dissect-modifier-append-key]] +Dissect supports appending two or more results together for the output. +Values are appended left to right. An append separator can be specified. +In this example the append_separator is defined as a space. + +Append modifier example +|====== +| *Pattern* | `%{+name} %{+name} %{+name} %{+name}` +| *Input* | john jacob jingleheimer schmidt +| *Result* a| +* name = john jacob jingleheimer schmidt +|====== + +===== Append with order modifier (`+` and `/n`) +[[dissect-modifier-append-key-with-order]] +Dissect supports appending two or more results together for the output. +Values are appended based on the order defined (`/n`). An append separator can be specified. +In this example the append_separator is defined as a comma. 
+ +Append with order modifier example +|====== +| *Pattern* | `%{+name/2} %{+name/4} %{+name/3} %{+name/1}` +| *Input* | john jacob jingleheimer schmidt +| *Result* a| +* name = schmidt,john,jingleheimer,jacob +|====== + +===== Named skip key (`?`) +[[dissect-modifier-named-skip-key]] +Dissect supports ignoring matches in the final result. This can be done with an empty key `%{}`, but for readability +it may be desired to give that empty key a name. + +Named skip key modifier example +|====== +| *Pattern* | `%{clientip} %{?ident} %{?auth} [%{@timestamp}]` +| *Input* | 1.2.3.4 - - [30/Apr/1998:22:00:52 +0000] +| *Result* a| +* ip = 1.2.3.4 +* @timestamp = 30/Apr/1998:22:00:52 +0000 +|====== + +===== Reference keys (`*` and `&`) +[[dissect-modifier-reference-keys]] +Dissect support using parsed values as the key/value pairings for the structured content. Imagine a system that +partially logs in key/value pairs. Reference keys allow you to maintain that key/value relationship. + +Reference key modifier example +|====== +| *Pattern* | `[%{ts}] [%{level}] %{*p1}:%{&p1} %{*p2}:%{&p2}` +| *Input* | [2018-08-10T17:15:42,466] [ERR] ip:1.2.3.4 error:REFUSED +| *Result* a| +* ts = 1998-08-10T17:15:42,466 +* level = ERR +* ip = 1.2.3.4 +* error = REFUSED +|====== diff --git a/docs/reference/ingest/processors/dot-expand.asciidoc b/docs/reference/ingest/processors/dot-expand.asciidoc new file mode 100644 index 0000000000000..b3322c96a25f8 --- /dev/null +++ b/docs/reference/ingest/processors/dot-expand.asciidoc @@ -0,0 +1,119 @@ +[[dot-expand-processor]] +=== Dot Expander Processor + +Expands a field with dots into an object field. This processor allows fields +with dots in the name to be accessible by other processors in the pipeline. +Otherwise these <> can't be accessed by any processor. + +[[dot-expender-options]] +.Dot Expand Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to expand into an object field +| `path` | no | - | The field that contains the field to expand. Only required if the field to expand is part another object field, because the `field` option can only understand leaf fields. +include::common-options.asciidoc[] +|====== + +[source,js] +-------------------------------------------------- +{ + "dot_expander": { + "field": "foo.bar" + } +} +-------------------------------------------------- +// NOTCONSOLE + +For example the dot expand processor would turn this document: + +[source,js] +-------------------------------------------------- +{ + "foo.bar" : "value" +} +-------------------------------------------------- +// NOTCONSOLE + +into: + +[source,js] +-------------------------------------------------- +{ + "foo" : { + "bar" : "value" + } +} +-------------------------------------------------- +// NOTCONSOLE + +If there is already a `bar` field nested under `foo` then +this processor merges the `foo.bar` field into it. If the field is +a scalar value then it will turn that field into an array field. 
+ +For example, the following document: + +[source,js] +-------------------------------------------------- +{ + "foo.bar" : "value2", + "foo" : { + "bar" : "value1" + } +} +-------------------------------------------------- +// NOTCONSOLE + +is transformed by the `dot_expander` processor into: + +[source,js] +-------------------------------------------------- +{ + "foo" : { + "bar" : ["value1", "value2"] + } +} +-------------------------------------------------- +// NOTCONSOLE + +If any field outside of the leaf field conflicts with a pre-existing field of the same name, +then that field needs to be renamed first. + +Consider the following document: + +[source,js] +-------------------------------------------------- +{ + "foo": "value1", + "foo.bar": "value2" +} +-------------------------------------------------- +// NOTCONSOLE + +Then the `foo` needs to be renamed first before the `dot_expander` +processor is applied. So in order for the `foo.bar` field to properly +be expanded into the `bar` field under the `foo` field the following +pipeline should be used: + +[source,js] +-------------------------------------------------- +{ + "processors" : [ + { + "rename" : { + "field" : "foo", + "target_field" : "foo.bar"" + } + }, + { + "dot_expander": { + "field": "foo.bar" + } + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + +The reason for this is that Ingest doesn't know how to automatically cast +a scalar field to an object field. diff --git a/docs/reference/ingest/processors/drop.asciidoc b/docs/reference/ingest/processors/drop.asciidoc new file mode 100644 index 0000000000000..e763c13be2c82 --- /dev/null +++ b/docs/reference/ingest/processors/drop.asciidoc @@ -0,0 +1,22 @@ +[[drop-processor]] +=== Drop Processor +Drops the document without raising any errors. This is useful to prevent the document from +getting indexed based on some condition. + +[[drop-options]] +.Drop Options +[options="header"] +|====== +| Name | Required | Default | Description +include::common-options.asciidoc[] +|====== + +[source,js] +-------------------------------------------------- +{ + "drop": { + "if" : "ctx.network_name == 'Guest'" + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/ingest/processors/fail.asciidoc b/docs/reference/ingest/processors/fail.asciidoc new file mode 100644 index 0000000000000..c33c0b754fa75 --- /dev/null +++ b/docs/reference/ingest/processors/fail.asciidoc @@ -0,0 +1,25 @@ +[[fail-processor]] +=== Fail Processor +Raises an exception. This is useful for when +you expect a pipeline to fail and want to relay a specific message +to the requester. + +[[fail-options]] +.Fail Options +[options="header"] +|====== +| Name | Required | Default | Description +| `message` | yes | - | The error message thrown by the processor. Supports <>. +include::common-options.asciidoc[] +|====== + +[source,js] +-------------------------------------------------- +{ + "fail": { + "if" : "ctx.tags.contains('production') != true", + "message": "The production tag is not present, found tags: {{tags}}" + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/ingest/processors/foreach.asciidoc b/docs/reference/ingest/processors/foreach.asciidoc new file mode 100644 index 0000000000000..3a341f60470bb --- /dev/null +++ b/docs/reference/ingest/processors/foreach.asciidoc @@ -0,0 +1,160 @@ +[[foreach-processor]] +=== Foreach Processor + +Processes elements in an array of unknown length. 
+ +All processors can operate on elements inside an array, but if all elements of an array need to +be processed in the same way, defining a processor for each element becomes cumbersome and tricky +because it is likely that the number of elements in an array is unknown. For this reason the `foreach` +processor exists. By specifying the field holding array elements and a processor that +defines what should happen to each element, array fields can easily be preprocessed. + +A processor inside the foreach processor works in the array element context and puts that in the ingest metadata +under the `_ingest._value` key. If the array element is a json object it holds all immediate fields of that json object. +and if the nested object is a value is `_ingest._value` just holds that value. Note that if a processor prior to the +`foreach` processor used `_ingest._value` key then the specified value will not be available to the processor inside +the `foreach` processor. The `foreach` processor does restore the original value, so that value is available to processors +after the `foreach` processor. + +Note that any other field from the document are accessible and modifiable like with all other processors. This processor +just puts the current array element being read into `_ingest._value` ingest metadata attribute, so that it may be +pre-processed. + +If the `foreach` processor fails to process an element inside the array, and no `on_failure` processor has been specified, +then it aborts the execution and leaves the array unmodified. + +[[foreach-options]] +.Foreach Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The array field +| `processor` | yes | - | The processor to execute against each field +| `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::common-options.asciidoc[] +|====== + +Assume the following document: + +[source,js] +-------------------------------------------------- +{ + "values" : ["foo", "bar", "baz"] +} +-------------------------------------------------- +// NOTCONSOLE + +When this `foreach` processor operates on this sample document: + +[source,js] +-------------------------------------------------- +{ + "foreach" : { + "field" : "values", + "processor" : { + "uppercase" : { + "field" : "_ingest._value" + } + } + } +} +-------------------------------------------------- +// NOTCONSOLE + +Then the document will look like this after preprocessing: + +[source,js] +-------------------------------------------------- +{ + "values" : ["FOO", "BAR", "BAZ"] +} +-------------------------------------------------- +// NOTCONSOLE + +Let's take a look at another example: + +[source,js] +-------------------------------------------------- +{ + "persons" : [ + { + "id" : "1", + "name" : "John Doe" + }, + { + "id" : "2", + "name" : "Jane Doe" + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + +In this case, the `id` field needs to be removed, +so the following `foreach` processor is used: + +[source,js] +-------------------------------------------------- +{ + "foreach" : { + "field" : "persons", + "processor" : { + "remove" : { + "field" : "_ingest._value.id" + } + } + } +} +-------------------------------------------------- +// NOTCONSOLE + +After preprocessing the result is: + +[source,js] +-------------------------------------------------- +{ + "persons" : [ + { + "name" : "John Doe" + }, + { + "name" : "Jane 
Doe"
+    }
+  ]
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+The wrapped processor can have an `on_failure` definition.
+For example, the `id` field may not exist on all person objects.
+Instead of failing the index request, you can use an `on_failure`
+block to send the document to the 'failure_index' index for later inspection:
+
+[source,js]
+--------------------------------------------------
+{
+  "foreach" : {
+    "field" : "persons",
+    "processor" : {
+      "remove" : {
+        "field" : "_value.id",
+        "on_failure" : [
+          {
+            "set" : {
+              "field" : "_index",
+              "value" : "failure_index"
+            }
+          }
+        ]
+      }
+    }
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+In this example, if the `remove` processor does fail, then
+the array elements that have been processed thus far will
+be updated.
+
+Another advanced example can be found in the {plugins}/ingest-attachment-with-arrays.html[attachment processor documentation].
diff --git a/docs/reference/ingest/processors/grok.asciidoc b/docs/reference/ingest/processors/grok.asciidoc
new file mode 100644
index 0000000000000..315caff0dc6e4
--- /dev/null
+++ b/docs/reference/ingest/processors/grok.asciidoc
@@ -0,0 +1,323 @@
+[[grok-processor]]
+=== Grok Processor
+
+Extracts structured fields out of a single text field within a document. You choose which field to
+extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular
+expression that supports aliased expressions that can be reused.
+
+This tool is perfect for syslog logs, apache and other webserver logs, mysql logs, and in general, any log format
+that is generally written for humans and not computer consumption.
+This processor comes packaged with many
+https://github.com/elastic/elasticsearch/blob/{branch}/libs/grok/src/main/resources/patterns[reusable patterns].
+
+If you need help building patterns to match your logs, you will find the {kibana-ref}/xpack-grokdebugger.html[Grok Debugger] tool quite useful! The Grok Debugger is an {xpack} feature under the Basic License and is therefore *free to use*. The Grok Constructor is also a useful tool.
+
+[[grok-basics]]
+==== Grok Basics
+
+Grok sits on top of regular expressions, so any regular expressions are valid in grok as well.
+The regular expression library is Oniguruma, and you can see the full supported regexp syntax
+https://github.com/kkos/oniguruma/blob/master/doc/RE[on the Oniguruma site].
+
+Grok works by leveraging this regular expression language to allow naming existing patterns and combining them into more
+complex patterns that match your fields.
+
+The syntax for reusing a grok pattern comes in three forms: `%{SYNTAX:SEMANTIC}`, `%{SYNTAX}`, `%{SYNTAX:SEMANTIC:TYPE}`.
+
+The `SYNTAX` is the name of the pattern that will match your text. For example, `3.44` will be matched by the `NUMBER`
+pattern and `55.3.244.1` will be matched by the `IP` pattern. The syntax is how you match. `NUMBER` and `IP` are both
+patterns that are provided within the default patterns set.
+
+The `SEMANTIC` is the identifier you give to the piece of text being matched. For example, `3.44` could be the
+duration of an event, so you could call it simply `duration`. Further, a string `55.3.244.1` might identify
+the `client` making a request.
+
+The `TYPE` is the type to which you wish to cast your named field. `int`, `long`, `double`, `float` and `boolean` are supported types for coercion.
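+
+For instance, the third form can be used to coerce a captured value. As an illustrative sketch (the semantic names
+`bytes` and `client` are assumptions, not fixed identifiers), the following pattern captures a number into a `bytes`
+field as an integer rather than a string:
+
+[source,txt]
+--------------------------------------------------
+%{NUMBER:bytes:int} %{IP:client}
+--------------------------------------------------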
+ +For example, you might want to match the following text: + +[source,txt] +-------------------------------------------------- +3.44 55.3.244.1 +-------------------------------------------------- + +You may know that the message in the example is a number followed by an IP address. You can match this text by using the following +Grok expression. + +[source,txt] +-------------------------------------------------- +%{NUMBER:duration} %{IP:client} +-------------------------------------------------- + +[[using-grok]] +==== Using the Grok Processor in a Pipeline + +[[grok-options]] +.Grok Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to use for grok expression parsing +| `patterns` | yes | - | An ordered list of grok expression to match and extract named captures with. Returns on the first expression in the list that matches. +| `pattern_definitions` | no | - | A map of pattern-name and pattern tuples defining custom patterns to be used by the current processor. Patterns matching existing names will override the pre-existing definition. +| `trace_match` | no | false | when true, `_ingest._grok_match_index` will be inserted into your matched document's metadata with the index into the pattern found in `patterns` that matched. +| `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::common-options.asciidoc[] +|====== + +Here is an example of using the provided patterns to extract out and name structured fields from a string field in +a document. + +[source,js] +-------------------------------------------------- +{ + "message": "55.3.244.1 GET /index.html 15824 0.043" +} +-------------------------------------------------- +// NOTCONSOLE + +The pattern for this could be: + +[source,txt] +-------------------------------------------------- +%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration} +-------------------------------------------------- + +Here is an example pipeline for processing the above document by using Grok: + +[source,js] +-------------------------------------------------- +{ + "description" : "...", + "processors": [ + { + "grok": { + "field": "message", + "patterns": ["%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}"] + } + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + +This pipeline will insert these named captures as new fields within the document, like so: + +[source,js] +-------------------------------------------------- +{ + "message": "55.3.244.1 GET /index.html 15824 0.043", + "client": "55.3.244.1", + "method": "GET", + "request": "/index.html", + "bytes": 15824, + "duration": "0.043" +} +-------------------------------------------------- +// NOTCONSOLE + +[[custom-patterns]] +==== Custom Patterns + +The Grok processor comes pre-packaged with a base set of pattern. These patterns may not always have +what you are looking for. Pattern have a very basic format. Each entry describes has a name and the pattern itself. + +You can add your own patterns to a processor definition under the `pattern_definitions` option. 
+Here is an example of a pipeline specifying custom pattern definitions: + +[source,js] +-------------------------------------------------- +{ + "description" : "...", + "processors": [ + { + "grok": { + "field": "message", + "patterns": ["my %{FAVORITE_DOG:dog} is colored %{RGB:color}"], + "pattern_definitions" : { + "FAVORITE_DOG" : "beagle", + "RGB" : "RED|GREEN|BLUE" + } + } + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + +[[trace-match]] +==== Providing Multiple Match Patterns + +Sometimes one pattern is not enough to capture the potential structure of a field. Let's assume we +want to match all messages that contain your favorite pet breeds of either cats or dogs. One way to accomplish +this is to provide two distinct patterns that can be matched, instead of one really complicated expression capturing +the same `or` behavior. + +Here is an example of such a configuration executed against the simulate API: + +[source,js] +-------------------------------------------------- +POST _ingest/pipeline/_simulate +{ + "pipeline": { + "description" : "parse multiple patterns", + "processors": [ + { + "grok": { + "field": "message", + "patterns": ["%{FAVORITE_DOG:pet}", "%{FAVORITE_CAT:pet}"], + "pattern_definitions" : { + "FAVORITE_DOG" : "beagle", + "FAVORITE_CAT" : "burmese" + } + } + } + ] +}, +"docs":[ + { + "_source": { + "message": "I love burmese cats!" + } + } + ] +} +-------------------------------------------------- +// CONSOLE + +response: + +[source,js] +-------------------------------------------------- +{ + "docs": [ + { + "doc": { + "_type": "_type", + "_index": "_index", + "_id": "_id", + "_source": { + "message": "I love burmese cats!", + "pet": "burmese" + }, + "_ingest": { + "timestamp": "2016-11-08T19:43:03.850+0000" + } + } + } + ] +} +-------------------------------------------------- +// TESTRESPONSE[s/2016-11-08T19:43:03.850\+0000/$body.docs.0.doc._ingest.timestamp/] + +Both patterns will set the field `pet` with the appropriate match, but what if we want to trace which of our +patterns matched and populated our fields? We can do this with the `trace_match` parameter. Here is the output of +that same pipeline, but with `"trace_match": true` configured: + +//// +Hidden setup for example: +[source,js] +-------------------------------------------------- +POST _ingest/pipeline/_simulate +{ + "pipeline": { + "description" : "parse multiple patterns", + "processors": [ + { + "grok": { + "field": "message", + "patterns": ["%{FAVORITE_DOG:pet}", "%{FAVORITE_CAT:pet}"], + "trace_match": true, + "pattern_definitions" : { + "FAVORITE_DOG" : "beagle", + "FAVORITE_CAT" : "burmese" + } + } + } + ] +}, +"docs":[ + { + "_source": { + "message": "I love burmese cats!" + } + } + ] +} +-------------------------------------------------- +// CONSOLE +//// + +[source,js] +-------------------------------------------------- +{ + "docs": [ + { + "doc": { + "_type": "_type", + "_index": "_index", + "_id": "_id", + "_source": { + "message": "I love burmese cats!", + "pet": "burmese" + }, + "_ingest": { + "_grok_match_index": "1", + "timestamp": "2016-11-08T19:43:03.850+0000" + } + } + } + ] +} +-------------------------------------------------- +// TESTRESPONSE[s/2016-11-08T19:43:03.850\+0000/$body.docs.0.doc._ingest.timestamp/] + +In the above response, you can see that the index of the pattern that matched was `"1"`. This is to say that it was the +second (index starts at zero) pattern in `patterns` to match. 
+
+This trace metadata enables debugging which of the patterns matched. This information is stored in the ingest
+metadata and will not be indexed.
+
+[[grok-processor-rest-get]]
+==== Retrieving patterns from REST endpoint
+
+The Grok Processor comes packaged with its own REST endpoint for retrieving which patterns the processor is packaged with.
+
+[source,js]
+--------------------------------------------------
+GET _ingest/processor/grok
+--------------------------------------------------
+// CONSOLE
+
+The above request will return a response body containing a key-value representation of the built-in patterns dictionary.
+
+[source,js]
+--------------------------------------------------
+{
+  "patterns" : {
+    "BACULA_CAPACITY" : "%{INT}{1,3}(,%{INT}{3})*",
+    "PATH" : "(?:%{UNIXPATH}|%{WINPATH})",
+    ...
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+This can be useful to reference as the built-in patterns change across versions.
+
+[[grok-watchdog]]
+==== Grok watchdog
+
+Grok expressions that take too long to execute are interrupted and
+the grok processor then fails with an exception. The grok
+processor has a watchdog thread that determines when evaluation of
+a grok expression takes too long and is controlled by the following
+settings:
+
+[[grok-watchdog-options]]
+.Grok watchdog settings
+[options="header"]
+|======
+| Name | Default | Description
+| `ingest.grok.watchdog.interval` | 1s | How often to check whether there are grok evaluations that take longer than the maximum allowed execution time.
+| `ingest.grok.watchdog.max_execution_time` | 1s | The maximum allowed execution time of a grok expression evaluation.
+|======
diff --git a/docs/reference/ingest/processors/gsub.asciidoc b/docs/reference/ingest/processors/gsub.asciidoc
new file mode 100644
index 0000000000000..f6919eb1e95f1
--- /dev/null
+++ b/docs/reference/ingest/processors/gsub.asciidoc
@@ -0,0 +1,29 @@
+[[gsub-processor]]
+=== Gsub Processor
+Converts a string field by applying a regular expression and a replacement.
+If the field is not a string, the processor will throw an exception.
+
+[[gsub-options]]
+.Gsub Options
+[options="header"]
+|======
+| Name | Required | Default | Description
+| `field` | yes | - | The field to apply the replacement to
+| `pattern` | yes | - | The pattern to be replaced
+| `replacement` | yes | - | The string to replace the matching patterns with
+| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place
+| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document
+include::common-options.asciidoc[]
+|======
+
+[source,js]
+--------------------------------------------------
+{
+  "gsub": {
+    "field": "field1",
+    "pattern": "\\.",
+    "replacement": "-"
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
diff --git a/docs/reference/ingest/processors/join.asciidoc b/docs/reference/ingest/processors/join.asciidoc
new file mode 100644
index 0000000000000..a31f0eee7c16f
--- /dev/null
+++ b/docs/reference/ingest/processors/join.asciidoc
@@ -0,0 +1,26 @@
+[[join-processor]]
+=== Join Processor
+Joins each element of an array into a single string using a separator character between each element.
+Throws an error when the field is not an array.
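+
+For instance (a minimal sketch with assumed field values, not taken from the reference itself), a document such as:
+
+[source,js]
+--------------------------------------------------
+{
+  "joined_array_field": ["apple", "banana", "cherry"]
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+processed with a `-` separator would end up as:
+
+[source,js]
+--------------------------------------------------
+{
+  "joined_array_field": "apple-banana-cherry"
+}
+--------------------------------------------------
+// NOTCONSOLE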
+ +[[join-options]] +.Join Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to be separated +| `separator` | yes | - | The separator character +| `target_field` | no | `field` | The field to assign the joined value to, by default `field` is updated in-place +include::common-options.asciidoc[] +|====== + +[source,js] +-------------------------------------------------- +{ + "join": { + "field": "joined_array_field", + "separator": "-" + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/ingest/processors/json.asciidoc b/docs/reference/ingest/processors/json.asciidoc new file mode 100644 index 0000000000000..bbbd18e8337a4 --- /dev/null +++ b/docs/reference/ingest/processors/json.asciidoc @@ -0,0 +1,88 @@ +[[json-processor]] +=== JSON Processor +Converts a JSON string into a structured JSON object. + +[[json-options]] +.Json Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to be parsed +| `target_field` | no | `field` | The field to insert the converted structured object into +| `add_to_root` | no | false | Flag that forces the serialized json to be injected into the top level of the document. `target_field` must not be set when this option is chosen. +include::common-options.asciidoc[] +|====== + +All JSON-supported types will be parsed (null, boolean, number, array, object, string). + +Suppose you provide this configuration of the `json` processor: + +[source,js] +-------------------------------------------------- +{ + "json" : { + "field" : "string_source", + "target_field" : "json_target" + } +} +-------------------------------------------------- +// NOTCONSOLE + +If the following document is processed: + +[source,js] +-------------------------------------------------- +{ + "string_source": "{\"foo\": 2000}" +} +-------------------------------------------------- +// NOTCONSOLE + +after the `json` processor operates on it, it will look like: + +[source,js] +-------------------------------------------------- +{ + "string_source": "{\"foo\": 2000}", + "json_target": { + "foo": 2000 + } +} +-------------------------------------------------- +// NOTCONSOLE + +If the following configuration is provided, omitting the optional `target_field` setting: +[source,js] +-------------------------------------------------- +{ + "json" : { + "field" : "source_and_target" + } +} +-------------------------------------------------- +// NOTCONSOLE + +then after the `json` processor operates on this document: + +[source,js] +-------------------------------------------------- +{ + "source_and_target": "{\"foo\": 2000}" +} +-------------------------------------------------- +// NOTCONSOLE + +it will look like: + +[source,js] +-------------------------------------------------- +{ + "source_and_target": { + "foo": 2000 + } +} +-------------------------------------------------- +// NOTCONSOLE + +This illustrates that, unless it is explicitly named in the processor configuration, the `target_field` +is the same field provided in the required `field` configuration. 
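+
+The `add_to_root` option can be used instead of `target_field` when the parsed keys should become top-level fields.
+As a minimal sketch (not one of the reference examples above), with this configuration:
+
+[source,js]
+--------------------------------------------------
+{
+  "json" : {
+    "field" : "string_source",
+    "add_to_root" : true
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+the document `{ "string_source": "{\"foo\": 2000}" }` would gain a top-level `foo` field with the value `2000`.
+Note that `target_field` must not be set when `add_to_root` is used.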
diff --git a/docs/reference/ingest/processors/kv.asciidoc b/docs/reference/ingest/processors/kv.asciidoc new file mode 100644 index 0000000000000..3f4350a3301ed --- /dev/null +++ b/docs/reference/ingest/processors/kv.asciidoc @@ -0,0 +1,37 @@ +[[kv-processor]] +=== KV Processor +This processor helps automatically parse messages (or specific event fields) which are of the foo=bar variety. + +For example, if you have a log message which contains `ip=1.2.3.4 error=REFUSED`, you can parse those automatically by configuring: + + +[source,js] +-------------------------------------------------- +{ + "kv": { + "field": "message", + "field_split": " ", + "value_split": "=" + } +} +-------------------------------------------------- +// NOTCONSOLE + +[[kv-options]] +.Kv Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to be parsed +| `field_split` | yes | - | Regex pattern to use for splitting key-value pairs +| `value_split` | yes | - | Regex pattern to use for splitting the key from the value within a key-value pair +| `target_field` | no | `null` | The field to insert the extracted keys into. Defaults to the root of the document +| `include_keys` | no | `null` | List of keys to filter and insert into document. Defaults to including all keys +| `exclude_keys` | no | `null` | List of keys to exclude from document +| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +| `prefix` | no | `null` | Prefix to be added to extracted keys +| `trim_key` | no | `null` | String of characters to trim from extracted keys +| `trim_value` | no | `null` | String of characters to trim from extracted values +| `strip_brackets` | no | `false` | If `true` strip brackets `()`, `<>`, `[]` as well as quotes `'` and `"` from extracted values +include::common-options.asciidoc[] +|====== diff --git a/docs/reference/ingest/processors/lowercase.asciidoc b/docs/reference/ingest/processors/lowercase.asciidoc new file mode 100644 index 0000000000000..878b74ed9ba24 --- /dev/null +++ b/docs/reference/ingest/processors/lowercase.asciidoc @@ -0,0 +1,24 @@ +[[lowercase-processor]] +=== Lowercase Processor +Converts a string to its lowercase equivalent. + +[[lowercase-options]] +.Lowercase Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to make lowercase +| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place +| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::common-options.asciidoc[] +|====== + +[source,js] +-------------------------------------------------- +{ + "lowercase": { + "field": "foo" + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/ingest/processors/pipeline.asciidoc b/docs/reference/ingest/processors/pipeline.asciidoc new file mode 100644 index 0000000000000..1b138123a7218 --- /dev/null +++ b/docs/reference/ingest/processors/pipeline.asciidoc @@ -0,0 +1,114 @@ +[[pipeline-processor]] +=== Pipeline Processor +Executes another pipeline. 
+ +[[pipeline-options]] +.Pipeline Options +[options="header"] +|====== +| Name | Required | Default | Description +| `name` | yes | - | The name of the pipeline to execute +include::common-options.asciidoc[] +|====== + +[source,js] +-------------------------------------------------- +{ + "pipeline": { + "name": "inner-pipeline" + } +} +-------------------------------------------------- +// NOTCONSOLE + +An example of using this processor for nesting pipelines would be: + +Define an inner pipeline: + +[source,js] +-------------------------------------------------- +PUT _ingest/pipeline/pipelineA +{ + "description" : "inner pipeline", + "processors" : [ + { + "set" : { + "field": "inner_pipeline_set", + "value": "inner" + } + } + ] +} +-------------------------------------------------- +// CONSOLE + +Define another pipeline that uses the previously defined inner pipeline: + +[source,js] +-------------------------------------------------- +PUT _ingest/pipeline/pipelineB +{ + "description" : "outer pipeline", + "processors" : [ + { + "pipeline" : { + "name": "pipelineA" + } + }, + { + "set" : { + "field": "outer_pipeline_set", + "value": "outer" + } + } + ] +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +Now indexing a document while applying the outer pipeline will see the inner pipeline executed +from the outer pipeline: + +[source,js] +-------------------------------------------------- +PUT /myindex/_doc/1?pipeline=pipelineB +{ + "field": "value" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +Response from the index request: + +[source,js] +-------------------------------------------------- +{ + "_index": "myindex", + "_type": "_doc", + "_id": "1", + "_version": 1, + "result": "created", + "_shards": { + "total": 2, + "successful": 1, + "failed": 0 + }, + "_seq_no": 66, + "_primary_term": 1, +} +-------------------------------------------------- +// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] + +Indexed document: + +[source,js] +-------------------------------------------------- +{ + "field": "value", + "inner_pipeline_set": "inner", + "outer_pipeline_set": "outer" +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/ingest/processors/remove.asciidoc b/docs/reference/ingest/processors/remove.asciidoc new file mode 100644 index 0000000000000..a3f539a6cc347 --- /dev/null +++ b/docs/reference/ingest/processors/remove.asciidoc @@ -0,0 +1,37 @@ +[[remove-processor]] +=== Remove Processor +Removes existing fields. If one field doesn't exist, an exception will be thrown. + +[[remove-options]] +.Remove Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | Fields to be removed. Supports <>. 
+| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::common-options.asciidoc[] +|====== + +Here is an example to remove a single field: + +[source,js] +-------------------------------------------------- +{ + "remove": { + "field": "user_agent" + } +} +-------------------------------------------------- +// NOTCONSOLE + +To remove multiple fields, you can use the following query: + +[source,js] +-------------------------------------------------- +{ + "remove": { + "field": ["user_agent", "url"] + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/ingest/processors/rename.asciidoc b/docs/reference/ingest/processors/rename.asciidoc new file mode 100644 index 0000000000000..7f690367fa388 --- /dev/null +++ b/docs/reference/ingest/processors/rename.asciidoc @@ -0,0 +1,25 @@ +[[rename-processor]] +=== Rename Processor +Renames an existing field. If the field doesn't exist or the new name is already used, an exception will be thrown. + +[[rename-options]] +.Rename Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to be renamed. Supports <>. +| `target_field` | yes | - | The new name of the field. Supports <>. +| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document +include::common-options.asciidoc[] +|====== + +[source,js] +-------------------------------------------------- +{ + "rename": { + "field": "provider", + "target_field": "cloud.provider" + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/ingest/processors/script.asciidoc b/docs/reference/ingest/processors/script.asciidoc new file mode 100644 index 0000000000000..4a1ab5306d040 --- /dev/null +++ b/docs/reference/ingest/processors/script.asciidoc @@ -0,0 +1,108 @@ +[[script-processor]] +=== Script Processor + +Allows inline and stored scripts to be executed within ingest pipelines. + +See <> to learn more about writing scripts. The Script Processor +leverages caching of compiled scripts for improved performance. Since the +script specified within the processor is potentially re-compiled per document, it is important +to understand how script caching works. To learn more about +caching see <>. + +[[script-options]] +.Script Options +[options="header"] +|====== +| Name | Required | Default | Description +| `lang` | no | "painless" | The scripting language +| `id` | no | - | The stored script id to refer to +| `source` | no | - | An inline script to be executed +| `params` | no | - | Script Parameters +include::common-options.asciidoc[] +|====== + +One of `id` or `source` options must be provided in order to properly reference a script to execute. + +You can access the current ingest document from within the script context by using the `ctx` variable. 
+ +The following example sets a new field called `field_a_plus_b_times_c` to be the sum of two existing +numeric fields `field_a` and `field_b` multiplied by the parameter param_c: + +[source,js] +-------------------------------------------------- +{ + "script": { + "lang": "painless", + "source": "ctx.field_a_plus_b_times_c = (ctx.field_a + ctx.field_b) * params.param_c", + "params": { + "param_c": 10 + } + } +} +-------------------------------------------------- +// NOTCONSOLE + +It is possible to use the Script Processor to manipulate document metadata like `_index` and `_type` during +ingestion. Here is an example of an Ingest Pipeline that renames the index and type to `my_index` no matter what +was provided in the original index request: + +[source,js] +-------------------------------------------------- +PUT _ingest/pipeline/my_index +{ + "description": "use index:my_index and type:_doc", + "processors": [ + { + "script": { + "source": """ + ctx._index = 'my_index'; + ctx._type = '_doc'; + """ + } + } + ] +} +-------------------------------------------------- +// CONSOLE + +Using the above pipeline, we can attempt to index a document into the `any_index` index. + +[source,js] +-------------------------------------------------- +PUT any_index/_doc/1?pipeline=my_index +{ + "message": "text" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +The response from the above index request: + +[source,js] +-------------------------------------------------- +{ + "_index": "my_index", + "_type": "_doc", + "_id": "1", + "_version": 1, + "result": "created", + "_shards": { + "total": 2, + "successful": 1, + "failed": 0 + }, + "_seq_no": 89, + "_primary_term": 1, +} +-------------------------------------------------- +// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] + +In the above response, you can see that our document was actually indexed into `my_index` instead of +`any_index`. This type of manipulation is often convenient in pipelines that have various branches of transformation, +and depending on the progress made, indexed into different indices. + +[[set-processor]] +=== Set Processor +Sets one field and associates it with the specified value. If the field already exists, +its value will be replaced with the provided one. diff --git a/docs/reference/ingest/processors/set-security-user.asciidoc b/docs/reference/ingest/processors/set-security-user.asciidoc new file mode 100644 index 0000000000000..738ce97dda9df --- /dev/null +++ b/docs/reference/ingest/processors/set-security-user.asciidoc @@ -0,0 +1,34 @@ +[[ingest-node-set-security-user-processor]] +=== Set Security User Processor +Sets user-related details (such as `username`, `roles`, `email`, `full_name` +and `metadata` ) from the current +authenticated user to the current document by pre-processing the ingest. + +IMPORTANT: Requires an authenticated user for the index request. + +[[set-security-user-options]] +.Set Security User Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to store the user information into. +| `properties` | no | [`username`, `roles`, `email`, `full_name`, `metadata`] | Controls what user related properties are added to the `field`. 
+include::common-options.asciidoc[] +|====== + +The following example adds all user details for the current authenticated user +to the `user` field for all documents that are processed by this pipeline: + +[source,js] +-------------------------------------------------- +{ + "processors" : [ + { + "set_security_user": { + "field": "user" + } + } + ] +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/ingest/processors/set.asciidoc b/docs/reference/ingest/processors/set.asciidoc new file mode 100644 index 0000000000000..564594a05b0d6 --- /dev/null +++ b/docs/reference/ingest/processors/set.asciidoc @@ -0,0 +1,21 @@ +[[set-options]] +.Set Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to insert, upsert, or update. Supports <>. +| `value` | yes | - | The value to be set for the field. Supports <>. +| `override` | no | true | If processor will update fields with pre-existing non-null-valued field. When set to `false`, such fields will not be touched. +include::common-options.asciidoc[] +|====== + +[source,js] +-------------------------------------------------- +{ + "set": { + "field": "host.os.name", + "value": "{{os}}" + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/ingest/processors/sort.asciidoc b/docs/reference/ingest/processors/sort.asciidoc new file mode 100644 index 0000000000000..455b32dca5a84 --- /dev/null +++ b/docs/reference/ingest/processors/sort.asciidoc @@ -0,0 +1,27 @@ +[[sort-processor]] +=== Sort Processor +Sorts the elements of an array ascending or descending. Homogeneous arrays of numbers will be sorted +numerically, while arrays of strings or heterogeneous arrays of strings + numbers will be sorted lexicographically. +Throws an error when the field is not an array. + +[[sort-options]] +.Sort Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to be sorted +| `order` | no | `"asc"` | The sort order to use. Accepts `"asc"` or `"desc"`. +| `target_field` | no | `field` | The field to assign the sorted value to, by default `field` is updated in-place +include::common-options.asciidoc[] +|====== + +[source,js] +-------------------------------------------------- +{ + "sort": { + "field": "array_field_to_sort", + "order": "desc" + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/ingest/processors/split.asciidoc b/docs/reference/ingest/processors/split.asciidoc new file mode 100644 index 0000000000000..7d1487b8ca047 --- /dev/null +++ b/docs/reference/ingest/processors/split.asciidoc @@ -0,0 +1,27 @@ +[[split-processor]] +=== Split Processor +Splits a field into an array using a separator character. Only works on string fields. 
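+
+For example (an illustrative sketch with an assumed field value), splitting `my_field` on a comma separator:
+
+[source,js]
+--------------------------------------------------
+{
+  "split": {
+    "field": "my_field",
+    "separator": ","
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+would turn `{ "my_field": "a,b,c" }` into `{ "my_field": ["a", "b", "c"] }`.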
+ +[[split-options]] +.Split Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to split +| `separator` | yes | - | A regex which matches the separator, eg `,` or `\s+` +| `target_field` | no | `field` | The field to assign the split value to, by default `field` is updated in-place +| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document +include::common-options.asciidoc[] +|====== + +[source,js] +-------------------------------------------------- +{ + "split": { + "field": "my_field", + "separator": "\\s+" <1> + } +} +-------------------------------------------------- +// NOTCONSOLE +<1> Treat all consecutive whitespace characters as a single separator diff --git a/docs/reference/ingest/processors/trim.asciidoc b/docs/reference/ingest/processors/trim.asciidoc new file mode 100644 index 0000000000000..7c28767076ecc --- /dev/null +++ b/docs/reference/ingest/processors/trim.asciidoc @@ -0,0 +1,26 @@ +[[trim-processor]] +=== Trim Processor +Trims whitespace from field. + +NOTE: This only works on leading and trailing whitespace. + +[[trim-options]] +.Trim Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The string-valued field to trim whitespace from +| `target_field` | no | `field` | The field to assign the trimmed value to, by default `field` is updated in-place +| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document +include::common-options.asciidoc[] +|====== + +[source,js] +-------------------------------------------------- +{ + "trim": { + "field": "foo" + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/ingest/processors/uppercase.asciidoc b/docs/reference/ingest/processors/uppercase.asciidoc new file mode 100644 index 0000000000000..7565be1c7c303 --- /dev/null +++ b/docs/reference/ingest/processors/uppercase.asciidoc @@ -0,0 +1,24 @@ +[[uppercase-processor]] +=== Uppercase Processor +Converts a string to its uppercase equivalent. 
+ +[[uppercase-options]] +.Uppercase Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to make uppercase +| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place +| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::common-options.asciidoc[] +|====== + +[source,js] +-------------------------------------------------- +{ + "uppercase": { + "field": "foo" + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/ingest/processors/url-decode.asciidoc b/docs/reference/ingest/processors/url-decode.asciidoc new file mode 100644 index 0000000000000..76fc00c80f679 --- /dev/null +++ b/docs/reference/ingest/processors/url-decode.asciidoc @@ -0,0 +1,24 @@ +[[urldecode-processor]] +=== URL Decode Processor +URL-decodes a string + +[[urldecode-options]] +.URL Decode Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to decode +| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place +| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::common-options.asciidoc[] +|====== + +[source,js] +-------------------------------------------------- +{ + "urldecode": { + "field": "my_url_to_decode" + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/licensing/delete-license.asciidoc b/docs/reference/licensing/delete-license.asciidoc index b994e56f0481c..425504f7cba18 100644 --- a/docs/reference/licensing/delete-license.asciidoc +++ b/docs/reference/licensing/delete-license.asciidoc @@ -1,7 +1,10 @@ [role="xpack"] [testenv="basic"] [[delete-license]] -=== Delete License API +=== Delete license API +++++ +Delete license +++++ This API enables you to delete licensing information. diff --git a/docs/reference/licensing/get-basic-status.asciidoc b/docs/reference/licensing/get-basic-status.asciidoc index 5299048a52a59..673bb27e59626 100644 --- a/docs/reference/licensing/get-basic-status.asciidoc +++ b/docs/reference/licensing/get-basic-status.asciidoc @@ -1,7 +1,10 @@ [role="xpack"] [testenv="basic"] [[get-basic-status]] -=== Get Basic Status API +=== Get basic status API +++++ +Get basic status +++++ This API enables you to check the status of your basic license. diff --git a/docs/reference/licensing/get-license.asciidoc b/docs/reference/licensing/get-license.asciidoc index 6052fb7262d1f..3be86bf3e734a 100644 --- a/docs/reference/licensing/get-license.asciidoc +++ b/docs/reference/licensing/get-license.asciidoc @@ -1,7 +1,10 @@ [role="xpack"] [testenv="basic"] [[get-license]] -=== Get License API +=== Get license API +++++ +Get license +++++ This API enables you to retrieve licensing information. diff --git a/docs/reference/licensing/get-trial-status.asciidoc b/docs/reference/licensing/get-trial-status.asciidoc index 59910a5654b41..b81dade1ad850 100644 --- a/docs/reference/licensing/get-trial-status.asciidoc +++ b/docs/reference/licensing/get-trial-status.asciidoc @@ -1,7 +1,10 @@ [role="xpack"] [testenv="basic"] [[get-trial-status]] -=== Get Trial Status API +=== Get trial status API +++++ +Get trial status +++++ This API enables you to check the status of your trial license. 
diff --git a/docs/reference/licensing/start-basic.asciidoc b/docs/reference/licensing/start-basic.asciidoc index 7f53bfbb83a6a..d3c385059627e 100644 --- a/docs/reference/licensing/start-basic.asciidoc +++ b/docs/reference/licensing/start-basic.asciidoc @@ -1,7 +1,10 @@ [role="xpack"] [testenv="basic"] [[start-basic]] -=== Start Basic API +=== Start basic API +++++ +Start basic +++++ This API starts an indefinite basic license. diff --git a/docs/reference/licensing/start-trial.asciidoc b/docs/reference/licensing/start-trial.asciidoc index 3824a81a74076..0afce9ec9d22c 100644 --- a/docs/reference/licensing/start-trial.asciidoc +++ b/docs/reference/licensing/start-trial.asciidoc @@ -1,7 +1,10 @@ [role="xpack"] [testenv="basic"] [[start-trial]] -=== Start Trial API +=== Start trial API +++++ +Start trial +++++ This API starts a 30-day trial license. diff --git a/docs/reference/licensing/update-license.asciidoc b/docs/reference/licensing/update-license.asciidoc index 56ef7acfd289e..0dfe9b2da888a 100644 --- a/docs/reference/licensing/update-license.asciidoc +++ b/docs/reference/licensing/update-license.asciidoc @@ -1,7 +1,10 @@ [role="xpack"] [testenv="basic"] [[update-license]] -=== Update License API +=== Update license API +++++ +Update license +++++ This API enables you to update your license. @@ -40,12 +43,12 @@ https://www.elastic.co/subscriptions. [float] ==== Authorization -If {security} is enabled, you need `manage` cluster privileges to install the -license. +If {es} {security-features} are enabled, you need `manage` cluster privileges to +install the license. -If {security} is enabled and you are installing a gold or platinum license, you -must enable TLS on the transport networking layer before you install the license. -See <>. +If {es} {security-features} are enabled and you are installing a gold or platinum +license, you must enable TLS on the transport networking layer before you +install the license. See <>. 
[float] ==== Examples diff --git a/docs/reference/migration/apis/assistance.asciidoc b/docs/reference/migration/apis/assistance.asciidoc index 5758333d6ebbe..3a220644c16de 100644 --- a/docs/reference/migration/apis/assistance.asciidoc +++ b/docs/reference/migration/apis/assistance.asciidoc @@ -1,7 +1,10 @@ [role="xpack"] [testenv="basic"] [[migration-api-assistance]] -=== Migration Assistance API +=== Migration assistance API +++++ +Migration assistance +++++ The Migration Assistance API analyzes existing indices in the cluster and returns the information about indices that require some changes before the diff --git a/docs/reference/migration/apis/deprecation.asciidoc b/docs/reference/migration/apis/deprecation.asciidoc index 5ff828bfa0c8a..59ba1e97a2e9e 100644 --- a/docs/reference/migration/apis/deprecation.asciidoc +++ b/docs/reference/migration/apis/deprecation.asciidoc @@ -1,7 +1,10 @@ [role="xpack"] [testenv="basic"] [[migration-api-deprecation]] -=== Deprecation Info APIs +=== Deprecation info APIs +++++ +Deprecation info +++++ The deprecation API is to be used to retrieve information about different cluster, node, and index level settings that use deprecated features that will diff --git a/docs/reference/migration/apis/upgrade.asciidoc b/docs/reference/migration/apis/upgrade.asciidoc index 73ae0dac48e00..3545a4441df3e 100644 --- a/docs/reference/migration/apis/upgrade.asciidoc +++ b/docs/reference/migration/apis/upgrade.asciidoc @@ -1,7 +1,10 @@ [role="xpack"] [testenv="basic"] [[migration-api-upgrade]] -=== Migration Upgrade API +=== Migration upgrade API +++++ +Migration upgrade +++++ The Migration Upgrade API performs the upgrade of internal indices to make them compatible with the next major version. diff --git a/docs/reference/migration/migrate_7_0.asciidoc b/docs/reference/migration/migrate_7_0.asciidoc index 45f383435e4bc..9f99604318aa9 100644 --- a/docs/reference/migration/migrate_7_0.asciidoc +++ b/docs/reference/migration/migrate_7_0.asciidoc @@ -11,6 +11,7 @@ See also <> and <>. * <> * <> +* <> * <> * <> * <> @@ -44,6 +45,7 @@ Elasticsearch 6.x in order to be readable by Elasticsearch 7.x. include::migrate_7_0/aggregations.asciidoc[] include::migrate_7_0/analysis.asciidoc[] include::migrate_7_0/cluster.asciidoc[] +include::migrate_7_0/discovery.asciidoc[] include::migrate_7_0/indices.asciidoc[] include::migrate_7_0/mappings.asciidoc[] include::migrate_7_0/search.asciidoc[] diff --git a/docs/reference/migration/migrate_7_0/api.asciidoc b/docs/reference/migration/migrate_7_0/api.asciidoc index 83370a93d556a..3972be43685cc 100644 --- a/docs/reference/migration/migrate_7_0/api.asciidoc +++ b/docs/reference/migration/migrate_7_0/api.asciidoc @@ -88,10 +88,10 @@ When putting stored scripts, support for storing them with the deprecated `templ now removed. Scripts must be stored using the `script` context as mentioned in the documentation. [float] -==== Get Aliases API limitations when {security} is enabled removed +==== Removed Get Aliases API limitations when {security-features} are enabled The behavior and response codes of the get aliases API no longer vary -depending on whether {security} is enabled. Previously a +depending on whether {security-features} are enabled. Previously a 404 - NOT FOUND (IndexNotFoundException) could be returned in case the current user was not authorized for any alias. An empty response with status 200 - OK is now returned instead at all times. 
diff --git a/docs/reference/migration/migrate_7_0/cluster.asciidoc b/docs/reference/migration/migrate_7_0/cluster.asciidoc index 732270706ff3d..bfe7d5df2d094 100644 --- a/docs/reference/migration/migrate_7_0/cluster.asciidoc +++ b/docs/reference/migration/migrate_7_0/cluster.asciidoc @@ -25,12 +25,3 @@ Clusters now have soft limits on the total number of open shards in the cluster based on the number of nodes and the `cluster.max_shards_per_node` cluster setting, to prevent accidental operations that would destabilize the cluster. More information can be found in the <>. - -[float] -==== Discovery configuration is required in production -Production deployments of Elasticsearch now require at least one of the following settings -to be specified in the `elasticsearch.yml` configuration file: - -- `discovery.zen.ping.unicast.hosts` -- `discovery.zen.hosts_provider` -- `cluster.initial_master_nodes` diff --git a/docs/reference/migration/migrate_7_0/discovery.asciidoc b/docs/reference/migration/migrate_7_0/discovery.asciidoc new file mode 100644 index 0000000000000..d568e7fe32c25 --- /dev/null +++ b/docs/reference/migration/migrate_7_0/discovery.asciidoc @@ -0,0 +1,40 @@ +[float] +[[breaking_70_discovery_changes]] +=== Discovery changes + +[float] +==== Cluster bootstrapping is required if discovery is configured + +The first time a cluster is started, `cluster.initial_master_nodes` must be set +to perform cluster bootstrapping. It should contain the names of the +master-eligible nodes in the initial cluster and be defined on every +master-eligible node in the cluster. See <> for an example, and the +<> describes this setting in more detail. + +The `discovery.zen.minimum_master_nodes` setting is required during a rolling +upgrade from 6.x, but can be removed in all other circumstances. + +[float] +==== Removing master-eligible nodes sometimes requires voting exclusions + +If you wish to remove half or more of the master-eligible nodes from a cluster, +you must first exclude the affected nodes from the voting configuration using +the <>. +If you remove fewer than half of the master-eligible nodes at the same time, +voting exclusions are not required. If you remove only master-ineligible nodes +such as data-only nodes or coordinating-only nodes, voting exclusions are not +required. Likewise, if you add nodes to the cluster, voting exclusions are not +required. + +[float] +==== Discovery configuration is required in production + +Production deployments of Elasticsearch now require at least one of the +following settings to be specified in the `elasticsearch.yml` configuration +file: + +- `discovery.zen.ping.unicast.hosts` +- `discovery.zen.hosts_provider` +- `cluster.initial_master_nodes` diff --git a/docs/reference/migration/migrate_7_0/mappings.asciidoc b/docs/reference/migration/migrate_7_0/mappings.asciidoc index f08ea3ab89c1d..e4d38d9a64374 100644 --- a/docs/reference/migration/migrate_7_0/mappings.asciidoc +++ b/docs/reference/migration/migrate_7_0/mappings.asciidoc @@ -54,13 +54,17 @@ An error will now be thrown when unknown configuration options are provided to similarities. Such unknown parameters were ignored before. [float] -==== deprecated `geo_shape` Prefix Tree indexing +==== Changed default `geo_shape` indexing strategy `geo_shape` types now default to using a vector indexing approach based on Lucene's new `LatLonShape` field type. This indexes shapes as a triangular mesh instead of decomposing -them into individual grid cells. 
To index using legacy prefix trees `recursive` or `term` -strategy must be explicitly defined. Note that these strategies are now deprecated and will -be removed in a future version. +them into individual grid cells. To index using legacy prefix trees the `tree` parameter +must be explicitly set to one of `quadtree` or `geohash`. Note that these strategies are +now deprecated and will be removed in a future version. + +IMPORTANT NOTE: If using timed index creation from templates, the `geo_shape` mapping +should also be changed in the template to explicitly define `tree` to one of `geohash` +or `quadtree`. This will ensure compatibility with previously created indexes. [float] ==== deprecated `geo_shape` parameters diff --git a/docs/reference/ml/apis/calendarresource.asciidoc b/docs/reference/ml/apis/calendarresource.asciidoc index 4279102cd35fc..4b3353598ba81 100644 --- a/docs/reference/ml/apis/calendarresource.asciidoc +++ b/docs/reference/ml/apis/calendarresource.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="platinum"] [[ml-calendar-resource]] -=== Calendar Resources +=== Calendar resources A calendar resource has the following properties: diff --git a/docs/reference/ml/apis/close-job.asciidoc b/docs/reference/ml/apis/close-job.asciidoc index 317a67a287a0b..c5f9b5fc2444a 100644 --- a/docs/reference/ml/apis/close-job.asciidoc +++ b/docs/reference/ml/apis/close-job.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-close-job]] -=== Close Jobs API +=== Close jobs API ++++ -Close Jobs +Close jobs ++++ Closes one or more jobs. diff --git a/docs/reference/ml/apis/datafeedresource.asciidoc b/docs/reference/ml/apis/datafeedresource.asciidoc index 228008ddb0375..25d407ed00f70 100644 --- a/docs/reference/ml/apis/datafeedresource.asciidoc +++ b/docs/reference/ml/apis/datafeedresource.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="platinum"] [[ml-datafeed-resource]] -=== {dfeed-cap} Resources +=== {dfeed-cap} resources A {dfeed} resource has the following properties: diff --git a/docs/reference/ml/apis/delete-calendar-event.asciidoc b/docs/reference/ml/apis/delete-calendar-event.asciidoc index 7eb7b56dbb515..68f7a0738375d 100644 --- a/docs/reference/ml/apis/delete-calendar-event.asciidoc +++ b/docs/reference/ml/apis/delete-calendar-event.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-delete-calendar-event]] -=== Delete Events from Calendar API +=== Delete events from calendar API ++++ -Delete Events from Calendar +Delete events from calendar ++++ Deletes scheduled events from a calendar. diff --git a/docs/reference/ml/apis/delete-calendar-job.asciidoc b/docs/reference/ml/apis/delete-calendar-job.asciidoc index 174f29c66736c..118a706d29460 100644 --- a/docs/reference/ml/apis/delete-calendar-job.asciidoc +++ b/docs/reference/ml/apis/delete-calendar-job.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-delete-calendar-job]] -=== Delete Jobs from Calendar API +=== Delete jobs from calendar API ++++ -Delete Jobs from Calendar +Delete jobs from calendar ++++ Deletes jobs from a calendar. diff --git a/docs/reference/ml/apis/delete-calendar.asciidoc b/docs/reference/ml/apis/delete-calendar.asciidoc index 5a7cf23146f02..2707f3175e04c 100644 --- a/docs/reference/ml/apis/delete-calendar.asciidoc +++ b/docs/reference/ml/apis/delete-calendar.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-delete-calendar]] -=== Delete Calendar API +=== Delete calendar API ++++ -Delete Calendar +Delete calendar ++++ Deletes a calendar. 
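For illustration, assuming the 7.x `_ml` endpoints and a placeholder calendar ID of `planned-outages`, a delete calendar request has this shape:

[source,js]
--------------------------------------------------
DELETE _ml/calendars/planned-outages
--------------------------------------------------
// CONSOLE
// TEST[skip:placeholder calendar ID]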
diff --git a/docs/reference/ml/apis/delete-datafeed.asciidoc b/docs/reference/ml/apis/delete-datafeed.asciidoc index da077dfd87bdf..5940d5c70f044 100644 --- a/docs/reference/ml/apis/delete-datafeed.asciidoc +++ b/docs/reference/ml/apis/delete-datafeed.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-delete-datafeed]] -=== Delete {dfeeds-cap} API +=== Delete {dfeeds} API ++++ -Delete {dfeeds-cap} +Delete {dfeeds} ++++ Deletes an existing {dfeed}. diff --git a/docs/reference/ml/apis/delete-expired-data.asciidoc b/docs/reference/ml/apis/delete-expired-data.asciidoc index 83a58179a31bf..8814a1686736e 100644 --- a/docs/reference/ml/apis/delete-expired-data.asciidoc +++ b/docs/reference/ml/apis/delete-expired-data.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-delete-expired-data]] -=== Delete Expired Data API +=== Delete expired data API ++++ -Delete Expired Data +Delete expired data ++++ Deletes expired and unused machine learning data. @@ -14,9 +14,9 @@ Deletes expired and unused machine learning data. ==== Description -Deletes all job results, model snapshots and forecast data that have exceeded their -`retention days` period. -Machine Learning state documents that are not associated with any job are also deleted. +Deletes all job results, model snapshots and forecast data that have exceeded +their `retention days` period. Machine learning state documents that are not +associated with any job are also deleted. ==== Authorization diff --git a/docs/reference/ml/apis/delete-filter.asciidoc b/docs/reference/ml/apis/delete-filter.asciidoc index 5ca578f43dd32..b2cbb7ef2832d 100644 --- a/docs/reference/ml/apis/delete-filter.asciidoc +++ b/docs/reference/ml/apis/delete-filter.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-delete-filter]] -=== Delete Filter API +=== Delete filter API ++++ -Delete Filter +Delete filter ++++ Deletes a filter. diff --git a/docs/reference/ml/apis/delete-forecast.asciidoc b/docs/reference/ml/apis/delete-forecast.asciidoc index a051da31df3c6..133b9105e478a 100644 --- a/docs/reference/ml/apis/delete-forecast.asciidoc +++ b/docs/reference/ml/apis/delete-forecast.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-delete-forecast]] -=== Delete Forecast API +=== Delete forecast API ++++ -Delete Forecast +Delete forecast ++++ Deletes forecasts from a {ml} job. diff --git a/docs/reference/ml/apis/delete-job.asciidoc b/docs/reference/ml/apis/delete-job.asciidoc index b275ffafca73b..a52c434f93cfe 100644 --- a/docs/reference/ml/apis/delete-job.asciidoc +++ b/docs/reference/ml/apis/delete-job.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-delete-job]] -=== Delete Jobs API +=== Delete jobs API ++++ -Delete Jobs +Delete jobs ++++ Deletes an existing anomaly detection job. @@ -19,9 +19,9 @@ Deletes an existing anomaly detection job. All job configuration, model state and results are deleted. IMPORTANT: Deleting a job must be done via this API only. Do not delete the - job directly from the `.ml-*` indices using the Elasticsearch - DELETE Document API. When {security} is enabled, make sure no `write` - privileges are granted to anyone over the `.ml-*` indices. +job directly from the `.ml-*` indices using the Elasticsearch delete document +API. When {es} {security-features} are enabled, make sure no `write` privileges +are granted to anyone over the `.ml-*` indices. Before you can delete a job, you must delete the {dfeeds} that are associated with it. See <>. 
Unless the `force` parameter @@ -47,8 +47,9 @@ separated list. ==== Authorization -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. +If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` +cluster privileges to use this API. +For more information, see {stack-ov}/security-privileges.html[Security Privileges]. ==== Examples diff --git a/docs/reference/ml/apis/delete-snapshot.asciidoc b/docs/reference/ml/apis/delete-snapshot.asciidoc index 5802000546e92..18092ff8e89c1 100644 --- a/docs/reference/ml/apis/delete-snapshot.asciidoc +++ b/docs/reference/ml/apis/delete-snapshot.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-delete-snapshot]] -=== Delete Model Snapshots API +=== Delete model snapshots API ++++ -Delete Model Snapshots +Delete model snapshots ++++ Deletes an existing model snapshot. diff --git a/docs/reference/ml/apis/eventresource.asciidoc b/docs/reference/ml/apis/eventresource.asciidoc index a1e96f5c25a0a..7999c9744aea0 100644 --- a/docs/reference/ml/apis/eventresource.asciidoc +++ b/docs/reference/ml/apis/eventresource.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="platinum"] [[ml-event-resource]] -=== Scheduled Event Resources +=== Scheduled event resources An events resource has the following properties: diff --git a/docs/reference/ml/apis/filterresource.asciidoc b/docs/reference/ml/apis/filterresource.asciidoc index e67c92dc8d096..a9748949ffd58 100644 --- a/docs/reference/ml/apis/filterresource.asciidoc +++ b/docs/reference/ml/apis/filterresource.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="platinum"] [[ml-filter-resource]] -=== Filter Resources +=== Filter resources A filter resource has the following properties: diff --git a/docs/reference/ml/apis/find-file-structure.asciidoc b/docs/reference/ml/apis/find-file-structure.asciidoc index 8ed6a2385651d..ddc72b78d8e86 100644 --- a/docs/reference/ml/apis/find-file-structure.asciidoc +++ b/docs/reference/ml/apis/find-file-structure.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="basic"] [[ml-find-file-structure]] -=== Find File Structure API +=== Find file structure API ++++ -Find File Structure +Find file structure ++++ experimental[] diff --git a/docs/reference/ml/apis/flush-job.asciidoc b/docs/reference/ml/apis/flush-job.asciidoc index b7d72efde016e..e2793b2c1a174 100644 --- a/docs/reference/ml/apis/flush-job.asciidoc +++ b/docs/reference/ml/apis/flush-job.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-flush-job]] -=== Flush Jobs API +=== Flush jobs API ++++ -Flush Jobs +Flush jobs ++++ Forces any buffered data to be processed by the job. diff --git a/docs/reference/ml/apis/forecast.asciidoc b/docs/reference/ml/apis/forecast.asciidoc index 9fa91462c294e..71a7e1db2b185 100644 --- a/docs/reference/ml/apis/forecast.asciidoc +++ b/docs/reference/ml/apis/forecast.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-forecast]] -=== Forecast Jobs API +=== Forecast jobs API ++++ -Forecast Jobs +Forecast jobs ++++ Predicts the future behavior of a time series by using its historical behavior. 
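For illustration, assuming the 7.x `_ml` endpoints and a placeholder job ID of `total-requests`, a forecast request has this shape:

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/total-requests/_forecast
{
  "duration": "3d"
}
--------------------------------------------------
// CONSOLE
// TEST[skip:placeholder job ID]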
diff --git a/docs/reference/ml/apis/get-bucket.asciidoc b/docs/reference/ml/apis/get-bucket.asciidoc index c97a18257f9bb..39c548dd64e8d 100644 --- a/docs/reference/ml/apis/get-bucket.asciidoc +++ b/docs/reference/ml/apis/get-bucket.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-get-bucket]] -=== Get Buckets API +=== Get buckets API ++++ -Get Buckets +Get buckets ++++ Retrieves job results for one or more buckets. diff --git a/docs/reference/ml/apis/get-calendar-event.asciidoc b/docs/reference/ml/apis/get-calendar-event.asciidoc index 05364edbaecd5..a890f67db0d23 100644 --- a/docs/reference/ml/apis/get-calendar-event.asciidoc +++ b/docs/reference/ml/apis/get-calendar-event.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-get-calendar-event]] -=== Get Scheduled Events API +=== Get scheduled events API ++++ -Get Scheduled Events +Get scheduled events ++++ Retrieves information about the scheduled events in diff --git a/docs/reference/ml/apis/get-calendar.asciidoc b/docs/reference/ml/apis/get-calendar.asciidoc index 3f7f2e7072f4d..0bb8a30afaf7f 100644 --- a/docs/reference/ml/apis/get-calendar.asciidoc +++ b/docs/reference/ml/apis/get-calendar.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-get-calendar]] -=== Get Calendars API +=== Get calendars API ++++ -Get Calendars +Get calendars ++++ Retrieves configuration information for calendars. diff --git a/docs/reference/ml/apis/get-category.asciidoc b/docs/reference/ml/apis/get-category.asciidoc index 4307b6a7e2279..1fbfda20eccc0 100644 --- a/docs/reference/ml/apis/get-category.asciidoc +++ b/docs/reference/ml/apis/get-category.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-get-category]] -=== Get Categories API +=== Get categories API ++++ -Get Categories +Get categories ++++ Retrieves job results for one or more categories. diff --git a/docs/reference/ml/apis/get-datafeed-stats.asciidoc b/docs/reference/ml/apis/get-datafeed-stats.asciidoc index d0bf7149b6a55..e43a2f454ca5e 100644 --- a/docs/reference/ml/apis/get-datafeed-stats.asciidoc +++ b/docs/reference/ml/apis/get-datafeed-stats.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-get-datafeed-stats]] -=== Get {dfeed-cap} Statistics API +=== Get {dfeed} statistics API ++++ -Get {dfeed-cap} Statistics +Get {dfeed} statistics ++++ Retrieves usage information for {dfeeds}. diff --git a/docs/reference/ml/apis/get-datafeed.asciidoc b/docs/reference/ml/apis/get-datafeed.asciidoc index 828fcae0a8d55..437a56b86dfbf 100644 --- a/docs/reference/ml/apis/get-datafeed.asciidoc +++ b/docs/reference/ml/apis/get-datafeed.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-get-datafeed]] -=== Get {dfeeds-cap} API +=== Get {dfeeds} API ++++ -Get {dfeeds-cap} +Get {dfeeds} ++++ Retrieves configuration information for {dfeeds}. diff --git a/docs/reference/ml/apis/get-filter.asciidoc b/docs/reference/ml/apis/get-filter.asciidoc index d668d1a355593..07fc25d756a6a 100644 --- a/docs/reference/ml/apis/get-filter.asciidoc +++ b/docs/reference/ml/apis/get-filter.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-get-filter]] -=== Get Filters API +=== Get filters API ++++ -Get Filters +Get filters ++++ Retrieves filters. 
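For illustration, assuming a placeholder filter ID of `safe_domains`, a get filters request has this shape:

[source,js]
--------------------------------------------------
GET _ml/filters/safe_domains
--------------------------------------------------
// CONSOLE
// TEST[skip:placeholder filter ID]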
diff --git a/docs/reference/ml/apis/get-influencer.asciidoc b/docs/reference/ml/apis/get-influencer.asciidoc index 753b85b376f4b..7425a734ed441 100644 --- a/docs/reference/ml/apis/get-influencer.asciidoc +++ b/docs/reference/ml/apis/get-influencer.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-get-influencer]] -=== Get Influencers API +=== Get influencers API ++++ -Get Influencers +Get influencers ++++ Retrieves job results for one or more influencers. diff --git a/docs/reference/ml/apis/get-job-stats.asciidoc b/docs/reference/ml/apis/get-job-stats.asciidoc index d4161474b3d64..7cc6d18b86a13 100644 --- a/docs/reference/ml/apis/get-job-stats.asciidoc +++ b/docs/reference/ml/apis/get-job-stats.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-get-job-stats]] -=== Get Job Statistics API +=== Get job statistics API ++++ -Get Job Statistics +Get job statistics ++++ Retrieves usage information for jobs. diff --git a/docs/reference/ml/apis/get-job.asciidoc b/docs/reference/ml/apis/get-job.asciidoc index 7d6df8175283c..be61d6baea031 100644 --- a/docs/reference/ml/apis/get-job.asciidoc +++ b/docs/reference/ml/apis/get-job.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-get-job]] -=== Get Jobs API +=== Get jobs API ++++ -Get Jobs +Get jobs ++++ Retrieves configuration information for jobs. diff --git a/docs/reference/ml/apis/get-ml-info.asciidoc b/docs/reference/ml/apis/get-ml-info.asciidoc index 2e20fa08e6c58..f692ede711f02 100644 --- a/docs/reference/ml/apis/get-ml-info.asciidoc +++ b/docs/reference/ml/apis/get-ml-info.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[get-ml-info]] -=== Get Machine Learning Info API +=== Get machine learning info API ++++ -Get Machine Learning Info +Get {ml} info ++++ Returns defaults and limits used by machine learning. diff --git a/docs/reference/ml/apis/get-overall-buckets.asciidoc b/docs/reference/ml/apis/get-overall-buckets.asciidoc index 343e8bc9818e8..d8592e6516bbb 100644 --- a/docs/reference/ml/apis/get-overall-buckets.asciidoc +++ b/docs/reference/ml/apis/get-overall-buckets.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-get-overall-buckets]] -=== Get Overall Buckets API +=== Get overall buckets API ++++ -Get Overall Buckets +Get overall buckets ++++ Retrieves overall bucket results that summarize the diff --git a/docs/reference/ml/apis/get-record.asciidoc b/docs/reference/ml/apis/get-record.asciidoc index dfb6b1b14bafd..afc7d2733c872 100644 --- a/docs/reference/ml/apis/get-record.asciidoc +++ b/docs/reference/ml/apis/get-record.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-get-record]] -=== Get Records API +=== Get records API ++++ -Get Records +Get records ++++ Retrieves anomaly records for a job. diff --git a/docs/reference/ml/apis/get-snapshot.asciidoc b/docs/reference/ml/apis/get-snapshot.asciidoc index a5fba52566a29..4935a6e2d238f 100644 --- a/docs/reference/ml/apis/get-snapshot.asciidoc +++ b/docs/reference/ml/apis/get-snapshot.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-get-snapshot]] -=== Get Model Snapshots API +=== Get model snapshots API ++++ -Get Model Snapshots +Get model snapshots ++++ Retrieves information about model snapshots. 
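For illustration, assuming a placeholder job ID of `total-requests`, a get model snapshots request has this shape:

[source,js]
--------------------------------------------------
GET _ml/anomaly_detectors/total-requests/model_snapshots
--------------------------------------------------
// CONSOLE
// TEST[skip:placeholder job ID]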
diff --git a/docs/reference/ml/apis/jobcounts.asciidoc b/docs/reference/ml/apis/jobcounts.asciidoc index e6af7ac569cd7..c2e3aebb1a0a1 100644 --- a/docs/reference/ml/apis/jobcounts.asciidoc +++ b/docs/reference/ml/apis/jobcounts.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="platinum"] [[ml-jobstats]] -=== Job Statistics +=== Job statistics The get job statistics API provides information about the operational progress of a job. diff --git a/docs/reference/ml/apis/jobresource.asciidoc b/docs/reference/ml/apis/jobresource.asciidoc index ec547f4b7b750..751bf33788121 100644 --- a/docs/reference/ml/apis/jobresource.asciidoc +++ b/docs/reference/ml/apis/jobresource.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="platinum"] [[ml-job-resource]] -=== Job Resources +=== Job resources A job resource has the following properties: diff --git a/docs/reference/ml/apis/ml-api.asciidoc b/docs/reference/ml/apis/ml-api.asciidoc index dd8a54f73f634..6cb0dc6ba4093 100644 --- a/docs/reference/ml/apis/ml-api.asciidoc +++ b/docs/reference/ml/apis/ml-api.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="platinum"] [[ml-apis]] -== Machine Learning APIs +== Machine learning APIs You can use the following APIs to perform {ml} activities. See <> for the resource definitions used by the diff --git a/docs/reference/ml/apis/open-job.asciidoc b/docs/reference/ml/apis/open-job.asciidoc index 4a3a068bd57de..08c7b97d9c050 100644 --- a/docs/reference/ml/apis/open-job.asciidoc +++ b/docs/reference/ml/apis/open-job.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-open-job]] -=== Open Jobs API +=== Open jobs API ++++ -Open Jobs +Open jobs ++++ Opens one or more jobs. diff --git a/docs/reference/ml/apis/post-calendar-event.asciidoc b/docs/reference/ml/apis/post-calendar-event.asciidoc index 18481bda3b92b..616f9d704ffdc 100644 --- a/docs/reference/ml/apis/post-calendar-event.asciidoc +++ b/docs/reference/ml/apis/post-calendar-event.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-post-calendar-event]] -=== Add Events to Calendar API +=== Add events to calendar API ++++ -Add Events to Calendar +Add events to calendar ++++ Posts scheduled events in a calendar. diff --git a/docs/reference/ml/apis/post-data.asciidoc b/docs/reference/ml/apis/post-data.asciidoc index 2c07332ee521d..2df0df69e9030 100644 --- a/docs/reference/ml/apis/post-data.asciidoc +++ b/docs/reference/ml/apis/post-data.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-post-data]] -=== Post Data to Jobs API +=== Post data to jobs API ++++ -Post Data to Jobs +Post data to jobs ++++ Sends data to an anomaly detection job for analysis. diff --git a/docs/reference/ml/apis/preview-datafeed.asciidoc b/docs/reference/ml/apis/preview-datafeed.asciidoc index 4b903ddb8af4e..7eca456c981dd 100644 --- a/docs/reference/ml/apis/preview-datafeed.asciidoc +++ b/docs/reference/ml/apis/preview-datafeed.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-preview-datafeed]] -=== Preview {dfeeds-cap} API +=== Preview {dfeeds} API ++++ -Preview {dfeeds-cap} +Preview {dfeeds} ++++ Previews a {dfeed}. @@ -29,16 +29,17 @@ structure of the data that will be passed to the anomaly detection engine. ==== Authorization -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. 
+If {es} {security-features} are enabled, you must have `monitor_ml`, `monitor`, +`manage_ml`, or `manage` cluster privileges to use this API. For more +information, see +{stack-ov}/security-privileges.html[Security Privileges]. ==== Security Integration -When {security} is enabled, the {dfeed} query will be previewed using the -credentials of the user calling the preview {dfeed} API. When the {dfeed} -is started it will run the query using the roles of the last user to +When {es} {security-features} are enabled, the {dfeed} query is previewed using +the credentials of the user calling the preview {dfeed} API. When the {dfeed} +is started it runs the query using the roles of the last user to create or update it. If the two sets of roles differ then the preview may not accurately reflect what the {dfeed} will return when started. To avoid such problems, the same user that creates/updates the {dfeed} should preview diff --git a/docs/reference/ml/apis/put-calendar-job.asciidoc b/docs/reference/ml/apis/put-calendar-job.asciidoc index e7e878a043ea0..cafc5f670627c 100644 --- a/docs/reference/ml/apis/put-calendar-job.asciidoc +++ b/docs/reference/ml/apis/put-calendar-job.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-put-calendar-job]] -=== Add Jobs to Calendar API +=== Add jobs to calendar API ++++ -Add Jobs to Calendar +Add jobs to calendar ++++ Adds a job to a calendar. diff --git a/docs/reference/ml/apis/put-calendar.asciidoc b/docs/reference/ml/apis/put-calendar.asciidoc index 385636b9a4473..9b1e781e3cc06 100644 --- a/docs/reference/ml/apis/put-calendar.asciidoc +++ b/docs/reference/ml/apis/put-calendar.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-put-calendar]] -=== Create Calendar API +=== Create calendar API ++++ -Create Calendar +Create calendar ++++ Instantiates a calendar. diff --git a/docs/reference/ml/apis/put-datafeed.asciidoc b/docs/reference/ml/apis/put-datafeed.asciidoc index 5803cc03876d5..791a51486c801 100644 --- a/docs/reference/ml/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/apis/put-datafeed.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-put-datafeed]] -=== Create {dfeeds-cap} API +=== Create {dfeeds} API ++++ -Create {dfeeds-cap} +Create {dfeeds} ++++ Instantiates a {dfeed}. @@ -88,15 +88,16 @@ see <>. ==== Authorization -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` +cluster privileges to use this API. For more information, see +{stack-ov}/security-privileges.html[Security Privileges]. -==== Security Integration +==== Security integration -When {security} is enabled, your {dfeed} will remember which roles the user who -created it had at the time of creation, and run the query using those same roles. +When {es} {security-features} are enabled, your {dfeed} remembers which roles the +user who created it had at the time of creation and runs the query using those +same roles. ==== Examples diff --git a/docs/reference/ml/apis/put-filter.asciidoc b/docs/reference/ml/apis/put-filter.asciidoc index 7a3202daa5430..abe52dfb13b25 100644 --- a/docs/reference/ml/apis/put-filter.asciidoc +++ b/docs/reference/ml/apis/put-filter.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-put-filter]] -=== Create Filter API +=== Create filter API ++++ -Create Filter +Create filter ++++ Instantiates a filter. 
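For illustration, assuming a placeholder filter ID and item list, a create filter request has this shape:

[source,js]
--------------------------------------------------
PUT _ml/filters/safe_domains
{
  "description": "A list of safe domains",
  "items": ["*.google.com", "wikipedia.org"]
}
--------------------------------------------------
// CONSOLE
// TEST[skip:placeholder filter ID]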
diff --git a/docs/reference/ml/apis/put-job.asciidoc b/docs/reference/ml/apis/put-job.asciidoc index 7022b9b00847d..4abeebee3e47a 100644 --- a/docs/reference/ml/apis/put-job.asciidoc +++ b/docs/reference/ml/apis/put-job.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-put-job]] -=== Create Jobs API +=== Create jobs API ++++ -Create Jobs +Create jobs ++++ Instantiates a job. diff --git a/docs/reference/ml/apis/resultsresource.asciidoc b/docs/reference/ml/apis/resultsresource.asciidoc index 9aac36fc87ae1..8962129c73966 100644 --- a/docs/reference/ml/apis/resultsresource.asciidoc +++ b/docs/reference/ml/apis/resultsresource.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="platinum"] [[ml-results-resource]] -=== Results Resources +=== Results resources Several different result types are created for each job. You can query anomaly results for _buckets_, _influencers_, and _records_ by using the results API. diff --git a/docs/reference/ml/apis/revert-snapshot.asciidoc b/docs/reference/ml/apis/revert-snapshot.asciidoc index dc9ea66b7fa55..b560f7b041206 100644 --- a/docs/reference/ml/apis/revert-snapshot.asciidoc +++ b/docs/reference/ml/apis/revert-snapshot.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-revert-snapshot]] -=== Revert Model Snapshots API +=== Revert model snapshots API ++++ -Revert Model Snapshots +Revert model snapshots ++++ Reverts to a specific snapshot. diff --git a/docs/reference/ml/apis/snapshotresource.asciidoc b/docs/reference/ml/apis/snapshotresource.asciidoc index f068f6d94ed0f..73e2e9b3b8bc1 100644 --- a/docs/reference/ml/apis/snapshotresource.asciidoc +++ b/docs/reference/ml/apis/snapshotresource.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="platinum"] [[ml-snapshot-resource]] -=== Model Snapshot Resources +=== Model snapshot resources Model snapshots are saved to disk periodically. By default, this is occurs approximately every 3 hours to 4 hours and is diff --git a/docs/reference/ml/apis/start-datafeed.asciidoc b/docs/reference/ml/apis/start-datafeed.asciidoc index 790e4027b38ac..2ae92288a2666 100644 --- a/docs/reference/ml/apis/start-datafeed.asciidoc +++ b/docs/reference/ml/apis/start-datafeed.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-start-datafeed]] -=== Start {dfeeds-cap} API +=== Start {dfeeds} API ++++ -Start {dfeeds-cap} +Start {dfeeds} ++++ Starts one or more {dfeeds}. @@ -77,16 +77,16 @@ of the latest processed record. ==== Authorization -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` +cluster privileges to use this API. For more information, see +{stack-ov}/security-privileges.html[Security Privileges]. -==== Security Integration +==== Security integration -When {security} is enabled, your {dfeed} will remember which roles the last -user to create or update it had at the time of creation/update, and run the query -using those same roles. +When {es} {security-features} are enabled, your {dfeed} remembers which roles the +last user to create or update it had at the time of creation/update and runs the +query using those same roles. 
==== Examples diff --git a/docs/reference/ml/apis/stop-datafeed.asciidoc b/docs/reference/ml/apis/stop-datafeed.asciidoc index 13cb04286076b..c021d9ad18d62 100644 --- a/docs/reference/ml/apis/stop-datafeed.asciidoc +++ b/docs/reference/ml/apis/stop-datafeed.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-stop-datafeed]] -=== Stop {dfeeds-cap} API +=== Stop {dfeeds} API ++++ -Stop {dfeeds-cap} +Stop {dfeeds} ++++ Stops one or more {dfeeds}. diff --git a/docs/reference/ml/apis/update-datafeed.asciidoc b/docs/reference/ml/apis/update-datafeed.asciidoc index e8e0a94f596ab..37b489e6ef596 100644 --- a/docs/reference/ml/apis/update-datafeed.asciidoc +++ b/docs/reference/ml/apis/update-datafeed.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-update-datafeed]] -=== Update {dfeeds-cap} API +=== Update {dfeeds} API ++++ -Update {dfeeds-cap} +Update {dfeeds} ++++ Updates certain properties of a {dfeed}. @@ -79,15 +79,16 @@ see <>. ==== Authorization -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` +cluster privileges to use this API. For more information, see +{stack-ov}/security-privileges.html[Security Privileges]. ==== Security Integration -When {security} is enabled, your {dfeed} will remember which roles the user who -updated it had at the time of update, and run the query using those same roles. +When {es} {security-features} are enabled, your {dfeed} remembers which roles the +user who updated it had at the time of update and runs the query using those +same roles. ==== Examples diff --git a/docs/reference/ml/apis/update-filter.asciidoc b/docs/reference/ml/apis/update-filter.asciidoc index 4480563adee2d..45c294a0b8bc6 100644 --- a/docs/reference/ml/apis/update-filter.asciidoc +++ b/docs/reference/ml/apis/update-filter.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-update-filter]] -=== Update Filter API +=== Update filter API ++++ -Update Filter +Update filter ++++ Updates the description of a filter, adds items, or removes items. diff --git a/docs/reference/ml/apis/update-job.asciidoc b/docs/reference/ml/apis/update-job.asciidoc index f56252eada8b8..3382e7fe34675 100644 --- a/docs/reference/ml/apis/update-job.asciidoc +++ b/docs/reference/ml/apis/update-job.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-update-job]] -=== Update Jobs API +=== Update jobs API ++++ -Update Jobs +Update jobs ++++ Updates certain properties of a job. diff --git a/docs/reference/ml/apis/update-snapshot.asciidoc b/docs/reference/ml/apis/update-snapshot.asciidoc index 8cac88fe30224..ffd38f590b1e2 100644 --- a/docs/reference/ml/apis/update-snapshot.asciidoc +++ b/docs/reference/ml/apis/update-snapshot.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-update-snapshot]] -=== Update Model Snapshots API +=== Update model snapshots API ++++ -Update Model Snapshots +Update model snapshots ++++ Updates certain properties of a snapshot. 
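For illustration, assuming placeholder job and snapshot IDs, an update model snapshot request has this shape:

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/total-requests/model_snapshots/1491852978/_update
{
  "description": "Snapshot taken before the maintenance window",
  "retain": true
}
--------------------------------------------------
// CONSOLE
// TEST[skip:placeholder job and snapshot IDs]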
diff --git a/docs/reference/ml/apis/validate-detector.asciidoc b/docs/reference/ml/apis/validate-detector.asciidoc index 94857955ac1ac..0f9fe9902e36e 100644 --- a/docs/reference/ml/apis/validate-detector.asciidoc +++ b/docs/reference/ml/apis/validate-detector.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-valid-detector]] -=== Validate Detectors API +=== Validate detectors API ++++ -Validate Detectors +Validate detectors ++++ Validates detector configuration information. diff --git a/docs/reference/ml/apis/validate-job.asciidoc b/docs/reference/ml/apis/validate-job.asciidoc index e06f3837a55ab..5fbfb62dd28a6 100644 --- a/docs/reference/ml/apis/validate-job.asciidoc +++ b/docs/reference/ml/apis/validate-job.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="platinum"] [[ml-valid-job]] -=== Validate Jobs API +=== Validate jobs API ++++ -Validate Jobs +Validate jobs ++++ Validates job configuration information. diff --git a/docs/reference/modules.asciidoc b/docs/reference/modules.asciidoc index 1dbe72c3b86e4..f8b6c2784a075 100644 --- a/docs/reference/modules.asciidoc +++ b/docs/reference/modules.asciidoc @@ -18,13 +18,13 @@ These settings can be dynamically updated on a live cluster with the The modules in this section are: -<>:: +<>:: - Settings to control where, when, and how shards are allocated to nodes. + How nodes discover each other, elect a master and form a cluster. -<>:: +<>:: - How nodes discover each other to form a cluster. + Settings to control where, when, and how shards are allocated to nodes. <>:: @@ -85,10 +85,10 @@ The modules in this section are: -- -include::modules/cluster.asciidoc[] - include::modules/discovery.asciidoc[] +include::modules/cluster.asciidoc[] + include::modules/gateway.asciidoc[] include::modules/http.asciidoc[] diff --git a/docs/reference/modules/cluster.asciidoc b/docs/reference/modules/cluster.asciidoc index c4b6445292726..810ed7c4a34b4 100644 --- a/docs/reference/modules/cluster.asciidoc +++ b/docs/reference/modules/cluster.asciidoc @@ -1,5 +1,5 @@ [[modules-cluster]] -== Cluster +== Shard allocation and cluster-level routing One of the main roles of the master is to decide which shards to allocate to which nodes, and when to move shards between nodes in order to rebalance the diff --git a/docs/reference/modules/discovery.asciidoc b/docs/reference/modules/discovery.asciidoc index 292748d1d7b90..546c347fa3bb8 100644 --- a/docs/reference/modules/discovery.asciidoc +++ b/docs/reference/modules/discovery.asciidoc @@ -1,30 +1,75 @@ [[modules-discovery]] -== Discovery +== Discovery and cluster formation -The discovery module is responsible for discovering nodes within a -cluster, as well as electing a master node. +The discovery and cluster formation module is responsible for discovering +nodes, electing a master, forming a cluster, and publishing the cluster state +each time it changes. It is integrated with other modules. For example, all +communication between nodes is done using the <> +module. This module is divided into the following sections: -Note, Elasticsearch is a peer to peer based system, nodes communicate -with one another directly if operations are delegated / broadcast. All -the main APIs (index, delete, search) do not communicate with the master -node. The responsibility of the master node is to maintain the global -cluster state, and act if nodes join or leave the cluster by reassigning -shards. 
Each time a cluster state is changed, the state is made known to -the other nodes in the cluster (the manner depends on the actual -discovery implementation). +<>:: -[float] -=== Settings + Discovery is the process where nodes find each other when the master is + unknown, such as when a node has just started up or when the previous + master has failed. -The `cluster.name` allows to create separated clusters from one another. -The default value for the cluster name is `elasticsearch`, though it is -recommended to change this to reflect the logical group name of the -cluster running. +<>:: -include::discovery/azure.asciidoc[] + Bootstrapping a cluster is required when an Elasticsearch cluster starts up + for the very first time. In <>, with no + discovery settings configured, this is automatically performed by the nodes + themselves. As this auto-bootstrapping is + <>, running a node in + <> requires bootstrapping to be + explicitly configured via the + <>. -include::discovery/ec2.asciidoc[] +<>:: -include::discovery/gce.asciidoc[] + It is recommended to have a small and fixed number of master-eligible nodes + in a cluster, and to scale the cluster up and down by adding and removing + master-ineligible nodes only. However there are situations in which it may + be desirable to add or remove some master-eligible nodes to or from a + cluster. This section describes the process for adding or removing + master-eligible nodes, including the extra steps that need to be performed + when removing more than half of the master-eligible nodes at the same time. + +<>:: + + Cluster state publishing is the process by which the elected master node + updates the cluster state on all the other nodes in the cluster. + +<>:: + + The no-master block is put in place when there is no known elected master, + and can be configured to determine which operations should be rejected when + it is in place. + +Advanced settings:: + + There are settings that allow advanced users to influence the + <> and + <> processes. + +<>:: + + This section describes the detailed design behind the master election and + auto-reconfiguration logic. + +include::discovery/discovery.asciidoc[] + +include::discovery/bootstrapping.asciidoc[] + +include::discovery/adding-removing-nodes.asciidoc[] + +include::discovery/publishing.asciidoc[] + +include::discovery/no-master-block.asciidoc[] + +include::discovery/master-election.asciidoc[] + +include::discovery/fault-detection.asciidoc[] + +include::discovery/quorums.asciidoc[] -include::discovery/zen.asciidoc[] diff --git a/docs/reference/modules/discovery/adding-removing-nodes.asciidoc b/docs/reference/modules/discovery/adding-removing-nodes.asciidoc new file mode 100644 index 0000000000000..d40e903fa88f1 --- /dev/null +++ b/docs/reference/modules/discovery/adding-removing-nodes.asciidoc @@ -0,0 +1,125 @@ +[[modules-discovery-adding-removing-nodes]] +=== Adding and removing nodes + +As nodes are added or removed Elasticsearch maintains an optimal level of fault +tolerance by automatically updating the cluster's _voting configuration_, which +is the set of <> whose responses are counted +when making decisions such as electing a new master or committing a new cluster +state. + +It is recommended to have a small and fixed number of master-eligible nodes in a +cluster, and to scale the cluster up and down by adding and removing +master-ineligible nodes only. However there are situations in which it may be +desirable to add or remove some master-eligible nodes to or from a cluster. 
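+
+For illustration, the committed voting configuration itself can be inspected
+through the cluster state, assuming it is exposed under
+`metadata.cluster_coordination.last_committed_config`:
+
+[source,js]
+--------------------------------------------------
+GET /_cluster/state?filter_path=metadata.cluster_coordination.last_committed_config
+--------------------------------------------------
+// CONSOLE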
+ +==== Adding master-eligible nodes + +If you wish to add some master-eligible nodes to your cluster, simply configure +the new nodes to find the existing cluster and start them up. Elasticsearch will +add the new nodes to the voting configuration if it is appropriate to do so. + +==== Removing master-eligible nodes + +When removing master-eligible nodes, it is important not to remove too many all +at the same time. For instance, if there are currently seven master-eligible +nodes and you wish to reduce this to three, it is not possible simply to stop +four of the nodes at once: to do so would leave only three nodes remaining, +which is less than half of the voting configuration, which means the cluster +cannot take any further actions. + +As long as there are at least three master-eligible nodes in the cluster, as a +general rule it is best to remove nodes one-at-a-time, allowing enough time for +the cluster to <> the voting +configuration and adapt the fault tolerance level to the new set of nodes. + +If there are only two master-eligible nodes remaining then neither node can be +safely removed since both are required to reliably make progress. You must first +inform Elasticsearch that one of the nodes should not be part of the voting +configuration, and that the voting power should instead be given to other nodes. +You can then take the excluded node offline without preventing the other node +from making progress. A node which is added to a voting configuration exclusion +list still works normally, but Elasticsearch tries to remove it from the voting +configuration so its vote is no longer required. Importantly, Elasticsearch +will never automatically move a node on the voting exclusions list back into the +voting configuration. Once an excluded node has been successfully +auto-reconfigured out of the voting configuration, it is safe to shut it down +without affecting the cluster's master-level availability. A node can be added +to the voting configuration exclusion list using the following API: + +[source,js] +-------------------------------------------------- +# Add node to voting configuration exclusions list and wait for the system to +# auto-reconfigure the node out of the voting configuration up to the default +# timeout of 30 seconds +POST /_cluster/voting_config_exclusions/node_name + +# Add node to voting configuration exclusions list and wait for +# auto-reconfiguration up to one minute +POST /_cluster/voting_config_exclusions/node_name?timeout=1m +-------------------------------------------------- +// CONSOLE +// TEST[skip:this would break the test cluster if executed] + +The node that should be added to the exclusions list is specified using +<> in place of `node_name` here. If a call to the +voting configuration exclusions API fails, you can safely retry it. Only a +successful response guarantees that the node has actually been removed from the +voting configuration and will not be reinstated. + +Although the voting configuration exclusions API is most useful for down-scaling +a two-node to a one-node cluster, it is also possible to use it to remove +multiple master-eligible nodes all at the same time. Adding multiple nodes to +the exclusions list has the system try to auto-reconfigure all of these nodes +out of the voting configuration, allowing them to be safely shut down while +keeping the cluster available. 
In the example described above, where a +seven-master-node cluster is shrunk down to only three master-eligible nodes, you could add +four nodes to the exclusions list, wait for confirmation, and then shut them +down simultaneously. + +NOTE: Voting exclusions are only required when removing at least half of the +master-eligible nodes from a cluster in a short time period. They are not +required when removing master-ineligible nodes, nor are they required when +removing fewer than half of the master-eligible nodes. + +Adding an exclusion for a node creates an entry for that node in the voting +configuration exclusions list, which causes the system to automatically try to +reconfigure the voting configuration to remove that node and prevents it from +returning to the voting configuration once it has been removed. The current list of +exclusions is stored in the cluster state and can be inspected as follows: + +[source,js] +-------------------------------------------------- +GET /_cluster/state?filter_path=metadata.cluster_coordination.voting_config_exclusions +-------------------------------------------------- +// CONSOLE + +This list is limited in size by the following setting: + +`cluster.max_voting_config_exclusions`:: + + Sets a limit on the number of voting configuration exclusions at any one + time. Defaults to `10`. + +Since voting configuration exclusions are persistent and limited in number, they +must be cleaned up. Normally an exclusion is added when performing some +maintenance on the cluster, and the exclusions should be cleaned up when the +maintenance is complete. Clusters should have no voting configuration exclusions +in normal operation. + +If a node is excluded from the voting configuration because it is to be shut +down permanently, its exclusion can be removed after it is shut down and removed +from the cluster. Exclusions can also be cleared if they were created in error +or were only required temporarily: + +[source,js] +-------------------------------------------------- +# Wait for all the nodes with voting configuration exclusions to be removed from +# the cluster and then remove all the exclusions, allowing any node to return to +# the voting configuration in the future. +DELETE /_cluster/voting_config_exclusions + +# Immediately remove all the voting configuration exclusions, allowing any node +# to return to the voting configuration in the future. +DELETE /_cluster/voting_config_exclusions?wait_for_removal=false +-------------------------------------------------- +// CONSOLE diff --git a/docs/reference/modules/discovery/azure.asciidoc b/docs/reference/modules/discovery/azure.asciidoc deleted file mode 100644 index 1343819b02a85..0000000000000 --- a/docs/reference/modules/discovery/azure.asciidoc +++ /dev/null @@ -1,5 +0,0 @@ -[[modules-discovery-azure-classic]] -=== Azure Classic Discovery - -Azure classic discovery allows to use the Azure Classic APIs to perform automatic discovery (similar to multicast). -It is available as a plugin. See {plugins}/discovery-azure-classic.html[discovery-azure-classic] for more information.
diff --git a/docs/reference/modules/discovery/bootstrapping.asciidoc b/docs/reference/modules/discovery/bootstrapping.asciidoc new file mode 100644 index 0000000000000..4b5aa532d4874 --- /dev/null +++ b/docs/reference/modules/discovery/bootstrapping.asciidoc @@ -0,0 +1,105 @@ +[[modules-discovery-bootstrap-cluster]] +=== Bootstrapping a cluster + +Starting an Elasticsearch cluster for the very first time requires the initial +set of <> to be explicitly defined on one or +more of the master-eligible nodes in the cluster. This is known as _cluster +bootstrapping_. This is only required the very first time the cluster starts +up: nodes that have already joined a cluster store this information in their +data folder and freshly-started nodes that are joining an existing cluster +obtain this information from the cluster's elected master. This information is +given using this setting: + +`cluster.initial_master_nodes`:: + + Sets a list of the <> or transport addresses of the + initial set of master-eligible nodes in a brand-new cluster. By default + this list is empty, meaning that this node expects to join a cluster that + has already been bootstrapped. + +This setting can be given on the command line or in the `elasticsearch.yml` +configuration file when starting up a master-eligible node. Once the cluster +has formed this setting is no longer required and is ignored. It need not be set +on master-ineligible nodes, nor on master-eligible nodes that are started to +join an existing cluster. Note that master-eligible nodes should use storage +that persists across restarts. If they do not, and +`cluster.initial_master_nodes` is set, and a full cluster restart occurs, then +another brand-new cluster will form and this may result in data loss. + +It is technically sufficient to set `cluster.initial_master_nodes` on a single +master-eligible node in the cluster, and only to mention that single node in the +setting's value, but this provides no fault tolerance before the cluster has +fully formed. It is therefore better to bootstrap using at least three +master-eligible nodes, each with a `cluster.initial_master_nodes` setting +containing all three nodes. + +NOTE: In alpha releases, all listed master-eligible nodes are required to be +discovered before bootstrapping can take place. This requirement will be relaxed +in production-ready releases. + +WARNING: You must set `cluster.initial_master_nodes` to the same list of nodes +on each node on which it is set in order to be sure that only a single cluster +forms during bootstrapping and therefore to avoid the risk of data loss. + +For a cluster with 3 master-eligible nodes (with <> +`master-a`, `master-b` and `master-c`) the configuration will look as follows: + +[source,yaml] +-------------------------------------------------- +cluster.initial_master_nodes: + - master-a + - master-b + - master-c +-------------------------------------------------- + +Alternatively the IP addresses or hostnames (<>) can be used. 
If there is more than one Elasticsearch node +with the same IP address or hostname then the transport ports must also be given +to specify exactly which node is meant: + +[source,yaml] +-------------------------------------------------- +cluster.initial_master_nodes: + - 10.0.10.101 + - 10.0.10.102:9300 + - 10.0.10.102:9301 + - master-node-hostname +-------------------------------------------------- + +Like all node settings, it is also possible to specify the initial set of master +nodes on the command-line that is used to start Elasticsearch: + +[source,bash] +-------------------------------------------------- +$ bin/elasticsearch -Ecluster.initial_master_nodes=master-a,master-b,master-c +-------------------------------------------------- + +[float] +==== Choosing a cluster name + +The <> setting enables you to create multiple +clusters which are separated from each other. Nodes verify that they agree on +their cluster name when they first connect to each other, and Elasticsearch +will only form a cluster from nodes that all have the same cluster name. The +default value for the cluster name is `elasticsearch`, but it is recommended to +change this to reflect the logical name of the cluster. + +[float] +==== Auto-bootstrapping in development mode + +If the cluster is running with a completely default configuration then it will +automatically bootstrap a cluster based on the nodes that could be discovered to +be running on the same host within a short time after startup. This means that +by default it is possible to start up several nodes on a single machine and have +them automatically form a cluster which is very useful for development +environments and experimentation. However, since nodes may not always +successfully discover each other quickly enough this automatic bootstrapping +cannot be relied upon and cannot be used in production deployments. + +If any of the following settings are configured then auto-bootstrapping will not +take place, and you must configure `cluster.initial_master_nodes` as described +in the <>: + +* `discovery.zen.hosts_provider` +* `discovery.zen.ping.unicast.hosts` +* `cluster.initial_master_nodes` diff --git a/docs/reference/modules/discovery/discovery.asciidoc b/docs/reference/modules/discovery/discovery.asciidoc new file mode 100644 index 0000000000000..dd2dc47a79dfb --- /dev/null +++ b/docs/reference/modules/discovery/discovery.asciidoc @@ -0,0 +1,195 @@ +[[modules-discovery-hosts-providers]] +=== Discovery + +Discovery is the process by which the cluster formation module finds other +nodes with which to form a cluster. This process runs when you start an +Elasticsearch node or when a node believes the master node failed and continues +until the master node is found or a new master node is elected. + +Discovery operates in two phases: First, each node probes the addresses of all +known master-eligible nodes by connecting to each address and attempting to +identify the node to which it is connected. Secondly it shares with the remote +node a list of all of its known master-eligible peers and the remote node +responds with _its_ peers in turn. The node then probes all the new nodes that +it just discovered, requests their peers, and so on. + +This process starts with a list of _seed_ addresses from one or more +<>, together with the addresses of +any master-eligible nodes that were in the last known cluster. 
The process +operates in two phases: First, each node probes the seed addresses by +connecting to each address and attempting to identify the node to which it is +connected. Secondly it shares with the remote node a list of all of its known +master-eligible peers and the remote node responds with _its_ peers in turn. +The node then probes all the new nodes that it just discovered, requests their +peers, and so on. + +If the node is not master-eligible then it continues this discovery process +until it has discovered an elected master node. If no elected master is +discovered then the node will retry after `discovery.find_peers_interval` which +defaults to `1s`. + +If the node is master-eligible then it continues this discovery process until it +has either discovered an elected master node or else it has discovered enough +masterless master-eligible nodes to complete an election. If neither of these +occur quickly enough then the node will retry after +`discovery.find_peers_interval` which defaults to `1s`. + +[[built-in-hosts-providers]] +==== Hosts providers + +By default the cluster formation module offers two hosts providers to configure +the list of seed nodes: a _settings_-based and a _file_-based hosts provider. +It can be extended to support cloud environments and other forms of hosts +providers via {plugins}/discovery.html[discovery plugins]. Hosts providers are +configured using the `discovery.zen.hosts_provider` setting, which defaults to +the _settings_-based hosts provider. Multiple hosts providers can be specified +as a list. + +[float] +[[settings-based-hosts-provider]] +===== Settings-based hosts provider + +The settings-based hosts provider uses a node setting to configure a static list +of hosts to use as seed nodes. These hosts can be specified as hostnames or IP +addresses; hosts specified as hostnames are resolved to IP addresses during each +round of discovery. Note that if you are in an environment where DNS resolutions +vary with time, you might need to adjust your <>. + +The list of hosts is set using the <> static +setting. For example: + +[source,yaml] +-------------------------------------------------- +discovery.zen.ping.unicast.hosts: + - 192.168.1.10:9300 + - 192.168.1.11 <1> + - seeds.mydomain.com <2> +-------------------------------------------------- +<1> The port will default to `transport.profiles.default.port` and fallback to + `transport.port` if not specified. +<2> A hostname that resolves to multiple IP addresses will try all resolved + addresses. + +Additionally, the `discovery.zen.ping.unicast.hosts.resolve_timeout` configures +the amount of time to wait for DNS lookups on each round of discovery. This is +specified as a <> and defaults to 5s. + +Unicast discovery uses the <> module to perform the +discovery. + +[float] +[[file-based-hosts-provider]] +===== File-based hosts provider + +The file-based hosts provider configures a list of hosts via an external file. +Elasticsearch reloads this file when it changes, so that the list of seed nodes +can change dynamically without needing to restart each node. For example, this +gives a convenient mechanism for an Elasticsearch instance that is run in a +Docker container to be dynamically supplied with a list of IP addresses to +connect to when those IP addresses may not be known at node startup. 
+ +To enable file-based discovery, configure the `file` hosts provider as follows: + +[source,txt] +---------------------------------------------------------------- +discovery.zen.hosts_provider: file +---------------------------------------------------------------- + +Then create a file at `$ES_PATH_CONF/unicast_hosts.txt` in the format described +below. Any time a change is made to the `unicast_hosts.txt` file, the new changes +are picked up by Elasticsearch and the new hosts list is used. + +Note that the file-based discovery plugin augments the unicast hosts list in +`elasticsearch.yml`. If there are valid unicast host entries in +`discovery.zen.ping.unicast.hosts`, they are used in addition to those +supplied in `unicast_hosts.txt`. + +The `discovery.zen.ping.unicast.hosts.resolve_timeout` setting also applies to +DNS lookups for nodes specified by address via file-based discovery. This is +specified as a <> and defaults to 5s. + +The format of the file is to specify one node entry per line. Each node entry +consists of the host (host name or IP address) and an optional transport port +number. If the port number is specified, it must come immediately after the +host (on the same line) separated by a `:`. If the port number is not +specified, a default value of 9300 is used. + +For example, this is the content of `unicast_hosts.txt` for a cluster with four +nodes that participate in unicast discovery, some of which are not running on +the default port: + +[source,txt] +---------------------------------------------------------------- +10.10.10.5 +10.10.10.6:9305 +10.10.10.5:10005 +# an IPv6 address +[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:9301 +---------------------------------------------------------------- + +Host names are allowed instead of IP addresses (similar to +`discovery.zen.ping.unicast.hosts`), and IPv6 addresses must be specified in +brackets with the port coming after the brackets. + +You can also add comments to this file. All comments must appear on +their own lines, starting with `#` (i.e. comments cannot start in the middle of a +line). + +[float] +[[ec2-hosts-provider]] +===== EC2 hosts provider + +The {plugins}/discovery-ec2.html[EC2 discovery plugin] adds a hosts provider +that uses the https://github.com/aws/aws-sdk-java[AWS API] to find a list of +seed nodes. + +[float] +[[azure-classic-hosts-provider]] +===== Azure Classic hosts provider + +The {plugins}/discovery-azure-classic.html[Azure Classic discovery plugin] adds +a hosts provider that uses the Azure Classic API to find a list of seed nodes. + +[float] +[[gce-hosts-provider]] +===== Google Compute Engine hosts provider + +The {plugins}/discovery-gce.html[GCE discovery plugin] adds a hosts provider +that uses the GCE API to find a list of seed nodes. + +[float] +==== Discovery settings + +The discovery process is controlled by the following settings. + +`discovery.find_peers_interval`:: + + Sets how long a node will wait before attempting another discovery round. + Defaults to `1s`. + +`discovery.request_peers_timeout`:: + + Sets how long a node will wait after asking its peers again before + considering the request to have failed. Defaults to `3s`. + +`discovery.probe.connect_timeout`:: + + Sets how long to wait when attempting to connect to each address. Defaults + to `3s`. + +`discovery.probe.handshake_timeout`:: + + Sets how long to wait when attempting to identify the remote node via a + handshake. Defaults to `1s`.
+ +`discovery.cluster_formation_warning_timeout`:: + + Sets how long a node will try to form a cluster before logging a warning + that the cluster did not form. Defaults to `10s`. + +If a cluster has not formed after `discovery.cluster_formation_warning_timeout` +has elapsed then the node will log a warning message that starts with the phrase +`master not discovered` which describes the current state of the discovery +process. + diff --git a/docs/reference/modules/discovery/ec2.asciidoc b/docs/reference/modules/discovery/ec2.asciidoc deleted file mode 100644 index ba15f6bffa4cd..0000000000000 --- a/docs/reference/modules/discovery/ec2.asciidoc +++ /dev/null @@ -1,4 +0,0 @@ -[[modules-discovery-ec2]] -=== EC2 Discovery - -EC2 discovery is available as a plugin. See {plugins}/discovery-ec2.html[discovery-ec2] for more information. diff --git a/docs/reference/modules/discovery/fault-detection.asciidoc b/docs/reference/modules/discovery/fault-detection.asciidoc new file mode 100644 index 0000000000000..0a8ff5fa2081c --- /dev/null +++ b/docs/reference/modules/discovery/fault-detection.asciidoc @@ -0,0 +1,52 @@ +[[fault-detection-settings]] +=== Cluster fault detection settings + +An elected master periodically checks each of the nodes in the cluster in order +to ensure that they are still connected and healthy, and in turn each node in +the cluster periodically checks the health of the elected master. These checks +are known respectively as _follower checks_ and _leader checks_. + +Elasticsearch allows for these checks occasionally to fail or timeout without +taking any action, and will only consider a node to be truly faulty after a +number of consecutive checks have failed. The following settings control the +behaviour of fault detection. + +`cluster.fault_detection.follower_check.interval`:: + + Sets how long the elected master waits between follower checks to each + other node in the cluster. Defaults to `1s`. + +`cluster.fault_detection.follower_check.timeout`:: + + Sets how long the elected master waits for a response to a follower check + before considering it to have failed. Defaults to `30s`. + +`cluster.fault_detection.follower_check.retry_count`:: + + Sets how many consecutive follower check failures must occur to each node + before the elected master considers that node to be faulty and removes it + from the cluster. Defaults to `3`. + +`cluster.fault_detection.leader_check.interval`:: + + Sets how long each node waits between checks of the elected master. + Defaults to `1s`. + +`cluster.fault_detection.leader_check.timeout`:: + + Sets how long each node waits for a response to a leader check from the + elected master before considering it to have failed. Defaults to `30s`. + +`cluster.fault_detection.leader_check.retry_count`:: + + Sets how many consecutive leader check failures must occur before a node + considers the elected master to be faulty and attempts to find or elect a + new master. Defaults to `3`. + +If the elected master detects that a node has disconnected then this is treated +as an immediate failure, bypassing the timeouts and retries listed above, and +the master attempts to remove the node from the cluster. Similarly, if a node +detects that the elected master has disconnected then this is treated as an +immediate failure, bypassing the timeouts and retries listed above, and the +follower restarts its discovery phase to try and find or elect a new master. 
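+
+For illustration, the effective values of these settings, including their
+defaults, can be inspected with the cluster get settings API:
+
+[source,js]
+--------------------------------------------------
+GET /_cluster/settings?include_defaults=true&filter_path=defaults.cluster.fault_detection
+--------------------------------------------------
+// CONSOLE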
+
diff --git a/docs/reference/modules/discovery/gce.asciidoc b/docs/reference/modules/discovery/gce.asciidoc
deleted file mode 100644
index ea367d52ceb75..0000000000000
--- a/docs/reference/modules/discovery/gce.asciidoc
+++ /dev/null
@@ -1,6 +0,0 @@
-[[modules-discovery-gce]]
-=== Google Compute Engine Discovery
-
-Google Compute Engine (GCE) discovery allows to use the GCE APIs to perform automatic discovery (similar to multicast).
-It is available as a plugin. See {plugins}/discovery-gce.html[discovery-gce] for more information.
-
diff --git a/docs/reference/modules/discovery/master-election.asciidoc b/docs/reference/modules/discovery/master-election.asciidoc
new file mode 100644
index 0000000000000..60d09e5545b40
--- /dev/null
+++ b/docs/reference/modules/discovery/master-election.asciidoc
@@ -0,0 +1,40 @@
+[[master-election-settings]]
+=== Master election settings
+
+The following settings control the scheduling of elections.
+
+`cluster.election.initial_timeout`::
+
+    Sets the upper bound on how long a node will wait initially, or after the
+    elected master fails, before attempting its first election. This defaults
+    to `100ms`.
+
+`cluster.election.back_off_time`::
+
+    Sets the amount by which to increase the upper bound on the wait before an
+    election on each election failure. Note that this is _linear_ backoff. This
+    defaults to `100ms`.
+
+`cluster.election.max_timeout`::
+
+    Sets the maximum upper bound on how long a node will wait before attempting
+    a first election, so that a network partition that lasts for a long time
+    does not result in excessively sparse elections. This defaults to `10s`.
+
+`cluster.election.duration`::
+
+    Sets how long each election is allowed to take before a node considers it to
+    have failed and schedules a retry. This defaults to `500ms`.
+
+[float]
+==== Joining an elected master
+
+During master election, or when joining an existing formed cluster, a node will
+send a join request to the master in order to be officially added to the
+cluster. This join process can be configured with the following settings.
+
+`cluster.join.timeout`::
+
+    Sets how long a node will wait after sending a request to join a cluster
+    before it considers the request to have failed and retries. Defaults to
+    `60s`.
diff --git a/docs/reference/modules/discovery/no-master-block.asciidoc b/docs/reference/modules/discovery/no-master-block.asciidoc
new file mode 100644
index 0000000000000..3099aaf66796d
--- /dev/null
+++ b/docs/reference/modules/discovery/no-master-block.asciidoc
@@ -0,0 +1,22 @@
+[[no-master-block]]
+=== No master block settings
+
+For the cluster to be fully operational, it must have an active master. The
+`discovery.zen.no_master_block` setting controls what operations should be
+rejected when there is no active master.
+
+The `discovery.zen.no_master_block` setting has two valid values:
+
+[horizontal]
+`all`:: All operations on the node--i.e. both reads and writes--will be
+rejected. This also applies to API-level cluster state read or write
+operations, such as the get index settings, put mapping, and cluster state APIs.
+`write`:: (default) Write operations will be rejected. Read operations will
+succeed, based on the last known cluster configuration. This may result in
+partial reads of stale data as this node may be isolated from the rest of the
+cluster.
+
+The `discovery.zen.no_master_block` setting doesn't apply to nodes-based APIs
+(for example cluster stats, node info, and node stats APIs).
Requests to these +APIs will not be blocked and can run on any available node. + diff --git a/docs/reference/modules/discovery/publishing.asciidoc b/docs/reference/modules/discovery/publishing.asciidoc new file mode 100644 index 0000000000000..8c69290edc706 --- /dev/null +++ b/docs/reference/modules/discovery/publishing.asciidoc @@ -0,0 +1,42 @@ +[[cluster-state-publishing]] +=== Publishing the cluster state + +The master node is the only node in a cluster that can make changes to the +cluster state. The master node processes one batch of cluster state updates at +a time, computing the required changes and publishing the updated cluster state +to all the other nodes in the cluster. Each publication starts with the master +broadcasting the updated cluster state to all nodes in the cluster. Each node +responds with an acknowledgement but does not yet apply the newly-received +state. Once the master has collected acknowledgements from enough +master-eligible nodes, the new cluster state is said to be _committed_ and the +master broadcasts another message instructing nodes to apply the now-committed +state. Each node receives this message, applies the updated state, and then +sends a second acknowledgement back to the master. + +The master allows a limited amount of time for each cluster state update to be +completely published to all nodes. It is defined by the +`cluster.publish.timeout` setting, which defaults to `30s`, measured from the +time the publication started. If this time is reached before the new cluster +state is committed then the cluster state change is rejected and the master +considers itself to have failed. It stands down and starts trying to elect a +new master. + +If the new cluster state is committed before `cluster.publish.timeout` has +elapsed, the master node considers the change to have succeeded. It waits until +the timeout has elapsed or until it has received acknowledgements that each +node in the cluster has applied the updated state, and then starts processing +and publishing the next cluster state update. If some acknowledgements have not +been received (i.e. some nodes have not yet confirmed that they have applied +the current update), these nodes are said to be _lagging_ since their cluster +states have fallen behind the master's latest state. The master waits for the +lagging nodes to catch up for a further time, `cluster.follower_lag.timeout`, +which defaults to `90s`. If a node has still not successfully applied the +cluster state update within this time then it is considered to have failed and +is removed from the cluster. + +NOTE: Elasticsearch is a peer to peer based system, in which nodes communicate +with one another directly. The high-throughput APIs (index, delete, search) do +not normally interact with the master node. The responsibility of the master +node is to maintain the global cluster state and reassign shards when nodes join or leave +the cluster. Each time the cluster state is changed, the +new state is published to all nodes in the cluster as described above. diff --git a/docs/reference/modules/discovery/quorums.asciidoc b/docs/reference/modules/discovery/quorums.asciidoc new file mode 100644 index 0000000000000..5642083b63b0b --- /dev/null +++ b/docs/reference/modules/discovery/quorums.asciidoc @@ -0,0 +1,193 @@ +[[modules-discovery-quorums]] +=== Quorum-based decision making + +Electing a master node and changing the cluster state are the two fundamental +tasks that master-eligible nodes must work together to perform. 
It is important +that these activities work robustly even if some nodes have failed. +Elasticsearch achieves this robustness by considering each action to have +succeeded on receipt of responses from a _quorum_, which is a subset of the +master-eligible nodes in the cluster. The advantage of requiring only a subset +of the nodes to respond is that it means some of the nodes can fail without +preventing the cluster from making progress. The quorums are carefully chosen so +the cluster does not have a "split brain" scenario where it's partitioned into +two pieces such that each piece may make decisions that are inconsistent with +those of the other piece. + +Elasticsearch allows you to add and remove master-eligible nodes to a running +cluster. In many cases you can do this simply by starting or stopping the nodes +as required. See <>. + +As nodes are added or removed Elasticsearch maintains an optimal level of fault +tolerance by updating the cluster's _voting configuration_, which is the set of +master-eligible nodes whose responses are counted when making decisions such as +electing a new master or committing a new cluster state. A decision is made only +after more than half of the nodes in the voting configuration have responded. +Usually the voting configuration is the same as the set of all the +master-eligible nodes that are currently in the cluster. However, there are some +situations in which they may be different. + +To be sure that the cluster remains available you **must not stop half or more +of the nodes in the voting configuration at the same time**. As long as more +than half of the voting nodes are available the cluster can still work normally. +This means that if there are three or four master-eligible nodes, the cluster +can tolerate one of them being unavailable. If there are two or fewer +master-eligible nodes, they must all remain available. + +After a node has joined or left the cluster the elected master must issue a +cluster-state update that adjusts the voting configuration to match, and this +can take a short time to complete. It is important to wait for this adjustment +to complete before removing more nodes from the cluster. + +[float] +==== Setting the initial quorum + +When a brand-new cluster starts up for the first time, it must elect its first +master node. To do this election, it needs to know the set of master-eligible +nodes whose votes should count. This initial voting configuration is known as +the _bootstrap configuration_ and is set in the +<>. + +It is important that the bootstrap configuration identifies exactly which nodes +should vote in the first election. It is not sufficient to configure each node +with an expectation of how many nodes there should be in the cluster. It is also +important to note that the bootstrap configuration must come from outside the +cluster: there is no safe way for the cluster to determine the bootstrap +configuration correctly on its own. + +If the bootstrap configuration is not set correctly, when you start a brand-new +cluster there is a risk that you will accidentally form two separate clusters +instead of one. This situation can lead to data loss: you might start using both +clusters before you notice that anything has gone wrong and it is impossible to +merge them together later. + +NOTE: To illustrate the problem with configuring each node to expect a certain +cluster size, imagine starting up a three-node cluster in which each node knows +that it is going to be part of a three-node cluster. 
A majority of three nodes +is two, so normally the first two nodes to discover each other form a cluster +and the third node joins them a short time later. However, imagine that four +nodes were erroneously started instead of three. In this case, there are enough +nodes to form two separate clusters. Of course if each node is started manually +then it's unlikely that too many nodes are started. If you're using an automated +orchestrator, however, it's certainly possible to get into this situation-- +particularly if the orchestrator is not resilient to failures such as network +partitions. + +The initial quorum is only required the very first time a whole cluster starts +up. New nodes joining an established cluster can safely obtain all the +information they need from the elected master. Nodes that have previously been +part of a cluster will have stored to disk all the information that is required +when they restart. + +[float] +==== Master elections + +Elasticsearch uses an election process to agree on an elected master node, both +at startup and if the existing elected master fails. Any master-eligible node +can start an election, and normally the first election that takes place will +succeed. Elections only usually fail when two nodes both happen to start their +elections at about the same time, so elections are scheduled randomly on each +node to reduce the probability of this happening. Nodes will retry elections +until a master is elected, backing off on failure, so that eventually an +election will succeed (with arbitrarily high probability). The scheduling of +master elections are controlled by the <>. + +[float] +==== Cluster maintenance, rolling restarts and migrations + +Many cluster maintenance tasks involve temporarily shutting down one or more +nodes and then starting them back up again. By default Elasticsearch can remain +available if one of its master-eligible nodes is taken offline, such as during a +<>. Furthermore, if multiple nodes are stopped +and then started again then it will automatically recover, such as during a +<>. There is no need to take any further +action with the APIs described here in these cases, because the set of master +nodes is not changing permanently. + +[float] +==== Automatic changes to the voting configuration + +Nodes may join or leave the cluster, and Elasticsearch reacts by automatically +making corresponding changes to the voting configuration in order to ensure that +the cluster is as resilient as possible. The default auto-reconfiguration +behaviour is expected to give the best results in most situations. The current +voting configuration is stored in the cluster state so you can inspect its +current contents as follows: + +[source,js] +-------------------------------------------------- +GET /_cluster/state?filter_path=metadata.cluster_coordination.last_committed_config +-------------------------------------------------- +// CONSOLE + +NOTE: The current voting configuration is not necessarily the same as the set of +all available master-eligible nodes in the cluster. Altering the voting +configuration involves taking a vote, so it takes some time to adjust the +configuration as nodes join or leave the cluster. Also, there are situations +where the most resilient configuration includes unavailable nodes, or does not +include some available nodes, and in these situations the voting configuration +differs from the set of available master-eligible nodes in the cluster. 
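Editor's note: for orientation only, a response to the `GET /_cluster/state?filter_path=...` request shown above might look roughly like the sketch below. The node IDs are invented and the exact formatting may differ from what a real cluster returns:

[source,js]
--------------------------------------------------
{
  "metadata": {
    "cluster_coordination": {
      "last_committed_config": [
        "tO3bFej8Tc2sDRvERBACzw",
        "DAieuWPdTrKVhYxgBDN3Cw",
        "1c0yQa3eS1uO_cdgfNLuVw"
      ]
    }
  }
}
--------------------------------------------------
// NOTCONSOLE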
+
+Larger voting configurations are usually more resilient, so Elasticsearch
+normally prefers to add master-eligible nodes to the voting configuration after
+they join the cluster. Similarly, if a node in the voting configuration
+leaves the cluster and there is another master-eligible node in the cluster that
+is not in the voting configuration then it is preferable to swap these two nodes
+over. The size of the voting configuration is thus unchanged but its
+resilience increases.
+
+It is not so straightforward to automatically remove nodes from the voting
+configuration after they have left the cluster. Different strategies have
+different benefits and drawbacks, so the right choice depends on how the cluster
+will be used. You can control whether the voting configuration automatically shrinks by using the following setting:
+
+`cluster.auto_shrink_voting_configuration`::
+
+    Defaults to `true`, meaning that the voting configuration will automatically
+    shrink, shedding departed nodes, as long as it still contains at least 3
+    nodes. If set to `false`, the voting configuration never automatically
+    shrinks; departed nodes must be removed manually using the
+    <>.
+
+NOTE: If `cluster.auto_shrink_voting_configuration` is set to `true`, the
+recommended and default setting, and there are at least three master-eligible
+nodes in the cluster, then Elasticsearch remains capable of processing
+cluster-state updates as long as all but one of its master-eligible nodes are
+healthy.
+
+There are situations in which Elasticsearch might tolerate the loss of multiple
+nodes, but this is not guaranteed under all sequences of failures. If this
+setting is set to `false` then departed nodes must be removed from the voting
+configuration manually, using the
+<>, to achieve
+the desired level of resilience.
+
+No matter how it is configured, Elasticsearch will not suffer from a "split-brain" inconsistency.
+The `cluster.auto_shrink_voting_configuration` setting affects only the cluster's availability in the
+event of the failure of some of its nodes, and the administrative tasks that
+must be performed as nodes join and leave the cluster.
+
+[float]
+==== Even numbers of master-eligible nodes
+
+There should normally be an odd number of master-eligible nodes in a cluster.
+If there is an even number, Elasticsearch leaves one of them out of the voting
+configuration to ensure that it has an odd size. This omission does not decrease
+the failure-tolerance of the cluster. In fact, it improves it slightly: if the
+cluster suffers from a network partition that divides it into two equally-sized
+halves then one of the halves will contain a majority of the voting
+configuration and will be able to keep operating. If all of the master-eligible
+nodes' votes were counted, neither side would contain a strict majority of the
+nodes and so the cluster would not be able to make any progress.
+
+For instance, if there are four master-eligible nodes in the cluster and the
+voting configuration contained all of them, any quorum-based decision would
+require votes from at least three of them. This situation means that the cluster
+can tolerate the loss of only a single master-eligible node. If this cluster
+were split into two equal halves, neither half would contain three
+master-eligible nodes and the cluster would not be able to make any progress.
+If the voting configuration contains only three of the four master-eligible +nodes, however, the cluster is still only fully tolerant to the loss of one +node, but quorum-based decisions require votes from two of the three voting +nodes. In the event of an even split, one half will contain two of the three +voting nodes so that half will remain available. diff --git a/docs/reference/modules/discovery/zen.asciidoc b/docs/reference/modules/discovery/zen.asciidoc deleted file mode 100644 index 98967bf7ebaf4..0000000000000 --- a/docs/reference/modules/discovery/zen.asciidoc +++ /dev/null @@ -1,226 +0,0 @@ -[[modules-discovery-zen]] -=== Zen Discovery - -Zen discovery is the built-in, default, discovery module for Elasticsearch. It -provides unicast and file-based discovery, and can be extended to support cloud -environments and other forms of discovery via plugins. - -Zen discovery is integrated with other modules, for example, all communication -between nodes is done using the <> module. - -It is separated into several sub modules, which are explained below: - -[float] -[[ping]] -==== Ping - -This is the process where a node uses the discovery mechanisms to find other -nodes. - -[float] -[[discovery-seed-nodes]] -==== Seed nodes - -Zen discovery uses a list of _seed_ nodes in order to start off the discovery -process. At startup, or when electing a new master, Elasticsearch tries to -connect to each seed node in its list, and holds a gossip-like conversation with -them to find other nodes and to build a complete picture of the cluster. By -default there are two methods for configuring the list of seed nodes: _unicast_ -and _file-based_. It is recommended that the list of seed nodes comprises the -list of master-eligible nodes in the cluster. - -[float] -[[unicast]] -===== Unicast - -Unicast discovery configures a static list of hosts for use as seed nodes. -These hosts can be specified as hostnames or IP addresses; hosts specified as -hostnames are resolved to IP addresses during each round of pinging. Note that -if you are in an environment where DNS resolutions vary with time, you might -need to adjust your <>. - -The list of hosts is set using the `discovery.zen.ping.unicast.hosts` static -setting. This is either an array of hosts or a comma-delimited string. Each -value should be in the form of `host:port` or `host` (where `port` defaults to -the setting `transport.profiles.default.port` falling back to -`transport.port` if not set). Note that IPv6 hosts must be bracketed. The -default for this setting is `127.0.0.1, [::1]` - -Additionally, the `discovery.zen.ping.unicast.resolve_timeout` configures the -amount of time to wait for DNS lookups on each round of pinging. This is -specified as a <> and defaults to 5s. - -Unicast discovery uses the <> module to perform the -discovery. - -[float] -[[file-based-hosts-provider]] -===== File-based - -In addition to hosts provided by the static `discovery.zen.ping.unicast.hosts` -setting, it is possible to provide a list of hosts via an external file. -Elasticsearch reloads this file when it changes, so that the list of seed nodes -can change dynamically without needing to restart each node. For example, this -gives a convenient mechanism for an Elasticsearch instance that is run in a -Docker container to be dynamically supplied with a list of IP addresses to -connect to for Zen discovery when those IP addresses may not be known at node -startup. 
- -To enable file-based discovery, configure the `file` hosts provider as follows: - -[source,txt] ----------------------------------------------------------------- -discovery.zen.hosts_provider: file ----------------------------------------------------------------- - -Then create a file at `$ES_PATH_CONF/unicast_hosts.txt` in the format described -below. Any time a change is made to the `unicast_hosts.txt` file the new -changes will be picked up by Elasticsearch and the new hosts list will be used. - -Note that the file-based discovery plugin augments the unicast hosts list in -`elasticsearch.yml`: if there are valid unicast host entries in -`discovery.zen.ping.unicast.hosts` then they will be used in addition to those -supplied in `unicast_hosts.txt`. - -The `discovery.zen.ping.unicast.resolve_timeout` setting also applies to DNS -lookups for nodes specified by address via file-based discovery. This is -specified as a <> and defaults to 5s. - -The format of the file is to specify one node entry per line. Each node entry -consists of the host (host name or IP address) and an optional transport port -number. If the port number is specified, is must come immediately after the -host (on the same line) separated by a `:`. If the port number is not -specified, a default value of 9300 is used. - -For example, this is an example of `unicast_hosts.txt` for a cluster with four -nodes that participate in unicast discovery, some of which are not running on -the default port: - -[source,txt] ----------------------------------------------------------------- -10.10.10.5 -10.10.10.6:9305 -10.10.10.5:10005 -# an IPv6 address -[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:9301 ----------------------------------------------------------------- - -Host names are allowed instead of IP addresses (similar to -`discovery.zen.ping.unicast.hosts`), and IPv6 addresses must be specified in -brackets with the port coming after the brackets. - -It is also possible to add comments to this file. All comments must appear on -their lines starting with `#` (i.e. comments cannot start in the middle of a -line). - -[float] -[[master-election]] -==== Master Election - -As part of the ping process a master of the cluster is either elected or joined -to. This is done automatically. The `discovery.zen.ping_timeout` (which defaults -to `3s`) determines how long the node will wait before deciding on starting an -election or joining an existing cluster. Three pings will be sent over this -timeout interval. In case where no decision can be reached after the timeout, -the pinging process restarts. In slow or congested networks, three seconds -might not be enough for a node to become aware of the other nodes in its -environment before making an election decision. Increasing the timeout should -be done with care in that case, as it will slow down the election process. Once -a node decides to join an existing formed cluster, it will send a join request -to the master (`discovery.zen.join_timeout`) with a timeout defaulting at 20 -times the ping timeout. - -When the master node stops or has encountered a problem, the cluster nodes start -pinging again and will elect a new master. This pinging round also serves as a -protection against (partial) network failures where a node may unjustly think -that the master has failed. In this case the node will simply hear from other -nodes about the currently active master. 
- -If `discovery.zen.master_election.ignore_non_master_pings` is `true`, pings from -nodes that are not master eligible (nodes where `node.master` is `false`) are -ignored during master election; the default value is `false`. - -Nodes can be excluded from becoming a master by setting `node.master` to -`false`. - -The `discovery.zen.minimum_master_nodes` sets the minimum number of master -eligible nodes that need to join a newly elected master in order for an election -to complete and for the elected node to accept its mastership. The same setting -controls the minimum number of active master eligible nodes that should be a -part of any active cluster. If this requirement is not met the active master -node will step down and a new master election will begin. - -This setting must be set to a <> of your master -eligible nodes. It is recommended to avoid having only two master eligible -nodes, since a quorum of two is two. Therefore, a loss of either master eligible -node will result in an inoperable cluster. - -[float] -[[fault-detection]] -==== Fault Detection - -There are two fault detection processes running. The first is by the master, to -ping all the other nodes in the cluster and verify that they are alive. And on -the other end, each node pings to master to verify if its still alive or an -election process needs to be initiated. - -The following settings control the fault detection process using the -`discovery.zen.fd` prefix: - -[cols="<,<",options="header",] -|======================================================================= -|Setting |Description -|`ping_interval` |How often a node gets pinged. Defaults to `1s`. - -|`ping_timeout` |How long to wait for a ping response, defaults to -`30s`. - -|`ping_retries` |How many ping failures / timeouts cause a node to be -considered failed. Defaults to `3`. -|======================================================================= - -[float] -==== Cluster state updates - -The master node is the only node in a cluster that can make changes to the -cluster state. The master node processes one cluster state update at a time, -applies the required changes and publishes the updated cluster state to all the -other nodes in the cluster. Each node receives the publish message, acknowledges -it, but does *not* yet apply it. If the master does not receive acknowledgement -from at least `discovery.zen.minimum_master_nodes` nodes within a certain time -(controlled by the `discovery.zen.commit_timeout` setting and defaults to 30 -seconds) the cluster state change is rejected. - -Once enough nodes have responded, the cluster state is committed and a message -will be sent to all the nodes. The nodes then proceed to apply the new cluster -state to their internal state. The master node waits for all nodes to respond, -up to a timeout, before going ahead processing the next updates in the queue. -The `discovery.zen.publish_timeout` is set by default to 30 seconds and is -measured from the moment the publishing started. Both timeout settings can be -changed dynamically through the <> - -[float] -[[no-master-block]] -==== No master block - -For the cluster to be fully operational, it must have an active master and the -number of running master eligible nodes must satisfy the -`discovery.zen.minimum_master_nodes` setting if set. The -`discovery.zen.no_master_block` settings controls what operations should be -rejected when there is no active master. 
- -The `discovery.zen.no_master_block` setting has two valid options: - -[horizontal] -`all`:: All operations on the node--i.e. both read & writes--will be rejected. -This also applies for api cluster state read or write operations, like the get -index settings, put mapping and cluster state api. -`write`:: (default) Write operations will be rejected. Read operations will -succeed, based on the last known cluster configuration. This may result in -partial reads of stale data as this node may be isolated from the rest of the -cluster. - -The `discovery.zen.no_master_block` setting doesn't apply to nodes-based apis -(for example cluster stats, node info and node stats apis). Requests to these -apis will not be blocked and can run on any available node. diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc index 9287e171129ff..a94f76c55de1f 100644 --- a/docs/reference/modules/node.asciidoc +++ b/docs/reference/modules/node.asciidoc @@ -19,7 +19,7 @@ purpose: <>:: A node that has `node.master` set to `true` (default), which makes it eligible -to be <>, which controls +to be <>, which controls the cluster. <>:: @@ -69,7 +69,7 @@ and deciding which shards to allocate to which nodes. It is important for cluster health to have a stable master node. Any master-eligible node (all nodes by default) may be elected to become the -master node by the <>. +master node by the <>. IMPORTANT: Master nodes must have access to the `data/` directory (just like `data` nodes) as this is where the cluster state is persisted between node restarts. @@ -105,74 +105,6 @@ NOTE: These settings apply only when {xpack} is not installed. To create a dedicated master-eligible node when {xpack} is installed, see <>. endif::include-xpack[] - -[float] -[[split-brain]] -==== Avoiding split brain with `minimum_master_nodes` - -To prevent data loss, it is vital to configure the -`discovery.zen.minimum_master_nodes` setting (which defaults to `1`) so that -each master-eligible node knows the _minimum number of master-eligible nodes_ -that must be visible in order to form a cluster. - -To explain, imagine that you have a cluster consisting of two master-eligible -nodes. A network failure breaks communication between these two nodes. Each -node sees one master-eligible node... itself. With `minimum_master_nodes` set -to the default of `1`, this is sufficient to form a cluster. Each node elects -itself as the new master (thinking that the other master-eligible node has -died) and the result is two clusters, or a _split brain_. These two nodes -will never rejoin until one node is restarted. Any data that has been written -to the restarted node will be lost. - -Now imagine that you have a cluster with three master-eligible nodes, and -`minimum_master_nodes` set to `2`. If a network split separates one node from -the other two nodes, the side with one node cannot see enough master-eligible -nodes and will realise that it cannot elect itself as master. The side with -two nodes will elect a new master (if needed) and continue functioning -correctly. As soon as the network split is resolved, the single node will -rejoin the cluster and start serving requests again. 
- -This setting should be set to a _quorum_ of master-eligible nodes: - - (master_eligible_nodes / 2) + 1 - -In other words, if there are three master-eligible nodes, then minimum master -nodes should be set to `(3 / 2) + 1` or `2`: - -[source,yaml] ----------------------------- -discovery.zen.minimum_master_nodes: 2 <1> ----------------------------- -<1> Defaults to `1`. - -To be able to remain available when one of the master-eligible nodes fails, -clusters should have at least three master-eligible nodes, with -`minimum_master_nodes` set accordingly. A <>, -performed without any downtime, also requires at least three master-eligible -nodes to avoid the possibility of data loss if a network split occurs while the -upgrade is in progress. - -This setting can also be changed dynamically on a live cluster with the -<>: - -[source,js] ----------------------------- -PUT _cluster/settings -{ - "transient": { - "discovery.zen.minimum_master_nodes": 2 - } -} ----------------------------- -// CONSOLE -// TEST[skip:Test use Zen2 now so we can't test Zen1 behaviour here] - -TIP: An advantage of splitting the master and data roles between dedicated -nodes is that you can have just three master-eligible nodes and set -`minimum_master_nodes` to `2`. You never have to change this setting, no -matter how many dedicated data nodes you add to the cluster. - - [float] [[data-node]] === Data Node diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index 48ae41ded2a78..7ee545d66cf0f 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -597,9 +597,8 @@ if the new cluster doesn't contain nodes with appropriate attributes that a rest index will not be successfully restored unless these index allocation settings are changed during restore operation. The restore operation also checks that restored persistent settings are compatible with the current cluster to avoid accidentally -restoring an incompatible settings such as `discovery.zen.minimum_master_nodes` and as a result disable a smaller cluster until the -required number of master eligible nodes is added. If you need to restore a snapshot with incompatible persistent settings, try -restoring it without the global cluster state. +restoring incompatible settings. If you need to restore a snapshot with incompatible persistent settings, try restoring it without +the global cluster state. [float] === Snapshot status diff --git a/docs/reference/monitoring/http-export.asciidoc b/docs/reference/monitoring/http-export.asciidoc index 4ba93f326370c..fce22bd5d78be 100644 --- a/docs/reference/monitoring/http-export.asciidoc +++ b/docs/reference/monitoring/http-export.asciidoc @@ -47,7 +47,7 @@ xpack.monitoring.exporters: uniquely defines the exporter but is otherwise unused. <3> `host` is a required setting for `http` exporters. It must specify the HTTP port rather than the transport port. The default port value is `9200`. -<4> User authentication for those using {security} or some other +<4> User authentication for those using {stack} {security-features} or some other form of user authentication protecting the cluster. <5> See <> for all TLS/SSL settings. If not supplied, the default node-level TLS/SSL settings are used. 
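Editor's note: the callouts above annotate an `http` exporter definition. As a hedged sketch only (the exporter name, host, and credentials below are placeholders and do not come from this diff), such a block in `elasticsearch.yml` might look like:

[source,yaml]
--------------------------------------------------
xpack.monitoring.exporters:
  my_remote_monitor:                  # arbitrary exporter name
    type: http
    host: ["http://es-mon-1:9200"]    # the HTTP port, not the transport port
    auth.username: remote_monitoring_user
    auth.password: changeme
--------------------------------------------------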
diff --git a/docs/reference/monitoring/local-export.asciidoc b/docs/reference/monitoring/local-export.asciidoc index 2bc757f07ecc8..821a6b1fc0e13 100644 --- a/docs/reference/monitoring/local-export.asciidoc +++ b/docs/reference/monitoring/local-export.asciidoc @@ -47,10 +47,10 @@ a message indicating that they are waiting for the resources to be set up. One benefit of the `local` exporter is that it lives within the cluster and therefore no extra configuration is required when the cluster is secured with -{security}. All operations, including indexing operations, that occur from a -`local` exporter make use of the internal transport mechanisms within {es}. This -behavior enables the exporter to be used without providing any user credentials -when {security} is enabled. +{stack} {security-features}. All operations, including indexing operations, that +occur from a `local` exporter make use of the internal transport mechanisms +within {es}. This behavior enables the exporter to be used without providing any +user credentials when {security-features} are enabled. For more information about the configuration options for the `local` exporter, see <>. diff --git a/docs/reference/query-dsl/script-score-query.asciidoc b/docs/reference/query-dsl/script-score-query.asciidoc index 5e8bf560140a8..dfd06a04523c3 100644 --- a/docs/reference/query-dsl/script-score-query.asciidoc +++ b/docs/reference/query-dsl/script-score-query.asciidoc @@ -39,7 +39,7 @@ GET /_search ==== Accessing the score of a document within a script Within a script, you can -<> +{ref}/modules-scripting-fields.html#scripting-score[access] the `_score` variable which represents the current relevance score of a document. @@ -132,8 +132,8 @@ these unique values need to be loaded into memory. [[decay-functions]] ===== Decay functions for numeric fields -You can read more about decay functions -<>. +You can read more about decay functions +{ref}/query-dsl-function-score-query.html#function-decay[here]. * `double decayNumericLinear(double origin, double scale, double offset, double decay, double docValue)` * `double decayNumericExp(double origin, double scale, double offset, double decay, double docValue)` diff --git a/docs/reference/query-dsl/special-queries.asciidoc b/docs/reference/query-dsl/special-queries.asciidoc index 4c69889040eb1..7d36d01c8d6f5 100644 --- a/docs/reference/query-dsl/special-queries.asciidoc +++ b/docs/reference/query-dsl/special-queries.asciidoc @@ -14,6 +14,10 @@ or collection of documents. This query allows a script to act as a filter. Also see the <>. +<>:: + +A query that allows to modify the score of a sub-query with a script. + <>:: This query finds queries that are stored as documents that match with @@ -32,6 +36,8 @@ include::mlt-query.asciidoc[] include::script-query.asciidoc[] +include::script-score-query.asciidoc[] + include::percolate-query.asciidoc[] include::feature-query.asciidoc[] diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index f07d1d09747e7..fe2954b015a02 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -560,3 +560,19 @@ See <>. The standard token filter has been removed. +[role="exclude",id="modules-discovery-azure-classic"] + +See <>. + +[role="exclude",id="modules-discovery-ec2"] + +See <>. + +[role="exclude",id="modules-discovery-gce"] + +See <>. + +[role="exclude",id="modules-discovery-zen"] + +Zen discovery is replaced by the <>. 
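Editor's note: tying back to the `script_score` documentation touched above, the decay functions it lists could be used roughly as in the following hedged sketch. The index, field names, and parameter values are invented for illustration and are not taken from this diff:

[source,js]
--------------------------------------------------
GET /_search
{
  "query": {
    "script_score": {
      "query": { "match": { "body": "elasticsearch" } },
      "script": {
        "source": "decayNumericLinear(params.origin, params.scale, params.offset, params.decay, doc['page_rank'].value)",
        "params": { "origin": 20, "scale": 10, "offset": 0, "decay": 0.5 }
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE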
diff --git a/docs/reference/release-notes/7.0.0-alpha1.asciidoc b/docs/reference/release-notes/7.0.0-alpha1.asciidoc index 36339fa4b9b26..758e88d34024c 100644 --- a/docs/reference/release-notes/7.0.0-alpha1.asciidoc +++ b/docs/reference/release-notes/7.0.0-alpha1.asciidoc @@ -30,10 +30,3 @@ Suggesters:: explicitly indicate the type of suggestion that they produce. Existing plugins will require changes to their plugin registration. See the `custom-suggester` example plugin {pull}30284[#30284] - -[float] -[[enhancement-7.0.0-alpha1]] -=== Enhancements - -Machine learning:: -* Adds categorical filter type to detector rules. {ml-pull}27[#27] \ No newline at end of file diff --git a/docs/reference/release-notes/7.0.0-alpha2.asciidoc b/docs/reference/release-notes/7.0.0-alpha2.asciidoc index f6d3172ce01d9..7f66d21408224 100644 --- a/docs/reference/release-notes/7.0.0-alpha2.asciidoc +++ b/docs/reference/release-notes/7.0.0-alpha2.asciidoc @@ -1,10 +1,585 @@ [[release-notes-7.0.0-alpha2]] == {es} version 7.0.0-alpha2 -coming[7.0.0-alpha2] +[[breaking-7.0.0-alpha2]] +[float] +=== Breaking changes + +Authentication:: +* Enhance Invalidate Token API {pull}35388[#35388] (issues: {issue}34556[#34556], {issue}35115[#35115]) + +Circuit Breakers:: +* Lower fielddata circuit breaker's default limit {pull}27162[#27162] (issue: {issue}27130[#27130]) + +CCR:: +* Change get autofollow patterns API response format {pull}36203[#36203] (issue: {issue}36049[#36049]) + +Index APIs:: +* Always enforce cluster-wide shard limit {pull}34892[#34892] (issues: {issue}20705[#20705], {issue}34021[#34021]) + +Ranking:: +* Forbid negative scores in functon_score query {pull}35709[#35709] (issue: {issue}33309[#33309]) + +Scripting:: +* Delete deprecated getValues from ScriptDocValues {pull}36183[#36183] (issue: {issue}22919[#22919]) + +Search:: +* Remove the deprecated _termvector endpoint. {pull}36131[#36131] (issues: {issue}36098[#36098], {issue}8484[#8484]) +* Remove deprecated Graph endpoints {pull}35956[#35956] +* Validate metdata on `_msearch` {pull}35938[#35938] (issue: {issue}35869[#35869]) +* Make hits.total an object in the search response {pull}35849[#35849] (issue: {issue}33028[#33028]) +* Remove the distinction between query and filter context in QueryBuilders {pull}35354[#35354] (issue: {issue}35293[#35293]) +* Throw a parsing exception when boost is set in span_or query (#28390) {pull}34112[#34112] (issue: {issue}28390[#28390]) + +ZenDiscovery:: +* Best-effort cluster formation if unconfigured {pull}36215[#36215] + +[[breaking-java-7.0.0-alpha2]] +[float] +=== Breaking Java changes + +ZenDiscovery:: +* Make node field in JoinRequest private {pull}36405[#36405] + +[[deprecation-7.0.0-alpha2]] +[float] +=== Deprecations + +Core:: +* Deprecate use of scientific notation in epoch time parsing {pull}36691[#36691] +* Add backcompat for joda time formats {pull}36531[#36531] + +Machine Learning:: +* Deprecate X-Pack centric ML endpoints {pull}36315[#36315] (issue: {issue}35958[#35958]) + +Mapping:: +* Deprecate types in index API {pull}36575[#36575] (issues: {issue}35190[#35190], {issue}35790[#35790]) +* Deprecate uses of _type as a field name in queries {pull}36503[#36503] (issue: {issue}35190[#35190]) +* Deprecate types in update_by_query and delete_by_query {pull}36365[#36365] (issue: {issue}35190[#35190]) +* For msearch templates, make sure to use the right name for deprecation logging. {pull}36344[#36344] +* Deprecate types in termvector and mtermvector requests. 
{pull}36182[#36182] +* Deprecate types in update requests. {pull}36181[#36181] +* Deprecate types in document delete requests. {pull}36087[#36087] +* Deprecate types in get, exists, and multi get. {pull}35930[#35930] +* Deprecate types in search and multi search templates. {pull}35669[#35669] +* Deprecate types in explain requests. {pull}35611[#35611] +* Deprecate types in validate query requests. {pull}35575[#35575] +* Deprecate types in count and msearch. {pull}35421[#35421] (issue: {issue}34041[#34041]) + +Migration:: +* Deprecate X-Pack centric Migration endpoints {pull}35976[#35976] (issue: {issue}35958[#35958]) + +Monitoring:: +* Deprecate /_xpack/monitoring/* in favor of /_monitoring/* {pull}36130[#36130] (issue: {issue}35958[#35958]) + +Rollup:: +* Re-deprecate xpack rollup endpoints {pull}36451[#36451] (issue: {issue}36044[#36044]) +* Deprecate X-Pack centric rollup endpoints {pull}35962[#35962] (issue: {issue}35958[#35958]) + +Scripting:: +* Adds deprecation logging to ScriptDocValues#getValues. {pull}34279[#34279] (issue: {issue}22919[#22919]) +* Conditionally use java time api in scripting {pull}31441[#31441] + +Search:: +* Remove X-Pack centric graph endpoints {pull}36010[#36010] (issue: {issue}35958[#35958]) + +Security:: +* Deprecate X-Pack centric license endpoints {pull}35959[#35959] (issue: {issue}35958[#35958]) +* Deprecate /_xpack/security/* in favor of /_security/* {pull}36293[#36293] (issue: {issue}35958[#35958]) + +SQL:: +* Deprecate X-Pack SQL translate endpoint {pull}36030[#36030] +* Deprecate X-Pack centric SQL endpoints {pull}35964[#35964] (issue: {issue}35958[#35958]) + +Watcher:: +* Deprecate X-Pack centric watcher endpoints {pull}36218[#36218] (issue: {issue}35958[#35958]) + + +[[feature-7.0.0-alpha2]] +[float] +=== New features + +Analysis:: +* Add support for inlined user dictionary in Nori {pull}36123[#36123] (issue: {issue}35842[#35842]) +* Add a prebuilt ICU Analyzer {pull}34958[#34958] (issue: {issue}34285[#34285]) + +Java High Level REST Client:: +* Add rollup search {pull}36334[#36334] (issue: {issue}29827[#29827]) + +Java Low Level REST Client:: +* Make warning behavior pluggable per request {pull}36345[#36345] +* Add PreferHasAttributeNodeSelector {pull}36005[#36005] + +Geo:: +* Integrate Lucene's LatLonShape (BKD Backed GeoShapes) as default `geo_shape` indexing approach {pull}36751[#36751] (issue: {issue}35320[#35320]) +* Integrate Lucene's LatLonShape (BKD Backed GeoShapes) as default `geo_shape` indexing approach {pull}35320[#35320] (issue: {issue}32039[#32039]) + +Machine Learning:: +* Add delayed datacheck to the datafeed job runner {pull}35387[#35387] (issue: {issue}35131[#35131]) + +Mapping:: +* Make typeless APIs usable with indices whose type name is different from `_doc` {pull}35790[#35790] (issue: {issue}35190[#35190]) +SQL:: +* Introduce HISTOGRAM grouping function {pull}36510[#36510] (issue: {issue}36509[#36509]) +* DATABASE() and USER() system functions {pull}35946[#35946] (issue: {issue}35863[#35863]) +* Introduce INTERVAL support {pull}35521[#35521] (issue: {issue}29990[#29990]) + +Search:: +* Add intervals query {pull}36135[#36135] (issues: {issue}29636[#29636], {issue}32406[#32406]) +* Added soft limit to open scroll contexts #25244 {pull}36009[#36009] (issue: {issue}25244[#25244]) + +[[enhancement-7.0.0-alpha2]] [float] +=== Enhancements + +Aggregations:: +* Added keyed response to pipeline percentile aggregations 22302 {pull}36392[#36392] (issue: {issue}22302[#22302]) +* Enforce max_buckets limit only in the final reduction phase 
{pull}36152[#36152] (issues: {issue}32125[#32125], {issue}35921[#35921]) +* Histogram aggs: add empty buckets only in the final reduce step {pull}35921[#35921] +* Handles exists query in composite aggs {pull}35758[#35758] +* Added parent validation for auto date histogram {pull}35670[#35670] + +Analysis:: +* Allow word_delimiter_graph_filter to not adjust internal offsets {pull}36699[#36699] (issues: {issue}33710[#33710], {issue}34741[#34741]) +* Ensure TokenFilters only produce single tokens when parsing synonyms {pull}34331[#34331] (issue: {issue}34298[#34298]) + +Audit:: +* Add "request.id" to file audit logs {pull}35536[#35536] + +Authentication:: +* Invalidate Token API enhancements - HLRC {pull}36362[#36362] (issue: {issue}35388[#35388]) +* Add DEBUG/TRACE logs for LDAP bind {pull}36028[#36028] +* Add Tests for findSamlRealm {pull}35905[#35905] +* Add realm information for Authenticate API {pull}35648[#35648] +* Formal support for "password_hash" in Put User {pull}35242[#35242] (issue: {issue}34729[#34729]) + +Authorization:: +* Improve exact index matching performance {pull}36017[#36017] +* `manage_token` privilege for `kibana_system` {pull}35751[#35751] +* Grant .tasks access to kibana_system role {pull}35573[#35573] + +Build:: +* Sounds like typo in exception message {pull}35458[#35458] +* Allow set section in setup section of REST tests {pull}34678[#34678] + +CCR:: +* Add time since last auto follow fetch to auto follow stats {pull}36542[#36542] (issues: {issue}33007[#33007], {issue}35895[#35895]) +* Clean followed leader index UUIDs in auto follow metadata {pull}36408[#36408] (issue: {issue}33007[#33007]) +* Change AutofollowCoordinator to use wait_for_metadata_version {pull}36264[#36264] (issues: {issue}33007[#33007], {issue}35895[#35895]) +* Refactor AutoFollowCoordinator to track leader indices per remote cluster {pull}36031[#36031] (issues: {issue}33007[#33007], {issue}35895[#35895]) + +Core:: +* Override the JVM DNS cache policy {pull}36570[#36570] +* Replace usages of AtomicBoolean based block of code by the RunOnce class {pull}35553[#35553] (issue: {issue}35489[#35489]) +* Added wait_for_metadata_version parameter to cluster state api. 
{pull}35535[#35535] +* Extract RunOnce into a dedicated class {pull}35489[#35489] +* Introduce elasticsearch-core jar {pull}28191[#28191] (issue: {issue}27933[#27933]) +* Rename core module to server {pull}28180[#28180] (issue: {issue}27933[#27933]) + +CRUD:: +* Rename seq# powered optimistic concurrency control parameters to ifSeqNo/ifPrimaryTerm {pull}36757[#36757] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Expose Sequence Number based Optimistic Concurrency Control in the rest layer {pull}36721[#36721] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Add doc's sequence number + primary term to GetResult and use it for updates {pull}36680[#36680] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Add seq no powered optimistic locking support to the index and delete transport actions {pull}36619[#36619] (issues: {issue}10708[#10708], {issue}36148[#36148]) + +Distributed:: +* [Close Index API] Mark shard copy as stale if needed during shard verification {pull}36755[#36755] +* [Close Index API] Refactor MetaDataIndexStateService {pull}36354[#36354] (issue: {issue}36249[#36249]) +* [Close Index API] Add TransportShardCloseAction for pre-closing verifications {pull}36249[#36249] +* TransportResyncReplicationAction should not honour blocks {pull}35795[#35795] (issues: {issue}35332[#35332], {issue}35597[#35597]) +* Expose all permits acquisition in IndexShard and TransportReplicationAction {pull}35540[#35540] (issue: {issue}33888[#33888]) +* [RCI] Check blocks while having index shard permit in TransportReplicationAction {pull}35332[#35332] (issue: {issue}33888[#33888]) + +Engine:: +* Add sequence numbers based optimistic concurrency control support to Engine {pull}36467[#36467] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Require soft-deletes when access changes snapshot {pull}36446[#36446] +* Use delCount of SegmentInfos to calculate numDocs {pull}36323[#36323] +* Always configure soft-deletes field of IndexWriterConfig {pull}36196[#36196] (issue: {issue}36141[#36141]) +* Enable soft-deletes by default on 7.0.0 or later {pull}36141[#36141] +* Always return false from `refreshNeeded` on ReadOnlyEngine {pull}35837[#35837] (issue: {issue}35785[#35785]) +* Add a `_freeze` / `_unfreeze` API {pull}35592[#35592] (issue: {issue}34352[#34352]) +* [RCI] Add IndexShardOperationPermits.asyncBlockOperations(ActionListener) {pull}34902[#34902] (issue: {issue}33888[#33888]) + +Features:: +* Simplify deprecation issue levels {pull}36326[#36326] + +Index APIs:: +* Add cluster-wide shard limit warnings {pull}34021[#34021] (issues: {issue}20705[#20705], {issue}32856[#32856]) + +Ingest:: +* Grok fix duplicate patterns JAVACLASS and JAVAFILE {pull}35886[#35886] +* Implement Drop Processor {pull}32278[#32278] (issue: {issue}23726[#23726]) + +Java High Level REST Client:: +* Add get users action {pull}36332[#36332] (issue: {issue}29827[#29827]) +* Add delete template API {pull}36320[#36320] (issue: {issue}27205[#27205]) +* Implement get-user-privileges API {pull}36292[#36292] +* Get Deprecation Info API {pull}36279[#36279] (issue: {issue}29827[#29827]) +* Add support for Follow Stats API {pull}36253[#36253] (issue: {issue}33824[#33824]) +* Add support for CCR Stats API {pull}36213[#36213] (issue: {issue}33824[#33824]) +* Put Role {pull}36209[#36209] (issue: {issue}29827[#29827]) +* Add index templates exist API {pull}36132[#36132] (issue: {issue}27205[#27205]) +* Add support for CCR Get Auto Follow Pattern apis {pull}36049[#36049] (issue: {issue}33824[#33824]) +* Add support for 
CCR Delete Auto Follow Pattern API {pull}35981[#35981] (issue: {issue}33824[#33824]) +* Remove fromXContent from IndexUpgradeInfoResponse {pull}35934[#35934] +* Add delete expired data API {pull}35906[#35906] (issue: {issue}29827[#29827]) +* Execute watch API {pull}35868[#35868] (issue: {issue}29827[#29827]) +* Add ability to put user with a password hash {pull}35844[#35844] (issue: {issue}35242[#35242]) +* Add ML find file structure API {pull}35833[#35833] (issue: {issue}29827[#29827]) +* Add support for get roles API {pull}35787[#35787] (issue: {issue}29827[#29827]) +* Added support for CCR Put Auto Follow Pattern API {pull}35780[#35780] (issue: {issue}33824[#33824]) +* XPack ML info action {pull}35777[#35777] (issue: {issue}29827[#29827]) +* ML Delete event from Calendar {pull}35760[#35760] (issue: {issue}29827[#29827]) +* Add ML revert model snapshot API {pull}35750[#35750] (issue: {issue}29827[#29827]) +* ML Get Calendar Events {pull}35747[#35747] (issue: {issue}29827[#29827]) +* Add high-level REST client API for `_freeze` and `_unfreeze` {pull}35723[#35723] (issue: {issue}34352[#34352]) +* Fix issue in equals impl for GlobalOperationPrivileges {pull}35721[#35721] +* ML Delete job from calendar {pull}35713[#35713] (issue: {issue}29827[#29827]) +* ML Add Event To Calendar API {pull}35704[#35704] (issue: {issue}29827[#29827]) +* Add ML update model snapshot API (#35537) {pull}35694[#35694] (issue: {issue}29827[#29827]) +* Add support for CCR Unfollow API {pull}35693[#35693] (issue: {issue}33824[#33824]) +* Clean up PutLicenseResponse {pull}35689[#35689] (issue: {issue}35547[#35547]) +* Clean up StartBasicResponse {pull}35688[#35688] (issue: {issue}35547[#35547]) +* Add support for put privileges API {pull}35679[#35679] +* ML Add Job to Calendar API {pull}35666[#35666] (issue: {issue}29827[#29827]) +* Add support for CCR Resume Follow API {pull}35638[#35638] (issue: {issue}33824[#33824]) +* Add support for get application privileges API {pull}35556[#35556] (issue: {issue}29827[#29827]) +* Clean up XPackInfoResponse class and related tests {pull}35547[#35547] +* Add parameters to stopRollupJob API {pull}35545[#35545] (issue: {issue}34811[#34811]) +* Add ML delete model snapshot API {pull}35537[#35537] (issue: {issue}29827[#29827]) +* Add get watch API {pull}35531[#35531] (issue: {issue}29827[#29827]) +* Add ML Update Filter API {pull}35522[#35522] (issue: {issue}29827[#29827]) +* Add ml get filters api {pull}35502[#35502] (issue: {issue}29827[#29827]) +* Add ML get model snapshots API {pull}35487[#35487] (issue: {issue}29827[#29827]) +* Add "_has_privileges" API to Security Client {pull}35479[#35479] (issue: {issue}29827[#29827]) +* Add Delete Privileges API to HLRC {pull}35454[#35454] (issue: {issue}29827[#29827]) +* Add support for CCR Put Follow API {pull}35409[#35409] +* Add ML delete filter action {pull}35382[#35382] (issue: {issue}29827[#29827]) +* Add delete user action {pull}35294[#35294] (issue: {issue}29827[#29827]) +* HLRC for _mtermvectors {pull}35266[#35266] (issues: {issue}27205[#27205], {issue}33447[#33447]) +* Reindex API with wait_for_completion false {pull}35202[#35202] (issue: {issue}27205[#27205]) +* Add watcher stats API {pull}35185[#35185] (issue: {issue}29827[#29827]) +* HLRC support for getTask {pull}35166[#35166] (issue: {issue}27205[#27205]) +* Add GetRollupIndexCaps API {pull}35102[#35102] (issue: {issue}29827[#29827]) +* HLRC: migration api - upgrade {pull}34898[#34898] (issue: {issue}29827[#29827]) +* Add stop rollup job support to HL REST Client 
{pull}34702[#34702] (issue: {issue}29827[#29827]) +* Bulk Api support for global parameters {pull}34528[#34528] (issue: {issue}26026[#26026]) +* Add delete rollup job support to HL REST Client {pull}34066[#34066] (issue: {issue}29827[#29827]) +* Add support for get license basic/trial status API {pull}33176[#33176] (issue: {issue}29827[#29827]) +* Add machine learning open job {pull}32860[#32860] (issue: {issue}29827[#29827]) +* Add ML HLRC wrapper and put_job API call {pull}32726[#32726] +* Add Get Snapshots High Level REST API {pull}31537[#31537] (issue: {issue}27205[#27205]) + +Java Low Level REST Client:: +* On retry timeout add root exception {pull}25576[#25576] + +Monitoring:: +* Make Exporters Async {pull}35765[#35765] (issue: {issue}35743[#35743]) + +Geo:: +* Adds a name of the field to geopoint parsing errors {pull}36529[#36529] (issue: {issue}15965[#15965]) +* Add support to ShapeBuilders for building Lucene geometry {pull}35707[#35707] (issue: {issue}35320[#35320]) +* Add ST_WktToSQL function {pull}35416[#35416] (issue: {issue}29872[#29872]) + +License:: +* Require acknowledgement to start_trial license {pull}30135[#30135] (issue: {issue}30134[#30134]) + +Machine Learning:: +* Create the ML annotations index {pull}36731[#36731] (issues: {issue}26034[#26034], {issue}33376[#33376]) +* Split in batches and migrate all jobs and datafeeds {pull}36716[#36716] (issue: {issue}32905[#32905]) +* Add cluster setting to enable/disable config migration {pull}36700[#36700] (issue: {issue}32905[#32905]) +* Add audits when deprecation warnings occur with datafeed start {pull}36233[#36233] +* Add lazy parsing for DatafeedConfig:Aggs,Query {pull}36117[#36117] +* Add support for lazy nodes (#29991) {pull}34538[#34538] (issue: {issue}29991[#29991]) + +Network:: +* Unify transport settings naming {pull}36623[#36623] +* Add sni name to SSLEngine in netty transport {pull}33144[#33144] (issue: {issue}32517[#32517]) +* Add cors support to NioHttpServerTransport {pull}30827[#30827] (issue: {issue}28898[#28898]) +* Reintroduce mandatory http pipelining support {pull}30820[#30820] +* Make http pipelining support mandatory {pull}30695[#30695] (issues: {issue}28898[#28898], {issue}29500[#29500]) +* Add nio http server transport {pull}29587[#29587] (issue: {issue}28898[#28898]) +* Selectors operate on channel contexts {pull}28468[#28468] (issue: {issue}27260[#27260]) +* Unify nio read / write channel contexts {pull}28160[#28160] (issue: {issue}27260[#27260]) +* Create nio-transport plugin for NioTransport {pull}27949[#27949] (issue: {issue}27260[#27260]) +* Add elasticsearch-nio jar for base nio classes {pull}27801[#27801] (issue: {issue}27802[#27802]) +* Add NioGroup for use in different transports {pull}27737[#27737] (issue: {issue}27260[#27260]) +* Add read timeouts to http module {pull}27713[#27713] +* Implement byte array reusage in `NioTransport` {pull}27696[#27696] (issue: {issue}27563[#27563]) +* Introduce resizable inbound byte buffer {pull}27551[#27551] (issue: {issue}27563[#27563]) +* Decouple nio constructs from the tcp transport {pull}27484[#27484] (issue: {issue}27260[#27260]) +* Remove manual tracking of registered channels {pull}27445[#27445] (issue: {issue}27260[#27260]) +* Remove tcp profile from low level nio channel {pull}27441[#27441] (issue: {issue}27260[#27260]) +* Decouple `ChannelFactory` from Tcp classes {pull}27286[#27286] (issue: {issue}27260[#27260]) + +Packaging:: +* Introduce Docker images build {pull}36246[#36246] +* Move creation of temporary directory to Java 
{pull}36002[#36002] (issue: {issue}31003[#31003]) + +Plugins:: +* Plugin install: don't print download progress in batch mode {pull}36361[#36361] + +Ranking:: +* Vector field {pull}33022[#33022] (issue: {issue}31615[#31615]) + +Recovery:: +* Exposed engine must include all operations below global checkpoint during rollback {pull}36159[#36159] (issue: {issue}32867[#32867]) + +Rollup:: +* Add non-X-Pack centric rollup endpoints {pull}36383[#36383] (issues: {issue}35958[#35958], {issue}35962[#35962]) +* Add more diagnostic stats to job {pull}35471[#35471] +* Add `wait_for_completion` option to StopRollupJob API {pull}34811[#34811] (issue: {issue}34574[#34574]) + +Scripting:: +* Update joda compat methods to use compat class {pull}36654[#36654] +* [Painless] Add boxed type to boxed type casts for method/return {pull}36571[#36571] +* [Painless] Add def to boxed type casts {pull}36506[#36506] + +Settings:: +* Add user-defined cluster metadata {pull}33325[#33325] (issue: {issue}33220[#33220]) + +Search:: +* Add copy constructor to SearchRequest {pull}36641[#36641] (issue: {issue}32125[#32125]) +* Add raw sort values to SearchSortValues transport serialization {pull}36617[#36617] (issue: {issue}32125[#32125]) +* Add sort and collapse info to SearchHits transport serialization {pull}36555[#36555] (issue: {issue}32125[#32125]) +* Add default methods to DocValueFormat {pull}36480[#36480] +* Respect indices options on _msearch {pull}35887[#35887] +* Allow efficient can_match phases on frozen indices {pull}35431[#35431] (issues: {issue}34352[#34352], {issue}34357[#34357]) +* Add a new query type - ScriptScoreQuery {pull}34533[#34533] (issues: {issue}23850[#23850], {issue}27588[#27588], {issue}30303[#30303]) + +Security:: +* Make credentials mandatory when launching xpack/migrate {pull}36197[#36197] (issues: {issue}29847[#29847], {issue}33972[#33972]) + +Snapshot/Restore:: +* Allow Parallel Restore Operations {pull}36397[#36397] +* Repo Creation out of ClusterStateTask {pull}36157[#36157] (issue: {issue}9488[#9488]) +* Add read-only repository verification {pull}35731[#35731] (issue: {issue}35703[#35703]) + +SQL:: +* Extend the ODBC metric by differentiating between 32 and 64bit platforms {pull}36753[#36753] (issue: {issue}36740[#36740]) +* Fix wrong appliance of StackOverflow limit for IN {pull}36724[#36724] (issue: {issue}36592[#36592]) +* Introduce NOW/CURRENT_TIMESTAMP function {pull}36562[#36562] (issue: {issue}36534[#36534]) +* Move requests' parameters to requests JSON body {pull}36149[#36149] (issue: {issue}35992[#35992]) +* Make INTERVAL millis optional {pull}36043[#36043] (issue: {issue}36032[#36032]) +* Implement data type verification for conditionals {pull}35916[#35916] (issue: {issue}35907[#35907]) +* Implement GREATEST and LEAST functions {pull}35879[#35879] (issue: {issue}35878[#35878]) +* Implement null safe equality operator `<=>` {pull}35873[#35873] (issue: {issue}35871[#35871]) +* SYS COLUMNS returns ODBC specific schema {pull}35870[#35870] (issue: {issue}35376[#35376]) +* Polish grammar for intervals {pull}35853[#35853] +* Add filtering to SYS TYPES {pull}35852[#35852] (issue: {issue}35342[#35342]) +* Implement NULLIF(expr1, expr2) function {pull}35826[#35826] (issue: {issue}35818[#35818]) +* Lock down JDBC driver {pull}35798[#35798] (issue: {issue}35437[#35437]) +* Implement NVL(expr1, expr2) {pull}35794[#35794] (issue: {issue}35782[#35782]) +* Implement ISNULL(expr1, expr2) {pull}35793[#35793] (issue: {issue}35781[#35781]) +* Implement IFNULL variant of COALESCE 
{pull}35762[#35762] (issue: {issue}35749[#35749]) +* XPack FeatureSet functionality {pull}35725[#35725] (issue: {issue}34821[#34821]) +* Perform lazy evaluation of mismatched mappings {pull}35676[#35676] (issues: {issue}35659[#35659], {issue}35675[#35675]) +* Improve validation of unsupported fields {pull}35675[#35675] (issue: {issue}35673[#35673]) +* Move internals from Joda to java.time {pull}35649[#35649] (issue: {issue}35633[#35633]) + +Stats:: +* Handle OS pretty name on old OS without OS release {pull}35453[#35453] (issue: {issue}35440[#35440]) + +Task Management:: +* Periodically try to reassign unassigned persistent tasks {pull}36069[#36069] (issue: {issue}35792[#35792]) +* Only require task permissions {pull}35667[#35667] (issue: {issue}35573[#35573]) +* Retry if task can't be written {pull}35054[#35054] (issue: {issue}33764[#33764]) + +ZenDiscovery:: +* Add discovery types to cluster stats {pull}36442[#36442] +* Introduce `zen2` discovery type {pull}36298[#36298] +* Zen2: Persist cluster states the old way on non-master-eligible nodes {pull}36247[#36247] (issue: {issue}3[#3]) +* [Zen2] Storage layer WriteStateException propagation {pull}36052[#36052] +* [Zen2] Implement Tombstone REST APIs {pull}36007[#36007] +* [Zen2] Update default for USE_ZEN2 to true {pull}35998[#35998] +* [Zen2] Add warning if cluster fails to form fast enough {pull}35993[#35993] +* [Zen2] Allow Setting a List of Bootstrap Nodes to Wait for {pull}35847[#35847] +* [Zen2] VotingTombstone class {pull}35832[#35832] +* [Zen2] PersistedState interface implementation {pull}35819[#35819] +* [Zen2] Support rolling upgrades from Zen1 {pull}35737[#35737] +* [Zen2] Add lag detector {pull}35685[#35685] +* [Zen2] Move ClusterState fields to be persisted to ClusterState.MetaData {pull}35625[#35625] +* [Zen2] Introduce ClusterBootstrapService {pull}35488[#35488] +* [Zen2] Introduce vote withdrawal {pull}35446[#35446] +* Zen2: Add basic Zen1 transport-level BWC {pull}35443[#35443] + [[bug-7.0.0-alpha2]] +[float] === Bug fixes -* Fixes CPoissonMeanConjugate sampling error. 
{ml-pull}335[#335] +Aggregations:: +* fix MultiValuesSourceFieldConfig toXContent {pull}36525[#36525] (issue: {issue}36474[#36474]) +* Cache the score of the parent document in the nested agg {pull}36019[#36019] (issues: {issue}34555[#34555], {issue}35985[#35985]) +* Correct implemented interface of ParsedReverseNested {pull}35455[#35455] (issue: {issue}35449[#35449]) +* Handle IndexOrDocValuesQuery in composite aggregation {pull}35392[#35392] + +Audit:: +* Fix origin.type for connection_* events {pull}36410[#36410] +* Fix IndexAuditTrail rolling restart on rollover edge {pull}35988[#35988] (issue: {issue}33867[#33867]) + +Authentication:: +* Fix kerberos setting registration {pull}35986[#35986] (issues: {issue}30241[#30241], {issue}35942[#35942]) +* Add support for Kerberos V5 Oid {pull}35764[#35764] (issue: {issue}34763[#34763]) + +Build:: +* Use explicit deps on test tasks for check {pull}36325[#36325] +* Fix jdbc jar pom to not include deps {pull}36036[#36036] (issue: {issue}32014[#32014]) +* Fix official plugins list {pull}35661[#35661] (issue: {issue}35623[#35623]) + +CCR:: +* Fix follow stats API's follower index filtering feature {pull}36647[#36647] +* AutoFollowCoordinator should tolerate that auto follow patterns may be removed {pull}35945[#35945] (issue: {issue}35937[#35937]) +* Only auto follow indices when all primary shards have started {pull}35814[#35814] (issue: {issue}35480[#35480]) +* Avoid NPE in follower stats when no tasks metadata {pull}35802[#35802] +* Fix the names of CCR stats endpoints in usage API {pull}35438[#35438] + +Circuit Breakers:: +* Modify `BigArrays` to take name of circuit breaker {pull}36461[#36461] (issue: {issue}31435[#31435]) + +Core:: +* Fix CompositeBytesReference#slice to not throw AIOOBE with legal offsets. {pull}35955[#35955] (issue: {issue}35950[#35950]) +* Suppress CachedTimeThread in hot threads output {pull}35558[#35558] (issue: {issue}23175[#23175]) +* Upgrade to Joda 2.10.1 {pull}35410[#35410] (issue: {issue}33749[#33749]) + +Distributed:: +* Combine the execution of an exclusive replica operation with primary term update {pull}36116[#36116] (issue: {issue}35850[#35850]) +* ActiveShardCount should not fail when closing the index {pull}35936[#35936] + +Engine:: +* Set Lucene version upon index creation. 
{pull}36038[#36038] (issue: {issue}33826[#33826]) +* Wrap can_match reader with ElasticsearchDirectoryReader {pull}35857[#35857] +* Copy checkpoint atomically when rolling generation {pull}35407[#35407] + +Geo:: +* More robust handling of ignore_malformed in geoshape parsing {pull}35603[#35603] (issues: {issue}34047[#34047], {issue}34498[#34498]) +* Better handling of malformed geo_points {pull}35554[#35554] (issue: {issue}35419[#35419]) +* Enables coerce support in WKT polygon parser {pull}35414[#35414] (issue: {issue}35059[#35059]) + +Index APIs:: +* Fix duplicate phrase in shrink/split error message {pull}36734[#36734] (issue: {issue}36729[#36729]) +* Raise a 404 exception when document source is not found (#33384) {pull}34083[#34083] (issue: {issue}33384[#33384]) + +Ingest:: +* Fix on_failure with Drop processor {pull}36686[#36686] (issue: {issue}36151[#36151]) +* Support default pipelines + bulk upserts {pull}36618[#36618] (issue: {issue}36219[#36219]) +* Support default pipeline through an alias {pull}36231[#36231] (issue: {issue}35817[#35817]) + +License:: +* Do not serialize basic license exp in x-pack info {pull}30848[#30848] +* Update versions for start_trial after backport {pull}30218[#30218] (issue: {issue}30135[#30135]) + +Machine Learning:: +* Interrupt Grok in file structure finder timeout {pull}36588[#36588] +* Prevent stack overflow while copying ML jobs and datafeeds {pull}36370[#36370] (issue: {issue}36360[#36360]) +* Adjust file structure finder parser config {pull}35935[#35935] +* Fix find_file_structure NPE with should_trim_fields {pull}35465[#35465] (issue: {issue}35462[#35462]) +* Prevent notifications being created on deletion of a non existent job {pull}35337[#35337] (issues: {issue}34058[#34058], {issue}35336[#35336]) +* Clear Job#finished_time when it is opened (#32605) {pull}32755[#32755] +* Fix thread leak when waiting for job flush (#32196) {pull}32541[#32541] (issue: {issue}32196[#32196]) +* Fix CPoissonMeanConjugate sampling error. 
{ml-pull}335[#335] + +Network:: +* Do not resolve addresses in remote connection info {pull}36671[#36671] (issue: {issue}35658[#35658]) +* Always compress based on the settings {pull}36522[#36522] (issue: {issue}36399[#36399]) +* http.publish_host Should Contain CNAME {pull}32806[#32806] (issue: {issue}22029[#22029]) +* Adjust SSLDriver behavior for JDK11 changes {pull}32145[#32145] (issues: {issue}32122[#32122], {issue}32144[#32144]) +* Add TRACE, CONNECT, and PATCH http methods {pull}31035[#31035] (issue: {issue}31017[#31017]) +* Transport client: Don't validate node in handshake {pull}30737[#30737] (issue: {issue}30141[#30141]) +* Fix issue with finishing handshake in ssl driver {pull}30580[#30580] +* Remove potential nio selector leak {pull}27825[#27825] +* Fix issue where the incorrect buffers are written {pull}27695[#27695] (issue: {issue}27551[#27551]) +* Do not set SO_LINGER on server channels {pull}26997[#26997] +* Do not set SO_LINGER to 0 when not shutting down {pull}26871[#26871] (issue: {issue}26764[#26764]) +* Release pipelined http responses on close {pull}26226[#26226] + +Packaging:: +* Fix error message when package install fails due to missing Java {pull}36077[#36077] (issue: {issue}31845[#31845]) +* Add missing entries to conffiles {pull}35810[#35810] (issue: {issue}35691[#35691]) + +Plugins:: +* Ensure that azure stream has socket privileges {pull}28751[#28751] (issue: {issue}28662[#28662]) + +Recovery:: +* Register ResyncTask.Status as a NamedWriteable {pull}36610[#36610] + +Rollup:: +* Fix rollup search statistics {pull}36674[#36674] + +Scripting:: +* Properly support no-offset date formatting {pull}36316[#36316] (issue: {issue}36306[#36306]) +* [Painless] Generate Bridge Methods {pull}36097[#36097] +* Fix serialization bug in painless execute api request {pull}36075[#36075] (issue: {issue}36050[#36050]) +* Actually add joda time back to whitelist {pull}35965[#35965] (issue: {issue}35915[#35915]) +* Add back joda to whitelist {pull}35915[#35915] (issue: {issue}35913[#35913]) + +Settings:: +* Correctly Identify Noop Updates {pull}36560[#36560] (issue: {issue}36496[#36496]) + +SQL:: +* Fix translation of LIKE/RLIKE keywords {pull}36672[#36672] (issues: {issue}36039[#36039], {issue}36584[#36584]) +* Scripting support for casting functions CAST and CONVERT {pull}36640[#36640] (issue: {issue}36061[#36061]) +* Fix translation to painless for conditionals {pull}36636[#36636] (issue: {issue}36631[#36631]) +* Concat should be always not nullable {pull}36601[#36601] (issue: {issue}36169[#36169]) +* Fix MOD() for long and integer arguments {pull}36599[#36599] (issue: {issue}36364[#36364]) +* Fix issue with complex HAVING and GROUP BY ordinal {pull}36594[#36594] (issue: {issue}36059[#36059]) +* Be lenient for tests involving comparison to H2 but strict for csv spec tests {pull}36498[#36498] (issue: {issue}36483[#36483]) +* Non ISO 8601 versions of DAY_OF_WEEK and WEEK_OF_YEAR functions {pull}36358[#36358] (issue: {issue}36263[#36263]) +* Do not ignore all fields whose names start with underscore {pull}36214[#36214] (issue: {issue}36206[#36206]) +* Fix issue with wrong data type for scripted Grouping keys {pull}35969[#35969] (issue: {issue}35662[#35662]) +* Fix translation of math functions to painless {pull}35910[#35910] (issue: {issue}35654[#35654]) +* Fix jdbc jar to include deps {pull}35602[#35602] +* Fix query translation for scripted queries {pull}35408[#35408] (issue: {issue}35232[#35232]) +* Clear the cursor if nested inner hits are enough to fulfill the query required 
limits {pull}35398[#35398] (issue: {issue}35176[#35176]) +* Introduce IsNull node to simplify expressions {pull}35206[#35206] (issues: {issue}34876[#34876], {issue}35171[#35171]) +* The SSL default configuration shouldn't override the https protocol if used {pull}34635[#34635] (issue: {issue}33817[#33817]) +* Minor fix for javadoc {pull}32573[#32573] (issue: {issue}32553[#32553]) + +Search:: +* Inner hits fail to propagate doc-value format. {pull}36310[#36310] +* Fix custom AUTO issue with Fuzziness#toXContent {pull}35807[#35807] (issue: {issue}33462[#33462]) +* Fix analyzed prefix query in query_string {pull}35756[#35756] (issue: {issue}31702[#31702]) +* Fix problem with MatchNoDocsQuery in disjunction queries {pull}35726[#35726] (issue: {issue}34708[#34708]) +* Fix phrase_slop in query_string query {pull}35533[#35533] (issue: {issue}35125[#35125]) +* Add a More Like This query routing requirement check (#29678) {pull}33974[#33974] + +Security:: +* Remove license state listeners on closables {pull}36308[#36308] (issues: {issue}33328[#33328], {issue}35627[#35627], {issue}35628[#35628]) + +Snapshot/Restore:: +* Upgrade GCS Dependencies to 1.55.0 {pull}36634[#36634] (issues: {issue}35229[#35229], {issue}35459[#35459]) +* Improve Resilience SnapshotShardService {pull}36113[#36113] (issue: {issue}32265[#32265]) +* Keep SnapshotsInProgress State in Sync with Routing Table {pull}35710[#35710] +* Ensure that gcs client creation is privileged {pull}25938[#25938] (issue: {issue}25932[#25932]) +* Make calls to CloudBlobContainer#exists privileged {pull}25937[#25937] (issue: {issue}25931[#25931]) + +Watcher:: +* Watcher accounts constructed lazily {pull}36656[#36656] +* Only trigger a watch if new or schedule/changed {pull}35908[#35908] +* Fix Watcher NotificationService's secure settings {pull}35610[#35610] (issue: {issue}35378[#35378]) +* Fix integration tests to ensure correct start/stop of Watcher {pull}35271[#35271] (issues: {issue}29877[#29877], {issue}30705[#30705], {issue}33291[#33291], {issue}34448[#34448], {issue}34462[#34462]) + +ZenDiscovery:: +* [Zen2] Respect the no_master_block setting {pull}36478[#36478] +* Cancel GetDiscoveredNodesAction when bootstrapped {pull}36423[#36423] (issues: {issue}36380[#36380], {issue}36381[#36381]) +* [Zen2] Only elect master-eligible nodes {pull}35996[#35996] +* [Zen2] Remove duplicate discovered peers {pull}35505[#35505] + + +[[regression-7.0.0-alpha2]] +[float] +=== Regressions + +Scripting:: +* Use Number as a return value for BucketAggregationScript {pull}35653[#35653] (issue: {issue}35351[#35351]) + + +[[upgrade-7.0.0-alpha2]] +[float] +=== Upgrades + +Ingest:: +* Update geolite2 database in ingest geoip plugin {pull}33840[#33840] + +Network:: +* Upgrade Netty 4.3.32.Final {pull}36102[#36102] (issue: {issue}35360[#35360]) diff --git a/docs/reference/rest-api/defs.asciidoc b/docs/reference/rest-api/defs.asciidoc index 4eeedc5539992..823b63cbe579d 100644 --- a/docs/reference/rest-api/defs.asciidoc +++ b/docs/reference/rest-api/defs.asciidoc @@ -2,8 +2,8 @@ [[api-definitions]] == Definitions -These resource definitions are used in {ml} and {security} APIs and in {kib} -advanced {ml} job configuration options. +These resource definitions are used in APIs related to {ml-features} and +{security-features} and in {kib} advanced {ml} job configuration options. 
* <> * <> diff --git a/docs/reference/rollup/apis/delete-job.asciidoc b/docs/reference/rollup/apis/delete-job.asciidoc index 57f2c524d7cf9..18c353ac73674 100644 --- a/docs/reference/rollup/apis/delete-job.asciidoc +++ b/docs/reference/rollup/apis/delete-job.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="basic"] [[rollup-delete-job]] -=== Delete Job API +=== Delete job API ++++ -Delete Job +Delete job ++++ experimental[] diff --git a/docs/reference/rollup/apis/get-job.asciidoc b/docs/reference/rollup/apis/get-job.asciidoc index 46bdd46ead47b..ff4d62fb8002c 100644 --- a/docs/reference/rollup/apis/get-job.asciidoc +++ b/docs/reference/rollup/apis/get-job.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="basic"] [[rollup-get-job]] -=== Get Rollup Jobs API +=== Get rollup jobs API ++++ -Get Job +Get job ++++ experimental[] diff --git a/docs/reference/rollup/apis/put-job.asciidoc b/docs/reference/rollup/apis/put-job.asciidoc index 4953bc2c081d8..b43c5a0e90b2a 100644 --- a/docs/reference/rollup/apis/put-job.asciidoc +++ b/docs/reference/rollup/apis/put-job.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="basic"] [[rollup-put-job]] -=== Create Job API +=== Create job API ++++ -Create Job +Create job ++++ experimental[] diff --git a/docs/reference/rollup/apis/rollup-caps.asciidoc b/docs/reference/rollup/apis/rollup-caps.asciidoc index bd39d701295f2..b4a0d4470a7bd 100644 --- a/docs/reference/rollup/apis/rollup-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-caps.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="basic"] [[rollup-get-rollup-caps]] -=== Get Rollup Job Capabilities +=== Get rollup job capabilities API ++++ -Get Rollup Caps +Get rollup caps ++++ experimental[] diff --git a/docs/reference/rollup/apis/rollup-index-caps.asciidoc b/docs/reference/rollup/apis/rollup-index-caps.asciidoc index 5abcbe5737678..1fad99e0311de 100644 --- a/docs/reference/rollup/apis/rollup-index-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-index-caps.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="basic"] [[rollup-get-rollup-index-caps]] -=== Get Rollup Index Capabilities +=== Get rollup index capabilities API ++++ -Get Rollup Index Caps +Get rollup index caps ++++ experimental[] diff --git a/docs/reference/rollup/apis/rollup-job-config.asciidoc b/docs/reference/rollup/apis/rollup-job-config.asciidoc index b839e454e6dbf..885d4e82cf6b0 100644 --- a/docs/reference/rollup/apis/rollup-job-config.asciidoc +++ b/docs/reference/rollup/apis/rollup-job-config.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="basic"] [[rollup-job-config]] -=== Rollup Job Configuration +=== Rollup job configuration experimental[] diff --git a/docs/reference/rollup/apis/rollup-search.asciidoc b/docs/reference/rollup/apis/rollup-search.asciidoc index a1a814224c961..244f304ed917b 100644 --- a/docs/reference/rollup/apis/rollup-search.asciidoc +++ b/docs/reference/rollup/apis/rollup-search.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="basic"] [[rollup-search]] -=== Rollup Search +=== Rollup search ++++ -Rollup Search +Rollup search ++++ experimental[] diff --git a/docs/reference/rollup/apis/start-job.asciidoc b/docs/reference/rollup/apis/start-job.asciidoc index 141af0a2fc2a0..241d070a670a0 100644 --- a/docs/reference/rollup/apis/start-job.asciidoc +++ b/docs/reference/rollup/apis/start-job.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="basic"] [[rollup-start-job]] -=== Start Job API +=== Start rollup job API ++++ -Start Job +Start job ++++ experimental[] diff --git a/docs/reference/rollup/apis/stop-job.asciidoc 
b/docs/reference/rollup/apis/stop-job.asciidoc index 161e50d458b90..50935826f5f53 100644 --- a/docs/reference/rollup/apis/stop-job.asciidoc +++ b/docs/reference/rollup/apis/stop-job.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="basic"] [[rollup-stop-job]] -=== Stop Job API +=== Stop rollup job API ++++ -Stop Job +Stop job ++++ experimental[] diff --git a/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc b/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc index e7e1a00208adc..c9d645e4fa234 100644 --- a/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc +++ b/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc @@ -1,8 +1,10 @@ [role="xpack"] [[configuring-tls-docker]] -=== Encrypting Communications in an {es} Docker Container +=== Encrypting communications in an {es} Docker Container -Starting with version 6.0.0, {security} (Gold, Platinum or Enterprise subscriptions) https://www.elastic.co/guide/en/elasticsearch/reference/6.0/breaking-6.0.0-xes.html[requires SSL/TLS] +Starting with version 6.0.0, {stack} {security-features} +(Gold, Platinum or Enterprise subscriptions) +https://www.elastic.co/guide/en/elasticsearch/reference/6.0/breaking-6.0.0-xes.html[require SSL/TLS] encryption for the transport networking layer. This section demonstrates an easy path to get started with SSL/TLS for both @@ -10,7 +12,7 @@ HTTPS and transport using the {es} Docker image. The example uses Docker Compose to manage the containers. For further details, please refer to -{xpack-ref}/encrypting-communications.html[Encrypting Communications] and +{stack-ov}/encrypting-communications.html[Encrypting communications] and https://www.elastic.co/subscriptions[available subscriptions]. [float] @@ -106,7 +108,7 @@ services: image: {docker-image} environment: - node.name=es01 - - discovery.zen.minimum_master_nodes=2 + - cluster.initial_master_nodes=es01,es02 - ELASTIC_PASSWORD=$ELASTIC_PASSWORD <1> - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - xpack.license.self_generated.type=trial <2> @@ -131,9 +133,9 @@ services: image: {docker-image} environment: - node.name=es02 - - discovery.zen.minimum_master_nodes=2 - - ELASTIC_PASSWORD=$ELASTIC_PASSWORD - discovery.zen.ping.unicast.hosts=es01 + - cluster.initial_master_nodes=es01,es02 + - ELASTIC_PASSWORD=$ELASTIC_PASSWORD - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - xpack.license.self_generated.type=trial - xpack.security.enabled=true @@ -156,7 +158,7 @@ volumes: {"esdata_01": {"driver": "local"}, "esdata_02": {"driver": "local"}} <1> Bootstrap `elastic` with the password defined in `.env`. See {stack-ov}/built-in-users.html#bootstrap-elastic-passwords[the Elastic Bootstrap Password]. <2> Automatically generate and apply a trial subscription, in order to enable -{security}. +{security-features}. <3> Disable verification of authenticity for inter-node communication. Allows creating self-signed certificates without having to pin specific internal IP addresses. 
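Pulling the changed settings together, a minimal sketch of the security-related environment for a single node in this Compose file could look like the following. This is illustrative only: it assumes the `es01`/`es02` node names used above and omits the certificate paths and volume mounts that the full example defines.

[source,yaml]
----
# Illustrative excerpt of one service's environment in docker-compose.yml.
# Node names (es01, es02) are taken from the example above; certificate
# settings and volumes shown elsewhere in the file are omitted here.
environment:
  - node.name=es01
  - cluster.initial_master_nodes=es01,es02
  - ELASTIC_PASSWORD=$ELASTIC_PASSWORD
  - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
  - xpack.license.self_generated.type=trial
  - xpack.security.enabled=true
----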
endif::[] diff --git a/docs/reference/security/securing-communications/enabling-cipher-suites.asciidoc b/docs/reference/security/securing-communications/enabling-cipher-suites.asciidoc index 38db876542747..51d5e5f6de650 100644 --- a/docs/reference/security/securing-communications/enabling-cipher-suites.asciidoc +++ b/docs/reference/security/securing-communications/enabling-cipher-suites.asciidoc @@ -16,8 +16,8 @@ The _JCE Unlimited Strength Jurisdiction Policy Files`_ are required for encryption with key lengths greater than 128 bits, such as 256-bit AES encryption. After installation, all cipher suites in the JCE are available for use but requires -configuration in order to use them. To enable the use of stronger cipher suites with -{security}, configure the `cipher_suites` parameter. See the +configuration in order to use them. To enable the use of stronger cipher suites +with {es} {security-features}, configure the `cipher_suites` parameter. See the {ref}/security-settings.html#ssl-tls-settings[Configuration Parameters for TLS/SSL] section of this document for specific parameter information. diff --git a/docs/reference/security/securing-communications/node-certificates.asciidoc b/docs/reference/security/securing-communications/node-certificates.asciidoc index 44ef8278fb968..b2f5e95b09999 100644 --- a/docs/reference/security/securing-communications/node-certificates.asciidoc +++ b/docs/reference/security/securing-communications/node-certificates.asciidoc @@ -12,14 +12,12 @@ Additionally, it is recommended that the certificates contain subject alternativ names (SAN) that correspond to the node's IP address and DNS name so that hostname verification can be performed. -In order to simplify the process of generating certificates for the Elastic -Stack, a command line tool, {ref}/certutil.html[`elasticsearch-certutil`] has been -included with {xpack}. This tool takes care of generating a CA and signing -certificates with the CA. `elasticsearch-certutil` can be used interactively or -in a silent mode through the use of an input file. The `elasticsearch-certutil` -tool also supports generation of certificate signing requests (CSR), so that a -commercial- or organization-specific CA can be used to sign the certificates. -For example: +The {ref}/certutil.html[`elasticsearch-certutil`] command simplifies the process +of generating certificates for the {stack}. It takes care of generating a CA and +signing certificates with the CA. It can be used interactively or in a silent +mode through the use of an input file. It also supports generation of +certificate signing requests (CSR), so that a commercial- or +organization-specific CA can be used to sign the certificates. For example: . Optional: Create a certificate authority for your {es} cluster. + diff --git a/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc b/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc index 6b919e065c631..9d207f26a96b6 100644 --- a/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc +++ b/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc @@ -2,11 +2,13 @@ [[configuring-tls]] === Encrypting communications in {es} -{security} enables you to encrypt traffic to, from, and within your {es} cluster. -Connections are secured using Transport Layer Security (TLS/SSL). +{stack} {security-features} enable you to encrypt traffic to, from, and within +your {es} cluster. 
Connections are secured using Transport Layer Security +(TLS/SSL). WARNING: Clusters that do not have encryption enabled send all data in plain text -including passwords and will not be able to install a license that enables {security}. +including passwords and will not be able to install a license that enables +{security-features}. To enable encryption, you need to perform the following steps on each node in the cluster: @@ -27,7 +29,7 @@ information, see <>. <>. For more information about encrypting communications across the Elastic Stack, -see {xpack-ref}/encrypting-communications.html[Encrypting Communications]. +see {stack-ov}/encrypting-communications.html[Encrypting Communications]. :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/node-certificates.asciidoc include::node-certificates.asciidoc[] diff --git a/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc b/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc index 5dd1620c0e4a8..2eab8e0ae5adb 100644 --- a/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc +++ b/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc @@ -2,13 +2,14 @@ [[separating-node-client-traffic]] === Separating node-to-node and client traffic -Elasticsearch has the feature of so called {ref}/modules-transport.html[TCP transport profiles] -that allows it to bind to several ports and addresses. {security} extends on this -functionality to enhance the security of the cluster by enabling the separation -of node-to-node transport traffic from client transport traffic. This is important -if the client transport traffic is not trusted and could potentially be malicious. -To separate the node-to-node traffic from the client traffic, add the following -to `elasticsearch.yml`: +Elasticsearch has the feature of so-called +{ref}/modules-transport.html[TCP transport profiles] +that allows it to bind to several ports and addresses. The {es} +{security-features} extend on this functionality to enhance the security of the +cluster by enabling the separation of node-to-node transport traffic from client +transport traffic. This is important if the client transport traffic is not +trusted and could potentially be malicious. To separate the node-to-node traffic +from the client traffic, add the following to `elasticsearch.yml`: [source, yaml] -------------------------------------------------- diff --git a/docs/reference/security/securing-communications/setting-up-ssl.asciidoc b/docs/reference/security/securing-communications/setting-up-ssl.asciidoc index ad2548f73f441..90f9b040d9d54 100644 --- a/docs/reference/security/securing-communications/setting-up-ssl.asciidoc +++ b/docs/reference/security/securing-communications/setting-up-ssl.asciidoc @@ -1,12 +1,13 @@ [[ssl-tls]] -=== Setting Up TLS on a Cluster +=== Setting up TLS on a cluster -{security} enables you to encrypt traffic to, from, and within your {es} -cluster. Connections are secured using Transport Layer Security (TLS), which is -commonly referred to as "SSL". +The {stack} {security-features} enable you to encrypt traffic to, from, and +within your {es} cluster. Connections are secured using Transport Layer Security +(TLS), which is commonly referred to as "SSL". 
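As a rough sketch of what the transport-encryption step involves, an `elasticsearch.yml` fragment along these lines could be used on each node. The setting names are standard transport TLS settings; the PKCS#12 file name and path are assumptions standing in for whatever the certificate tooling described above produced.

[source,yaml]
----
# Illustrative sketch only -- file names and paths are assumptions.
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: certs/elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: certs/elastic-certificates.p12
----

Pointing the keystore and truststore at the same PKCS#12 file works when that file bundles the node certificate, its key, and the signing CA, which is one common layout for certificates generated with `elasticsearch-certutil`.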
WARNING: Clusters that do not have encryption enabled send all data in plain text -including passwords and will not be able to install a license that enables {security}. +including passwords and will not be able to install a license that enables +{security-features}. The following steps describe how to enable encryption across the various components of the Elastic Stack. You must perform each of the steps that are diff --git a/docs/reference/security/securing-communications/tls-ad.asciidoc b/docs/reference/security/securing-communications/tls-ad.asciidoc index cd0395b6725f2..48278f8336597 100644 --- a/docs/reference/security/securing-communications/tls-ad.asciidoc +++ b/docs/reference/security/securing-communications/tls-ad.asciidoc @@ -5,7 +5,7 @@ To protect the user credentials that are sent for authentication, it's highly recommended to encrypt communications between {es} and your Active Directory server. Connecting via SSL/TLS ensures that the identity of the Active Directory -server is authenticated before {security} transmits the user credentials and the +server is authenticated before {es} transmits the user credentials and the usernames and passwords are encrypted in transit. Clients and nodes that connect via SSL/TLS to the Active Directory server need @@ -47,11 +47,11 @@ For more information about these settings, see <>. . Restart {es}. -NOTE: By default, when you configure {security} to connect to Active Directory - using SSL/TLS, {security} attempts to verify the hostname or IP address +NOTE: By default, when you configure {es} to connect to Active Directory + using SSL/TLS, it attempts to verify the hostname or IP address specified with the `url` attribute in the realm configuration with the values in the certificate. If the values in the certificate and realm - configuration do not match, {security} does not allow a connection to the + configuration do not match, {es} does not allow a connection to the Active Directory server. This is done to protect against man-in-the-middle attacks. If necessary, you can disable this behavior by setting the `ssl.verification_mode` property to `certificate`. diff --git a/docs/reference/security/securing-communications/tls-http.asciidoc b/docs/reference/security/securing-communications/tls-http.asciidoc index 06e70b036735e..45129db009658 100644 --- a/docs/reference/security/securing-communications/tls-http.asciidoc +++ b/docs/reference/security/securing-communications/tls-http.asciidoc @@ -1,8 +1,8 @@ [role="xpack"] [[tls-http]] -==== Encrypting HTTP Client Communications +==== Encrypting HTTP Client communications -When {security} is enabled, you can optionally use TLS to ensure that +When {security-features} are enabled, you can optionally use TLS to ensure that communication between HTTP clients and the cluster is encrypted. NOTE: Enabling TLS on the HTTP layer is strongly recommended but is not required. diff --git a/docs/reference/security/securing-communications/tls-ldap.asciidoc b/docs/reference/security/securing-communications/tls-ldap.asciidoc index 2d7b2546becb7..30b786b8e4c11 100644 --- a/docs/reference/security/securing-communications/tls-ldap.asciidoc +++ b/docs/reference/security/securing-communications/tls-ldap.asciidoc @@ -5,7 +5,7 @@ To protect the user credentials that are sent for authentication in an LDAP realm, it's highly recommended to encrypt communications between {es} and your LDAP server. 
Connecting via SSL/TLS ensures that the identity of the LDAP server -is authenticated before {security} transmits the user credentials and the +is authenticated before {es} transmits the user credentials and the contents of the connection are encrypted. Clients and nodes that connect via TLS to the LDAP server need to have the LDAP server's certificate or the server's root CA certificate installed in their keystore or truststore. @@ -15,7 +15,7 @@ For more information, see <>. . Configure the realm's TLS settings on each node to trust certificates signed by the CA that signed your LDAP server certificates. The following example demonstrates how to trust a CA certificate, `cacert.pem`, located within the -{xpack} configuration directory: +{es} configuration directory (ES_PATH_CONF): + -- [source,shell] @@ -45,11 +45,11 @@ protocol and the secure port number. For example, `url: ldaps://ldap.example.com . Restart {es}. -NOTE: By default, when you configure {security} to connect to an LDAP server - using SSL/TLS, {security} attempts to verify the hostname or IP address +NOTE: By default, when you configure {es} to connect to an LDAP server + using SSL/TLS, it attempts to verify the hostname or IP address specified with the `url` attribute in the realm configuration with the values in the certificate. If the values in the certificate and realm - configuration do not match, {security} does not allow a connection to the + configuration do not match, {es} does not allow a connection to the LDAP server. This is done to protect against man-in-the-middle attacks. If necessary, you can disable this behavior by setting the `ssl.verification_mode` property to `certificate`. diff --git a/docs/reference/security/securing-communications/tls-transport.asciidoc b/docs/reference/security/securing-communications/tls-transport.asciidoc index c2306545536aa..fee775078d6a2 100644 --- a/docs/reference/security/securing-communications/tls-transport.asciidoc +++ b/docs/reference/security/securing-communications/tls-transport.asciidoc @@ -1,10 +1,10 @@ [role="xpack"] [[tls-transport]] -==== Encrypting Communications Between Nodes in a Cluster +==== Encrypting communications between nodes in a cluster The transport networking layer is used for internal communication between nodes -in a cluster. When {security} is enabled, you must use TLS to ensure that -communication between the nodes is encrypted. +in a cluster. When {security-features} are enabled, you must use TLS to ensure +that communication between the nodes is encrypted. . <>. diff --git a/docs/reference/settings/audit-settings.asciidoc b/docs/reference/settings/audit-settings.asciidoc index b7a3446ae9691..d551516984052 100644 --- a/docs/reference/settings/audit-settings.asciidoc +++ b/docs/reference/settings/audit-settings.asciidoc @@ -161,9 +161,9 @@ xpack.security.audit.index.settings: -- NOTE: These settings apply to the local audit indices, as well as to the <>, but only if the remote cluster -does *not* have {security} installed, or the {es} versions are different. -If the remote cluster has {security} installed, and the versions coincide, the -settings for the audit indices there will take precedence, +does *not* have {security-features} enabled or the {es} versions are different. +If the remote cluster has {security-features} enabled and the versions coincide, +the settings for the audit indices there will take precedence, even if they are unspecified (i.e. left to defaults). 
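Tying the LDAP realm TLS steps above together, a hypothetical realm definition in `elasticsearch.yml` might look roughly like the following. The realm name `ldap1` is invented, the settings layout follows the pre-7.0 `xpack.security.authc.realms.<name>` convention, and the CA path simply reuses the `cacert.pem` example from the instructions.

[source,yaml]
----
# Hypothetical LDAP realm; the realm name, URL, and file path are assumptions.
xpack.security.authc.realms.ldap1:
  type: ldap
  order: 0
  url: "ldaps://ldap.example.com:636"
  ssl.certificate_authorities: [ "cacert.pem" ]
  # Default hostname verification stays on; `certificate` would relax it,
  # as described in the note above.
  ssl.verification_mode: full
----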
-- diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index e97ad2edc77c5..28c30bf665cf2 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -90,9 +90,10 @@ access. Defaults to `true`. [float] [[security-automata-settings]] ==== Automata Settings -In places where {security} accepts wildcard patterns (e.g. index patterns in -roles, group matches in the role mapping API), each pattern is compiled into -an Automaton. The follow settings are available to control this behaviour. +In places where the {security-features} accept wildcard patterns (e.g. index +patterns in roles, group matches in the role mapping API), each pattern is +compiled into an Automaton. The following settings are available to control this +behaviour. `xpack.security.automata.max_determinized_states`:: The upper limit on how many automaton states may be created by a single pattern. @@ -357,7 +358,7 @@ Defaults to `60s`. `group_search.base_dn`:: The container DN to search for groups in which the user has membership. When -this element is absent, {security} searches for the attribute specified by +this element is absent, {es} searches for the attribute specified by `user_group_attribute` set on the user in order to determine group membership. `group_search.scope`:: @@ -391,7 +392,7 @@ YAML role mapping configuration file]. Defaults to `ES_PATH_CONF/role_mapping.yml`. `follow_referrals`:: -Specifies whether {security} should follow referrals returned +Specifies whether {es} should follow referrals returned by the LDAP server. Referrals are URLs returned by the server that are to be used to continue the LDAP operation (for example, search). Defaults to `true`. @@ -517,7 +518,7 @@ The `type` setting must be set to `active_directory`. In addition to the the following settings: `url`:: -An LDAP URL of the form `ldap[s]://:`. {security} attempts to +An LDAP URL of the form `ldap[s]://:`. {es} attempts to authenticate against this URL. If the URL is not specified, it is derived from the `domain_name` setting and assumes an unencrypted connection to port 389. Defaults to `ldap://:389`. This setting is required when connecting @@ -756,7 +757,7 @@ this realm, so that it only supports user lookups. Defaults to `true`. `follow_referrals`:: -If set to `true` {security} follows referrals returned by the LDAP server. +If set to `true`, {es} follows referrals returned by the LDAP server. Referrals are URLs returned by the server that are to be used to continue the LDAP operation (such as `search`). Defaults to `true`. @@ -832,7 +833,7 @@ capabilities and configuration of the Identity Provider. If a path is provided, then it is resolved relative to the {es} config directory. If a URL is provided, then it must be either a `file` URL or a `https` URL. -{security} automatically polls this metadata resource and reloads +{es} automatically polls this metadata resource and reloads the IdP configuration when changes are detected. File based resources are polled at a frequency determined by the global {es} `resource.reload.interval.high` setting, which defaults to 5 seconds. @@ -864,24 +865,20 @@ The URL of the Single Logout service within {kib}. Typically this is the `https://kibana.example.com/logout`. `attributes.principal`:: -The Name of the SAML attribute that should be used as the {security} user's -principal (username). +The Name of the SAML attribute that contains the user's principal (username). 
`attributes.groups`:: -The Name of the SAML attribute that should be used to populate {security} -user's groups. +The Name of the SAML attribute that contains the user's groups. `attributes.name`:: -The Name of the SAML attribute that should be used to populate {security} -user's full name. +The Name of the SAML attribute that contains the user's full name. `attributes.mail`:: -The Name of the SAML attribute that should be used to populate {security} -user's email address. +The Name of the SAML attribute that contains the user's email address. `attributes.dn`:: -The Name of the SAML attribute that should be used to populate {security} -user's X.500 _Distinguished Name_. +The Name of the SAML attribute that contains the user's X.500 +_Distinguished Name_. `attribute_patterns.principal`:: A Java regular expression that is matched against the SAML attribute specified @@ -950,7 +947,7 @@ For more information, see ===== SAML realm signing settings If a signing key is configured (that is, either `signing.key` or -`signing.keystore.path` is set), then {security} signs outgoing SAML messages. +`signing.keystore.path` is set), then {es} signs outgoing SAML messages. Signing can be configured using the following settings: `signing.saml_messages`:: @@ -1001,7 +998,7 @@ Defaults to the keystore password. ===== SAML realm encryption settings If an encryption key is configured (that is, either `encryption.key` or -`encryption.keystore.path` is set), then {security} publishes an encryption +`encryption.keystore.path` is set), then {es} publishes an encryption certificate when generating metadata and attempts to decrypt incoming SAML content. Encryption can be configured using the following settings: @@ -1210,8 +1207,8 @@ through the list of URLs will continue until a successful connection is made. ==== Default TLS/SSL settings You can configure the following TLS/SSL settings in `elasticsearch.yml`. For more information, see -{stack-ov}/encrypting-communications.html[Encrypting communications]. These settings will be used -for all of {xpack} unless they have been overridden by more specific +{stack-ov}/encrypting-communications.html[Encrypting communications]. These +settings are used unless they have been overridden by more specific settings such as those for HTTP or Transport. `xpack.ssl.supported_protocols`:: @@ -1262,8 +1259,8 @@ Jurisdiction Policy Files_ has been installed, the default value also includes ` The following settings are used to specify a private key, certificate, and the trusted certificates that should be used when communicating over an SSL/TLS connection. -If none of the settings below are specified, this will default to the <>. If no trusted certificates are configured, the default certificates that are trusted by the JVM will be +If none of the settings below are specified, the +<> are used. If no trusted certificates are configured, the default certificates that are trusted by the JVM will be trusted along with the certificate(s) from the <>. The key and certificate must be in place for connections that require client authentication or when acting as a SSL enabled server. diff --git a/docs/reference/settings/ssl-settings.asciidoc b/docs/reference/settings/ssl-settings.asciidoc index 2d513c7423745..1757cc481c3fe 100644 --- a/docs/reference/settings/ssl-settings.asciidoc +++ b/docs/reference/settings/ssl-settings.asciidoc @@ -110,7 +110,7 @@ Password to the truststore. 
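As a purely illustrative example of the default TLS/SSL settings described above, restricting the protocols and cipher suites globally in `elasticsearch.yml` could look roughly like this; the specific suites are examples only, and per the JCE note earlier, 256-bit suites require the unlimited-strength policy files on older JVMs.

[source,yaml]
----
# Illustrative values only.
xpack.ssl.supported_protocols: [ "TLSv1.2" ]
xpack.ssl.cipher_suites:
  - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"
  - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
----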
===== PKCS#12 Files -{security} can be configured to use PKCS#12 container files (`.p12` or `.pfx` files) +{es} can be configured to use PKCS#12 container files (`.p12` or `.pfx` files) that contain the private key, certificate and certificates that should be trusted. PKCS#12 files are configured in the same way as Java Keystore Files: @@ -148,7 +148,7 @@ Password to the PKCS#12 file. ===== PKCS#11 Tokens -{security} can be configured to use a PKCS#11 token that contains the private key, +{es} can be configured to use a PKCS#11 token that contains the private key, certificate and certificates that should be trusted. PKCS#11 token require additional configuration on the JVM level and can be enabled diff --git a/docs/reference/setup/bootstrap-checks-xes.asciidoc b/docs/reference/setup/bootstrap-checks-xes.asciidoc index eef6a30c94beb..df020bbd96276 100644 --- a/docs/reference/setup/bootstrap-checks-xes.asciidoc +++ b/docs/reference/setup/bootstrap-checks-xes.asciidoc @@ -21,11 +21,11 @@ on each node in the cluster. For more information, see === PKI realm check //See PkiRealmBootstrapCheckTests.java -If you use {security} and a Public Key Infrastructure (PKI) realm, you must -configure Transport Layer Security (TLS) on your cluster and enable client -authentication on the network layers (either transport or http). For more -information, see {xpack-ref}/pki-realm.html[PKI User Authentication] and -{xpack-ref}/ssl-tls.html[Setting Up TLS on a Cluster]. +If you use {es} {security-features} and a Public Key Infrastructure (PKI) realm, +you must configure Transport Layer Security (TLS) on your cluster and enable +client authentication on the network layers (either transport or http). For more +information, see {stack-ov}/pki-realm.html[PKI user authentication] and +{stack-ov}/ssl-tls.html[Setting up TLS on a cluster]. To pass this bootstrap check, if a PKI realm is enabled, you must configure TLS and enable client authentication on at least one network communication layer. @@ -42,7 +42,7 @@ and copy it to each node in the cluster. By default, role mappings are stored in `ES_PATH_CONF/role_mapping.yml`. Alternatively, you can specify a different role mapping file for each type of realm and specify its location in the `elasticsearch.yml` file. For more information, see -{xpack-ref}/mapping-roles.html#mapping-roles-file[Using Role Mapping Files]. +{stack-ov}/mapping-roles.html#mapping-roles-file[Using role mapping files]. To pass this bootstrap check, the role mapping files must exist and must be valid. The Distinguished Names (DNs) that are listed in the role mappings files @@ -54,24 +54,24 @@ must also be valid. //See TLSLicenseBootstrapCheck.java In 6.0 and later releases, if you have a gold, platinum, or enterprise license -and {security} is enabled, you must configure SSL/TLS for +and {es} {security-features} are enabled, you must configure SSL/TLS for internode-communication. NOTE: Single-node clusters that use a loopback interface do not have this requirement. For more information, see -{xpack-ref}/encrypting-communications.html[Encrypting Communications]. +{stack-ov}/encrypting-communications.html[Encrypting communications]. To pass this bootstrap check, you must -{xpack-ref}/ssl-tls.html[set up SSL/TLS in your cluster]. +{stack-ov}/ssl-tls.html[set up SSL/TLS in your cluster]. [float] === Token SSL check //See TokenSSLBootstrapCheckTests.java -If you use {security} and the built-in token service is enabled, you must -configure your cluster to use SSL/TLS for the HTTP interface. 
HTTPS is required -in order to use the token service. +If you use {es} {security-features} and the built-in token service is enabled, +you must configure your cluster to use SSL/TLS for the HTTP interface. HTTPS is +required in order to use the token service. In particular, if `xpack.security.authc.token.enabled` is set to `true` in the `elasticsearch.yml` file, you must also set @@ -79,4 +79,4 @@ set to `true` in the `elasticsearch.yml` file, you must also set settings, see <> and <>. To pass this bootstrap check, you must enable HTTPS or disable the built-in -token service by using the {security} settings. +token service. diff --git a/docs/reference/setup/bootstrap-checks.asciidoc b/docs/reference/setup/bootstrap-checks.asciidoc index 9cf3620636a41..34b39546324d1 100644 --- a/docs/reference/setup/bootstrap-checks.asciidoc +++ b/docs/reference/setup/bootstrap-checks.asciidoc @@ -21,6 +21,7 @@ Elasticsearch from running with incompatible settings. These checks are documented individually. [float] +[[dev-vs-prod-mode]] === Development vs. production mode By default, Elasticsearch binds to loopback addresses for <> diff --git a/docs/reference/setup/important-settings/discovery-settings.asciidoc b/docs/reference/setup/important-settings/discovery-settings.asciidoc index 0587484f50c61..9c62f2da1af25 100644 --- a/docs/reference/setup/important-settings/discovery-settings.asciidoc +++ b/docs/reference/setup/important-settings/discovery-settings.asciidoc @@ -1,22 +1,43 @@ [[discovery-settings]] -=== Discovery settings +=== Discovery and cluster formation settings -Elasticsearch uses a custom discovery implementation called "Zen Discovery" for -node-to-node clustering and master election. There are two important discovery -settings that should be configured before going to production. +There are two important discovery and cluster formation settings that should be +configured before going to production so that nodes in the cluster can discover +each other and elect a master node. [float] [[unicast.hosts]] ==== `discovery.zen.ping.unicast.hosts` Out of the box, without any network configuration, Elasticsearch will bind to -the available loopback addresses and will scan ports 9300 to 9305 to try to -connect to other nodes running on the same server. This provides an auto- +the available loopback addresses and will scan local ports 9300 to 9305 to try +to connect to other nodes running on the same server. This provides an auto- clustering experience without having to do any configuration. -When the moment comes to form a cluster with nodes on other servers, you have to -provide a seed list of other nodes in the cluster that are likely to be live and -contactable. This can be specified as follows: +When the moment comes to form a cluster with nodes on other servers, you must +use the `discovery.zen.ping.unicast.hosts` setting to provide a seed list of +other nodes in the cluster that are master-eligible and likely to be live and +contactable. This setting should normally contain the addresses of all the +master-eligible nodes in the cluster. +This setting contains either an array of hosts or a comma-delimited string. Each +value should be in the form of `host:port` or `host` (where `port` defaults to +the setting `transport.profiles.default.port` falling back to `transport.port` +if not set). Note that IPv6 hosts must be bracketed. 
The default for this +setting is `127.0.0.1, [::1]` +[float] +[[initial_master_nodes]] +==== `cluster.initial_master_nodes` + +When you start a brand new Elasticsearch cluster for the very first time, there +is a <> step, which +determines the set of master-eligible nodes whose votes are counted in the very +first election. In <>, with no discovery +settings configured, this step is automatically performed by the nodes +themselves. As this auto-bootstrapping is <>, when you start a brand new cluster in <>, you must explicitly list the names or IP addresses of the +master-eligible nodes whose votes should be counted in the very first election. +This list is set using the `cluster.initial_master_nodes` setting. [source,yaml] -------------------------------------------------- @@ -24,35 +45,16 @@ discovery.zen.ping.unicast.hosts: - 192.168.1.10:9300 - 192.168.1.11 <1> - seeds.mydomain.com <2> +cluster.initial_master_nodes: + - master-node-a <3> + - 192.168.1.12 <4> + - 192.168.1.13:9301 <5> -------------------------------------------------- <1> The port will default to `transport.profiles.default.port` and fallback to `transport.port` if not specified. -<2> A hostname that resolves to multiple IP addresses will try all resolved - addresses. - -[float] -[[minimum_master_nodes]] -==== `discovery.zen.minimum_master_nodes` - -To prevent data loss, it is vital to configure the -`discovery.zen.minimum_master_nodes` setting so that each master-eligible node -knows the _minimum number of master-eligible nodes_ that must be visible in -order to form a cluster. - -Without this setting, a cluster that suffers a network failure is at risk of -having the cluster split into two independent clusters -- a split brain -- which -will lead to data loss. A more detailed explanation is provided in -<>. - -To avoid a split brain, this setting should be set to a _quorum_ of -master-eligible nodes: - - (master_eligible_nodes / 2) + 1 - -In other words, if there are three master-eligible nodes, then minimum master -nodes should be set to `(3 / 2) + 1` or `2`: - -[source,yaml] --------------------------------------------------- -discovery.zen.minimum_master_nodes: 2 --------------------------------------------------- +<2> If a hostname resolves to multiple IP addresses then the node will attempt to + discover other nodes at all resolved addresses. +<3> Initial master nodes can be identified by their <>. +<4> Initial master nodes can also be identified by their IP address. +<5> If multiple master nodes share an IP address then the port must be used to + disambiguate them. diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 6eba32ba33202..267ea14420921 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -142,12 +142,12 @@ endif::[] Instructions for installing it can be found on the https://docs.docker.com/compose/install/#install-using-pip[Docker Compose webpage]. -The node `elasticsearch` listens on `localhost:9200` while `elasticsearch2` -talks to `elasticsearch` over a Docker network. +The node `es01` listens on `localhost:9200` while `es02` +talks to `es01` over a Docker network. This example also uses https://docs.docker.com/engine/tutorials/dockervolumes[Docker named volumes], -called `esdata1` and `esdata2` which will be created if not already present. +called `esdata01` and `esdata02` which will be created if not already present. 
[[docker-prod-cluster-composefile]] `docker-compose.yml`: @@ -163,10 +163,12 @@ ifeval::["{release-state}"!="unreleased"] -------------------------------------------- version: '2.2' services: - elasticsearch: + es01: image: {docker-image} - container_name: elasticsearch + container_name: es01 environment: + - node.name=es01 + - cluster.initial_master_nodes=es01,es02 - cluster.name=docker-cluster - bootstrap.memory_lock=true - "ES_JAVA_OPTS=-Xms512m -Xmx512m" @@ -175,32 +177,34 @@ services: soft: -1 hard: -1 volumes: - - esdata1:/usr/share/elasticsearch/data + - esdata01:/usr/share/elasticsearch/data ports: - 9200:9200 networks: - esnet - elasticsearch2: + es02: image: {docker-image} - container_name: elasticsearch2 + container_name: es02 environment: + - node.name=es02 + - discovery.zen.ping.unicast.hosts=es01 + - cluster.initial_master_nodes=es01,es02 - cluster.name=docker-cluster - bootstrap.memory_lock=true - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - - "discovery.zen.ping.unicast.hosts=elasticsearch" ulimits: memlock: soft: -1 hard: -1 volumes: - - esdata2:/usr/share/elasticsearch/data + - esdata02:/usr/share/elasticsearch/data networks: - esnet volumes: - esdata1: + esdata01: driver: local - esdata2: + esdata02: driver: local networks: diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc index 3f64698f334f4..46aadbc34a9f7 100644 --- a/docs/reference/setup/install/windows.asciidoc +++ b/docs/reference/setup/install/windows.asciidoc @@ -76,12 +76,18 @@ TIP: Ensure the installation machine has access to the internet and that any cor [[msi-installer-selected-plugins]] image::images/msi_installer/msi_installer_selected_plugins.png[] -As of version 6.3.0, X-Pack is now https://www.elastic.co/products/x-pack/open[bundled by default]. The final step allows a choice of the type of X-Pack license to install, in addition to security configuration and built-in user configuration: +As of version 6.3.0, {xpack} is now https://www.elastic.co/products/x-pack/open[bundled by default]. +The final step allows a choice of the type of license to install, in addition to +security configuration and built-in user configuration: [[msi-installer-xpack]] image::images/msi_installer/msi_installer_xpack.png[] -NOTE: X-Pack includes a choice of a Trial or Basic license. A Trial license is valid for 30 days, after which you can obtain one of the available subscriptions. The Basic license is free and perpetual. Consult the https://www.elastic.co/subscriptions[available subscriptions] for further details on which features are available under which license. +NOTE: {xpack} includes a choice of a Trial or Basic license. A Trial license is +valid for 30 days, after which you can obtain one of the available subscriptions. +The Basic license is free and perpetual. Consult the +https://www.elastic.co/subscriptions[available subscriptions] for further +details on which features are available under which license. After clicking the install button, the installation will begin: @@ -260,7 +266,8 @@ as _properties_ within Windows Installer documentation) that can be passed to `m `PLUGINS`:: - A comma separated list of the plugins to download and install as part of the installation. Defaults to `""` + A comma separated list of the plugins to download and install as part of the + installation. 
Defaults to `""` `HTTPSPROXYHOST`:: @@ -280,47 +287,47 @@ as _properties_ within Windows Installer documentation) that can be passed to `m `XPACKLICENSE`:: - The type of X-Pack license to install, either `Basic` or `Trial`. Defaults to `Basic` + The type of license to install, either `Basic` or `Trial`. Defaults to `Basic` `XPACKSECURITYENABLED`:: - When installing with a `Trial` license, whether X-Pack Security should be enabled. - Defaults to `true` + When installing with a `Trial` license, whether {security-features} are + enabled. Defaults to `true` `BOOTSTRAPPASSWORD`:: - When installing with a `Trial` license and X-Pack Security enabled, the password to - used to bootstrap the cluster and persisted as the `bootstrap.password` setting in the keystore. - Defaults to a randomized value. + When installing with a `Trial` license and {security-features} are enabled, + the password used to bootstrap the cluster and persisted as the + `bootstrap.password` setting in the keystore. Defaults to a randomized value. `SKIPSETTINGPASSWORDS`:: - When installing with a `Trial` license and {security} enabled, whether the - installation should skip setting up the built-in users `elastic`, `kibana`, - `logstash_system`, `apm_system`, and `beats_system`. + When installing with a `Trial` license and {security-features} enabled, + whether the installation should skip setting up the built-in users. Defaults to `false` `ELASTICUSERPASSWORD`:: - When installing with a `Trial` license and X-Pack Security enabled, the password - to use for the built-in user `elastic`. Defaults to `""` + When installing with a `Trial` license and {security-features} are enabled, + the password to use for the built-in user `elastic`. Defaults to `""` `KIBANAUSERPASSWORD`:: - When installing with a `Trial` license and X-Pack Security enabled, the password - to use for the built-in user `kibana`. Defaults to `""` + When installing with a `Trial` license and {security-features} are enabled, + the password to use for the built-in user `kibana`. Defaults to `""` `LOGSTASHSYSTEMUSERPASSWORD`:: - When installing with a `Trial` license and X-Pack Security enabled, the password - to use for the built-in user `logstash_system`. Defaults to `""` + When installing with a `Trial` license and {security-features} are enabled, + the password to use for the built-in user `logstash_system`. Defaults to `""` -To pass a value, simply append the property name and value using the format `=""` to -the installation command. For example, to use a different installation directory to the default one and to install https://www.elastic.co/products/x-pack[X-Pack]: +To pass a value, simply append the property name and value using the format +`=""` to the installation command. For example, to use a +different installation directory to the default one: ["source","sh",subs="attributes,callouts"] -------------------------------------------- -start /wait msiexec.exe /i elasticsearch-{version}.msi /qn INSTALLDIR="C:\Custom Install Directory\{version}" PLUGINS="x-pack" +start /wait msiexec.exe /i elasticsearch-{version}.msi /qn INSTALLDIR="C:\Custom Install Directory\{version}" -------------------------------------------- Consult the https://msdn.microsoft.com/en-us/library/windows/desktop/aa367988(v=vs.85).aspx[Windows Installer SDK Command-Line Options] @@ -328,10 +335,10 @@ for additional rules related to values containing quotation marks.
ifdef::include-xpack[] [[msi-installer-enable-indices]] -==== Enable automatic creation of X-Pack indices +==== Enable automatic creation of {xpack} indices -X-Pack will try to automatically create a number of indices within Elasticsearch. +The {stack} features try to automatically create a number of indices within {es}. include::xpack-indices.asciidoc[] endif::include-xpack[] diff --git a/docs/reference/setup/setup-xclient.asciidoc b/docs/reference/setup/setup-xclient.asciidoc index 4282264e39524..819e3de98f4bd 100644 --- a/docs/reference/setup/setup-xclient.asciidoc +++ b/docs/reference/setup/setup-xclient.asciidoc @@ -111,5 +111,5 @@ Then in your project's `pom.xml` if using maven, add the following repositories -------------------------------------------------------------- -- -. If you are using {security}, there are more configuration steps. See -{xpack-ref}/java-clients.html[Java Client and Security]. +. If you are using {stack} {security-features}, there are more configuration +steps. See {stack-ov}/java-clients.html[Java Client and Security]. diff --git a/docs/reference/sql/functions/grouping.asciidoc b/docs/reference/sql/functions/grouping.asciidoc index 9a8c5c5ef5348..b80b08a39f481 100644 --- a/docs/reference/sql/functions/grouping.asciidoc +++ b/docs/reference/sql/functions/grouping.asciidoc @@ -36,6 +36,8 @@ The histogram function takes all matching values and divides them into buckets w bucket_key = Math.floor(value / interval) * interval ---- +NOTE: Unlike the traditional <> and <>, the histogram in SQL does *NOT* return empty buckets for missing intervals. Such behavior does not fit conceptually in SQL, which treats all missing values as `NULL`; as such, the histogram places all missing values in the `NULL` group. + `Histogram` can be applied on either numeric fields: @@ -51,4 +53,26 @@ or date/time fields: include-tagged::{sql-specs}/docs.csv-spec[histogramDate] ---- +Expressions inside the histogram are also supported as long as the +return type is numeric: + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[histogramNumericExpression] +---- + +Do note that histograms (and grouping functions in general) allow custom expressions but cannot have any functions applied to them in the `GROUP BY`. In other words, the following statement is *NOT* allowed: +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[expressionOnHistogramNotAllowed] +---- + +as it requires two groupings (one for histogram followed by a second for applying the function on top of the histogram groups). + +Instead one can rewrite the query to move the expression on the histogram _inside_ of it: + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[histogramDateExpression] +---- diff --git a/docs/reference/upgrade/cluster_restart.asciidoc b/docs/reference/upgrade/cluster_restart.asciidoc index 85b6fffdb2eb3..4c229e373f505 100644 --- a/docs/reference/upgrade/cluster_restart.asciidoc +++ b/docs/reference/upgrade/cluster_restart.asciidoc @@ -59,10 +59,14 @@ If you have dedicated master nodes, start them first and wait for them to form a cluster and elect a master before proceeding with your data nodes. You can check progress by looking at the logs. -As soon as the <> -have discovered each other, they form a cluster and elect a master.
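Returning to the `HISTOGRAM` bucketing rule added in `grouping.asciidoc` above, `bucket_key = Math.floor(value / interval) * interval`, the following stand-alone sketch shows how a handful of values map onto bucket keys: intervals with no matching values simply produce no bucket, and missing (`NULL`) values all collect under a single `null` key. It is only an illustration of the formula; the class and helper names are invented and are not part of this change.

["source","java"]
--------------------------------------------------
import java.util.LinkedHashMap;
import java.util.Map;

public class HistogramBucketSketch {

    // bucket_key = Math.floor(value / interval) * interval, with NULL passed through
    static Double bucketKey(Double value, double interval) {
        return value == null ? null : Math.floor(value / interval) * interval;
    }

    public static void main(String[] args) {
        double interval = 10.0;
        Double[] values = {3.0, 17.0, 25.0, null, 42.0};

        // Count values per bucket key; all nulls share the single null key.
        Map<Double, Integer> counts = new LinkedHashMap<>();
        for (Double v : values) {
            counts.merge(bucketKey(v, interval), 1, Integer::sum);
        }

        // Note that the empty interval starting at 30.0 is simply absent.
        System.out.println(counts); // {0.0=1, 10.0=1, 20.0=1, null=1, 40.0=1}
    }
}
--------------------------------------------------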
At -that point, you can use <> and -<> to monitor nodes joining the cluster: +If upgrading from a 6.x cluster, you must +<> by +setting the `cluster.initial_master_nodes` setting. + +As soon as enough master-eligible nodes have discovered each other, they form a +cluster and elect a master. At that point, you can use +<> and <> to monitor nodes +joining the cluster: [source,sh] -------------------------------------------------- diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java index 3215b9a7306af..30543ea236a14 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java @@ -65,6 +65,8 @@ public RestMultiSearchTemplateAction(Settings settings, RestController controlle controller.registerHandler(POST, "/_msearch/template", this); controller.registerHandler(GET, "/{index}/_msearch/template", this); controller.registerHandler(POST, "/{index}/_msearch/template", this); + + // Deprecated typed endpoints. controller.registerHandler(GET, "/{index}/{type}/_msearch/template", this); controller.registerHandler(POST, "/{index}/{type}/_msearch/template", this); } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java index 7cb346ba8a5e4..196147bb7308a 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java @@ -54,6 +54,8 @@ public RestSearchTemplateAction(Settings settings, RestController controller) { controller.registerHandler(POST, "/_search/template", this); controller.registerHandler(GET, "/{index}/_search/template", this); controller.registerHandler(POST, "/{index}/_search/template", this); + + // Deprecated typed endpoints. 
controller.registerHandler(GET, "/{index}/{type}/_search/template", this); controller.registerHandler(POST, "/{index}/{type}/_search/template", this); } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/CastTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/GeneralCastTests.java similarity index 99% rename from modules/lang-painless/src/test/java/org/elasticsearch/painless/CastTests.java rename to modules/lang-painless/src/test/java/org/elasticsearch/painless/GeneralCastTests.java index c955c98e82d1c..353146211f384 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/CastTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/GeneralCastTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.painless; /** Tests for explicit casts */ -public class CastTests extends ScriptTestCase { +public class GeneralCastTests extends ScriptTestCase { /** * Unary operator with explicit cast diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/StandardCastTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/StandardCastTests.java new file mode 100644 index 0000000000000..604e20ad6f96a --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/StandardCastTests.java @@ -0,0 +1,548 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.painless; + +public class StandardCastTests extends ScriptTestCase { + + public void testObjectCasts() { + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; Number n = o;")); + exec("Object o = Integer.valueOf(0); Number n = (Number)o;"); + exec("Object o = null; Number n = (Number)o;"); + + expectScriptThrows(ClassCastException.class, () -> exec("Object o = 'string'; String n = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; String n = o;")); + exec("Object o = 'string'; String n = (String)o;"); + exec("Object o = null; String n = (String)o;"); + + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Boolean.valueOf(true); boolean b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; boolean b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Boolean.valueOf(true); boolean b = (boolean)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; boolean b = (boolean)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); byte b = (byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; byte b = (byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); byte b = (byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); byte b = (byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); byte b = (byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); byte b = (byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); byte b = (byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); byte b = (byte)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); short b = (short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; short b = (short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); short b = (short)o;")); + 
expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); short b = (short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); short b = (short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); short b = (short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); short b = (short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); short b = (short)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); char b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; char b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); char b = (char)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; char b = (char)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); char b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); char b = (char)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); char b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); char b = (char)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); char b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); char b = (char)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); char b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); char b = (char)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); char b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); char b = (char)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); char b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); char b = (char)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf((int)0); int b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; int b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf((int)0); int b = (int)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; int b = (int)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); int b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); int b = 
(int)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); int b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); int b = (int)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); int b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); int b = (int)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); int b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); int b = (int)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); int b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); int b = (int)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); int b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); int b = (int)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); long b = (long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; long b = (long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); long b = (long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); long b = (long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); long b = (long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); long b = (long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); long b = (long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); long b = (long)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); float b = (float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; float b = (float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); 
float b = (float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); float b = (float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf(0); float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf(0); float b = (float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); float b = (float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); float b = (float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); float b = (float)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); double b = (double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; double b = (double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); double b = (double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); double b = (double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); double b = (double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); double b = (double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); double b = (double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); double b = (double)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Boolean.valueOf(true); Boolean b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; Boolean b = o;")); + exec("Object o = Boolean.valueOf(true); Boolean b = (Boolean)o;"); + exec("Object o = null; Boolean b = (Boolean)o;"); + + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; Byte b = o;")); + exec("Object o = 
Byte.valueOf((byte)0); Byte b = (Byte)o;"); + exec("Object o = null; Byte b = (Byte)o;"); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Byte b = (Byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Byte b = (Byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Byte b = (Byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Byte b = (Byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Byte b = (Byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Byte b = (Byte)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((byte)0); Short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; Short b = o;")); + exec("Object o = Short.valueOf((byte)0); Short b = (Short)o;"); + exec("Object o = null; Short b = (Short)o;"); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((short)0); Short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((short)0); Short b = (Short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Short b = (Short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Short b = (Short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Short b = (Short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Short b = (Short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Short b = (Short)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Character b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; Character b = o;")); + exec("Object o = Character.valueOf((char)0); Character b = (Character)o;"); + exec("Object o = null; Character b = (Character)o;"); + expectScriptThrows(ClassCastException.class, 
() -> exec("Object o = Byte.valueOf((byte)0); Character b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Character b = (Character)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Character b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Character b = (Character)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Character b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Character b = (Character)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Character b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Character b = (Character)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Character b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Character b = (Character)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Character b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Character b = (Character)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Integer b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; Integer b = o;")); + exec("Object o = Integer.valueOf(0); Integer b = (Integer)o;"); + exec("Object o = null; Integer b = (Integer)o;"); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Integer b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Integer b = (Integer)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Integer b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Integer b = (Integer)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Integer b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Integer b = (Integer)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Integer b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Integer b = (Integer)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Integer b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Integer b = (Integer)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Integer b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Integer b = (Integer)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; Long b = o;")); + exec("Object o = Long.valueOf((long)0); Long b = (Long)o;"); + exec("Object o = null; Long b = (Long)o;"); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Long 
b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Long b = (Long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Long b = (Long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Long b = (Long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Long b = (Long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Long b = (Long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Long b = (Long)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((long)0); Float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; Float b = o;")); + exec("Object o = Float.valueOf((long)0); Float b = (Float)o;"); + exec("Object o = null; Float b = (Float)o;"); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Float b = (Float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Float b = (Float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Float b = (Float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Float b = (Float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Float b = (Float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Float b = (Float)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((long)0); Double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; Double b = o;")); + exec("Object o = Double.valueOf((long)0); Double b = (Double)o;"); + exec("Object o = null; Double b = (Double)o;"); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Double b = (Double)o;")); + 
expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Double b = (Double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Double b = (Double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Double b = (Double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Double b = (Double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Double b = (Double)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Object o = new ArrayList(); ArrayList b = o;")); + exec("Object o = new ArrayList(); ArrayList b = (ArrayList)o;"); + } + + public void testNumberCasts() { + exec("Number o = Integer.valueOf(0); Object n = o;"); + exec("Number o = null; Object n = o;"); + exec("Number o = Integer.valueOf(0); Object n = (Object)o;"); + exec("Number o = null; Object n = (Object)o;"); + + expectScriptThrows(ClassCastException.class, () -> exec("Number o = 'string'; String n = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; String n = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = 'string'; String n = (String)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; String n = (String)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Boolean.valueOf(true); boolean b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; boolean b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Boolean.valueOf(true); boolean b = (boolean)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; boolean b = (boolean)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); byte b = (byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; byte b = (byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); byte b = (byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); byte b = (byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); byte b = (byte)o;")); + 
expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); byte b = (byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); byte b = (byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); byte b = (byte)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); short b = (short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; short b = (short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); short b = (short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); short b = (short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); short b = (short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); short b = (short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); short b = (short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); short b = (short)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); char b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; char b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); char b = (char)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; char b = (char)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); char b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); char b = (char)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); char b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); char b = (char)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); char b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = 
Integer.valueOf(0); char b = (char)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); char b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); char b = (char)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); char b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); char b = (char)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); char b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); char b = (char)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf((int)0); int b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; int b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf((int)0); int b = (int)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; int b = (int)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); int b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); int b = (int)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); int b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); int b = (int)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); int b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); int b = (int)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); int b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); int b = (int)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); int b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); int b = (int)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); int b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); int b = (int)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); long b = (long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; long b = (long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); long b = (long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); long b = (long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = 
Character.valueOf((char)0); long b = (long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); long b = (long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); long b = (long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); long b = (long)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); float b = (float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; float b = (float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); float b = (float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); float b = (float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf(0); float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf(0); float b = (float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); float b = (float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); float b = (float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); float b = (float)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); double b = (double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; double b = (double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); double b = (double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); double b = (double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); double b = o;")); + 
expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); double b = (double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); double b = (double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); double b = (double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); double b = (double)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Boolean.valueOf(true); Boolean b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Boolean b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Boolean.valueOf(true); Boolean b = (Boolean)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Boolean b = (Boolean)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Byte b = o;")); + exec("Number o = Byte.valueOf((byte)0); Byte b = (Byte)o;"); + exec("Number o = null; Byte b = (Byte)o;"); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Byte b = (Byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Byte b = (Byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Byte b = (Byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Byte b = (Byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Byte b = (Byte)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Byte b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Byte b = (Byte)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((byte)0); Short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Short b = o;")); + exec("Number o = Short.valueOf((byte)0); Short b = (Short)o;"); + exec("Number o = null; Short b = (Short)o;"); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((short)0); Short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((short)0); Short b = (Short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = 
Character.valueOf((char)0); Short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Short b = (Short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Short b = (Short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Short b = (Short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Short b = (Short)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Short b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Short b = (Short)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Character b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Character b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Character b = (Character)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Character b = (Character)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Character b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Character b = (Character)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Character b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Character b = (Character)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Character b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Character b = (Character)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Character b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Character b = (Character)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Character b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Character b = (Character)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Character b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Character b = (Character)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Integer b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Integer b = o;")); + exec("Number o = Integer.valueOf(0); Integer b = (Integer)o;"); + exec("Number o = null; Integer b = (Integer)o;"); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Integer b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Integer b = (Integer)o;")); + 
expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Integer b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Integer b = (Integer)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Integer b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Integer b = (Integer)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Integer b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Integer b = (Integer)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Integer b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Integer b = (Integer)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Integer b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Integer b = (Integer)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Long b = o;")); + exec("Number o = Long.valueOf((long)0); Long b = (Long)o;"); + exec("Number o = null; Long b = (Long)o;"); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Long b = (Long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Long b = (Long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Long b = (Long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Long b = (Long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Long b = (Long)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Long b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Long b = (Long)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((long)0); Float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Float b = o;")); + exec("Number o = Float.valueOf((long)0); Float b = (Float)o;"); + exec("Number o = null; Float b = (Float)o;"); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Float b = (Float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Float b = o;")); + 
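The cases in this test follow one rule: a `Number` reference can only be cast explicitly to the wrapper class it actually holds at runtime (or be `null`), and every other wrapper cast, implicit or explicit, fails. A plain-Java sketch of the same runtime behaviour, not part of the change itself; in plain Java the implicit assignments are compile errors rather than the `ClassCastException`s Painless reports:

```java
// Plain-Java illustration of the boxed-cast rule exercised by the Painless tests above.
public class BoxedCastSketch {
    public static void main(String[] args) {
        Number o = Integer.valueOf(0);
        Integer ok = (Integer) o;           // matches the runtime type, succeeds
        System.out.println("explicit cast to Integer: " + ok);

        Number nothing = null;
        Integer alsoOk = (Integer) nothing; // casting a null reference always succeeds
        System.out.println("explicit cast of null: " + alsoOk);

        try {
            Long bad = (Long) o;            // runtime type is Integer, so this throws
            System.out.println(bad);
        } catch (ClassCastException e) {
            System.out.println("cast to Long failed: " + e.getMessage());
        }
    }
}
```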
expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Float b = (Float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Float b = (Float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Float b = (Float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Float b = (Float)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Float b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Float b = (Float)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((long)0); Double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Double b = o;")); + exec("Number o = Double.valueOf((long)0); Double b = (Double)o;"); + exec("Number o = null; Double b = (Double)o;"); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Double b = (Double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Double b = (Double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Double b = (Double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Double b = (Double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Double b = (Double)o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Double b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Double b = (Double)o;")); + + expectScriptThrows(ClassCastException.class, () -> exec("Number o = new ArrayList(); ArrayList b = o;")); + expectScriptThrows(ClassCastException.class, () -> exec("Number o = new ArrayList(); ArrayList b = (ArrayList)o;")); + } +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java index 50d01535d7ff0..150853b3170a4 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java @@ -40,10 +40,10 @@ import java.io.IOException; import java.io.InputStream; +import java.net.URI; +import java.net.URISyntaxException; import 
java.util.List; import java.util.Map; -import java.util.regex.Matcher; -import java.util.regex.Pattern; import static java.util.Collections.emptyMap; import static java.util.Objects.requireNonNull; @@ -56,7 +56,6 @@ */ public class RestReindexAction extends AbstractBaseReindexRestHandler { static final ObjectParser PARSER = new ObjectParser<>("reindex"); - private static final Pattern HOST_PATTERN = Pattern.compile("(?<scheme>[^:]+)://(?<host>[^:]+):(?<port>\d+)(?<pathPrefix>/.*)?"); static { ObjectParser.Parser sourceParser = (parser, request, context) -> { @@ -136,15 +135,27 @@ static RemoteInfo buildRemoteInfo(Map<String, Object> source) throws IOException String username = extractString(remote, "username"); String password = extractString(remote, "password"); String hostInRequest = requireNonNull(extractString(remote, "host"), "[host] must be specified to reindex from a remote cluster"); - Matcher hostMatcher = HOST_PATTERN.matcher(hostInRequest); - if (false == hostMatcher.matches()) { + URI uri; + try { + uri = new URI(hostInRequest); + // URI has less stringent URL parsing than our code. We want to fail if all values are not provided. + if (uri.getPort() == -1) { + throw new URISyntaxException(hostInRequest, "The port was not defined in the [host]"); + } + } catch (URISyntaxException ex) { throw new IllegalArgumentException("[host] must be of the form [scheme]://[host]:[port](/[pathPrefix])? but was [" - + hostInRequest + "]"); + + hostInRequest + "]", ex); } - String scheme = hostMatcher.group("scheme"); - String host = hostMatcher.group("host"); - String pathPrefix = hostMatcher.group("pathPrefix"); - int port = Integer.parseInt(hostMatcher.group("port")); + + String scheme = uri.getScheme(); + String host = uri.getHost(); + int port = uri.getPort(); + + String pathPrefix = null; + if (uri.getPath().isEmpty() == false) { + pathPrefix = uri.getPath(); + } + Map<String, String> headers = extractStringStringMap(remote, "headers"); TimeValue socketTimeout = extractTimeValue(remote, "socket_timeout", RemoteInfo.DEFAULT_SOCKET_TIMEOUT); TimeValue connectTimeout = extractTimeValue(remote, "connect_timeout", RemoteInfo.DEFAULT_CONNECT_TIMEOUT); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWhitelistTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWhitelistTests.java index e32370b166546..f7a4e74fa19fd 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWhitelistTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWhitelistTests.java @@ -102,6 +102,12 @@ public void testRejectMatchAll() { assertMatchesTooMuch(random); } + public void testIPv6Address() { + List<String> whitelist = randomWhitelist(); + whitelist.add("[::1]:*"); + checkRemoteWhitelist(buildRemoteWhitelist(whitelist), newRemoteInfo("[::1]", 9200)); + } + private void assertMatchesTooMuch(List<String> whitelist) { Exception e = expectThrows(IllegalArgumentException.class, () -> buildRemoteWhitelist(whitelist)); assertEquals("Refusing to start because whitelist " + whitelist + " accepts all addresses. 
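The regex-based host parsing above is replaced with `java.net.URI`, which also makes IPv6 literals such as `https://[::1]:9201` work. A minimal standalone sketch of that parsing logic; the class and method names are illustrative, not helpers from the PR:

```java
import java.net.URI;
import java.net.URISyntaxException;

// Sketch of URI-based parsing of a [scheme]://[host]:[port](/[pathPrefix])? string.
public class RemoteHostParsingSketch {

    static String[] parse(String hostInRequest) {
        URI uri;
        try {
            uri = new URI(hostInRequest);
            // java.net.URI happily accepts "http://example.com", so a missing port is rejected by hand.
            if (uri.getPort() == -1) {
                throw new URISyntaxException(hostInRequest, "The port was not defined in the [host]");
            }
        } catch (URISyntaxException ex) {
            throw new IllegalArgumentException("[host] must be of the form [scheme]://[host]:[port](/[pathPrefix])? but was ["
                + hostInRequest + "]", ex);
        }
        // getPath() is empty (not null) when no path prefix was given; getHost() keeps the brackets of IPv6 literals.
        String pathPrefix = uri.getPath().isEmpty() ? null : uri.getPath();
        return new String[] { uri.getScheme(), uri.getHost(), Integer.toString(uri.getPort()), pathPrefix };
    }

    public static void main(String[] args) {
        for (String part : parse("https://[::1]:9201")) {
            System.out.println(part);   // https, [::1], 9201, null
        }
    }
}
```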
" diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java index 70e29ed12c5b4..1c6b60b705a3a 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java @@ -78,6 +78,8 @@ public void testBuildRemoteInfoFullyLoaded() throws IOException { public void testBuildRemoteInfoWithoutAllParts() throws IOException { expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("example.com")); + expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase(":9200")); + expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("http://:9200")); expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("example.com:9200")); expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("http://example.com")); } @@ -99,6 +101,14 @@ public void testBuildRemoteInfoWithAllHostParts() throws IOException { assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout()); assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout()); + info = buildRemoteInfoHostTestCase("https://[::1]:9201"); + assertEquals("https", info.getScheme()); + assertEquals("[::1]", info.getHost()); + assertEquals(9201, info.getPort()); + assertNull(info.getPathPrefix()); + assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout()); + assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout()); + info = buildRemoteInfoHostTestCase("https://other.example.com:9201/"); assertEquals("https", info.getScheme()); assertEquals("other.example.com", info.getHost()); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java index 7695c2bb596d6..cfa5a3f6d79c8 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java @@ -43,8 +43,10 @@ import org.hamcrest.Matchers; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.stream.Collectors; import static org.hamcrest.core.Is.is; @@ -60,13 +62,27 @@ protected Settings nodeSettings(int nodeOrdinal) { .put(TestZenDiscovery.USE_ZEN2.getKey(), true) .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.MAX_VALUE); - if (nodeOrdinal == 0) { - builder.put(ClusterBootstrapService.INITIAL_MASTER_NODE_COUNT_SETTING.getKey(), 2); - } - return builder.build(); } + @Override + protected List addExtraClusterBootstrapSettings(List allNodesSettings) { + final Settings firstNodeSettings = allNodesSettings.get(0); + final List otherNodesSettings = allNodesSettings.subList(1, allNodesSettings.size()); + final List masterNodeNames = allNodesSettings.stream() + .filter(org.elasticsearch.node.Node.NODE_MASTER_SETTING::get) + .map(org.elasticsearch.node.Node.NODE_NAME_SETTING::get) + .collect(Collectors.toList()); + final List updatedSettings = new ArrayList<>(); + + updatedSettings.add(Settings.builder().put(firstNodeSettings) + .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), masterNodeNames) + .build()); + 
updatedSettings.addAll(otherNodesSettings); + + return updatedSettings; + } + @Override protected boolean addMockHttpTransport() { return false; // enable http diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 3ce23c8e6a204..7519cade0f29b 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -25,10 +25,6 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; -import org.elasticsearch.index.mapper.TypeFieldMapper; -import org.elasticsearch.rest.action.document.RestGetAction; -import org.elasticsearch.rest.action.document.RestUpdateAction; -import org.elasticsearch.rest.action.search.RestExplainAction; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.CheckedFunction; @@ -37,6 +33,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.rest.action.document.RestGetAction; +import org.elasticsearch.rest.action.document.RestUpdateAction; +import org.elasticsearch.rest.action.search.RestExplainAction; import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.yaml.ObjectPath; @@ -574,8 +573,7 @@ void assertAllSearchWorks(int count) throws IOException { Request explainRequest = new Request("GET", "/" + index + "/" + type + "/" + id + "/_explain"); explainRequest.setJsonEntity("{ \"query\": { \"match_all\" : {} }}"); - explainRequest.setOptions( - expectWarnings(RestExplainAction.TYPES_DEPRECATION_MESSAGE, TypeFieldMapper.TypeFieldType.TYPES_DEPRECATION_MESSAGE)); + explainRequest.setOptions(expectWarnings(RestExplainAction.TYPES_DEPRECATION_MESSAGE)); String explanation = toStr(client().performRequest(explainRequest)); assertFalse("Could not find payload boost in explanation\n" + explanation, explanation.contains("payloadBoost")); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json index 9b4e09a174aaf..6a21620423d6b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json @@ -3,8 +3,8 @@ "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html", "methods": ["PUT","POST"], "url": { - "path": "/{index}/{type}/{id}/_create", - "paths": ["/{index}/{type}/{id}/_create"], + "path": "/{index}/_create/{id}", + "paths": ["/{index}/_create/{id}", "/{index}/{type}/{id}/_create"], "parts": { "id": { "type" : "string", @@ -18,7 +18,6 @@ }, "type": { "type" : "string", - "required" : true, "description" : "The type of the document" } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/10_with_id.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/10_with_id.yml index 1e58c38c7b589..410b31acb7138 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/10_with_id.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/10_with_id.yml @@ 
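With the create.json change above, `/{index}/_create/{id}` becomes the primary path and the typed form survives only as a deprecated alternative. A hedged example of issuing both forms with the low-level REST client; the host, index, and ids are made up, and the typed call is expected to come back with a deprecation warning header:

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

// Issues a typeless _create request and then the deprecated typed form against a local node.
public class CreateEndpointExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request typeless = new Request("PUT", "/test_1/_create/1");
            typeless.setJsonEntity("{\"foo\": \"bar\"}");
            Response response = client.performRequest(typeless);
            System.out.println(response.getStatusLine());

            Request typed = new Request("PUT", "/test_1/test/2/_create");  // deprecated typed form
            typed.setJsonEntity("{\"foo\": \"bar\"}");
            System.out.println(client.performRequest(typed).getStatusLine());
        }
    }
}
```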
-1,25 +1,24 @@ --- "Create with ID": + - skip: + version: " - 6.99.99" + reason: types are required in requests before 7.0.0 - do: create: index: test_1 - type: test id: 1 body: { foo: bar } - match: { _index: test_1 } - - match: { _type: test } - match: { _id: "1"} - match: { _version: 1} - do: get: index: test_1 - type: test id: 1 - match: { _index: test_1 } - - match: { _type: test } - match: { _id: "1"} - match: { _version: 1} - match: { _source: { foo: bar }} @@ -28,6 +27,5 @@ catch: conflict create: index: test_1 - type: test id: 1 body: { foo: bar } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/11_with_id_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/11_with_id_with_types.yml new file mode 100644 index 0000000000000..1e58c38c7b589 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/11_with_id_with_types.yml @@ -0,0 +1,33 @@ +--- +"Create with ID": + - do: + create: + index: test_1 + type: test + id: 1 + body: { foo: bar } + + - match: { _index: test_1 } + - match: { _type: test } + - match: { _id: "1"} + - match: { _version: 1} + + - do: + get: + index: test_1 + type: test + id: 1 + + - match: { _index: test_1 } + - match: { _type: test } + - match: { _id: "1"} + - match: { _version: 1} + - match: { _source: { foo: bar }} + + - do: + catch: conflict + create: + index: test_1 + type: test + id: 1 + body: { foo: bar } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yml index ab9932819381f..5280c5bb9946d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yml @@ -1,8 +1,10 @@ --- "Create without ID": + - skip: + version: " - 6.99.99" + reason: types are required in requests before 7.0.0 - do: catch: param create: index: test_1 - type: test body: { foo: bar } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id_with_types.yml new file mode 100644 index 0000000000000..ab9932819381f --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id_with_types.yml @@ -0,0 +1,8 @@ +--- +"Create without ID": + - do: + catch: param + create: + index: test_1 + type: test + body: { foo: bar } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/30_internal_version.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/30_internal_version.yml index 83772828bc8f4..52e8e464da094 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/30_internal_version.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/30_internal_version.yml @@ -1,10 +1,11 @@ --- "Internal version": - + - skip: + version: " - 6.99.99" + reason: types are required in requests before 7.0.0 - do: create: index: test_1 - type: test id: 1 body: { foo: bar } @@ -14,18 +15,18 @@ catch: conflict create: index: test_1 - type: test id: 1 body: { foo: bar } --- "Internal versioning with explicit version": - + - skip: + version: " - 6.99.99" + reason: types are required in requests before 7.0.0 - do: catch: bad_request create: index: test - type: test id: 3 body: { foo: bar } version: 5 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/31_internal_version_with_types.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/create/31_internal_version_with_types.yml new file mode 100644 index 0000000000000..83772828bc8f4 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/31_internal_version_with_types.yml @@ -0,0 +1,35 @@ +--- +"Internal version": + + - do: + create: + index: test_1 + type: test + id: 1 + body: { foo: bar } + + - match: { _version: 1} + + - do: + catch: conflict + create: + index: test_1 + type: test + id: 1 + body: { foo: bar } + +--- +"Internal versioning with explicit version": + + - do: + catch: bad_request + create: + index: test + type: test + id: 3 + body: { foo: bar } + version: 5 + + - match: { status: 400 } + - match: { error.type: action_request_validation_exception } + - match: { error.reason: "Validation Failed: 1: create operations do not support explicit versions. use index instead;" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yml index cb8c041d7102c..47dc5b6059609 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yml @@ -1,11 +1,12 @@ --- "External version": - + - skip: + version: " - 6.99.99" + reason: types are required in requests before 7.0.0 - do: catch: bad_request create: index: test - type: test id: 1 body: { foo: bar } version_type: external @@ -19,7 +20,6 @@ catch: bad_request create: index: test - type: test id: 2 body: { foo: bar } version_type: external diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/36_external_version_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/36_external_version_with_types.yml new file mode 100644 index 0000000000000..cb8c041d7102c --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/36_external_version_with_types.yml @@ -0,0 +1,30 @@ +--- +"External version": + + - do: + catch: bad_request + create: + index: test + type: test + id: 1 + body: { foo: bar } + version_type: external + version: 0 + + - match: { status: 400 } + - match: { error.type: action_request_validation_exception } + - match: { error.reason: "Validation Failed: 1: create operations only support internal versioning. use index instead;" } + + - do: + catch: bad_request + create: + index: test + type: test + id: 2 + body: { foo: bar } + version_type: external + version: 5 + + - match: { status: 400 } + - match: { error.type: action_request_validation_exception } + - match: { error.reason: "Validation Failed: 1: create operations only support internal versioning. 
use index instead;" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/40_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/40_routing.yml index 752489f722c9e..9c048c361bd5c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/40_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/40_routing.yml @@ -1,6 +1,8 @@ --- "Routing": - + - skip: + version: " - 6.99.99" + reason: types are required in requests before 7.0.0 - do: indices.create: index: test_1 @@ -18,7 +20,6 @@ - do: create: index: test_1 - type: test id: 1 routing: 5 body: { foo: bar } @@ -26,7 +27,6 @@ - do: get: index: test_1 - type: test id: 1 routing: 5 stored_fields: [_routing] @@ -38,6 +38,5 @@ catch: missing get: index: test_1 - type: test id: 1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/41_routing_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/41_routing_with_types.yml new file mode 100644 index 0000000000000..752489f722c9e --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/41_routing_with_types.yml @@ -0,0 +1,43 @@ +--- +"Routing": + + - do: + indices.create: + index: test_1 + body: + settings: + index: + number_of_shards: 5 + number_of_routing_shards: 5 + number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + + - do: + create: + index: test_1 + type: test + id: 1 + routing: 5 + body: { foo: bar } + + - do: + get: + index: test_1 + type: test + id: 1 + routing: 5 + stored_fields: [_routing] + + - match: { _id: "1"} + - match: { _routing: "5"} + + - do: + catch: missing + get: + index: test_1 + type: test + id: 1 + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yml index e24bdf4260340..dd8acd9f99f4f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yml @@ -1,6 +1,8 @@ --- "Refresh": - + - skip: + version: " - 6.99.99" + reason: types are required in requests before 7.0.0 - do: indices.create: index: test_1 @@ -11,7 +13,6 @@ - do: create: index: test_1 - type: test id: 1 body: { foo: bar } @@ -27,7 +28,6 @@ - do: create: index: test_1 - type: test id: 2 refresh: true body: { foo: bar } @@ -44,10 +44,12 @@ --- "When refresh url parameter is an empty string that means \"refresh immediately\"": + - skip: + version: " - 6.99.99" + reason: types are required in requests before 7.0.0 - do: create: index: test_1 - type: test id: 1 refresh: "" body: { foo: bar } @@ -64,10 +66,12 @@ --- "refresh=wait_for waits until changes are visible in search": + - skip: + version: " - 6.99.99" + reason: types are required in requests before 7.0.0 - do: index: index: create_60_refresh_1 - type: test id: create_60_refresh_id1 body: { foo: bar } refresh: wait_for diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/61_refresh_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/61_refresh_with_types.yml new file mode 100644 index 0000000000000..e24bdf4260340 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/61_refresh_with_types.yml @@ -0,0 +1,82 @@ +--- +"Refresh": + + - do: + indices.create: + index: test_1 + body: + settings: + index.refresh_interval: -1 + number_of_replicas: 0 + - do: + create: + index: test_1 + type: test + id: 1 + body: { foo: bar } + + - do: 
+ search: + rest_total_hits_as_int: true + index: test_1 + body: + query: { term: { _id: 1 }} + + - match: { hits.total: 0 } + + - do: + create: + index: test_1 + type: test + id: 2 + refresh: true + body: { foo: bar } + - is_true: forced_refresh + + - do: + search: + rest_total_hits_as_int: true + index: test_1 + body: + query: { term: { _id: 2 }} + + - match: { hits.total: 1 } + +--- +"When refresh url parameter is an empty string that means \"refresh immediately\"": + - do: + create: + index: test_1 + type: test + id: 1 + refresh: "" + body: { foo: bar } + - is_true: forced_refresh + + - do: + search: + rest_total_hits_as_int: true + index: test_1 + body: + query: { term: { _id: 1 }} + + - match: { hits.total: 1 } + +--- +"refresh=wait_for waits until changes are visible in search": + - do: + index: + index: create_60_refresh_1 + type: test + id: create_60_refresh_id1 + body: { foo: bar } + refresh: wait_for + - is_false: forced_refresh + + - do: + search: + rest_total_hits_as_int: true + index: create_60_refresh_1 + body: + query: { term: { _id: create_60_refresh_id1 }} + - match: { hits.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/70_nested.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/70_nested.yml index 2c912a2165a83..1d6bd5bd70373 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/70_nested.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/70_nested.yml @@ -1,16 +1,19 @@ --- setup: + - skip: + version: " - 6.99.99" + reason: types are required in requests before 7.0.0 - do: indices.create: + include_type_name: false index: test_1 body: settings: index.mapping.nested_objects.limit: 2 mappings: - test_type: - properties: - nested1: - type: nested + properties: + nested1: + type: nested --- "Indexing a doc with No. nested objects less or equal to index.mapping.nested_objects.limit should succeed": @@ -20,7 +23,6 @@ setup: - do: create: index: test_1 - type: test_type id: 1 body: "nested1" : [ { "foo": "bar" }, { "foo": "bar2" } ] @@ -35,7 +37,6 @@ setup: catch: /The number of nested documents has exceeded the allowed limit of \[2\]. This limit can be set by changing the \[index.mapping.nested_objects.limit\] index level setting\./ create: index: test_1 - type: test_type id: 1 body: "nested1" : [ { "foo": "bar" }, { "foo": "bar2" }, { "foo": "bar3" } ] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/71_nested_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/71_nested_with_types.yml new file mode 100644 index 0000000000000..2c912a2165a83 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/71_nested_with_types.yml @@ -0,0 +1,41 @@ +--- +setup: + - do: + indices.create: + index: test_1 + body: + settings: + index.mapping.nested_objects.limit: 2 + mappings: + test_type: + properties: + nested1: + type: nested + +--- +"Indexing a doc with No. nested objects less or equal to index.mapping.nested_objects.limit should succeed": + - skip: + version: " - 6.99.99" + reason: index.mapping.nested_objects setting has been added in 7.0.0 + - do: + create: + index: test_1 + type: test_type + id: 1 + body: + "nested1" : [ { "foo": "bar" }, { "foo": "bar2" } ] + - match: { _version: 1} + +--- +"Indexing a doc with No. 
nested objects more than index.mapping.nested_objects.limit should fail": + - skip: + version: " - 6.99.99" + reason: index.mapping.nested_objects setting has been added in 7.0.0 + - do: + catch: /The number of nested documents has exceeded the allowed limit of \[2\]. This limit can be set by changing the \[index.mapping.nested_objects.limit\] index level setting\./ + create: + index: test_1 + type: test_type + id: 1 + body: + "nested1" : [ { "foo": "bar" }, { "foo": "bar2" }, { "foo": "bar3" } ] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml index 21eb7959cf2f0..c5cac876e7959 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml @@ -304,7 +304,7 @@ setup: index: test type: test id: 4 - body: { "date" : "-2524492800000" } + body: { "date" : "10000" } - do: index: @@ -324,16 +324,16 @@ setup: age_groups: date_range: field: date - missing: "-2240496000000" + missing: "0" ranges: - key: Generation Y from: '315561600000' to: '946713600000' - key: Generation X - from: "-157737600000" + from: "200000" to: '315561600000' - key: Other - to: "-2208960000000" + to: "200000" - match: { hits.total: 5 } diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 07df1b646d54c..8f4d799713b09 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -116,6 +116,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_5_3 = new Version(V_6_5_3_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); public static final int V_6_5_4_ID = 6050499; public static final Version V_6_5_4 = new Version(V_6_5_4_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); + public static final int V_6_5_5_ID = 6050599; + public static final Version V_6_5_5 = new Version(V_6_5_5_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); public static final int V_6_6_0_ID = 6060099; public static final Version V_6_6_0 = new Version(V_6_6_0_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); public static final int V_6_7_0_ID = 6070099; @@ -142,6 +144,8 @@ public static Version fromId(int id) { return V_6_7_0; case V_6_6_0_ID: return V_6_6_0; + case V_6_5_5_ID: + return V_6_5_5; case V_6_5_4_ID: return V_6_5_4; case V_6_5_3_ID: diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java index de8f7c7ccc1ad..260c63d340a6f 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java @@ -91,10 +91,6 @@ public ExplainRequest type(String type) { return this; } - public boolean isTypeless() { - return type == null || type.equals(MapperService.SINGLE_MAPPING_NAME); - } - public String id() { return id; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 9389edeb345fc..732debf2a1305 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -376,17 +376,6 @@ public 
SearchRequestBuilder setTrackTotalHits(boolean trackTotalHits) { return this; } - /** - * Adds stored fields to load and return (note, it must be stored) as part of the search request. - * To disable the stored fields entirely (source and metadata fields) use {@code storedField("_none_")}. - * @deprecated Use {@link SearchRequestBuilder#storedFields(String...)} instead. - */ - @Deprecated - public SearchRequestBuilder fields(String... fields) { - sourceBuilder().storedFields(Arrays.asList(fields)); - return this; - } - /** * Adds stored fields to load and return (note, it must be stored) as part of the search request. * To disable the stored fields entirely (source and metadata fields) use {@code storedField("_none_")}. diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index ef7f3b9c3ec1a..01b2fae997749 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -403,7 +403,8 @@ private void handleJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback void becomeCandidate(String method) { assert Thread.holdsLock(mutex) : "Coordinator mutex not held"; - logger.debug("{}: becoming CANDIDATE (was {}, lastKnownLeader was [{}])", method, mode, lastKnownLeader); + logger.debug("{}: coordinator becoming CANDIDATE in term {} (was {}, lastKnownLeader was [{}])", + method, getCurrentTerm(), mode, lastKnownLeader); if (mode != Mode.CANDIDATE) { mode = Mode.CANDIDATE; @@ -440,7 +441,8 @@ void becomeLeader(String method) { assert mode == Mode.CANDIDATE : "expected candidate but was " + mode; assert getLocalNode().isMasterNode() : getLocalNode() + " became a leader but is not master-eligible"; - logger.debug("{}: becoming LEADER (was {}, lastKnownLeader was [{}])", method, mode, lastKnownLeader); + logger.debug("{}: coordinator becoming LEADER in term {} (was {}, lastKnownLeader was [{}])", + method, getCurrentTerm(), mode, lastKnownLeader); mode = Mode.LEADER; joinAccumulator.close(mode); @@ -461,7 +463,8 @@ void becomeFollower(String method, DiscoveryNode leaderNode) { assert Thread.holdsLock(mutex) : "Coordinator mutex not held"; assert leaderNode.isMasterNode() : leaderNode + " became a leader but is not master-eligible"; - logger.debug("{}: becoming FOLLOWER of [{}] (was {}, lastKnownLeader was [{}])", method, leaderNode, mode, lastKnownLeader); + logger.debug("{}: coordinator becoming FOLLOWER of [{}] in term {} (was {}, lastKnownLeader was [{}])", + method, leaderNode, getCurrentTerm(), mode, lastKnownLeader); final boolean restartLeaderChecker = (mode == Mode.FOLLOWER && Optional.of(leaderNode).equals(lastKnownLeader)) == false; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java b/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java index cccbde35c18a9..0f387fca18a13 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java @@ -203,7 +203,7 @@ private class CheckScheduler implements Releasable { @Override public void close() { if (isClosed.compareAndSet(false, true) == false) { - logger.debug("already closed"); + logger.trace("already closed, doing nothing"); } else { logger.debug("closed"); } @@ -211,7 +211,7 @@ public void close() { void handleWakeUp() { if (isClosed.get()) 
{ - logger.debug("closed check scheduler woken up, doing nothing"); + logger.trace("closed check scheduler woken up, doing nothing"); return; } @@ -289,7 +289,7 @@ void leaderFailed() { if (isClosed.compareAndSet(false, true)) { transportService.getThreadPool().generic().execute(onLeaderFailure); } else { - logger.debug("already closed, not failing leader"); + logger.trace("already closed, not failing leader"); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java index 3e94245a8975d..c331a9a137e0f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java @@ -446,7 +446,8 @@ private void applyChanges(UpdateTask task, ClusterState previousClusterState, Cl if (nodesDelta.hasChanges() && logger.isInfoEnabled()) { String summary = nodesDelta.shortSummary(); if (summary.length() > 0) { - logger.info("{}, reason: {}", summary, task.source); + logger.info("{}, term: {}, version: {}, reason: {}", + summary, newClusterState.term(), newClusterState.version(), task.source); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index fbc1a08182109..472ffcd73effd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -230,7 +230,8 @@ protected void runTasks(TaskInputs taskInputs) { if (nodesDelta.hasChanges() && logger.isInfoEnabled()) { String nodeSummary = nodesDelta.shortSummary(); if (nodeSummary.length() > 0) { - logger.info("{}, reason: {}", summary, nodeSummary); + logger.info("{}, term: {}, version: {}, reason: {}", + summary, newClusterState.term(), newClusterState.version(), nodeSummary); } } diff --git a/server/src/main/java/org/elasticsearch/common/bytes/ByteBufferReference.java b/server/src/main/java/org/elasticsearch/common/bytes/ByteBufferReference.java index 998a06a2643ba..0f1a6a85ac30c 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/ByteBufferReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/ByteBufferReference.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.bytes; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.FutureObjects; import java.nio.ByteBuffer; @@ -27,26 +28,22 @@ * This is a {@link BytesReference} backed by a {@link ByteBuffer}. The byte buffer can either be a heap or * direct byte buffer. The reference is composed of the space between the {@link ByteBuffer#position()} and * {@link ByteBuffer#limit()} at construction time. If the position or limit of the underlying byte buffer is - * changed, those changes will not be reflected in this reference. However, modifying the limit or position - * of the underlying byte buffer is not recommended as those can be used during {@link ByteBuffer#get()} - * bounds checks. Use {@link ByteBuffer#duplicate()} at creation time if you plan on modifying the markers of - * the underlying byte buffer. Any changes to the underlying data in the byte buffer will be reflected. + * changed, those changes will not be reflected in this reference. Any changes to the underlying data in the + * byte buffer will be reflected in this reference. 
*/ public class ByteBufferReference extends BytesReference { private final ByteBuffer buffer; - private final int offset; private final int length; - public ByteBufferReference(ByteBuffer buffer) { - this.buffer = buffer; - this.offset = buffer.position(); + ByteBufferReference(ByteBuffer buffer) { + this.buffer = buffer.slice(); this.length = buffer.remaining(); } @Override public byte get(int index) { - return buffer.get(index + offset); + return buffer.get(index); } @Override @@ -56,14 +53,13 @@ public int length() { @Override public BytesReference slice(int from, int length) { - if (from < 0 || (from + length) > this.length) { - throw new IndexOutOfBoundsException("can't slice a buffer with length [" + this.length + "], with slice parameters from [" - + from + "], length [" + length + "]"); - } - ByteBuffer newByteBuffer = buffer.duplicate(); - newByteBuffer.position(offset + from); - newByteBuffer.limit(offset + from + length); - return new ByteBufferReference(newByteBuffer); + FutureObjects.checkFromIndexSize(from, length, this.length); + buffer.position(from); + buffer.limit(from + length); + ByteBufferReference newByteBuffer = new ByteBufferReference(buffer); + buffer.position(0); + buffer.limit(this.length); + return newByteBuffer; } /** @@ -75,10 +71,10 @@ public BytesReference slice(int from, int length) { @Override public BytesRef toBytesRef() { if (buffer.hasArray()) { - return new BytesRef(buffer.array(), buffer.arrayOffset() + offset, length); + return new BytesRef(buffer.array(), buffer.arrayOffset(), length); } final byte[] copy = new byte[length]; - buffer.get(copy, offset, length); + buffer.get(copy, 0, length); return new BytesRef(copy); } diff --git a/server/src/main/java/org/elasticsearch/common/joda/Joda.java b/server/src/main/java/org/elasticsearch/common/joda/Joda.java index 265cc2edce14c..3c99b65a9a54f 100644 --- a/server/src/main/java/org/elasticsearch/common/joda/Joda.java +++ b/server/src/main/java/org/elasticsearch/common/joda/Joda.java @@ -232,6 +232,27 @@ public static JodaDateFormatter forPattern(String input) { formatter = StrictISODateTimeFormat.yearMonth(); } else if ("strictYearMonthDay".equals(input) || "strict_year_month_day".equals(input)) { formatter = StrictISODateTimeFormat.yearMonthDay(); + } else if (Strings.hasLength(input) && input.contains("||")) { + String[] formats = Strings.delimitedListToStringArray(input, "||"); + DateTimeParser[] parsers = new DateTimeParser[formats.length]; + + if (formats.length == 1) { + formatter = forPattern(input).parser; + } else { + DateTimeFormatter dateTimeFormatter = null; + for (int i = 0; i < formats.length; i++) { + JodaDateFormatter currentFormatter = forPattern(formats[i]); + DateTimeFormatter currentParser = currentFormatter.parser; + if (dateTimeFormatter == null) { + dateTimeFormatter = currentFormatter.printer; + } + parsers[i] = currentParser.getParser(); + } + + DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder() + .append(dateTimeFormatter.withZone(DateTimeZone.UTC).getPrinter(), parsers); + formatter = builder.toFormatter(); + } } else { try { maybeLogJodaDeprecation(input); @@ -351,10 +372,14 @@ public int parseInto(DateTimeParserBucket bucket, String text, int position) { int factor = hasMilliSecondPrecision ? 
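The `forPattern` change above builds a single Joda formatter for a `"pattern1||pattern2"` string: the first pattern supplies the printer and every pattern contributes a parser. A self-contained sketch of that construction using plain Joda-Time patterns; Elasticsearch's named formats and deprecation handling are left out:

```java
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.joda.time.format.DateTimeFormatterBuilder;
import org.joda.time.format.DateTimeParser;

// The first pattern is used for printing; all patterns are tried in order for parsing.
public class MultiFormatJodaSketch {

    static DateTimeFormatter forPatterns(String input) {
        String[] formats = input.split("\\|\\|");
        DateTimeParser[] parsers = new DateTimeParser[formats.length];
        DateTimeFormatter printer = null;
        for (int i = 0; i < formats.length; i++) {
            DateTimeFormatter current = DateTimeFormat.forPattern(formats[i]);
            if (printer == null) {
                printer = current;  // first pattern wins for printing
            }
            parsers[i] = current.getParser();
        }
        return new DateTimeFormatterBuilder()
            .append(printer.withZone(DateTimeZone.UTC).getPrinter(), parsers)
            .toFormatter();
    }

    public static void main(String[] args) {
        DateTimeFormatter formatter = forPatterns("yyyy-MM-dd||yyyy/MM/dd");
        System.out.println(formatter.parseDateTime("2018/12/31"));  // parsed by the second pattern
        System.out.println(formatter.print(0));                     // printed with the first pattern
    }
}
```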
1 : 1000; try { long millis = new BigDecimal(text).longValue() * factor; - // check for deprecation, but after it has parsed correctly so the "e" isn't from something else + // check for deprecations, but after it has parsed correctly so invalid values aren't counted as deprecated + if (millis < 0) { + deprecationLogger.deprecatedAndMaybeLog("epoch-negative", "Use of negative values" + + " in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); + } if (scientificNotation.matcher(text).find()) { deprecationLogger.deprecatedAndMaybeLog("epoch-scientific-notation", "Use of scientific notation" + - "in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); + " in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); } DateTime dt = new DateTime(millis, DateTimeZone.UTC); bucket.saveField(DateTimeFieldType.year(), dt.getYear()); diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java index 140c9fec394fe..49c5e7626072b 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java @@ -129,19 +129,17 @@ static DateFormatter forPattern(String input) { if (Strings.hasLength(input) == false) { throw new IllegalArgumentException("No date pattern provided"); } + if (input.startsWith("8") == false) { + return Joda.forPattern(input); + } + + // force java 8 date format List formatters = new ArrayList<>(); - for (String pattern : Strings.delimitedListToStringArray(input, "||")) { - if (Strings.hasLength(input) == false) { + for (String pattern : Strings.delimitedListToStringArray(input.substring(1), "||")) { + if (Strings.hasLength(pattern) == false) { throw new IllegalArgumentException("Cannot have empty element in multi date format pattern: " + input); } - final DateFormatter formatter; - if (pattern.startsWith("8")) { - // force java 8 date format - formatter = DateFormatters.forPattern(pattern.substring(1)); - } else { - formatter = Joda.forPattern(pattern); - } - formatters.add(formatter); + formatters.add(DateFormatters.forPattern(pattern)); } if (formatters.size() == 1) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java index 442a816be5c6b..c4d9ef966ca3d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.mapper; -import org.apache.logging.log4j.LogManager; import org.apache.lucene.document.Field; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; @@ -36,7 +35,6 @@ import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -92,10 +90,6 @@ public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext c public static final class TypeFieldType extends StringFieldType { - private static final DeprecationLogger deprecationLogger = 
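The `DateFormatter.forPattern` change above moves the java-time switch to the front of the string: anything not prefixed with `8` stays on the Joda path, while an `8`-prefixed pattern is stripped, split on `||`, and handled entirely by java.time. A rough illustration of that dispatch rule with plain `java.time` formatters; this is not the `DateFormatters` implementation itself, and the pattern handling is simplified:

```java
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.util.ArrayList;
import java.util.List;

// Strip the "8" prefix, build one java.time formatter per "||"-separated pattern, try them in order.
public class JavaTimeMultiPatternSketch {

    static LocalDate parse(String input, String text) {
        if (input.startsWith("8") == false) {
            throw new IllegalArgumentException("[" + input + "] would be handled by the Joda path");
        }
        List<DateTimeFormatter> formatters = new ArrayList<>();
        for (String pattern : input.substring(1).split("\\|\\|")) {
            if (pattern.isEmpty()) {
                throw new IllegalArgumentException("Cannot have empty element in multi date format pattern: " + input);
            }
            formatters.add(DateTimeFormatter.ofPattern(pattern));
        }
        DateTimeParseException last = null;
        for (DateTimeFormatter formatter : formatters) {
            try {
                return LocalDate.parse(text, formatter);
            } catch (DateTimeParseException e) {
                last = e;   // remember the failure and try the next pattern
            }
        }
        throw last;
    }

    public static void main(String[] args) {
        System.out.println(parse("8yyyy-MM-dd||yyyy/MM/dd", "2018/12/31")); // 2018-12-31
    }
}
```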
new DeprecationLogger(LogManager.getLogger(TypeFieldType.class)); - public static final String TYPES_DEPRECATION_MESSAGE = - "[types removal] Referring to types within search queries is deprecated, filter on a field instead."; - TypeFieldType() { } @@ -126,7 +120,6 @@ public boolean isSearchable() { @Override public Query existsQuery(QueryShardContext context) { - deprecationLogger.deprecatedAndMaybeLog("exists_query_with_type_field", TYPES_DEPRECATION_MESSAGE); return new MatchAllDocsQuery(); } @@ -137,7 +130,6 @@ public Query termQuery(Object value, QueryShardContext context) { @Override public Query termsQuery(List values, QueryShardContext context) { - deprecationLogger.deprecatedAndMaybeLog("term_query_with_type_field", TYPES_DEPRECATION_MESSAGE); DocumentMapper mapper = context.getMapperService().documentMapper(); if (mapper == null) { return new MatchNoDocsQuery("No types"); @@ -159,7 +151,6 @@ public Query termsQuery(List values, QueryShardContext context) { @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { - deprecationLogger.deprecatedAndMaybeLog("range_query_with_type_field", TYPES_DEPRECATION_MESSAGE); Query result = new MatchAllDocsQuery(); String type = context.getMapperService().documentMapper().type(); if (type != null) { diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java index bfc786e84f7db..733875075b051 100644 --- a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java @@ -32,12 +32,12 @@ import org.elasticsearch.script.Script; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField; +import org.elasticsearch.search.collapse.CollapseBuilder; import org.elasticsearch.search.fetch.StoredFieldsContext; import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext.FieldAndFormat; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.sort.SortBuilder; -import org.elasticsearch.search.collapse.CollapseBuilder; import java.io.IOException; import java.util.ArrayList; @@ -158,6 +158,7 @@ public InnerHitBuilder(StreamInput in) throws IOException { trackScores = in.readBoolean(); storedFieldsContext = in.readOptionalWriteable(StoredFieldsContext::new); if (in.getVersion().before(Version.V_6_4_0)) { + @SuppressWarnings("unchecked") List fieldList = (List) in.readGenericValue(); if (fieldList == null) { docValueFields = null; @@ -307,28 +308,6 @@ public InnerHitBuilder setTrackScores(boolean trackScores) { return this; } - /** - * Gets the stored fields to load and return. - * - * @deprecated Use {@link InnerHitBuilder#getStoredFieldsContext()} instead. - */ - @Deprecated - public List getFieldNames() { - return storedFieldsContext == null ? null : storedFieldsContext.fieldNames(); - } - - /** - * Sets the stored fields to load and return. - * If none are specified, the source of the document will be returned. - * - * @deprecated Use {@link InnerHitBuilder#setStoredFieldNames(List)} instead. - */ - @Deprecated - public InnerHitBuilder setFieldNames(List fieldNames) { - return setStoredFieldNames(fieldNames); - } - - /** * Gets the stored fields context. 
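With the deprecated `getFieldNames()`/`setFieldNames()` accessors removed above, callers go through the stored-fields API that the deleted `@deprecated` javadoc already pointed to. A minimal migration sketch, assuming the server artifact is on the classpath:

```java
import java.util.Arrays;

import org.elasticsearch.index.query.InnerHitBuilder;

// Replace the removed setFieldNames(...) call with setStoredFieldNames(...).
public class InnerHitMigrationSketch {
    public static void main(String[] args) {
        InnerHitBuilder innerHit = new InnerHitBuilder();
        // previously: innerHit.setFieldNames(Arrays.asList("author", "date"));
        innerHit.setStoredFieldNames(Arrays.asList("author", "date"));
        System.out.println(innerHit);
    }
}
```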
*/ diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java index 82bae93e84d49..f7f1d29f53098 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.query; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.Query; @@ -32,6 +33,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; @@ -47,6 +49,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.mapper.TextFieldMapper; +import org.elasticsearch.index.mapper.TypeFieldMapper; import org.elasticsearch.index.query.support.NestedScope; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.script.ScriptService; @@ -70,6 +73,10 @@ * Context object used to create lucene queries on the shard level. */ public class QueryShardContext extends QueryRewriteContext { + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( + LogManager.getLogger(QueryShardContext.class)); + static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Using the _type field " + + "in queries is deprecated, prefer to filter on a field instead."; private final ScriptService scriptService; private final IndexSettings indexSettings; @@ -185,6 +192,9 @@ public Collection simpleMatchToIndexNames(String pattern) { } public MappedFieldType fieldMapper(String name) { + if (name.equals(TypeFieldMapper.NAME)) { + deprecationLogger.deprecatedAndMaybeLog("query_with_types", TYPES_DEPRECATION_MESSAGE); + } return failIfFieldMappingNotFound(name, mapperService.fullName(name)); } diff --git a/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java index 260c47a334b30..ceb473bce40d0 100644 --- a/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java @@ -32,7 +32,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.TypeFieldMapper; import java.io.IOException; import java.util.Objects; @@ -41,7 +40,10 @@ public class TypeQueryBuilder extends AbstractQueryBuilder { public static final String NAME = "type"; private static final ParseField VALUE_FIELD = new ParseField("value"); - private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(TypeQueryBuilder.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( + LogManager.getLogger(TypeQueryBuilder.class)); + static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Type queries are deprecated, " + + "prefer to filter on a field instead."; private final 
String type; @@ -128,7 +130,7 @@ public String getWriteableName() { @Override protected Query doToQuery(QueryShardContext context) throws IOException { - deprecationLogger.deprecatedAndMaybeLog("type_query", TypeFieldMapper.TypeFieldType.TYPES_DEPRECATION_MESSAGE); + deprecationLogger.deprecatedAndMaybeLog("type_query", TYPES_DEPRECATION_MESSAGE); //LUCENE 4 UPGRADE document mapper should use bytesref as well? DocumentMapper documentMapper = context.getMapperService().documentMapper(type); if (documentMapper == null) { diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index 83d81222bf58f..d8acba635f822 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -1227,6 +1227,9 @@ private void write(final StreamOutput out) throws IOException { out.writeString(type); out.writeBytesReference(source); out.writeOptionalString(routing); + if (format < FORMAT_NO_PARENT) { + out.writeOptionalString(null); // _parent + } out.writeLong(version); if (format < FORMAT_NO_VERSION_TYPE) { out.writeByte(VersionType.EXTERNAL.getValue()); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java index bbb02231e7a59..6b442750c1898 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java @@ -258,7 +258,7 @@ private class RecoveryMonitor extends AbstractRunnable { private final long recoveryId; private final TimeValue checkInterval; - private long lastSeenAccessTime; + private volatile long lastSeenAccessTime; private RecoveryMonitor(long recoveryId, long lastSeenAccessTime, TimeValue checkInterval) { this.recoveryId = recoveryId; diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java index 27aac8661bafa..71918cadb55d8 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java @@ -73,7 +73,8 @@ public PersistentTasksClusterService(Settings settings, PersistentTasksExecutorR this::setRecheckInterval); } - void setRecheckInterval(TimeValue recheckInterval) { + // visible for testing only + public void setRecheckInterval(TimeValue recheckInterval) { periodicRechecker.setInterval(recheckInterval); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java index 74c30a9f5d0e1..5cfbbfdb1b524 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java @@ -52,11 +52,12 @@ public class RestBulkAction extends BaseRestHandler { public RestBulkAction(Settings settings, RestController controller) { super(settings); - controller.registerHandler(POST, "/_bulk", this); controller.registerHandler(PUT, "/_bulk", this); controller.registerHandler(POST, "/{index}/_bulk", this); controller.registerHandler(PUT, "/{index}/_bulk", this); + + // Deprecated typed endpoints. 
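The Translog change above keeps the wire format readable by older nodes: when writing in a pre-upgrade format it still emits the slot for the removed `_parent` field, just with a `null` value. A generic sketch of that version-gated serialization pattern using plain Java I/O; the format constant and the optional-string helper are stand-ins, not Elasticsearch's `StreamOutput` API:

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// When targeting an old format, write a placeholder for the field that no longer exists
// so readers of that format stay in sync with the byte layout they expect.
public class VersionGatedWriteSketch {
    static final int FORMAT_NO_PARENT = 9;   // hypothetical format id where _parent disappeared

    static void writeOptionalString(DataOutputStream out, String value) throws IOException {
        out.writeBoolean(value != null);
        if (value != null) {
            out.writeUTF(value);
        }
    }

    static byte[] write(int format, String type, String id) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeUTF(id);
        out.writeUTF(type);
        if (format < FORMAT_NO_PARENT) {
            writeOptionalString(out, null);   // the removed field, written only for old formats
        }
        return bytes.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        System.out.println(write(8, "_doc", "1").length + " bytes in the old format");
        System.out.println(write(9, "_doc", "1").length + " bytes in the new format");
    }
}
```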
controller.registerHandler(POST, "/{index}/{type}/_bulk", this); controller.registerHandler(PUT, "/{index}/{type}/_bulk", this); diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java index b19bc371794c3..5100b3d960b97 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.VersionType; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -45,6 +44,9 @@ public class RestDeleteAction extends BaseRestHandler { public RestDeleteAction(Settings settings, RestController controller) { super(settings); + controller.registerHandler(DELETE, "/{index}/_doc/{id}", this); + + // Deprecated typed endpoint. controller.registerHandler(DELETE, "/{index}/{type}/{id}", this); } @@ -55,12 +57,14 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - String type = request.param("type"); - if (!type.equals(MapperService.SINGLE_MAPPING_NAME)) { + DeleteRequest deleteRequest; + if (request.hasParam("type")) { deprecationLogger.deprecatedAndMaybeLog("delete_with_types", TYPES_DEPRECATION_MESSAGE); + deleteRequest = new DeleteRequest(request.param("index"), request.param("type"), request.param("id")); + } else { + deleteRequest = new DeleteRequest(request.param("index"), request.param("id")); } - DeleteRequest deleteRequest = new DeleteRequest(request.param("index"), type, request.param("id")); deleteRequest.routing(request.param("routing")); deleteRequest.timeout(request.paramAsTime("timeout", DeleteRequest.DEFAULT_TIMEOUT)); deleteRequest.setRefreshPolicy(request.param("refresh")); diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java index ff0a8c9ccbe32..6bef519078a69 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.VersionType; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -51,6 +50,10 @@ public class RestGetAction extends BaseRestHandler { public RestGetAction(final Settings settings, final RestController controller) { super(settings); + controller.registerHandler(GET, "/{index}/_doc/{id}", this); + controller.registerHandler(HEAD, "/{index}/_doc/{id}", this); + + // Deprecated typed endpoints. 
controller.registerHandler(GET, "/{index}/{type}/{id}", this); controller.registerHandler(HEAD, "/{index}/{type}/{id}", this); } @@ -62,12 +65,14 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - String type = request.param("type"); - if (!type.equals(MapperService.SINGLE_MAPPING_NAME)) { + GetRequest getRequest; + if (request.hasParam("type")) { deprecationLogger.deprecatedAndMaybeLog("get_with_types", TYPES_DEPRECATION_MESSAGE); + getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id")); + } else { + getRequest = new GetRequest(request.param("index"), request.param("id")); } - final GetRequest getRequest = new GetRequest(request.param("index"), type, request.param("id")); getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh())); getRequest.routing(request.param("routing")); getRequest.preference(request.param("preference")); diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java index bd18dae74545d..adaa4a46fa22b 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java @@ -42,15 +42,24 @@ public class RestIndexAction extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger( LogManager.getLogger(RestDeleteAction.class)); - public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in " + - "document index requests is deprecated, use the /{index}/_doc/{id} or /{index}/_doc endpoints instead."; + public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in document " + + "index requests is deprecated, use the typeless endpoints instead (/{index}/_doc/{id}, /{index}/_doc, " + + "or /{index}/_create/{id})."; public RestIndexAction(Settings settings, RestController controller) { super(settings); + controller.registerHandler(POST, "/{index}/_doc", this); // auto id creation + controller.registerHandler(PUT, "/{index}/_doc/{id}", this); + controller.registerHandler(POST, "/{index}/_doc/{id}", this); + + CreateHandler createHandler = new CreateHandler(settings); + controller.registerHandler(PUT, "/{index}/_create/{id}", createHandler); + controller.registerHandler(POST, "/{index}/_create/{id}/", createHandler); + + // Deprecated typed endpoints. 
controller.registerHandler(POST, "/{index}/{type}", this); // auto id creation controller.registerHandler(PUT, "/{index}/{type}/{id}", this); controller.registerHandler(POST, "/{index}/{type}/{id}", this); - CreateHandler createHandler = new CreateHandler(settings); controller.registerHandler(PUT, "/{index}/{type}/{id}/_create", createHandler); controller.registerHandler(POST, "/{index}/{type}/{id}/_create", createHandler); } @@ -88,7 +97,7 @@ void validateOpType(String opType) { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { IndexRequest indexRequest; final String type = request.param("type"); - if (type.equals(MapperService.SINGLE_MAPPING_NAME) == false) { + if (type != null && type.equals(MapperService.SINGLE_MAPPING_NAME) == false) { deprecationLogger.deprecatedAndMaybeLog("index_with_types", TYPES_DEPRECATION_MESSAGE); indexRequest = new IndexRequest(request.param("index"), type, request.param("id")); } else { diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java index 3684f9b3b086f..e466eaf4ee9cf 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java @@ -51,6 +51,8 @@ public RestMultiGetAction(Settings settings, RestController controller) { controller.registerHandler(POST, "/_mget", this); controller.registerHandler(GET, "/{index}/_mget", this); controller.registerHandler(POST, "/{index}/_mget", this); + + // Deprecated typed endpoints. controller.registerHandler(GET, "/{index}/{type}/_mget", this); controller.registerHandler(POST, "/{index}/{type}/_mget", this); diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java index e76852896a3a4..bdb0052698253 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java @@ -49,6 +49,8 @@ public RestMultiTermVectorsAction(Settings settings, RestController controller) controller.registerHandler(POST, "/_mtermvectors", this); controller.registerHandler(GET, "/{index}/_mtermvectors", this); controller.registerHandler(POST, "/{index}/_mtermvectors", this); + + // Deprecated typed endpoints. 
controller.registerHandler(GET, "/{index}/{type}/_mtermvectors", this); controller.registerHandler(POST, "/{index}/{type}/_mtermvectors", this); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java index 72a2ec3a88fd6..85ddd3b58ed49 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java @@ -53,16 +53,16 @@ public class RestTermVectorsAction extends BaseRestHandler { public RestTermVectorsAction(Settings settings, RestController controller) { super(settings); + controller.registerHandler(GET, "/{index}/_termvectors", this); + controller.registerHandler(POST, "/{index}/_termvectors", this); + controller.registerHandler(GET, "/{index}/_termvectors/{id}", this); + controller.registerHandler(POST, "/{index}/_termvectors/{id}", this); + // Deprecated typed endpoints. controller.registerHandler(GET, "/{index}/{type}/_termvectors", this); controller.registerHandler(POST, "/{index}/{type}/_termvectors", this); controller.registerHandler(GET, "/{index}/{type}/{id}/_termvectors", this); controller.registerHandler(POST, "/{index}/{type}/{id}/_termvectors", this); - - controller.registerHandler(GET, "/{index}/_termvectors", this); - controller.registerHandler(POST, "/{index}/_termvectors", this); - controller.registerHandler(GET, "/{index}/_termvectors/{id}", this); - controller.registerHandler(POST, "/{index}/_termvectors/{id}", this); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java index b3040a3572f82..033176c4a7300 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java @@ -47,6 +47,8 @@ public class RestUpdateAction extends BaseRestHandler { public RestUpdateAction(Settings settings, RestController controller) { super(settings); controller.registerHandler(POST, "/{index}/_update/{id}", this); + + // Deprecated typed endpoint. controller.registerHandler(POST, "/{index}/{type}/{id}/_update", this); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java index 3405a344463cf..04d13133f0841 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java @@ -57,6 +57,8 @@ public RestCountAction(Settings settings, RestController controller) { controller.registerHandler(GET, "/_count", this); controller.registerHandler(POST, "/{index}/_count", this); controller.registerHandler(GET, "/{index}/_count", this); + + // Deprecated typed endpoints. 
controller.registerHandler(POST, "/{index}/{type}/_count", this); controller.registerHandler(GET, "/{index}/{type}/_count", this); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java index dee349217311c..2bcc8a5a79435 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java @@ -49,10 +49,12 @@ public class RestExplainAction extends BaseRestHandler { public RestExplainAction(Settings settings, RestController controller) { super(settings); - controller.registerHandler(GET, "/{index}/{type}/{id}/_explain", this); - controller.registerHandler(POST, "/{index}/{type}/{id}/_explain", this); controller.registerHandler(GET, "/{index}/_explain/{id}", this); controller.registerHandler(POST, "/{index}/_explain/{id}", this); + + // Deprecated typed endpoints. + controller.registerHandler(GET, "/{index}/{type}/{id}/_explain", this); + controller.registerHandler(POST, "/{index}/{type}/{id}/_explain", this); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java index 680d55907ee9c..30791835d3009 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java @@ -68,11 +68,12 @@ public class RestMultiSearchAction extends BaseRestHandler { public RestMultiSearchAction(Settings settings, RestController controller) { super(settings); - controller.registerHandler(GET, "/_msearch", this); controller.registerHandler(POST, "/_msearch", this); controller.registerHandler(GET, "/{index}/_msearch", this); controller.registerHandler(POST, "/{index}/_msearch", this); + + // Deprecated typed endpoints. controller.registerHandler(GET, "/{index}/{type}/_msearch", this); controller.registerHandler(POST, "/{index}/{type}/_msearch", this); diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index cb1efdcd587cc..8e6e247123d36 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -78,6 +78,8 @@ public RestSearchAction(Settings settings, RestController controller) { controller.registerHandler(POST, "/_search", this); controller.registerHandler(GET, "/{index}/_search", this); controller.registerHandler(POST, "/{index}/_search", this); + + // Deprecated typed endpoints. 
controller.registerHandler(GET, "/{index}/{type}/_search", this); controller.registerHandler(POST, "/{index}/{type}/_search", this); } diff --git a/server/src/main/java/org/elasticsearch/script/UpdateScript.java b/server/src/main/java/org/elasticsearch/script/UpdateScript.java index e1eaf14bcb943..9b9e79c7b74ba 100644 --- a/server/src/main/java/org/elasticsearch/script/UpdateScript.java +++ b/server/src/main/java/org/elasticsearch/script/UpdateScript.java @@ -20,8 +20,6 @@ package org.elasticsearch.script; -import java.util.Collections; -import java.util.HashMap; import java.util.Map; /** @@ -31,17 +29,6 @@ public abstract class UpdateScript { public static final String[] PARAMETERS = { }; - private static final Map DEPRECATIONS; - static { - Map deprecations = new HashMap<>(); - deprecations.put( - "ctx", - "Accessing variable [ctx] via [params.ctx] from within a update script " + - "is deprecated in favor of directly accessing [ctx]." - ); - DEPRECATIONS = Collections.unmodifiableMap(deprecations); - } - /** The context used to compile {@link UpdateScript} factories. */ public static final ScriptContext CONTEXT = new ScriptContext<>("update", Factory.class); @@ -52,9 +39,7 @@ public abstract class UpdateScript { private final Map ctx; public UpdateScript(Map params, Map ctx) { - Map paramsWithCtx = new HashMap<>(params); - paramsWithCtx.put("ctx", ctx); - this.params = new ParameterMap(paramsWithCtx, DEPRECATIONS); + this.params = params; this.ctx = ctx; } diff --git a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java index 5690e584eac20..02e748dfbc95d 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java @@ -250,6 +250,9 @@ public QueryRescorerBuilder rewrite(QueryRewriteContext ctx) throws IOException queryRescoreBuilder.setQueryWeight(queryWeight); queryRescoreBuilder.setRescoreQueryWeight(rescoreQueryWeight); queryRescoreBuilder.setScoreMode(scoreMode); + if (windowSize() != null) { + queryRescoreBuilder.windowSize(windowSize()); + } return queryRescoreBuilder; } } diff --git a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index 7774456b51e15..1bc6af2c9669e 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -234,19 +234,6 @@ public GeoPoint[] points() { return this.points.toArray(new GeoPoint[this.points.size()]); } - /** - * The geohash of the geo point to create the range distance facets from. - * - * Deprecated - please use points(GeoPoint... points) instead. - */ - @Deprecated - public GeoDistanceSortBuilder geohashes(String... geohashes) { - for (String geohash : geohashes) { - this.points.add(GeoPoint.fromGeohash(geohash)); - } - return this; - } - /** * The geo distance type used to compute the distance. 
*/ diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java index 8e72e6d5768f1..2ca42ff85abdf 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.client.Client; import org.elasticsearch.client.support.AbstractClient; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; @@ -45,14 +46,19 @@ final class RemoteClusterAwareClient extends AbstractClient { protected void doExecute(Action action, Request request, ActionListener listener) { remoteClusterService.ensureConnected(clusterAlias, ActionListener.wrap(res -> { - Transport.Connection connection = remoteClusterService.getConnection(clusterAlias); + Transport.Connection connection; + if (request instanceof RemoteClusterAwareRequest) { + DiscoveryNode preferredTargetNode = ((RemoteClusterAwareRequest) request).getPreferredTargetNode(); + connection = remoteClusterService.getConnection(preferredTargetNode, clusterAlias); + } else { + connection = remoteClusterService.getConnection(clusterAlias); + } service.sendRequest(connection, action.name(), request, TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, action.getResponseReader())); }, listener::onFailure)); } - @Override public void close() { // do nothing diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareRequest.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareRequest.java new file mode 100644 index 0000000000000..b708240f6daf9 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareRequest.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.cluster.node.DiscoveryNode; + +public interface RemoteClusterAwareRequest { + + /** + * Returns the preferred discovery node for this request. The remote cluster client will attempt to send + * this request directly to this node. Otherwise, it will send the request as a proxy action that will + * be routed by the remote cluster to this node. 
+ * + * @return preferred discovery node + */ + DiscoveryNode getPreferredTargetNode(); + +} diff --git a/server/src/test/java/org/elasticsearch/VersionTests.java b/server/src/test/java/org/elasticsearch/VersionTests.java index 0e230bfd9af39..08db8dfaf2100 100644 --- a/server/src/test/java/org/elasticsearch/VersionTests.java +++ b/server/src/test/java/org/elasticsearch/VersionTests.java @@ -186,7 +186,7 @@ public void testMinCompatVersion() { // from 7.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version ie. 5.x is // released since we need to bump the supported minor in Version#minimumCompatibilityVersion() - Version lastVersion = Version.V_6_6_0; // TODO: remove this once min compat version is a constant instead of method + Version lastVersion = Version.V_6_7_0; // TODO: remove this once min compat version is a constant instead of method assertEquals(lastVersion.major, Version.V_7_0_0.minimumCompatibilityVersion().major); assertEquals("did you miss to bump the minor in Version#minimumCompatibilityVersion()", lastVersion.minor, Version.V_7_0_0.minimumCompatibilityVersion().minor); @@ -345,8 +345,8 @@ public static void assertUnknownVersion(Version version) { public void testIsCompatible() { assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion())); - assertFalse(isCompatible(Version.V_6_5_0, Version.V_7_0_0)); - assertTrue(isCompatible(Version.V_6_6_0, Version.V_7_0_0)); + assertFalse(isCompatible(Version.V_6_6_0, Version.V_7_0_0)); + assertTrue(isCompatible(Version.V_6_7_0, Version.V_7_0_0)); assertFalse(isCompatible(Version.fromId(2000099), Version.V_7_0_0)); assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_5_0)); diff --git a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java index c85d303cf293f..5840f7ef60191 100644 --- a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java @@ -75,13 +75,9 @@ public void testDuellingFormatsValidParsing() { assertSameDate("1522332219.0", "epoch_second"); assertSameDate("0", "epoch_second"); assertSameDate("1", "epoch_second"); - assertSameDate("-1", "epoch_second"); - assertSameDate("-1522332219", "epoch_second"); assertSameDate("1522332219321", "epoch_millis"); assertSameDate("0", "epoch_millis"); assertSameDate("1", "epoch_millis"); - assertSameDate("-1", "epoch_millis"); - assertSameDate("-1522332219321", "epoch_millis"); assertSameDate("20181126", "basic_date"); assertSameDate("20181126T121212.123Z", "basic_date_time"); @@ -471,7 +467,7 @@ public void testSamePrinterOutput() { public void testSeveralTimeFormats() { DateFormatter jodaFormatter = DateFormatter.forPattern("year_month_day||ordinal_date"); - DateFormatter javaFormatter = DateFormatter.forPattern("8year_month_day||8ordinal_date"); + DateFormatter javaFormatter = DateFormatter.forPattern("8year_month_day||ordinal_date"); assertSameDate("2018-12-12", "year_month_day||ordinal_date", jodaFormatter, javaFormatter); assertSameDate("2018-128", "year_month_day||ordinal_date", jodaFormatter, javaFormatter); } diff --git a/server/src/test/java/org/elasticsearch/common/joda/SimpleJodaTests.java b/server/src/test/java/org/elasticsearch/common/joda/SimpleJodaTests.java index 2da3a08629f52..b6f1b1b650a6f 100644 --- 
a/server/src/test/java/org/elasticsearch/common/joda/SimpleJodaTests.java +++ b/server/src/test/java/org/elasticsearch/common/joda/SimpleJodaTests.java @@ -303,6 +303,9 @@ public void testThatNegativeEpochsCanBeParsed() { formatter.parseJoda("-1234567890.9999"); formatter.parseJoda("-1234567890123456.9999"); } + + assertWarnings("Use of negative values" + + " in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); } public void testForInvalidDatesInEpochSecond() { @@ -753,13 +756,22 @@ public void testDeprecatedFormatSpecifiers() { " next major version of Elasticsearch. Prefix your date format with '8' to use the new specifier."); } - public void testDeprecatedScientificNotation() { + public void testDeprecatedEpochScientificNotation() { assertValidDateFormatParsing("epoch_second", "1.234e5", "123400"); assertWarnings("Use of scientific notation" + - "in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); + " in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); assertValidDateFormatParsing("epoch_millis", "1.234e5", "123400"); assertWarnings("Use of scientific notation" + - "in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); + " in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); + } + + public void testDeprecatedEpochNegative() { + assertValidDateFormatParsing("epoch_second", "-12345", "-12345"); + assertWarnings("Use of negative values" + + " in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); + assertValidDateFormatParsing("epoch_millis", "-12345", "-12345"); + assertWarnings("Use of negative values" + + " in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); } private void assertValidDateFormatParsing(String pattern, String dateToParse) { diff --git a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java index 98e58f7a0ebd9..0f58e30f7a2bf 100644 --- a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.time; -import org.elasticsearch.common.joda.JodaDateFormatter; import org.elasticsearch.test.ESTestCase; import java.time.Instant; @@ -204,6 +203,6 @@ public void testForceJava8() { assertThat(formatter, instanceOf(DateFormatters.MergedDateFormatter.class)); DateFormatters.MergedDateFormatter mergedFormatter = (DateFormatters.MergedDateFormatter) formatter; assertThat(mergedFormatter.formatters.get(0), instanceOf(JavaDateFormatter.class)); - assertThat(mergedFormatter.formatters.get(1), instanceOf(JodaDateFormatter.class)); + assertThat(mergedFormatter.formatters.get(1), instanceOf(JavaDateFormatter.class)); } } diff --git a/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java b/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java index 6c8afa1e3dbc4..8d702ebee8388 100644 --- a/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java @@ -239,7 +239,7 @@ public void testTimestamps() { 
assertDateMathEquals("1418248078000||/m", "2014-12-10T21:47:00.000"); // also check other time units - DateMathParser parser = DateFormatter.forPattern("8epoch_second||8dateOptionalTime").toDateMathParser(); + DateMathParser parser = DateFormatter.forPattern("8epoch_second||dateOptionalTime").toDateMathParser(); long datetime = parser.parse("1418248078", () -> 0); assertDateEquals(datetime, "1418248078", "2014-12-10T21:47:58.000"); diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/SettingsBasedHostProviderIT.java b/server/src/test/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProviderIT.java similarity index 97% rename from server/src/test/java/org/elasticsearch/discovery/zen/SettingsBasedHostProviderIT.java rename to server/src/test/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProviderIT.java index 1e64ced1a2595..b79fb05d40111 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/SettingsBasedHostProviderIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProviderIT.java @@ -28,7 +28,7 @@ import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) -public class SettingsBasedHostProviderIT extends ESIntegTestCase { +public class SettingsBasedHostsProviderIT extends ESIntegTestCase { @Override protected Settings nodeSettings(int nodeOrdinal) { diff --git a/server/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java b/server/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java index 94aa403c3d3e9..e97f69b6d4965 100644 --- a/server/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java @@ -31,6 +31,8 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; +import java.util.ArrayList; +import java.util.List; import java.util.Set; import static org.hamcrest.Matchers.equalTo; @@ -40,6 +42,23 @@ public class RecoverAfterNodesIT extends ESIntegTestCase { private static final TimeValue BLOCK_WAIT_TIMEOUT = TimeValue.timeValueSeconds(10); + @Override + protected List addExtraClusterBootstrapSettings(List allNodesSettings) { + if (internalCluster().numDataAndMasterNodes() == 0) { + final Settings firstNodeSettings = allNodesSettings.get(0); + final List otherNodesSettings = allNodesSettings.subList(1, allNodesSettings.size()); + + final List updatedSettings = new ArrayList<>(); + updatedSettings.add(Settings.builder().put(firstNodeSettings) + .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), + Node.NODE_NAME_SETTING.get(firstNodeSettings)).build()); + updatedSettings.addAll(otherNodesSettings); + + return updatedSettings; + } + return super.addExtraClusterBootstrapSettings(allNodesSettings); + } + public Set waitForNoBlocksOnNode(TimeValue timeout, Client nodeClient) throws InterruptedException { long start = System.currentTimeMillis(); Set blocks; @@ -60,9 +79,7 @@ public Client startNode(Settings.Builder settings, int minMasterNodes) { public void testRecoverAfterNodes() throws Exception { logger.info("--> start node (1)"); - Client clientNode1 = startNode(Settings.builder() - .put("gateway.recover_after_nodes", 3) - .put(ClusterBootstrapService.INITIAL_MASTER_NODE_COUNT_SETTING.getKey(), 1), 1); + Client clientNode1 = 
startNode(Settings.builder().put("gateway.recover_after_nodes", 3), 1); assertThat(clientNode1.admin().cluster().prepareState().setLocal(true).execute().actionGet() .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); @@ -89,8 +106,7 @@ public void testRecoverAfterMasterNodes() throws Exception { logger.info("--> start master_node (1)"); Client master1 = startNode(Settings.builder() .put("gateway.recover_after_master_nodes", 2).put(Node.NODE_DATA_SETTING.getKey(), false) - .put(Node.NODE_MASTER_SETTING.getKey(), true) - .put(ClusterBootstrapService.INITIAL_MASTER_NODE_COUNT_SETTING.getKey(), 1), 1); + .put(Node.NODE_MASTER_SETTING.getKey(), true), 1); assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet() .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); @@ -136,8 +152,7 @@ public void testRecoverAfterDataNodes() throws Exception { Client master1 = startNode(Settings.builder() .put("gateway.recover_after_data_nodes", 2) .put(Node.NODE_DATA_SETTING.getKey(), false) - .put(Node.NODE_MASTER_SETTING.getKey(), true) - .put(ClusterBootstrapService.INITIAL_MASTER_NODE_COUNT_SETTING.getKey(), 1), 1); + .put(Node.NODE_MASTER_SETTING.getKey(), true), 1); assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet() .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java index a0f0bf874befb..023fa5ca051aa 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java @@ -37,7 +37,6 @@ import java.io.IOException; import java.util.Collection; -import java.util.Locale; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.notNullValue; @@ -174,7 +173,7 @@ public void testIgnoreMalformed() throws Exception { .endObject()), XContentType.JSON)); MapperParsingException e = expectThrows(MapperParsingException.class, runnable); - assertThat(e.getCause().getMessage(), containsString("failed to parse date field [2016-03-99]")); + assertThat(e.getCause().getMessage(), containsString("Cannot parse \"2016-03-99\"")); mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field").field("type", "date") @@ -228,8 +227,8 @@ public void testFloatEpochFormat() throws IOException { assertEquals(mapping, mapper.mappingSource().toString()); - double epochFloatMillisFromEpoch = (randomDouble() * 2 - 1) * 1000000; - String epochFloatValue = String.format(Locale.US, "%f", epochFloatMillisFromEpoch); + long epochMillis = randomNonNegativeLong(); + String epochFloatValue = epochMillis + "." 
+ randomIntBetween(0, 999); ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference .bytes(XContentFactory.jsonBuilder() @@ -241,7 +240,7 @@ public void testFloatEpochFormat() throws IOException { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals((long)epochFloatMillisFromEpoch, pointField.numericValue().longValue()); + assertEquals(epochMillis, pointField.numericValue().longValue()); } public void testChangeLocale() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java index d8840b275419b..dc6f14cb0148a 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java @@ -18,11 +18,6 @@ */ package org.elasticsearch.index.mapper; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field.Store; -import org.apache.lucene.document.StringField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexWriter; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; @@ -36,8 +31,6 @@ import org.elasticsearch.test.VersionUtils; import org.mockito.Mockito; -import java.io.IOException; - public class TypeFieldTypeTests extends FieldTypeTestCase { @Override protected MappedFieldType createDefaultFieldType() { @@ -81,36 +74,5 @@ public void testTermsQuery() throws Exception { Mockito.when(mapperService.documentMapper()).thenReturn(mapper); query = ft.termQuery("my_type", context); assertEquals(new MatchNoDocsQuery(), query); - assertWarnings(TypeFieldMapper.TypeFieldType.TYPES_DEPRECATION_MESSAGE); - } - - public void testExistsQuery() { - QueryShardContext context = Mockito.mock(QueryShardContext.class); - TypeFieldMapper.TypeFieldType ft = new TypeFieldMapper.TypeFieldType(); - ft.setName(TypeFieldMapper.NAME); - ft.existsQuery(context); - assertWarnings(TypeFieldMapper.TypeFieldType.TYPES_DEPRECATION_MESSAGE); - } - - public void testRangeQuery() { - QueryShardContext context = Mockito.mock(QueryShardContext.class); - MapperService mapperService = Mockito.mock(MapperService.class); - DocumentMapper mapper = Mockito.mock(DocumentMapper.class); - Mockito.when(context.getMapperService()).thenReturn(mapperService); - Mockito.when(mapperService.documentMapper()).thenReturn(mapper); - Mockito.when(mapper.type()).thenReturn("my_type"); - - TypeFieldMapper.TypeFieldType ft = new TypeFieldMapper.TypeFieldType(); - ft.setName(TypeFieldMapper.NAME); - ft.rangeQuery("type1", "type2", true, true, context); - assertWarnings(TypeFieldMapper.TypeFieldType.TYPES_DEPRECATION_MESSAGE); - } - - static DirectoryReader openReaderWithNewType(String type, IndexWriter writer) throws IOException { - Document doc = new Document(); - StringField typeField = new StringField(TypeFieldMapper.NAME, type, Store.NO); - doc.add(typeField); - writer.addDocument(doc); - return DirectoryReader.open(writer); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java index 28349994c63e3..df312ba84c309 100644 --- a/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java @@ -592,4 +592,11 @@ public boolean convertNowRangeToMatchAll() { rewritten = query.rewrite(queryShardContext); assertThat(rewritten, instanceOf(MatchAllQueryBuilder.class)); } + + public void testTypeField() throws IOException { + RangeQueryBuilder builder = QueryBuilders.rangeQuery("_type") + .from("value1"); + builder.doToQuery(createShardContext()); + assertWarnings(QueryShardContext.TYPES_DEPRECATION_MESSAGE); + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java index 6876d021a0ae9..a6e9590f54010 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java @@ -168,4 +168,10 @@ public void testParseFailsWithMultipleFields() throws IOException { e = expectThrows(ParsingException.class, () -> parseQuery(shortJson)); assertEquals("[term] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage()); } + + public void testTypeField() throws IOException { + TermQueryBuilder builder = QueryBuilders.termQuery("_type", "value1"); + builder.doToQuery(createShardContext()); + assertWarnings(QueryShardContext.TYPES_DEPRECATION_MESSAGE); + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java index c954be5403f49..02df22fd97efb 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java @@ -310,5 +310,11 @@ public void testConversion() { assertEquals(Arrays.asList(5, 42d), TermsQueryBuilder.convert(list)); assertEquals(Arrays.asList(5, 42d), TermsQueryBuilder.convertBack(TermsQueryBuilder.convert(list))); } + + public void testTypeField() throws IOException { + TermsQueryBuilder builder = QueryBuilders.termsQuery("_type", "value1", "value2"); + builder.doToQuery(createShardContext()); + assertWarnings(QueryShardContext.TYPES_DEPRECATION_MESSAGE); + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java index 17c49df366261..67916e52789c5 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java @@ -75,12 +75,12 @@ public void testFromJson() throws IOException { @Override public void testToQuery() throws IOException { super.testToQuery(); - assertWarnings(TypeFieldMapper.TypeFieldType.TYPES_DEPRECATION_MESSAGE); + assertWarnings(TypeQueryBuilder.TYPES_DEPRECATION_MESSAGE); } @Override public void testMustRewrite() throws IOException { super.testMustRewrite(); - assertWarnings(TypeFieldMapper.TypeFieldType.TYPES_DEPRECATION_MESSAGE); + assertWarnings(TypeQueryBuilder.TYPES_DEPRECATION_MESSAGE); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java index f5adb70c9fecc..caab692b9cf6b 100644 --- a/server/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java @@ 
-148,4 +148,10 @@ public void testIndexWildcard() throws IOException { query = new WildcardQueryBuilder("_index", "index_" + index + "*").doToQuery(context); assertThat(query instanceof MatchNoDocsQuery, equalTo(true)); } + + public void testTypeField() throws IOException { + WildcardQueryBuilder builder = QueryBuilders.wildcardQuery("_type", "doc*"); + builder.doToQuery(createShardContext()); + assertWarnings(QueryShardContext.TYPES_DEPRECATION_MESSAGE); + } } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 7e73f9ef5175a..3eddeea2f2a8a 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -35,6 +35,7 @@ import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Assertions; +import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; @@ -72,6 +73,7 @@ import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.VersionUtils; import org.hamcrest.Matchers; import org.junit.After; import org.junit.Before; @@ -2814,9 +2816,12 @@ public void testTranslogOpSerialization() throws Exception { Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomPrimaryTerm, randomSeqNum, true); Translog.Index index = new Translog.Index(eIndex, eIndexResult); + Version wireVersion = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT); BytesStreamOutput out = new BytesStreamOutput(); + out.setVersion(wireVersion); Translog.Operation.writeOperation(out, index); StreamInput in = out.bytes().streamInput(); + in.setVersion(wireVersion); Translog.Index serializedIndex = (Translog.Index) Translog.Operation.readOperation(in); assertEquals(index, serializedIndex); @@ -2826,8 +2831,10 @@ public void testTranslogOpSerialization() throws Exception { Translog.Delete delete = new Translog.Delete(eDelete, eDeleteResult); out = new BytesStreamOutput(); + out.setVersion(wireVersion); Translog.Operation.writeOperation(out, delete); in = out.bytes().streamInput(); + in.setVersion(wireVersion); Translog.Delete serializedDelete = (Translog.Delete) Translog.Operation.readOperation(in); assertEquals(delete, serializedDelete); } diff --git a/server/src/test/java/org/elasticsearch/rest/action/document/RestIndexActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/document/RestIndexActionTests.java index 5fe99589a64c4..1f45f5265acda 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/document/RestIndexActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/document/RestIndexActionTests.java @@ -52,7 +52,22 @@ public void testTypeInPath() { dispatchRequest(validRequest); } - public void testCreateOpTypeValidation() throws Exception { + public void testCreateWithTypeInPath() { + RestRequest deprecatedRequest = new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.PUT) + .withPath("/some_index/some_type/some_id/_create") + .build(); + dispatchRequest(deprecatedRequest); + assertWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE); + + RestRequest validRequest = new 
FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.PUT) + .withPath("/some_index/_create/some_id") + .build(); + dispatchRequest(validRequest); + } + + public void testCreateOpTypeValidation() { Settings settings = settings(Version.CURRENT).build(); RestIndexAction.CreateHandler create = action.new CreateHandler(settings); diff --git a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java index 24621a12d39e4..0f647353e95af 100644 --- a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java @@ -200,11 +200,13 @@ public MappedFieldType fieldMapper(String name) { rescoreBuilder.setQueryWeight(randomFloat()); rescoreBuilder.setRescoreQueryWeight(randomFloat()); rescoreBuilder.setScoreMode(QueryRescoreMode.Max); + rescoreBuilder.windowSize(randomIntBetween(0, 100)); QueryRescorerBuilder rescoreRewritten = rescoreBuilder.rewrite(mockShardContext); assertEquals(rescoreRewritten.getQueryWeight(), rescoreBuilder.getQueryWeight(), 0.01f); assertEquals(rescoreRewritten.getRescoreQueryWeight(), rescoreBuilder.getRescoreQueryWeight(), 0.01f); assertEquals(rescoreRewritten.getScoreMode(), rescoreBuilder.getScoreMode()); + assertEquals(rescoreRewritten.windowSize(), rescoreBuilder.windowSize()); } /** diff --git a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java index 87a8015bf3676..97d9361fc46fe 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java @@ -35,6 +35,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.concurrent.ExecutionException; @@ -222,28 +223,16 @@ public void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionExcept client().prepareIndex("index", "type", "d1").setSource(d1Builder), client().prepareIndex("index", "type", "d2").setSource(d2Builder)); - List qHashes = new ArrayList<>(); - List qPoints = new ArrayList<>(); - createQPoints(qHashes, qPoints); + List qPoints = Arrays.asList(new GeoPoint(2, 1), new GeoPoint(2, 2), new GeoPoint(2, 3), new GeoPoint(2, 4)); + Collections.shuffle(qPoints, random()); GeoDistanceSortBuilder geoDistanceSortBuilder = null; - for (int i = 0; i < 4; i++) { - int at = randomInt(3 - i); - if (randomBoolean()) { + for (GeoPoint point : qPoints) { if (geoDistanceSortBuilder == null) { - geoDistanceSortBuilder = new GeoDistanceSortBuilder(LOCATION_FIELD, qHashes.get(at)); + geoDistanceSortBuilder = new GeoDistanceSortBuilder(LOCATION_FIELD, point); } else { - geoDistanceSortBuilder.geohashes(qHashes.get(at)); + geoDistanceSortBuilder.points(point); } - } else { - if (geoDistanceSortBuilder == null) { - geoDistanceSortBuilder = new GeoDistanceSortBuilder(LOCATION_FIELD, qPoints.get(at)); - } else { - geoDistanceSortBuilder.points(qPoints.get(at)); - } - } - qHashes.remove(at); - qPoints.remove(at); } SearchResponse searchResponse = client().prepareSearch() @@ -340,13 +329,6 @@ private static void checkCorrectSortOrderForGeoSort(SearchResponse searchRespons closeTo(GeoDistance.ARC.calculate(2, 2, 1, 1, DistanceUnit.METERS), 1.e-1)); } - protected 
void createQPoints(List qHashes, List qPoints) { - GeoPoint[] qp = {new GeoPoint(2, 1), new GeoPoint(2, 2), new GeoPoint(2, 3), new GeoPoint(2, 4)}; - qPoints.addAll(Arrays.asList(qp)); - String[] qh = {"s02equ04ven0", "s037ms06g7h0", "s065kk0dc540", "s06g7h0dyg00"}; - qHashes.addAll(Arrays.asList(qh)); - } - public void testCrossIndexIgnoreUnmapped() throws Exception { assertAcked(prepareCreate("test1").addMapping( "type", "str_field", "type=keyword", diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 1dc3c375ad80a..e17377b500d6b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1947,6 +1947,11 @@ public Settings nodeSettings(int nodeOrdinal) { .put(ESIntegTestCase.this.nodeSettings(nodeOrdinal)).build(); } + @Override + public List addExtraClusterBootstrapSettings(List allNodesSettings) { + return ESIntegTestCase.this.addExtraClusterBootstrapSettings(allNodesSettings); + } + @Override public Path nodeConfigPath(int nodeOrdinal) { return ESIntegTestCase.this.nodeConfigPath(nodeOrdinal); @@ -1975,6 +1980,19 @@ public Collection> transportClientPlugins() { }; } + /** + * This method is called before starting a collection of nodes. + * At this point the test has a holistic view on all nodes settings and might perform settings adjustments as needed. + * For instance, the test could retrieve master node names and fill in + * {@link org.elasticsearch.cluster.coordination.ClusterBootstrapService#INITIAL_MASTER_NODES_SETTING} setting. + * + * @param allNodesSettings list of node settings before update + * @return list of node settings after update + */ + protected List addExtraClusterBootstrapSettings(List allNodesSettings) { + return allNodesSettings; + } + /** * Iff this returns true mock transport implementations are used for the test runs. Otherwise not mock transport impls are used. * The default is {@code true}. 
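For illustration only (this snippet is not part of the change set): a test that needs to pin cluster bootstrapping to a specific node can override the new addExtraClusterBootstrapSettings() hook, much like the RecoverAfterNodesIT change above. The test class name below is hypothetical; the settings and helpers are the ones used elsewhere in this diff.

    import java.util.ArrayList;
    import java.util.List;

    import org.elasticsearch.cluster.coordination.ClusterBootstrapService;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.node.Node;
    import org.elasticsearch.test.ESIntegTestCase;

    // Hypothetical integration test sketch; mirrors the RecoverAfterNodesIT override above.
    public class BootstrapAwareIT extends ESIntegTestCase {

        @Override
        protected List<Settings> addExtraClusterBootstrapSettings(List<Settings> allNodesSettings) {
            // Only adjust the very first batch of nodes; later batches join the existing cluster.
            if (internalCluster().numDataAndMasterNodes() > 0) {
                return super.addExtraClusterBootstrapSettings(allNodesSettings);
            }
            // Name the first node as the initial master and leave the remaining node settings untouched.
            final Settings firstNodeSettings = allNodesSettings.get(0);
            final List<Settings> updatedSettings = new ArrayList<>();
            updatedSettings.add(Settings.builder()
                .put(firstNodeSettings)
                .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(),
                    Node.NODE_NAME_SETTING.get(firstNodeSettings))
                .build());
            updatedSettings.addAll(allNodesSettings.subList(1, allNodesSettings.size()));
            return updatedSettings;
        }
    }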
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index 5fbb5da14dbb4..32aa1c2107d5a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -62,7 +62,7 @@ import java.util.Collection; import java.util.Collections; -import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODE_COUNT_SETTING; +import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING; import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; @@ -179,6 +179,8 @@ protected boolean addMockHttpTransport() { private Node newNode() { final Path tempDir = createTempDir(); + final String nodeName = nodeSettings().get(Node.NODE_NAME_SETTING.getKey(), "node_s_0"); + Settings settings = Settings.builder() .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName("single-node-cluster", random().nextLong())) .put(Environment.PATH_HOME_SETTING.getKey(), tempDir) @@ -186,7 +188,7 @@ private Node newNode() { // TODO: use a consistent data path for custom paths // This needs to tie into the ESIntegTestCase#indexSettings() method .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), createTempDir().getParent()) - .put("node.name", "node_s_0") + .put(Node.NODE_NAME_SETTING.getKey(), nodeName) .put(ScriptService.SCRIPT_MAX_COMPILATIONS_RATE.getKey(), "1000/1m") .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created .put("transport.type", getTestTransportType()) @@ -201,9 +203,10 @@ private Node newNode() { // turn it off for these tests. 
.put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), false) .putList(DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey()) // empty list disables a port scan for other nodes - .put(INITIAL_MASTER_NODE_COUNT_SETTING.getKey(), 1) + .putList(INITIAL_MASTER_NODES_SETTING.getKey(), nodeName) .put(nodeSettings()) // allow test cases to provide their own settings or override these .build(); + Collection> plugins = getPlugins(); if (plugins.contains(getTestTransportPlugin()) == false) { plugins = new ArrayList<>(plugins); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index db719389665ab..e6e11dacb749f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -148,7 +148,7 @@ import static java.util.Collections.emptyList; import static org.apache.lucene.util.LuceneTestCase.TEST_NIGHTLY; import static org.apache.lucene.util.LuceneTestCase.rarely; -import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODE_COUNT_SETTING; +import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; import static org.elasticsearch.discovery.DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING; @@ -525,10 +525,15 @@ private synchronized NodeAndClient getOrBuildRandomNode() { if (randomNodeAndClient != null) { return randomNodeAndClient; } - final int ord = nextNodeId.getAndIncrement(); final Runnable onTransportServiceStarted = () -> {}; // do not create unicast host file for this one node. - final Settings settings = Settings.builder().put(INITIAL_MASTER_NODE_COUNT_SETTING.getKey(), 1).build(); - final NodeAndClient buildNode = buildNode(ord, random.nextLong(), settings, false, 1, onTransportServiceStarted); + + final int nodeId = nextNodeId.getAndIncrement(); + final Settings settings = getNodeSettings(nodeId, random.nextLong(), Settings.EMPTY, 1); + final Settings nodeSettings = Settings.builder() + .putList(INITIAL_MASTER_NODES_SETTING.getKey(), Node.NODE_NAME_SETTING.get(settings)) + .put(settings) + .build(); + final NodeAndClient buildNode = buildNode(nodeId, nodeSettings, false, onTransportServiceStarted); assert nodes.isEmpty(); buildNode.startNode(); publishNode(buildNode); @@ -598,71 +603,66 @@ public synchronized void ensureAtMostNumDataNodes(int n) throws IOException { } } - /** - * builds a new node given the settings. 
- * - * @param settings the settings to use - * @param defaultMinMasterNodes min_master_nodes value to use if min_master_nodes is auto managed - * @param onTransportServiceStarted callback to run when transport service is started - */ - private NodeAndClient buildNode(Settings settings, int defaultMinMasterNodes, Runnable onTransportServiceStarted) { - int ord = nextNodeId.getAndIncrement(); - return buildNode(ord, random.nextLong(), settings, false, defaultMinMasterNodes, onTransportServiceStarted); + private Settings getNodeSettings(final int nodeId, final long seed, final Settings extraSettings, final int defaultMinMasterNodes) { + final Settings settings = getSettings(nodeId, seed, extraSettings); + + final String name = buildNodeName(nodeId, settings); + + final Settings.Builder updatedSettings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), baseDir) // allow overriding path.home + .put(settings) + .put("node.name", name) + .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), seed); + + final boolean usingSingleNodeDiscovery = DiscoveryModule.DISCOVERY_TYPE_SETTING.get(updatedSettings.build()).equals("single-node"); + if (!usingSingleNodeDiscovery && autoManageMinMasterNodes) { + assert updatedSettings.get(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()) == null : + "min master nodes may not be set when auto managed"; + assert updatedSettings.get(INITIAL_STATE_TIMEOUT_SETTING.getKey()) == null : + "automatically managing min master nodes require nodes to complete a join cycle" + + " when starting"; + updatedSettings + // don't wait too long not to slow down tests + .put(ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING.getKey(), "5s") + .put(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), defaultMinMasterNodes); + } else if (!usingSingleNodeDiscovery && updatedSettings.get(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()) == null) { + throw new IllegalArgumentException(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " must be configured"); + } + + return updatedSettings.build(); } /** * builds a new node * - * @param nodeId the node internal id (see {@link NodeAndClient#nodeAndClientId()} - * @param seed the node's random seed - * @param settings the settings to use - * @param reuseExisting if a node with the same name is already part of {@link #nodes}, no new node will be built and - * the method will return the existing one - * @param defaultMinMasterNodes min_master_nodes value to use if min_master_nodes is auto managed + * @param nodeId node ordinal + * @param settings the settings to use + * @param reuseExisting if a node with the same name is already part of {@link #nodes}, no new node will be built and + * the method will return the existing one * @param onTransportServiceStarted callback to run when transport service is started */ - private NodeAndClient buildNode(int nodeId, long seed, Settings settings, - boolean reuseExisting, int defaultMinMasterNodes, Runnable onTransportServiceStarted) { + private NodeAndClient buildNode(int nodeId, Settings settings, + boolean reuseExisting, Runnable onTransportServiceStarted) { assert Thread.holdsLock(this); ensureOpen(); - settings = getSettings(nodeId, seed, settings); Collection> plugins = getPlugins(); - String name = buildNodeName(nodeId, settings); + String name = settings.get("node.name"); + if (reuseExisting && nodes.containsKey(name)) { onTransportServiceStarted.run(); // reusing an existing node implies its transport service already started return nodes.get(name); } else { assert 
reuseExisting == true || nodes.containsKey(name) == false : - "node name [" + name + "] already exists but not allowed to use it"; + "node name [" + name + "] already exists but not allowed to use it"; } - Settings.Builder finalSettings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), baseDir) // allow overriding path.home - .put(settings) - .put("node.name", name) - .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), seed); - final boolean usingSingleNodeDiscovery = DiscoveryModule.DISCOVERY_TYPE_SETTING.get(finalSettings.build()).equals("single-node"); - if (!usingSingleNodeDiscovery && autoManageMinMasterNodes) { - assert finalSettings.get(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()) == null : - "min master nodes may not be set when auto managed"; - assert finalSettings.get(INITIAL_STATE_TIMEOUT_SETTING.getKey()) == null : - "automatically managing min master nodes require nodes to complete a join cycle" + - " when starting"; - finalSettings - // don't wait too long not to slow down tests - .put(ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING.getKey(), "5s") - .put(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), defaultMinMasterNodes); - } else if (!usingSingleNodeDiscovery && finalSettings.get(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()) == null) { - throw new IllegalArgumentException(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " must be configured"); - } - SecureSettings secureSettings = finalSettings.getSecureSettings(); + SecureSettings secureSettings = Settings.builder().put(settings).getSecureSettings(); if (secureSettings instanceof MockSecureSettings) { // we clone this here since in the case of a node restart we might need it again secureSettings = ((MockSecureSettings) secureSettings).clone(); } - final Settings nodeSettings = finalSettings.build(); MockNode node = new MockNode( - nodeSettings, + settings, plugins, nodeConfigurationSource.nodeConfigPath(nodeId), forbidPrivateIndexSettings); @@ -677,13 +677,15 @@ public void afterStart() { } catch (IOException e) { throw new UncheckedIOException(e); } - return new NodeAndClient(name, node, nodeSettings, nodeId); + return new NodeAndClient(name, node, settings, nodeId); + } + + private String getNodePrefix(Settings settings) { + return nodePrefix + getRoleSuffix(settings); } private String buildNodeName(int id, Settings settings) { - String prefix = nodePrefix; - prefix = prefix + getRoleSuffix(settings); - return prefix + id; + return getNodePrefix(settings) + id; } /** @@ -1087,49 +1089,52 @@ private synchronized void reset(boolean wipeData) throws IOException { final List toStartAndPublish = new ArrayList<>(); // we want to start nodes in one go due to min master nodes final Runnable onTransportServiceStarted = () -> rebuildUnicastHostFiles(toStartAndPublish); - final int bootstrapNodeIndex; - if (prevNodeCount == 0 && autoManageMinMasterNodes) { - if (numSharedDedicatedMasterNodes > 0) { - bootstrapNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDedicatedMasterNodes - 1); - } else if (numSharedDataNodes > 0) { - bootstrapNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDataNodes - 1); - } else { - bootstrapNodeIndex = -1; - } - } else { - bootstrapNodeIndex = -1; - } + final List settings = new ArrayList<>(); for (int i = 0; i < numSharedDedicatedMasterNodes; i++) { - final Settings.Builder settings = Settings.builder(); - settings.put(Node.NODE_MASTER_SETTING.getKey(), true); - settings.put(Node.NODE_DATA_SETTING.getKey(), false); - 
if (i == bootstrapNodeIndex) { - settings.put(INITIAL_MASTER_NODE_COUNT_SETTING.getKey(), numSharedDedicatedMasterNodes); - } - NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true, defaultMinMasterNodes, - onTransportServiceStarted); - toStartAndPublish.add(nodeAndClient); + final Settings.Builder extraSettings = Settings.builder(); + extraSettings.put(Node.NODE_MASTER_SETTING.getKey(), true); + extraSettings.put(Node.NODE_DATA_SETTING.getKey(), false); + settings.add(getNodeSettings(i, sharedNodesSeeds[i], extraSettings.build(), defaultMinMasterNodes)); } for (int i = numSharedDedicatedMasterNodes; i < numSharedDedicatedMasterNodes + numSharedDataNodes; i++) { - final Settings.Builder settings = Settings.builder(); + final Settings.Builder extraSettings = Settings.builder(); if (numSharedDedicatedMasterNodes > 0) { // if we don't have dedicated master nodes, keep things default - settings.put(Node.NODE_MASTER_SETTING.getKey(), false).build(); - settings.put(Node.NODE_DATA_SETTING.getKey(), true).build(); - } else if (i == bootstrapNodeIndex) { - settings.put(INITIAL_MASTER_NODE_COUNT_SETTING.getKey(), numSharedDataNodes); + extraSettings.put(Node.NODE_MASTER_SETTING.getKey(), false).build(); + extraSettings.put(Node.NODE_DATA_SETTING.getKey(), true).build(); } - NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true, defaultMinMasterNodes, - onTransportServiceStarted); - toStartAndPublish.add(nodeAndClient); + settings.add(getNodeSettings(i, sharedNodesSeeds[i], extraSettings.build(), defaultMinMasterNodes)); } for (int i = numSharedDedicatedMasterNodes + numSharedDataNodes; i < numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes; i++) { - final Builder settings = Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false) + final Builder extraSettings = Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false) .put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_INGEST_SETTING.getKey(), false); - NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true, defaultMinMasterNodes, - onTransportServiceStarted); + settings.add(getNodeSettings(i, sharedNodesSeeds[i], extraSettings.build(), defaultMinMasterNodes)); + } + + int bootstrapNodeIndex = -1; + final List masterNodeNames = settings.stream() + .filter(Node.NODE_MASTER_SETTING::get) + .map(Node.NODE_NAME_SETTING::get) + .collect(Collectors.toList()); + + if (prevNodeCount == 0 && autoManageMinMasterNodes) { + if (numSharedDedicatedMasterNodes > 0) { + bootstrapNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDedicatedMasterNodes - 1); + } else if (numSharedDataNodes > 0) { + bootstrapNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDataNodes - 1); + } + } + + final List updatedSettings = nodeConfigurationSource.addExtraClusterBootstrapSettings(settings); + + for (int i = 0; i < numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes; i++) { + Settings nodeSettings = updatedSettings.get(i); + if (i == bootstrapNodeIndex) { + nodeSettings = Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), masterNodeNames).put(nodeSettings).build(); + } + final NodeAndClient nodeAndClient = buildNode(i, nodeSettings, true, onTransportServiceStarted); toStartAndPublish.add(nodeAndClient); } @@ -1948,8 +1953,8 @@ public synchronized List startNodes(int numOfNodes, Settings settings) { /** * Starts multiple nodes with the given settings and returns their names 
*/ - public synchronized List startNodes(Settings... settings) { - final int newMasterCount = Math.toIntExact(Stream.of(settings).filter(Node.NODE_MASTER_SETTING::get).count()); + public synchronized List startNodes(Settings... extraSettings) { + final int newMasterCount = Math.toIntExact(Stream.of(extraSettings).filter(Node.NODE_MASTER_SETTING::get).count()); final int defaultMinMasterNodes; if (autoManageMinMasterNodes) { defaultMinMasterNodes = getMinMasterNodes(getMasterNodesCount() + newMasterCount); @@ -1958,19 +1963,39 @@ public synchronized List startNodes(Settings... settings) { } final List nodes = new ArrayList<>(); final int prevMasterCount = getMasterNodesCount(); - int bootstrapMasterNodeIndex = prevMasterCount == 0 && autoManageMinMasterNodes && newMasterCount > 0 && Arrays.stream(settings) + int bootstrapMasterNodeIndex = + prevMasterCount == 0 && autoManageMinMasterNodes && newMasterCount > 0 && Arrays.stream(extraSettings) .allMatch(s -> Node.NODE_MASTER_SETTING.get(s) == false || TestZenDiscovery.USE_ZEN2.get(s) == true) ? RandomNumbers.randomIntBetween(random, 0, newMasterCount - 1) : -1; - for (Settings nodeSettings : settings) { + final int numOfNodes = extraSettings.length; + final int firstNodeId = nextNodeId.getAndIncrement(); + final List settings = new ArrayList<>(); + for (int i = 0; i < numOfNodes; i++) { + settings.add(getNodeSettings(firstNodeId + i, random.nextLong(), extraSettings[i], defaultMinMasterNodes)); + } + nextNodeId.set(firstNodeId + numOfNodes); + + final List initialMasterNodes = settings.stream() + .filter(Node.NODE_MASTER_SETTING::get) + .map(Node.NODE_NAME_SETTING::get) + .collect(Collectors.toList()); + + final List updatedSettings = nodeConfigurationSource.addExtraClusterBootstrapSettings(settings); + + for (int i = 0; i < numOfNodes; i++) { + final Settings nodeSettings = updatedSettings.get(i); final Builder builder = Settings.builder(); if (Node.NODE_MASTER_SETTING.get(nodeSettings)) { if (bootstrapMasterNodeIndex == 0) { - builder.put(INITIAL_MASTER_NODE_COUNT_SETTING.getKey(), newMasterCount); + builder.putList(INITIAL_MASTER_NODES_SETTING.getKey(), initialMasterNodes); } bootstrapMasterNodeIndex -= 1; } - nodes.add(buildNode(builder.put(nodeSettings).build(), defaultMinMasterNodes, () -> rebuildUnicastHostFiles(nodes))); + + final NodeAndClient nodeAndClient = + buildNode(firstNodeId + i, builder.put(nodeSettings).build(), false, () -> rebuildUnicastHostFiles(nodes)); + nodes.add(nodeAndClient); } startAndPublishNodesAndClients(nodes); if (autoManageMinMasterNodes) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java b/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java index 60c69bbd6c652..5ed21d64c6890 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java +++ b/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java @@ -24,6 +24,7 @@ import java.nio.file.Path; import java.util.Collection; import java.util.Collections; +import java.util.List; public abstract class NodeConfigurationSource { @@ -51,6 +52,10 @@ public Settings transportClientSettings() { public abstract Path nodeConfigPath(int nodeOrdinal); + public List addExtraClusterBootstrapSettings(List allNodesSettings) { + return allNodesSettings; + } + /** Returns plugins that should be loaded on the node */ public Collection> nodePlugins() { return Collections.emptyList(); diff --git a/x-pack/docs/en/rest-api/security.asciidoc 
b/x-pack/docs/en/rest-api/security.asciidoc index ffc6c0baa3d1d..851bd2ba327b2 100644 --- a/x-pack/docs/en/rest-api/security.asciidoc +++ b/x-pack/docs/en/rest-api/security.asciidoc @@ -2,7 +2,7 @@ [[security-api]] == Security APIs -You can use the following APIs to perform {security} activities. +You can use the following APIs to perform security activities. * <> * <> @@ -66,12 +66,12 @@ native realm: * <> -include::security/put-app-privileges.asciidoc[] include::security/authenticate.asciidoc[] include::security/change-password.asciidoc[] include::security/clear-cache.asciidoc[] -include::security/create-role-mappings.asciidoc[] include::security/clear-roles-cache.asciidoc[] +include::security/put-app-privileges.asciidoc[] +include::security/create-role-mappings.asciidoc[] include::security/create-roles.asciidoc[] include::security/create-users.asciidoc[] include::security/delete-app-privileges.asciidoc[] diff --git a/x-pack/docs/en/rest-api/security/authenticate.asciidoc b/x-pack/docs/en/rest-api/security/authenticate.asciidoc index a556d3854a538..51b0d64419453 100644 --- a/x-pack/docs/en/rest-api/security/authenticate.asciidoc +++ b/x-pack/docs/en/rest-api/security/authenticate.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-authenticate]] === Authenticate API +++++ +Authenticate +++++ The Authenticate API enables you to submit a request with a basic auth header to authenticate a user and retrieve information about the authenticated user. diff --git a/x-pack/docs/en/rest-api/security/change-password.asciidoc b/x-pack/docs/en/rest-api/security/change-password.asciidoc index f33c6f1e51907..2cfa99db6c787 100644 --- a/x-pack/docs/en/rest-api/security/change-password.asciidoc +++ b/x-pack/docs/en/rest-api/security/change-password.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-change-password]] === Change passwords API +++++ +Change passwords +++++ Changes the passwords of users in the native realm. diff --git a/x-pack/docs/en/rest-api/security/clear-cache.asciidoc b/x-pack/docs/en/rest-api/security/clear-cache.asciidoc index c8dcb4cfca98c..604d54fb26a1d 100644 --- a/x-pack/docs/en/rest-api/security/clear-cache.asciidoc +++ b/x-pack/docs/en/rest-api/security/clear-cache.asciidoc @@ -1,8 +1,11 @@ [role="xpack"] [[security-api-clear-cache]] -=== Clear Cache API +=== Clear cache API +++++ +Clear cache +++++ -The Clear Cache API evicts users from the user cache. You can completely clear +The clear cache API evicts users from the user cache. You can completely clear the cache or evict specific users. ==== Request diff --git a/x-pack/docs/en/rest-api/security/clear-roles-cache.asciidoc b/x-pack/docs/en/rest-api/security/clear-roles-cache.asciidoc index 500d629976019..5fbdb0c10cacc 100644 --- a/x-pack/docs/en/rest-api/security/clear-roles-cache.asciidoc +++ b/x-pack/docs/en/rest-api/security/clear-roles-cache.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-clear-role-cache]] === Clear roles cache API +++++ +Clear roles cache +++++ Evicts roles from the native role cache. diff --git a/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc index 91a94050b592b..de2ad5af3081b 100644 --- a/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-put-role-mapping]] === Create or update role mappings API +++++ +Create or update role mappings +++++ Creates and updates role mappings. 
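For quick orientation, a minimal console sketch of the authenticate and change passwords APIs retitled above, assuming the `_security` endpoints used elsewhere in this changeset; the username and password are illustrative:

[source,js]
--------------------------------------------------
GET /_security/_authenticate

PUT /_security/user/jacknich/_password
{
  "password" : "s3cr3t-n3w-passw0rd"
}
--------------------------------------------------
// CONSOLE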
diff --git a/x-pack/docs/en/rest-api/security/create-roles.asciidoc b/x-pack/docs/en/rest-api/security/create-roles.asciidoc index 345fe00dcfc47..224bc87285e3e 100644 --- a/x-pack/docs/en/rest-api/security/create-roles.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-roles.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-put-role]] === Create or update roles API +++++ +Create or update roles +++++ Adds and updates roles in the native realm. diff --git a/x-pack/docs/en/rest-api/security/create-users.asciidoc b/x-pack/docs/en/rest-api/security/create-users.asciidoc index 5c03829469967..1ad1163ffef0a 100644 --- a/x-pack/docs/en/rest-api/security/create-users.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-users.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-put-user]] === Create or update users API +++++ +Create or update users +++++ Adds and updates users in the native realm. These users are commonly referred to as _native users_. diff --git a/x-pack/docs/en/rest-api/security/delete-app-privileges.asciidoc b/x-pack/docs/en/rest-api/security/delete-app-privileges.asciidoc index 2274f1dc58fc1..4e8447663bfd6 100644 --- a/x-pack/docs/en/rest-api/security/delete-app-privileges.asciidoc +++ b/x-pack/docs/en/rest-api/security/delete-app-privileges.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-delete-privilege]] === Delete application privileges API +++++ +Delete application privileges +++++ Removes {stack-ov}/security-privileges.html#application-privileges[application privileges]. diff --git a/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc index 7a6cf9ad29863..8c423008c8c6a 100644 --- a/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc +++ b/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-delete-role-mapping]] === Delete role mappings API +++++ +Delete role mappings +++++ Removes role mappings. diff --git a/x-pack/docs/en/rest-api/security/delete-roles.asciidoc b/x-pack/docs/en/rest-api/security/delete-roles.asciidoc index 2aadc4dcae76a..a62148684a458 100644 --- a/x-pack/docs/en/rest-api/security/delete-roles.asciidoc +++ b/x-pack/docs/en/rest-api/security/delete-roles.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-delete-role]] === Delete roles API +++++ +Delete roles +++++ Removes roles in the native realm. diff --git a/x-pack/docs/en/rest-api/security/delete-users.asciidoc b/x-pack/docs/en/rest-api/security/delete-users.asciidoc index ff1a31dbd524b..547e0ff700439 100644 --- a/x-pack/docs/en/rest-api/security/delete-users.asciidoc +++ b/x-pack/docs/en/rest-api/security/delete-users.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-delete-user]] === Delete users API +++++ +Delete users +++++ Deletes users from the native realm. diff --git a/x-pack/docs/en/rest-api/security/disable-users.asciidoc b/x-pack/docs/en/rest-api/security/disable-users.asciidoc index b587d485cab30..e994d8f3995c9 100644 --- a/x-pack/docs/en/rest-api/security/disable-users.asciidoc +++ b/x-pack/docs/en/rest-api/security/disable-users.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-disable-user]] === Disable users API +++++ +Disable users +++++ Disables users in the native realm. 
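A minimal sketch for the create users and disable users APIs covered above; the username, password, role, and contact details are placeholders:

[source,js]
--------------------------------------------------
PUT /_security/user/jacknich
{
  "password" : "j@rV1s-has-a-strong-one",
  "roles" : [ "monitoring_user" ],
  "full_name" : "Jack Nicholson",
  "email" : "jacknich@example.com"
}

PUT /_security/user/jacknich/_disable
--------------------------------------------------
// CONSOLE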
diff --git a/x-pack/docs/en/rest-api/security/enable-users.asciidoc b/x-pack/docs/en/rest-api/security/enable-users.asciidoc index 891f9697fc114..49ad891119ef4 100644 --- a/x-pack/docs/en/rest-api/security/enable-users.asciidoc +++ b/x-pack/docs/en/rest-api/security/enable-users.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-enable-user]] === Enable users API +++++ +Enable users +++++ Enables users in the native realm. diff --git a/x-pack/docs/en/rest-api/security/get-app-privileges.asciidoc b/x-pack/docs/en/rest-api/security/get-app-privileges.asciidoc index 50630b28053f7..1e73fb6b43548 100644 --- a/x-pack/docs/en/rest-api/security/get-app-privileges.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-app-privileges.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-get-privileges]] === Get application privileges API +++++ +Get application privileges +++++ Retrieves {stack-ov}/security-privileges.html#application-privileges[application privileges]. diff --git a/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc index 433a00d03275e..0c550d67217d4 100644 --- a/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-get-role-mapping]] === Get role mappings API +++++ +Get role mappings +++++ Retrieves role mappings. diff --git a/x-pack/docs/en/rest-api/security/get-roles.asciidoc b/x-pack/docs/en/rest-api/security/get-roles.asciidoc index 34f8f32d4fedb..9c0f5d589b815 100644 --- a/x-pack/docs/en/rest-api/security/get-roles.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-roles.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-get-role]] === Get roles API +++++ +Get roles +++++ Retrieves roles in the native realm. diff --git a/x-pack/docs/en/rest-api/security/get-tokens.asciidoc b/x-pack/docs/en/rest-api/security/get-tokens.asciidoc index 64338c101fc27..cba3b638bc10c 100644 --- a/x-pack/docs/en/rest-api/security/get-tokens.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-tokens.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-get-token]] === Get token API +++++ +Get token +++++ Creates a bearer token for access without requiring basic authentication. diff --git a/x-pack/docs/en/rest-api/security/get-users.asciidoc b/x-pack/docs/en/rest-api/security/get-users.asciidoc index 393230f02e71c..f50b4070b06ec 100644 --- a/x-pack/docs/en/rest-api/security/get-users.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-users.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-get-user]] === Get users API +++++ +Get users +++++ Retrieves information about users in the native realm. 
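A minimal sketch for the get token and get users APIs above; the credentials are placeholders, and the token call assumes the token service is enabled on the cluster:

[source,js]
--------------------------------------------------
POST /_security/oauth2/token
{
  "grant_type" : "password",
  "username" : "jacknich",
  "password" : "j@rV1s-has-a-strong-one"
}

GET /_security/user/jacknich
--------------------------------------------------
// CONSOLE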
diff --git a/x-pack/docs/en/rest-api/security/has-privileges.asciidoc b/x-pack/docs/en/rest-api/security/has-privileges.asciidoc index d12b15688f96a..ee3d871c7943a 100644 --- a/x-pack/docs/en/rest-api/security/has-privileges.asciidoc +++ b/x-pack/docs/en/rest-api/security/has-privileges.asciidoc @@ -1,7 +1,9 @@ [role="xpack"] [[security-api-has-privileges]] -=== Has Privileges API - +=== Has privileges API +++++ +Has privileges +++++ [[security-api-has-privilege]] The `has_privileges` API allows you to determine whether the logged in user has diff --git a/x-pack/docs/en/rest-api/security/invalidate-tokens.asciidoc b/x-pack/docs/en/rest-api/security/invalidate-tokens.asciidoc index 18c88f7addd62..4056bb81bed63 100644 --- a/x-pack/docs/en/rest-api/security/invalidate-tokens.asciidoc +++ b/x-pack/docs/en/rest-api/security/invalidate-tokens.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-invalidate-token]] === Invalidate token API +++++ +Invalidate token +++++ Invalidates one or more access tokens or refresh tokens. diff --git a/x-pack/docs/en/rest-api/security/put-app-privileges.asciidoc b/x-pack/docs/en/rest-api/security/put-app-privileges.asciidoc index e57e3caa1cb22..e14ce93314faa 100644 --- a/x-pack/docs/en/rest-api/security/put-app-privileges.asciidoc +++ b/x-pack/docs/en/rest-api/security/put-app-privileges.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-put-privileges]] === Create or update application privileges API +++++ +Create or update application privileges +++++ Adds or updates {stack-ov}/security-privileges.html#application-privileges[application privileges]. diff --git a/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc b/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc index be4afc57a1a54..54581d4c72195 100644 --- a/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc +++ b/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc @@ -63,7 +63,8 @@ The value specified in the field rule can be one of the following types: The _user object_ against which rules are evaluated has the following fields: `username`:: -(string) The username by which {security} knows this user. For example, `"username": "jsmith"`. +(string) The username by which the {es} {security-features} knows this user. For +example, `"username": "jsmith"`. `dn`:: (string) The _Distinguished Name_ of the user. For example, `"dn": "cn=jsmith,ou=users,dc=example,dc=com",`. `groups`:: diff --git a/x-pack/docs/en/rest-api/security/ssl.asciidoc b/x-pack/docs/en/rest-api/security/ssl.asciidoc index 69541af52b8b5..de73407355b17 100644 --- a/x-pack/docs/en/rest-api/security/ssl.asciidoc +++ b/x-pack/docs/en/rest-api/security/ssl.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-ssl]] -=== SSL Certificate API +=== SSL certificate API +++++ +SSL certificate +++++ The `certificates` API enables you to retrieve information about the X.509 certificates that are used to encrypt communications in your {es} cluster. @@ -14,12 +17,12 @@ certificates that are used to encrypt communications in your {es} cluster. For more information about how certificates are configured in conjunction with Transport Layer Security (TLS), see -{xpack-ref}/ssl-tls.html[Setting up SSL/TLS on a cluster]. +{stack-ov}/ssl-tls.html[Setting up SSL/TLS on a cluster]. 
The API returns a list that includes certificates from all TLS contexts including: -* {xpack} default TLS settings +* Default {es} TLS settings * Settings for transport and HTTP interfaces * TLS settings that are used within authentication realms * TLS settings for remote monitoring exporters @@ -32,13 +35,13 @@ that are used for configuring server identity, such as `xpack.ssl.keystore` and The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in -use within {xpack}. +use within {es}. NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API will return all the certificates that are included in the PKCS#11 token irrespectively to whether these are used in the {es} TLS configuration or not. -If {xpack} is configured to use a keystore or truststore, the API output +If {es} is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster. @@ -56,16 +59,16 @@ single certificate. The fields in each object are: `subject_dn`:: (string) The Distinguished Name of the certificate's subject. `serial_number`:: (string) The hexadecimal representation of the certificate's serial number. -`has_private_key`:: (boolean) If {xpack} has access to the private key for this +`has_private_key`:: (boolean) If {es} has access to the private key for this certificate, this field has a value of `true`. `expiry`:: (string) The ISO formatted date of the certificate's expiry (not-after) date. ==== Authorization -If {security} is enabled, you must have `monitor` cluster privileges to use this -API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +If the {security-features} are enabled, you must have `monitor` cluster +privileges to use this API. For more information, see +{stack-ov}/security-privileges.html[Security Privileges]. ==== Examples diff --git a/x-pack/docs/en/rest-api/watcher/ack-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/ack-watch.asciidoc index f2e1da5309d46..a1704e9acc329 100644 --- a/x-pack/docs/en/rest-api/watcher/ack-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/ack-watch.asciidoc @@ -1,8 +1,11 @@ [role="xpack"] [[watcher-api-ack-watch]] -=== Ack Watch API +=== Ack watch API +++++ +Ack watch +++++ -{xpack-ref}/actions.html#actions-ack-throttle[Acknowledging a watch] enables you +{stack-ov}/actions.html#actions-ack-throttle[Acknowledging a watch] enables you to manually throttle execution of the watch's actions. An action's _acknowledgement state_ is stored in the `status.actions..ack.state` structure. diff --git a/x-pack/docs/en/rest-api/watcher/activate-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/activate-watch.asciidoc index 638ef4c55a8fc..ae49ba1f369c8 100644 --- a/x-pack/docs/en/rest-api/watcher/activate-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/activate-watch.asciidoc @@ -1,9 +1,12 @@ [role="xpack"] [[watcher-api-activate-watch]] -=== Activate Watch API +=== Activate watch API +++++ +Activate watch +++++ A watch can be either -{xpack-ref}/how-watcher-works.html#watch-active-state[active or inactive]. This +{stack-ov}/how-watcher-works.html#watch-active-state[active or inactive]. This API enables you to activate a currently inactive watch. 
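A one-line sketch for the activate watch API above, assuming the un-prefixed `_watcher` endpoint; older releases expose the same call under `_xpack/watcher`, and `my_watch` is a placeholder id:

[source,js]
--------------------------------------------------
PUT _watcher/watch/my_watch/_activate
--------------------------------------------------
// CONSOLE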
[float] diff --git a/x-pack/docs/en/rest-api/watcher/deactivate-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/deactivate-watch.asciidoc index cdda46c2592f1..96fad5a702854 100644 --- a/x-pack/docs/en/rest-api/watcher/deactivate-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/deactivate-watch.asciidoc @@ -1,9 +1,12 @@ [role="xpack"] [[watcher-api-deactivate-watch]] -=== Deactivate Watch API +=== Deactivate watch API +++++ +Deactivate watch +++++ A watch can be either -{xpack-ref}/how-watcher-works.html#watch-active-state[active or inactive]. This +{stack-ov}/how-watcher-works.html#watch-active-state[active or inactive]. This API enables you to deactivate a currently active watch. [float] diff --git a/x-pack/docs/en/rest-api/watcher/delete-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/delete-watch.asciidoc index 8a2fbd5a57721..e6c9d784ebc5d 100644 --- a/x-pack/docs/en/rest-api/watcher/delete-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/delete-watch.asciidoc @@ -1,8 +1,11 @@ [role="xpack"] [[watcher-api-delete-watch]] -=== Delete Watch API +=== Delete watch API +++++ +Delete watch +++++ -The DELETE watch API removes a watch from {watcher}. +The delete watch API removes a watch from {watcher}. [float] ==== Request @@ -20,8 +23,9 @@ related to this watch from the watch history. IMPORTANT: Deleting a watch must be done via this API only. Do not delete the watch directly from the `.watches` index using the Elasticsearch - DELETE Document API. When {security} is enabled, make sure no `write` - privileges are granted to anyone over the `.watches` index. + DELETE Document API. When {es} {security-features} are enabled, make + sure no `write` privileges are granted to anyone over the `.watches` + index. [float] ==== Path Parameters diff --git a/x-pack/docs/en/rest-api/watcher/execute-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/execute-watch.asciidoc index 926c1b87883da..ca3a0de0af781 100644 --- a/x-pack/docs/en/rest-api/watcher/execute-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/execute-watch.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[watcher-api-execute-watch]] -=== Execute Watch API +=== Execute watch API +++++ +Execute watch +++++ The execute watch API forces the execution of a stored watch. It can be used to force execution of the watch outside of its triggering logic, or to simulate the @@ -56,7 +59,7 @@ This API supports the following fields: that will be used during the watch execution | `ignore_condition` | no | false | When set to `true`, the watch execution uses the - {xpack-ref}/condition-always.html[Always Condition]. + {stack-ov}/condition-always.html[Always Condition]. This can also be specified as an HTTP parameter. | `alternative_input` | no | null | When present, the watch uses this object as a payload @@ -73,7 +76,7 @@ This API supports the following fields: This can also be specified as an HTTP parameter. | `watch` | no | null | When present, this - {xpack-ref}/how-watcher-works.html#watch-definition[watch] is used + {stack-ov}/how-watcher-works.html#watch-definition[watch] is used instead of the one specified in the request. This watch is not persisted to the index and record_execution cannot be set. |====== @@ -91,7 +94,7 @@ are five possible modes an action can be associated with: | `simulate` | The action execution is simulated. Each action type define its own simulation operation mode. 
For example, the - {xpack-ref}/actions-email.html[email] action creates + {stack-ov}/actions-email.html[email] action creates the email that would have been sent but does not actually send it. In this mode, the action might be throttled if the current state of the watch indicates it should be. @@ -116,14 +119,14 @@ are five possible modes an action can be associated with: [float] ==== Authorization You must have `manage_watcher` cluster privileges to use this API. For more -information, see {xpack-ref}/security-privileges.html[Security Privileges]. +information, see {stack-ov}/security-privileges.html[Security Privileges]. [float] ==== Security Integration -When {security} is enabled on your Elasticsearch cluster, then watches will be -executed with the privileges of the user that stored the watches. If your user -is allowed to read index `a`, but not index `b`, then the exact same set of +When {es} {security-features} are enabled on your cluster, watches +are executed with the privileges of the user that stored the watches. If your +user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. When using the execute watch API, the authorization data of the user that diff --git a/x-pack/docs/en/rest-api/watcher/get-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/get-watch.asciidoc index 434ce73703bf3..7d62b5c76c41d 100644 --- a/x-pack/docs/en/rest-api/watcher/get-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/get-watch.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[watcher-api-get-watch]] -=== Get Watch API +=== Get watch API +++++ +Get watch +++++ This API retrieves a watch by its ID. diff --git a/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc index 8026d2e565d4e..89b79b5680056 100644 --- a/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[watcher-api-put-watch]] -=== Put Watch API +=== Put watch API +++++ +Put watch +++++ The PUT watch API either registers a new watch in {watcher} or update an existing one. @@ -20,8 +23,8 @@ trigger engine. IMPORTANT: Putting a watch must be done via this API only. Do not put a watch directly to the `.watches` index using the Elasticsearch Index API. - If {security} is enabled, make sure no `write` privileges are - granted to anyone over the `.watches` index. + If {es} {security-features} are enabled, make sure no `write` + privileges are granted to anyone over the `.watches` index. When adding a watch you can also define its initial {xpack-ref}/how-watcher-works.html#watch-active-state[active state]. You do that @@ -77,9 +80,9 @@ information, see {xpack-ref}/security-privileges.html[Security Privileges]. [float] ==== Security Integration -When {security} is enabled, your watch will only be able to index or search on -indices for which the user that stored the watch, has privileges. If the user is -able to read index `a`, but not index `b`, the same will apply, when the watch +When {es} {security-features} are enabled, your watch can index or search only +on indices for which the user that stored the watch has privileges. If the user +is able to read index `a`, but not index `b`, the same will apply, when the watch is executed. 
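To make the privilege behaviour above concrete, a minimal watch whose input only searches index `a`; the watch id, interval, and logging action are illustrative, and the path assumes the un-prefixed `_watcher` endpoint:

[source,js]
--------------------------------------------------
PUT _watcher/watch/index_a_visibility
{
  "trigger" : { "schedule" : { "interval" : "10m" } },
  "input" : {
    "search" : {
      "request" : {
        "indices" : [ "a" ],
        "body" : { "query" : { "match_all" : {} } }
      }
    }
  },
  "actions" : {
    "log_hits" : {
      "logging" : { "text" : "storing user can see {{ctx.payload.hits.total}} docs in index a" }
    }
  }
}
--------------------------------------------------
// CONSOLE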
[float] diff --git a/x-pack/docs/en/rest-api/watcher/start.asciidoc b/x-pack/docs/en/rest-api/watcher/start.asciidoc index 25f41586c0783..54deb1f8068c3 100644 --- a/x-pack/docs/en/rest-api/watcher/start.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/start.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[watcher-api-start]] === Start API +++++ +Start +++++ The `start` API starts the {watcher} service if the service is not already running. diff --git a/x-pack/docs/en/rest-api/watcher/stats.asciidoc b/x-pack/docs/en/rest-api/watcher/stats.asciidoc index 3d99915514a9e..d5fa4c75bbf13 100644 --- a/x-pack/docs/en/rest-api/watcher/stats.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/stats.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[watcher-api-stats]] === Stats API +++++ +Stats +++++ The `stats` API returns the current {watcher} metrics. diff --git a/x-pack/docs/en/rest-api/watcher/stop.asciidoc b/x-pack/docs/en/rest-api/watcher/stop.asciidoc index 0d34717d4eeed..5cbc8a622672f 100644 --- a/x-pack/docs/en/rest-api/watcher/stop.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/stop.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[watcher-api-stop]] === Stop API +++++ +Stop +++++ The `stop` API stops the {watcher} service if the service is running. diff --git a/x-pack/docs/en/security/auditing/event-types.asciidoc b/x-pack/docs/en/security/auditing/event-types.asciidoc index e2d71912ba95d..442877f31b5d3 100644 --- a/x-pack/docs/en/security/auditing/event-types.asciidoc +++ b/x-pack/docs/en/security/auditing/event-types.asciidoc @@ -29,7 +29,7 @@ The following is a list of the events that can be generated: | `run_as_denied` | | | Logged when an authenticated user attempts to <> another user action they do not have the necessary <> to do so. -| `tampered_request` | | | Logged when {security} detects that the request has +| `tampered_request` | | | Logged when the {security-features} detect that the request has been tampered with. Typically relates to `search/scroll` requests when the scroll ID is believed to have been tampered with. diff --git a/x-pack/docs/en/security/auditing/output-index.asciidoc b/x-pack/docs/en/security/auditing/output-index.asciidoc index 1c59762ea2a98..0d4ea3cc6fc60 100644 --- a/x-pack/docs/en/security/auditing/output-index.asciidoc +++ b/x-pack/docs/en/security/auditing/output-index.asciidoc @@ -38,9 +38,9 @@ xpack.security.audit.index.settings: These settings apply to the local audit indices, as well as to the <>, but only if the remote cluster -does *not* have {security} installed, or the {es} versions are different. -If the remote cluster has {security} installed, and the versions coincide, the -settings for the audit indices there will take precedence, +does *not* have {security-features} enabled or the {es} versions are different. +If the remote cluster has {security-features} enabled and the versions coincide, +the settings for the audit indices there will take precedence, even if they are unspecified (i.e. left to defaults). NOTE: Audit events are batched for indexing so there is a lag before diff --git a/x-pack/docs/en/security/auditing/overview.asciidoc b/x-pack/docs/en/security/auditing/overview.asciidoc index 6f04e17d83138..8248bcb082479 100644 --- a/x-pack/docs/en/security/auditing/overview.asciidoc +++ b/x-pack/docs/en/security/auditing/overview.asciidoc @@ -13,7 +13,7 @@ Audit logs are **disabled** by default. To enable this functionality, you must set `xpack.security.audit.enabled` to `true` in `elasticsearch.yml`. 
============================================================================ -{Security} provides two ways to persist audit logs: +The {es} {security-features} provide two ways to persist audit logs: * The <> output, which persists events to a dedicated `_audit.log` file on the host's file system. diff --git a/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc index dd2412c7e2dfc..ab08bb4aaaed9 100644 --- a/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc @@ -2,13 +2,12 @@ [[configuring-ad-realm]] === Configuring an Active Directory realm -You can configure {security} to communicate with Active Directory to authenticate +You can configure {es} to communicate with Active Directory to authenticate users. To integrate with Active Directory, you configure an `active_directory` -realm and map Active Directory users and groups to {security} roles in the role -mapping file. +realm and map Active Directory users and groups to roles in the role mapping file. For more information about Active Directory realms, see -{xpack-ref}/active-directory-realm.html[Active Directory User Authentication]. +{stack-ov}/active-directory-realm.html[Active Directory User Authentication]. . Add a realm configuration of type `active_directory` to `elasticsearch.yml` under the `xpack.security.authc.realms.active_directory` namespace. @@ -25,7 +24,7 @@ NOTE: Binding to Active Directory fails if the domain name is not mapped in DNS. If DNS is not being provided by a Windows DNS server, add a mapping for the domain in the local `/etc/hosts` file. -For example, the following realm configuration configures {security} to connect +For example, the following realm configuration configures {es} to connect to `ldaps://example.com:636` to authenticate users through Active Directory: [source, yaml] @@ -60,7 +59,7 @@ You must also set the `url` setting, since you must authenticate against the Global Catalog, which uses a different port and might not be running on every Domain Controller. -For example, the following realm configuration configures {security} to connect +For example, the following realm configuration configures {es} to connect to specific Domain Controllers on the Global Catalog port with the domain name set to the forest root: @@ -96,7 +95,7 @@ ports (389 or 636) in order to query the configuration container to retrieve the domain name from the NetBIOS name. -- -. (Optional) Configure how {security} should interact with multiple Active +. (Optional) Configure how {es} should interact with multiple Active Directory servers. + -- @@ -113,14 +112,14 @@ operation are supported: failover and load balancing. See <>. + -- The Active Directory realm authenticates users using an LDAP bind request. By -default, all of the LDAP operations are run by the user that {security} is +default, all of the LDAP operations are run by the user that {es} is authenticating. In some cases, regular users may not be able to access all of the necessary items within Active Directory and a _bind user_ is needed. A bind user can be configured and is used to perform all operations other than the LDAP bind request, which is required to authenticate the credentials provided by the user. 
The use of a bind user enables the -{xpack-ref}/run-as-privilege.html[run as feature] to be used with the Active +{stack-ov}/run-as-privilege.html[run as feature] to be used with the Active Directory realm and the ability to maintain a set of pooled connections to Active Directory. These pooled connection reduce the number of resources that must be created and destroyed with every user authentication. @@ -235,7 +234,7 @@ user: <4> The Active Directory distinguished name (DN) of the user `John Doe`. For more information, see -{xpack-ref}/mapping-roles.html[Mapping users and groups to roles]. +{stack-ov}/mapping-roles.html[Mapping users and groups to roles]. -- . (Optional) Configure the `metadata` setting in the Active Directory realm to diff --git a/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc index ac596c11e0e5a..df92442507f64 100644 --- a/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc @@ -76,7 +76,8 @@ required changes. IMPORTANT: As the administrator of the cluster, it is your responsibility to ensure the same users are defined on every node in the cluster. - {security} does not deliver any mechanism to guarantee this. + The {es} {security-features} do not deliver any mechanisms to + guarantee this. -- @@ -103,7 +104,7 @@ the same changes are made on every node in the cluster. . (Optional) Change how often the `users` and `users_roles` files are checked. + -- -By default, {security} checks these files for changes every 5 seconds. You can +By default, {es} checks these files for changes every 5 seconds. You can change this default behavior by changing the `resource.reload.interval.high` setting in the `elasticsearch.yml` file (as this is a common setting in {es}, changing its value may effect other schedules in the system). diff --git a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc index 4f050cf937f7f..184fc76209339 100644 --- a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc @@ -2,15 +2,14 @@ [[configuring-pki-realm]] === Configuring a PKI realm -You can configure {security} to use Public Key Infrastructure (PKI) certificates -to authenticate users in {es}. This requires clients to present X.509 -certificates. +You can configure {es} to use Public Key Infrastructure (PKI) certificates +to authenticate users. This requires clients to present X.509 certificates. NOTE: You cannot use PKI certificates to authenticate users in {kib}. To use PKI in {es}, you configure a PKI realm, enable client authentication on the desired network layers (transport or http), and map the Distinguished Names -(DNs) from the user certificates to {security} roles in the +(DNs) from the user certificates to roles in the <> or role-mapping file. You can also use a combination of PKI and username/password authentication. For @@ -22,7 +21,7 @@ allow clients without certificates to authenticate with other credentials. IMPORTANT: You must enable SSL/TLS and enable client authentication to use PKI. -For more information, see {xpack-ref}/pki-realm.html[PKI User Authentication]. +For more information, see {stack-ov}/pki-realm.html[PKI User Authentication]. . 
Add a realm configuration for a `pki` realm to `elasticsearch.yml` under the `xpack.security.authc.realms.pki` namespace. @@ -75,8 +74,7 @@ xpack: . Enable client authentication on the desired network layers (transport or http). + -- -//TBD: This step might need to be split into a separate topic with additional details -//about setting up client authentication. + The PKI realm relies on the TLS settings of the node's network interface. The realm can be configured to be more restrictive than the underlying network connection - that is, it is possible to configure the node such that some @@ -174,7 +172,7 @@ the result. The user's distinguished name will be populated under the `pki_dn` key. You can also use the authenticate API to validate your role mapping. For more information, see -{xpack-ref}/mapping-roles.html[Mapping Users and Groups to Roles]. +{stack-ov}/mapping-roles.html[Mapping Users and Groups to Roles]. NOTE: The PKI realm supports {stack-ov}/realm-chains.html#authorization_realms[authorization realms] as an diff --git a/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc index 81859c4fd0490..35f79b16d4574 100644 --- a/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc @@ -101,7 +101,7 @@ introduction to realms, see {stack-ov}/realms.html[Realms]. It is recommended that the SAML realm be at the bottom of your authentication chain (that is, it has the _highest_ order). <4> This is the path to the metadata file that you saved for your identity provider. -The path that you enter here is relative to your `config/` directory. {security} +The path that you enter here is relative to your `config/` directory. {es} automatically monitors this file for changes and reloads the configuration whenever it is updated. <5> This is the identifier (SAML EntityID) that your IdP uses. It should match @@ -218,8 +218,8 @@ When a user authenticates using SAML, they are identified to the {stack}, but this does not automatically grant them access to perform any actions or access any data. -Your SAML users cannot do anything until they are mapped to {security} -roles. See {stack-ov}/saml-role-mapping.html[Configuring role mappings]. +Your SAML users cannot do anything until they are mapped to roles. See +{stack-ov}/saml-role-mapping.html[Configuring role mappings]. NOTE: The SAML realm supports {stack-ov}/realm-chains.html#authorization_realms[authorization realms] as an diff --git a/x-pack/docs/en/security/authentication/custom-realm.asciidoc b/x-pack/docs/en/security/authentication/custom-realm.asciidoc index 43a6195385559..3985e7457045f 100644 --- a/x-pack/docs/en/security/authentication/custom-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/custom-realm.asciidoc @@ -3,9 +3,9 @@ === Integrating with other authentication systems If you are using an authentication system that is not supported out-of-the-box -by {security}, you can create a custom realm to interact with it to authenticate -users. You implement a custom realm as an SPI loaded security extension -as part of an ordinary elasticsearch plugin. +by the {es} {security-features}, you can create a custom realm to interact with +it to authenticate users. You implement a custom realm as an SPI loaded security +extension as part of an ordinary elasticsearch plugin. 
[[implementing-custom-realm]] ==== Implementing a custom realm @@ -50,8 +50,8 @@ public AuthenticationFailureHandler getAuthenticationFailureHandler() { ---------------------------------------------------- + The `getAuthenticationFailureHandler` method is used to optionally provide a -custom `AuthenticationFailureHandler`, which will control how {security} responds -in certain authentication failure events. +custom `AuthenticationFailureHandler`, which will control how the +{es} {security-features} respond in certain authentication failure events. + [source,java] ---------------------------------------------------- diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc index fa214dc48b7b1..2c11050a74753 100644 --- a/x-pack/docs/en/security/authentication/saml-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc @@ -151,7 +151,7 @@ order:: idp.metadata.path:: This is the path to the metadata file that you saved for your Identity Provider. The path that you enter here is relative to your `config/` directory. - {security} will automatically monitor this file for changes and will + {es} will automatically monitor this file for changes and will reload the configuration whenever it is updated. idp.entity_id:: @@ -207,14 +207,14 @@ Attributes in SAML are named using a URI such as more values associated with them. These attribute identifiers vary between IdPs, and most IdPs offer ways to -customise the URIs and their associated value. +customize the URIs and their associated value. {es} uses these attributes to infer information about the user who has logged in, and they can be used for role mapping (below). In order for these attributes to be useful, {es} and the IdP need to have a common value for the names of the attributes. This is done manually, by -configuring the IdP and the {security} SAML realm to use the same URI name for +configuring the IdP and the SAML realm to use the same URI name for each logical user attribute. The recommended steps for configuring these SAML attributes are as follows: @@ -469,7 +469,7 @@ or separate keys used for each of those. The Elastic Stack uses X.509 certificates with RSA private keys for SAML cryptography. These keys can be generated using any standard SSL tool, including -the `elasticsearch-certutil` tool that ships with {xpack}. +the `elasticsearch-certutil` tool. Your IdP may require that the Elastic Stack have a cryptographic key for signing SAML messages, and that you provide the corresponding signing certificate within @@ -518,7 +518,7 @@ Encryption certificates can be generated with the same process. ===== Configuring {es} for signing -By default, {security} will sign _all_ outgoing SAML messages if a signing +By default, {es} will sign _all_ outgoing SAML messages if a signing key has been configured. If you wish to use *PEM formatted* keys and certificates for signing, then @@ -559,17 +559,17 @@ are: `AuthnRequest`, `LogoutRequest` and `LogoutResponse`. ===== Configuring {es} for encrypted messages -{security} supports a single key for message decryption. If a key is -configured, then {security} will attempt to use it to decrypt +The {es} {security-features} support a single key for message decryption. If a +key is configured, then {es} attempts to use it to decrypt `EncryptedAssertion` and `EncryptedAttribute` elements in Authentication responses, and `EncryptedID` elements in Logout requests. 
-{security} will reject any SAML message that contains an `EncryptedAssertion` +{es} rejects any SAML message that contains an `EncryptedAssertion` that cannot be decrypted. If an `Assertion` contains both encrypted and plain-text attributes, then failure to decrypt the encrypted attributes will not cause an automatic -rejection. Rather, {security} will process the available plain-text attributes +rejection. Rather, {es} processes the available plain-text attributes (and any `EncryptedAttributes` that could be decrypted). If you wish to use *PEM formatted* keys and certificates for SAML encryption, @@ -620,8 +620,8 @@ When a user authenticates using SAML, they are identified to the Elastic Stack, but this does not automatically grant them access to perform any actions or access any data. -Your SAML users cannot do anything until they are assigned {security} -roles. This is done through either the +Your SAML users cannot do anything until they are assigned roles. This is done +through either the {ref}/security-api-put-role-mapping.html[add role mapping API], or with <>. @@ -680,7 +680,7 @@ PUT /_security/role_mapping/saml-finance // CONSOLE // TEST -If your users also exist in a repository that can be directly accessed by {security} +If your users also exist in a repository that can be directly accessed by {es} (such as an LDAP directory) then you can use <> instead of role mappings. @@ -858,3 +858,15 @@ xpack.security.authc.realms.saml.saml_eng: It is possible to have one or more {kib} instances that use SAML, while other instances use basic authentication against another realm type (e.g. <> or <>). + +=== Troubleshooting SAML Realm Configuration + +The SAML 2.0 specification offers a lot of options and flexibility for the implementers +of the standard which in turn adds to the complexity and the number of configuration options +that are available both at the Service Provider (Elastic Stack) and at the Identity Provider. +Additionally, different security domains have different security requirements that need +specific configuration to be satisfied. +A conscious effort has been made to mask this complexity with sane defaults and the detailed +documentation above but in case you encounter issues while configuring a SAML realm, you can +look through our {stack-ov}/trb-security-saml.html[SAML troubleshooting documentation] that has +suggestions and resolutions for common issues. diff --git a/x-pack/docs/en/security/authentication/user-cache.asciidoc b/x-pack/docs/en/security/authentication/user-cache.asciidoc index 99cc6c7d6cd6a..aa5cd1dc9038f 100644 --- a/x-pack/docs/en/security/authentication/user-cache.asciidoc +++ b/x-pack/docs/en/security/authentication/user-cache.asciidoc @@ -10,18 +10,17 @@ You can configure characteristics of the user cache with the `cache.ttl`, NOTE: PKI realms do not cache user credentials but do cache the resolved user object to avoid unnecessarily needing to perform role mapping on each request. -The cached user credentials are hashed in memory. By default, {security} uses a -salted `sha-256` hash algorithm. You can use a different hashing algorithm by -setting the `cache.hash_algo` realm settings. See +The cached user credentials are hashed in memory. By default, the {es} +{security-features} use a salted `sha-256` hash algorithm. You can use a +different hashing algorithm by setting the `cache.hash_algo` realm settings. See {ref}/security-settings.html#hashing-settings[User cache and password hash algorithms]. 
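The cache settings above are configured per realm. A hedged `elasticsearch.yml` sketch using the type-then-name realm namespace shown elsewhere in this changeset; the realm type, name, and URL are placeholders:

[source,yaml]
--------------------------------------------------
xpack.security.authc.realms.ldap.ldap1:
  order: 2
  url: "ldaps://ldap.example.com:636"
  cache.ttl: 10m
  cache.hash_algo: ssha256
--------------------------------------------------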
[[cache-eviction-api]] ==== Evicting users from the cache -{security} exposes a -{ref}/security-api-clear-cache.html[Clear Cache API] you can use -to force the eviction of cached users. For example, the following request evicts -all users from the `ad1` realm: +You can use the {ref}/security-api-clear-cache.html[clear cache API] to force +the eviction of cached users . For example, the following request evicts all +users from the `ad1` realm: [source, js] ------------------------------------------------------------ diff --git a/x-pack/docs/en/security/authorization/alias-privileges.asciidoc b/x-pack/docs/en/security/authorization/alias-privileges.asciidoc index 05c9359df5aeb..b9b6d44fd69b7 100644 --- a/x-pack/docs/en/security/authorization/alias-privileges.asciidoc +++ b/x-pack/docs/en/security/authorization/alias-privileges.asciidoc @@ -4,7 +4,8 @@ Elasticsearch allows to execute operations against {ref}/indices-aliases.html[index aliases], which are effectively virtual indices. An alias points to one or more indices, -holds metadata and potentially a filter. {security} treats aliases and indices +holds metadata and potentially a filter. The {es} {security-features} treat +aliases and indices the same. Privileges for indices actions are granted on specific indices or aliases. In order for an indices action to be authorized, the user that executes it needs to have permissions for that action on all the specific indices or diff --git a/x-pack/docs/en/security/authorization/custom-roles-provider.asciidoc b/x-pack/docs/en/security/authorization/custom-roles-provider.asciidoc index c218fa04f8ec7..bb8942985b701 100644 --- a/x-pack/docs/en/security/authorization/custom-roles-provider.asciidoc +++ b/x-pack/docs/en/security/authorization/custom-roles-provider.asciidoc @@ -3,7 +3,8 @@ === Custom roles provider extension If you need to retrieve user roles from a system not supported out-of-the-box -by {security}, you can create a custom roles provider to retrieve and resolve +by the {es} {security-features}, you can create a custom roles provider to +retrieve and resolve roles. You implement a custom roles provider as an SPI loaded security extension as part of an ordinary elasticsearch plugin. diff --git a/x-pack/docs/en/security/authorization/managing-roles.asciidoc b/x-pack/docs/en/security/authorization/managing-roles.asciidoc index 453e0809d30ff..cac4eaac1fbfa 100644 --- a/x-pack/docs/en/security/authorization/managing-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/managing-roles.asciidoc @@ -130,7 +130,7 @@ The following describes the structure of an application privileges entry: <2> The list of the names of the application privileges to grant to this role. <3> The resources to which those privileges apply. These are handled in the same way as index name pattern in `indices` permissions. These resources do not - have any special meaning to {security}. + have any special meaning to the {es} {security-features}. For details about the validation rules for these fields, see the {ref}/security-api-put-privileges.html[add application privileges API]. @@ -176,7 +176,7 @@ Based on the above definition, users owning the `clicks_admin` role can: TIP: For a complete list of available <> There are two available mechanisms to define roles: using the _Role Management APIs_ -or in local files on the {es} nodes. {security} also supports implementing +or in local files on the {es} nodes. You can also implement custom roles providers. 
If you need to integrate with another system to retrieve user roles, you can build a custom roles provider plugin. For more information, see <>. @@ -185,7 +185,7 @@ see <>. [[roles-management-ui]] === Role management UI -{security} enables you to easily manage users and roles from within {kib}. To +You can manage users and roles easily in {kib}. To manage roles, log in to {kib} and go to *Management / Elasticsearch / Roles*. [float] @@ -242,5 +242,5 @@ click_admins: query: '{"match": {"category": "click"}}' ----------------------------------- -{security} continuously monitors the `roles.yml` file and automatically picks +{es} continuously monitors the `roles.yml` file and automatically picks up and applies any changes to it. diff --git a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc index a20c194ef4ea5..a99e385bd8c25 100644 --- a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc @@ -10,9 +10,10 @@ For other types of realms, you must create _role-mappings_ that define which roles should be assigned to each user based on their username, groups, or other metadata. -{security} allows role-mappings to be defined via an -<>, or managed through <>. -These two sources of role-mapping are combined inside of {security}, so it is +You can define role-mappings via an +<> or manage them through <>. +These two sources of role-mapping are combined inside of the {es} +{security-features}, so it is possible for a single user to have some roles that have been mapped through the API, and other roles that are mapped through files. @@ -54,7 +55,7 @@ are values. The mappings can have a many-to-many relationship. When you map role to groups, the roles of a user in that group are the combination of the roles assigned to that group and the roles assigned to that user. -By default, {security} checks role mapping files for changes every 5 seconds. +By default, {es} checks role mapping files for changes every 5 seconds. You can change this default behavior by changing the `resource.reload.interval.high` setting in the `elasticsearch.yml` file. Since this is a common setting in Elasticsearch, changing its value might effect other @@ -69,8 +70,8 @@ To specify users and groups in the role mappings, you use their _Distinguished Names_ (DNs). A DN is a string that uniquely identifies the user or group, for example `"cn=John Doe,cn=contractors,dc=example,dc=com"`. -NOTE: {security} only supports Active Directory security groups. You cannot map - distribution groups to roles. +NOTE: The {es} {security-features} support only Active Directory security groups. +You cannot map distribution groups to roles. For example, the following snippet uses the file-based method to map the `admins` group to the `monitoring` role and map the `John Doe` user, the @@ -85,7 +86,7 @@ user: - "cn=users,dc=example,dc=com" - "cn=admins,dc=example,dc=com" ------------------------------------------------------------ -<1> The name of a {security} role. +<1> The name of a role. <2> The distinguished name of an LDAP group or an Active Directory security group. <3> The distinguished name of an LDAP or Active Directory user. 
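The same mapping can also be managed through the add role mapping API instead of the file. Below is a minimal sketch using the Java low-level REST client; the mapping name `admins_monitoring`, the local host and port, and the unauthenticated client are assumptions for illustration, while the group DN and the `monitoring` role mirror the file-based example above.

[source,java]
----
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class RoleMappingApiSketch {
    public static void main(String[] args) throws Exception {
        // Assumes a local test node; a secured cluster would also need credentials.
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Map the "admins" group to the "monitoring" role via the role mapping API.
            Request request = new Request("PUT", "/_security/role_mapping/admins_monitoring");
            request.setJsonEntity(
                "{\n" +
                "  \"roles\": [ \"monitoring\" ],\n" +
                "  \"enabled\": true,\n" +
                "  \"rules\": {\n" +
                "    \"field\": { \"groups\": \"cn=admins,dc=example,dc=com\" }\n" +
                "  }\n" +
                "}");
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
----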
diff --git a/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc b/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc index 8dba764cc1cb1..dfc7a57939af8 100644 --- a/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc +++ b/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc @@ -2,10 +2,11 @@ [[run-as-privilege]] === Submitting requests on behalf of other users -{security} supports a permission that enables an authenticated user to submit +The {es} {security-features} support a permission that enables an authenticated +user to submit requests on behalf of other users. If your application already authenticates users, you can use the _run as_ mechanism to restrict data access according to -{security} permissions without having to re-authenticate each user through. +{es} permissions without having to re-authenticate each user. To "run as" (impersonate) another user, you must be able to retrieve the user from the realm you use to authenticate. Both the internal `native` and `file` realms diff --git a/x-pack/docs/en/security/ccs-clients-integrations.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations.asciidoc index f744e6d7092e3..1a52a9dab7a87 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations.asciidoc @@ -15,10 +15,10 @@ secured cluster: * <> -{security} enables you to secure your {es} cluster. But {es} itself is only one -product within the Elastic Stack. It is often the case that other products in -the stack are connected to the cluster and therefore need to be secured as well, -or at least communicate with the cluster in a secured way: +The {es} {security-features} enable you to secure your {es} cluster. But +{es} itself is only one product within the {stack}. 
It is often the case that +other products in the stack are connected to the cluster and therefore need to +be secured as well, or at least communicate with the cluster in a secured way: * <> * {auditbeat-ref}/securing-beats.html[Auditbeat] diff --git a/x-pack/docs/en/security/ccs-clients-integrations/beats.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/beats.asciidoc index 43c8be5409c28..fefc103c50995 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations/beats.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations/beats.asciidoc @@ -3,9 +3,9 @@ See: -* {auditbeat-ref}/securing-beats.html[Auditbeat and {security}] -* {filebeat-ref}/securing-beats.html[Filebeat and {security}] -* {heartbeat-ref}/securing-beats.html[Heartbeat and {security}] -* {metricbeat-ref}/securing-beats.html[Metricbeat and {security}] -* {packetbeat-ref}/securing-beats.html[Packetbeat and {security}] -* {winlogbeat-ref}/securing-beats.html[Winlogbeat and {security}] +* {auditbeat-ref}/securing-beats.html[{auditbeat}] +* {filebeat-ref}/securing-beats.html[{filebeat}] +* {heartbeat-ref}/securing-beats.html[{heartbeat}] +* {metricbeat-ref}/securing-beats.html[{metricbeat}] +* {packetbeat-ref}/securing-beats.html[{packetbeat}] +* {winlogbeat-ref}/securing-beats.html[{winlogbeat}] diff --git a/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc index 235af4ea4eb05..b72afcb9b011c 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc @@ -1,9 +1,10 @@ [[cross-cluster-configuring]] -=== Cross Cluster Search and Security +=== Cross cluster search and security -{ref}/modules-cross-cluster-search.html[Cross Cluster Search] enables +{ref}/modules-cross-cluster-search.html[Cross cluster search] enables federated search across multiple clusters. When using cross cluster search -with secured clusters, all clusters must have {security} enabled. +with secured clusters, all clusters must have the {es} {security-features} +enabled. The local cluster (the cluster used to initiate cross cluster search) must be allowed to connect to the remote clusters, which means that the CA used to @@ -22,8 +23,8 @@ This feature was added as Beta in {es} `v5.3` with further improvements made in To use cross cluster search with secured clusters: -* Enable {security} on every node in each connected cluster. For more -information about the `xpack.security.enabled` setting, see +* Enable the {es} {security-features} on every node in each connected cluster. +For more information about the `xpack.security.enabled` setting, see {ref}/security-settings.html[Security Settings in {es}]. * Enable encryption globally. To encrypt communications, you must enable diff --git a/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc index d78c32bc361ff..6b7ff26cbf3eb 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc @@ -1,7 +1,8 @@ [[http-clients]] -=== HTTP/REST Clients and Security +=== HTTP/REST clients and security -{security} works with standard HTTP {wikipedia}/Basic_access_authentication[basic authentication] +The {es} {security-features} work with standard HTTP +{wikipedia}/Basic_access_authentication[basic authentication] headers to authenticate users. 
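For example, with the Java low-level REST client one might attach such a header as a client-level default, as in the minimal sketch below. The host, port, and lack of TLS are assumptions for illustration; the credentials are simply the ones reused by the `curl` example that follows.

[source,java]
----
import java.nio.charset.StandardCharsets;
import java.util.Base64;

import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.message.BasicHeader;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class BasicAuthClientSketch {
    public static void main(String[] args) throws Exception {
        // Example credentials taken from the curl example below; use your own user.
        String token = Base64.getEncoder()
            .encodeToString("rdeniro:taxidriver".getBytes(StandardCharsets.UTF_8));

        RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http"))
            // Attach the basic authentication header to every request this client sends.
            .setDefaultHeaders(new Header[] { new BasicHeader("Authorization", "Basic " + token) })
            .build();
        try {
            Response response = client.performRequest(new Request("GET", "/"));
            System.out.println(response.getStatusLine());
        } finally {
            client.close();
        }
    }
}
----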
Since Elasticsearch is stateless, this header must be sent with every request: @@ -48,8 +49,8 @@ curl --user rdeniro:taxidriver -XPUT 'localhost:9200/idx' [float] ==== Client Libraries over HTTP -For more information about how to use {security} with the language specific clients -please refer to +For more information about using {security-features} with the language +specific clients, refer to https://github.com/elasticsearch/elasticsearch-ruby/tree/master/elasticsearch-transport#authentication[Ruby], http://elasticsearch-py.readthedocs.org/en/master/#ssl-and-authentication[Python], https://metacpan.org/pod/Search::Elasticsearch::Cxn::HTTPTiny#CONFIGURATION[Perl], diff --git a/x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc index 3c537ef5ee2eb..8166f5cff9bcb 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc @@ -1,9 +1,9 @@ [[java-clients]] -=== Java Client and Security +=== Java Client and security deprecated[7.0.0, The `TransportClient` is deprecated in favour of the {java-rest}/java-rest-high.html[Java High Level REST Client] and will be removed in Elasticsearch 8.0. The {java-rest}/java-rest-high-level-migration.html[migration guide] describes all the steps needed to migrate.] -{security} supports the Java http://www.elastic.co/guide/en/elasticsearch/client/java-api/current/transport-client.html[transport client] for Elasticsearch. +The {es} {security-features} support the Java http://www.elastic.co/guide/en/elasticsearch/client/java-api/current/transport-client.html[transport client] for Elasticsearch. The transport client uses the same transport protocol that the cluster nodes use for inter-node communication. It is very efficient as it does not have to marshall and unmarshall JSON requests like a typical REST client. @@ -21,7 +21,8 @@ To use the transport client with a secured cluster, you need to: . {ref}/setup-xpack-client.html[Configure the {xpack} transport client]. . Configure a user with the privileges required to start the transport client. -A default `transport_client` role is built-in to {xpack} that grants the +A default `transport_client` role is built-in to the {es} {security-features}, +which grants the appropriate cluster permissions for the transport client to work with the secured cluster. The transport client uses the _Nodes Info API_ to fetch information about the nodes in the cluster. @@ -137,7 +138,7 @@ TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() [float] [[disabling-client-auth]] -===== Disabling Client Authentication +===== Disabling client authentication If you want to disable client authentication, you can use a client-specific transport protocol. For more information see <>. @@ -167,7 +168,7 @@ NOTE: If you are using a public CA that is already trusted by the Java runtime, [float] [[connecting-anonymously]] -===== Connecting Anonymously +===== Connecting anonymously To enable the transport client to connect anonymously, you must assign the anonymous user the privileges defined in the <> @@ -176,14 +177,14 @@ see <>. [float] [[security-client]] -==== Security Client +==== Security client -{security} exposes its own API through the `SecurityClient` class. 
To get a hold -of a `SecurityClient` you'll first need to create the `XPackClient`, which is a -wrapper around the existing Elasticsearch clients (any client class implementing +The {stack} {security-features} expose an API through the `SecurityClient` class. +To get a hold of a `SecurityClient` you first need to create the `XPackClient`, +which is a wrapper around the existing {es} clients (any client class implementing `org.elasticsearch.client.Client`). -The following example shows how you can clear {security}'s realm caches using +The following example shows how you can clear the realm caches using the `SecurityClient`: [source,java] diff --git a/x-pack/docs/en/security/ccs-clients-integrations/monitoring.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/monitoring.asciidoc index 67bffadfb296b..37c7e38f651bd 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations/monitoring.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations/monitoring.asciidoc @@ -1,15 +1,15 @@ [[secure-monitoring]] -=== Monitoring and Security +=== Monitoring and security -<> consists of two components: an agent -that you install on on each {es} and Logstash node, and a Monitoring UI +The <> consist of two components: +an agent that you install on on each {es} and Logstash node, and a Monitoring UI in {kib}. The monitoring agent collects and indexes metrics from the nodes and you visualize the data through the Monitoring dashboards in {kib}. The agent can index data on the same {es} cluster, or send it to an external monitoring cluster. -To use {monitoring} with {security} enabled, you need to -{kibana-ref}/using-kibana-with-security.html[set up {kib} to work with {security}] +To use the {monitor-features} with the {security-features} enabled, you need to +{kibana-ref}/using-kibana-with-security.html[set up {kib} to work with the {security-features}] and create at least one user for the Monitoring UI. If you are using an external monitoring cluster, you also need to configure a user for the monitoring agent and configure the agent to use the appropriate credentials when communicating diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index 22140eb680c9d..d9be6129347ec 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -2,25 +2,25 @@ [[configuring-security]] == Configuring security in {es} ++++ -Configuring Security +Configuring security ++++ -{security} enables you to easily secure a cluster. With {security}, you can +The {es} {security-features} enable you to easily secure a cluster. You can password-protect your data as well as implement more advanced security measures such as encrypting communications, role-based access control, IP filtering, and auditing. For more information, see -{xpack-ref}/elasticsearch-security.html[Securing the Elastic Stack]. +{stack-ov}/elasticsearch-security.html[Securing the {stack}]. -To use {security} in {es}: +To use {es} {security-features}: -. Verify that you are using a license that includes the {security} feature. +. Verify that you are using a license that includes the {security-features}. + -- -If you want to try all of the {xpack} features, you can start a 30-day trial. At -the end of the trial period, you can purchase a subscription to keep using the -full functionality of the {xpack} components. For more information, see +If you want to try all of the platinum features, you can start a 30-day trial. 
+At the end of the trial period, you can purchase a subscription to keep using +the full functionality. For more information, see https://www.elastic.co/subscriptions and -{xpack-ref}/license-management.html[License Management]. +{stack-ov}/license-management.html[License Management]. -- . Verify that the `xpack.security.enabled` setting is `true` on each node in @@ -37,7 +37,7 @@ NOTE: This requirement applies to clusters with more than one node and to clusters with a single node that listens on an external interface. Single-node clusters that use a loopback interface do not have this requirement. For more information, see -{xpack-ref}/encrypting-communications.html[Encrypting Communications]. +{stack-ov}/encrypting-communications.html[Encrypting Communications]. -- .. <>. @@ -49,7 +49,7 @@ information, see . Set the passwords for all built-in users. + -- -{security} provides +The {es} {security-features} provide {stack-ov}/built-in-users.html[built-in users] to help you get up and running. The +elasticsearch-setup-passwords+ command is the simplest method to set the built-in users' passwords for the first time. @@ -126,7 +126,7 @@ curl -XPOST -u elastic 'localhost:9200/_security/user/johndoe' -H "Content-Type: xpack.security.audit.enabled: true ---------------------------- + -For more information, see {xpack-ref}/auditing.html[Auditing Security Events] +For more information, see {stack-ov}/auditing.html[Auditing Security Events] and <>. .. Restart {es}. diff --git a/x-pack/docs/en/security/securing-communications.asciidoc b/x-pack/docs/en/security/securing-communications.asciidoc index 84f3b0bc27ac6..63fded729eb8c 100644 --- a/x-pack/docs/en/security/securing-communications.asciidoc +++ b/x-pack/docs/en/security/securing-communications.asciidoc @@ -6,7 +6,8 @@ Elasticsearch nodes store data that may be confidential. Attacks on the data may come from the network. These attacks could include sniffing of the data, manipulation of the data, and attempts to gain access to the server and thus the files storing the data. Securing your nodes is required in order to use a production -license that enables {security} and helps reduce the risk from network-based attacks. +license that enables {security-features} and helps reduce the risk from +network-based attacks. This section shows how to: diff --git a/x-pack/docs/en/security/using-ip-filtering.asciidoc b/x-pack/docs/en/security/using-ip-filtering.asciidoc index 86579f220f1ee..4e99ec4903dd1 100644 --- a/x-pack/docs/en/security/using-ip-filtering.asciidoc +++ b/x-pack/docs/en/security/using-ip-filtering.asciidoc @@ -5,19 +5,19 @@ You can apply IP filtering to application clients, node clients, or transport clients, in addition to other nodes that are attempting to join the cluster. -If a node's IP address is on the blacklist, {security} will still allow the -connection to Elasticsearch, but it will be dropped immediately, and no requests -will be processed. +If a node's IP address is on the blacklist, the {es} {security-features} allow +the connection to {es}, but it is dropped immediately and no requests are +processed. NOTE: Elasticsearch installations are not designed to be publicly accessible - over the Internet. IP Filtering and the other security capabilities of - {security} do not change this condition. + over the Internet. IP Filtering and the other capabilities of the + {es} {security-features} do not change this condition. 
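Assuming the `xpack.security.transport.filter.*` settings described in the next section are dynamically updatable (as the IP filtering documentation for this version describes), the filter lists can also be changed at runtime through the cluster update settings API. The sketch below does that with the Java low-level REST client; the addresses, the `transient` scope, and the unauthenticated local client are illustrative assumptions, not values taken from this change.

[source,java]
----
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class IpFilterUpdateSketch {
    public static void main(String[] args) throws Exception {
        // Assumes a local test node; a secured cluster would also need credentials.
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Allow a trusted subnet and drop a single host at the transport layer.
            // The addresses are placeholders; adjust them to your own network.
            Request request = new Request("PUT", "/_cluster/settings");
            request.setJsonEntity(
                "{\n" +
                "  \"transient\": {\n" +
                "    \"xpack.security.transport.filter.allow\": \"172.16.0.0/24\",\n" +
                "    \"xpack.security.transport.filter.deny\": \"192.168.1.22\"\n" +
                "  }\n" +
                "}");
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
----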
[float] === Enabling IP filtering -{security} features an access control feature that allows or rejects hosts, -domains, or subnets. +The {es} {security-features} contain an access control feature that allows or +rejects hosts, domains, or subnets. You configure IP filtering by specifying the `xpack.security.transport.filter.allow` and `xpack.security.transport.filter.deny` settings in in `elasticsearch.yml`. Allow rules @@ -79,7 +79,7 @@ xpack.security.http.filter.enabled: true === Specifying TCP transport profiles {ref}/modules-transport.html[TCP transport profiles] -enable Elasticsearch to bind on multiple hosts. {security} enables you to apply +enable Elasticsearch to bind on multiple hosts. The {es} {security-features} enable you to apply different IP filtering on different profiles. [source,yaml] diff --git a/x-pack/docs/en/watcher/actions/webhook.asciidoc b/x-pack/docs/en/watcher/actions/webhook.asciidoc index 315e2639085d7..1b7c482d2c4be 100644 --- a/x-pack/docs/en/watcher/actions/webhook.asciidoc +++ b/x-pack/docs/en/watcher/actions/webhook.asciidoc @@ -70,13 +70,13 @@ For example, the following `webhook` action creates a new issue in GitHub: <1> The username and password for the user creating the issue NOTE: By default, both the username and the password are stored in the `.watches` - index in plain text. When {security} is enabled, {watcher} can encrypt the - password before storing it. + index in plain text. When the {es} {security-features} are enabled, + {watcher} can encrypt the password before storing it. You can also use PKI-based authentication when submitting requests to a cluster -secured with {security}. When you use PKI-based authentication instead of HTTP -basic auth, you don't need to store any authentication information in the watch -itself. To use PKI-based authentication, you {ref}/notification-settings.html#ssl-notification-settings +that has {es} {security-features} enabled. When you use PKI-based authentication +instead of HTTP basic auth, you don't need to store any authentication +information in the watch itself. To use PKI-based authentication, you {ref}/notification-settings.html#ssl-notification-settings [configure the SSL key settings] for {watcher} in `elasticsearch.yml`. diff --git a/x-pack/docs/en/watcher/java/delete-watch.asciidoc b/x-pack/docs/en/watcher/java/delete-watch.asciidoc index 1e9b7f0e0be0c..4d37b910fd179 100644 --- a/x-pack/docs/en/watcher/java/delete-watch.asciidoc +++ b/x-pack/docs/en/watcher/java/delete-watch.asciidoc @@ -11,8 +11,8 @@ related to this watch from the watch history. IMPORTANT: Deleting a watch must be done via this API only. Do not delete the watch directly from the `.watches` index using Elasticsearch's DELETE - Document API. I {security} is enabled, make sure no `write` privileges - are granted to anyone over the `.watches` index. + Document API. If the {es} {security-features} are enabled, make sure + no `write` privileges are granted to anyone over the `.watches` index. The following example deletes a watch with the `my-watch` id: diff --git a/x-pack/docs/en/watcher/java/put-watch.asciidoc b/x-pack/docs/en/watcher/java/put-watch.asciidoc index 17d564bbcb90c..682504187139b 100644 --- a/x-pack/docs/en/watcher/java/put-watch.asciidoc +++ b/x-pack/docs/en/watcher/java/put-watch.asciidoc @@ -10,8 +10,8 @@ registered with the relevant trigger engine (typically the scheduler, for the IMPORTANT: Putting a watch must be done via this API only. 
Do not put a watch directly to the `.watches` index using Elasticsearch's Index API. - When {security} is enabled, make sure no `write` privileges are - granted to anyone over the `.watches` index. + When the {es} {security-features} are enabled, make sure no `write` + privileges are granted to anyone over the `.watches` index. The following example adds a watch with the `my-watch` id that has the following diff --git a/x-pack/docs/en/watcher/managing-watches.asciidoc b/x-pack/docs/en/watcher/managing-watches.asciidoc index a7598d1f0dadf..a155132d5e4b1 100644 --- a/x-pack/docs/en/watcher/managing-watches.asciidoc +++ b/x-pack/docs/en/watcher/managing-watches.asciidoc @@ -19,9 +19,9 @@ since {watcher} stores its watches in the `.watches` index, you can list them by executing a search on this index. IMPORTANT: You can only perform read actions on the `.watches` index. You must - use the {watcher} APIs to create, update, and delete watches. If - {security} is enabled, we recommend you only grant users `read` - privileges on the `.watches` index. + use the {watcher} APIs to create, update, and delete watches. If {es} + {security-features} are enabled, we recommend you only grant users + `read` privileges on the `.watches` index. For example, the following returns the first 100 watches: diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index 0de5dd3c1a45e..8d677866e3219 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -162,7 +162,7 @@ public Collection createComponents( ccrLicenseChecker, restoreSourceService, new CcrRepositoryManager(settings, clusterService, client), - new AutoFollowCoordinator(client, clusterService, ccrLicenseChecker, threadPool::relativeTimeInMillis) + new AutoFollowCoordinator(settings, client, clusterService, ccrLicenseChecker, threadPool::relativeTimeInMillis) ); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java index 544a45792e070..d7495dec8c2cf 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java @@ -7,6 +7,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.core.XPackSettings; import java.util.Arrays; @@ -28,6 +29,12 @@ private CcrSettings() { public static final Setting CCR_FOLLOWING_INDEX_SETTING = Setting.boolSetting("index.xpack.ccr.following_index", false, Property.IndexScope, Property.InternalIndex); + /** + * Dynamic node setting for specifying the wait_for_timeout that the auto follow coordinator should be using. + */ + public static final Setting CCR_AUTO_FOLLOW_WAIT_FOR_METADATA_TIMEOUT = Setting.timeSetting( + "ccr.auto_follow.wait_for_metadata_timeout", TimeValue.timeValueSeconds(60), Property.NodeScope, Property.Dynamic); + /** * The settings defined by CCR. 
* @@ -36,7 +43,8 @@ private CcrSettings() { static List> getSettings() { return Arrays.asList( XPackSettings.CCR_ENABLED_SETTING, - CCR_FOLLOWING_INDEX_SETTING); + CCR_FOLLOWING_INDEX_SETTING, + CCR_AUTO_FOLLOW_WAIT_FOR_METADATA_TIMEOUT); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index 4888b0367fd20..5cc5920cd213a 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -27,12 +27,14 @@ import org.elasticsearch.common.collect.CopyOnWriteHashMap; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.xpack.ccr.CcrLicenseChecker; +import org.elasticsearch.xpack.ccr.CcrSettings; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; import org.elasticsearch.xpack.core.ccr.AutoFollowStats; @@ -45,6 +47,7 @@ import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -72,6 +75,7 @@ public class AutoFollowCoordinator implements ClusterStateListener { private final CcrLicenseChecker ccrLicenseChecker; private final LongSupplier relativeMillisTimeProvider; + private volatile TimeValue waitForMetadataTimeOut; private volatile Map autoFollowers = Collections.emptyMap(); // The following fields are read and updated under a lock: @@ -81,6 +85,7 @@ public class AutoFollowCoordinator implements ClusterStateListener { private final LinkedHashMap recentAutoFollowErrors; public AutoFollowCoordinator( + Settings settings, Client client, ClusterService clusterService, CcrLicenseChecker ccrLicenseChecker, @@ -97,6 +102,15 @@ protected boolean removeEldestEntry(final Map.Entry MAX_AUTO_FOLLOW_ERRORS; } }; + + Consumer updater = newWaitForTimeOut -> { + if (newWaitForTimeOut.equals(waitForMetadataTimeOut) == false) { + LOGGER.info("changing wait_for_metadata_timeout from [{}] to [{}]", waitForMetadataTimeOut, newWaitForTimeOut); + waitForMetadataTimeOut = newWaitForTimeOut; + } + }; + clusterService.getClusterSettings().addSettingsUpdateConsumer(CcrSettings.CCR_AUTO_FOLLOW_WAIT_FOR_METADATA_TIMEOUT, updater); + waitForMetadataTimeOut = CcrSettings.CCR_AUTO_FOLLOW_WAIT_FOR_METADATA_TIMEOUT.get(settings); } public synchronized AutoFollowStats getStats() { @@ -180,6 +194,7 @@ void getRemoteClusterState(final String remoteCluster, request.metaData(true); request.routingTable(true); request.waitForMetaDataVersion(metadataVersion); + request.waitForTimeout(waitForMetadataTimeOut); // TODO: set non-compliant status on auto-follow coordination that can be viewed via a stats API ccrLicenseChecker.checkRemoteClusterLicenseAndFetchClusterState( client, @@ -345,7 +360,7 @@ private void autoFollowIndices(final AutoFollowMetadata autoFollowMetadata, Consumer resultHandler = result -> finalise(slot, result); checkAutoFollowPattern(autoFollowPatternName, 
remoteCluster, autoFollowPattern, leaderIndicesToFollow, headers, - patternsForTheSameRemoteCluster, resultHandler); + patternsForTheSameRemoteCluster, remoteClusterState.metaData(), resultHandler); } i++; } @@ -358,6 +373,7 @@ private void checkAutoFollowPattern(String autoFollowPattenName, List leaderIndicesToFollow, Map headers, List> patternsForTheSameRemoteCluster, + MetaData remoteMetadata, Consumer resultHandler) { final CountDown leaderIndicesCountDown = new CountDown(leaderIndicesToFollow.size()); @@ -377,6 +393,25 @@ private void checkAutoFollowPattern(String autoFollowPattenName, resultHandler.accept(new AutoFollowResult(autoFollowPattenName, results.asList())); } } else { + final Settings leaderIndexSettings = remoteMetadata.getIndexSafe(indexToFollow).getSettings(); + if (leaderIndexSettings.getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), + IndexMetaData.SETTING_INDEX_VERSION_CREATED.get(leaderIndexSettings).onOrAfter(Version.V_7_0_0)) == false) { + + String message = String.format(Locale.ROOT, "index [%s] cannot be followed, because soft deletes are not enabled", + indexToFollow.getName()); + LOGGER.warn(message); + updateAutoFollowMetadata(recordLeaderIndexAsFollowFunction(autoFollowPattenName, indexToFollow), error -> { + ElasticsearchException failure = new ElasticsearchException(message); + if (error != null) { + failure.addSuppressed(error); + } + results.set(slot, new Tuple<>(indexToFollow, failure)); + if (leaderIndicesCountDown.countDown()) { + resultHandler.accept(new AutoFollowResult(autoFollowPattenName, results.asList())); + } + }); + continue; + } followLeaderIndex(autoFollowPattenName, remoteCluster, indexToFollow, autoFollowPattern, headers, error -> { results.set(slot, new Tuple<>(indexToFollow, error)); if (leaderIndicesCountDown.countDown()) { @@ -455,12 +490,7 @@ static List getLeaderIndicesToFollow(AutoFollowPattern autoFollowPattern, // has a leader index uuid custom metadata entry that matches with uuid of leaderIndexMetaData variable // If so then handle it differently: not follow it, but just add an entry to // AutoFollowMetadata#followedLeaderIndexUUIDs - final Settings leaderIndexSettings = leaderIndexMetaData.getSettings(); - // soft deletes are enabled by default on indices created on 7.0.0 or later - if (leaderIndexSettings.getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), - IndexMetaData.SETTING_INDEX_VERSION_CREATED.get(leaderIndexSettings).onOrAfter(Version.V_7_0_0))) { - leaderIndicesToFollow.add(leaderIndexMetaData.getIndex()); - } + leaderIndicesToFollow.add(leaderIndexMetaData.getIndex()); } } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrRequests.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrRequests.java new file mode 100644 index 0000000000000..12432c740a701 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrRequests.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.xcontent.XContentType; + +public final class CcrRequests { + + private CcrRequests() {} + + public static ClusterStateRequest metaDataRequest(String leaderIndex) { + ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.clear(); + clusterStateRequest.metaData(true); + clusterStateRequest.indices(leaderIndex); + return clusterStateRequest; + } + + public static PutMappingRequest putMappingRequest(String followerIndex, MappingMetaData mappingMetaData) { + PutMappingRequest putMappingRequest = new PutMappingRequest(followerIndex); + putMappingRequest.type(mappingMetaData.type()); + putMappingRequest.source(mappingMetaData.source().string(), XContentType.JSON); + return putMappingRequest; + } +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index bd22b85684ca4..0fed083bba9ac 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -31,7 +31,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.engine.CommitStats; @@ -123,10 +122,7 @@ protected void innerUpdateMapping(LongConsumer handler, Consumer erro Index leaderIndex = params.getLeaderShardId().getIndex(); Index followIndex = params.getFollowShardId().getIndex(); - ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); - clusterStateRequest.clear(); - clusterStateRequest.metaData(true); - clusterStateRequest.indices(leaderIndex.getName()); + ClusterStateRequest clusterStateRequest = CcrRequests.metaDataRequest(leaderIndex.getName()); remoteClient.admin().cluster().state(clusterStateRequest, ActionListener.wrap(clusterStateResponse -> { IndexMetaData indexMetaData = clusterStateResponse.getState().metaData().getIndexSafe(leaderIndex); @@ -140,9 +136,7 @@ protected void innerUpdateMapping(LongConsumer handler, Consumer erro indexMetaData.getMappings().size() + "]"; MappingMetaData mappingMetaData = indexMetaData.getMappings().iterator().next().value; - PutMappingRequest putMappingRequest = new PutMappingRequest(followIndex.getName()); - putMappingRequest.type(mappingMetaData.type()); - putMappingRequest.source(mappingMetaData.source().string(), XContentType.JSON); + PutMappingRequest putMappingRequest = CcrRequests.putMappingRequest(followIndex.getName(), mappingMetaData); followerClient.admin().indices().putMapping(putMappingRequest, ActionListener.wrap( putMappingResponse -> handler.accept(indexMetaData.getMappingVersion()), errorHandler)); @@ -154,10 +148,7 @@ protected void innerUpdateSettings(final LongConsumer finalHandler, final Consum final Index leaderIndex = params.getLeaderShardId().getIndex(); final Index followIndex = params.getFollowShardId().getIndex(); - final 
ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); - clusterStateRequest.clear(); - clusterStateRequest.metaData(true); - clusterStateRequest.indices(leaderIndex.getName()); + ClusterStateRequest clusterStateRequest = CcrRequests.metaDataRequest(leaderIndex.getName()); CheckedConsumer onResponse = clusterStateResponse -> { final IndexMetaData leaderIMD = clusterStateResponse.getState().metaData().getIndexSafe(leaderIndex); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java index 5d6982d8a3871..4fc3efedd82c6 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java @@ -350,95 +350,95 @@ static String[] extractLeaderShardHistoryUUIDs(Map ccrIndexMetaD * These dynamic settings don't affect how documents are indexed (affect index time text analysis) and / or * are inconvenient if they were replicated (e.g. changing number of replicas). */ - static final Set> WHITE_LISTED_SETTINGS; + static final Set> NON_REPLICATED_SETTINGS; static { - final Set> whiteListedSettings = new HashSet<>(); - whiteListedSettings.add(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING); - whiteListedSettings.add(IndexMetaData.INDEX_AUTO_EXPAND_REPLICAS_SETTING); - whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING); - whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING); - whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING); - whiteListedSettings.add(IndexMetaData.INDEX_READ_ONLY_SETTING); - whiteListedSettings.add(IndexMetaData.INDEX_BLOCKS_READ_SETTING); - whiteListedSettings.add(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING); - whiteListedSettings.add(IndexMetaData.INDEX_BLOCKS_METADATA_SETTING); - whiteListedSettings.add(IndexMetaData.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING); - whiteListedSettings.add(IndexMetaData.INDEX_PRIORITY_SETTING); - whiteListedSettings.add(IndexMetaData.SETTING_WAIT_FOR_ACTIVE_SHARDS); - - whiteListedSettings.add(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING); - whiteListedSettings.add(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING); - whiteListedSettings.add(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING); - whiteListedSettings.add(MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY); - whiteListedSettings.add(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING); - - whiteListedSettings.add(IndexSettings.MAX_RESULT_WINDOW_SETTING); - whiteListedSettings.add(IndexSettings.INDEX_WARMER_ENABLED_SETTING); - whiteListedSettings.add(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING); - whiteListedSettings.add(IndexSettings.MAX_RESCORE_WINDOW_SETTING); - whiteListedSettings.add(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING); - whiteListedSettings.add(IndexSettings.DEFAULT_FIELD_SETTING); - whiteListedSettings.add(IndexSettings.QUERY_STRING_LENIENT_SETTING); - whiteListedSettings.add(IndexSettings.QUERY_STRING_ANALYZE_WILDCARD); - whiteListedSettings.add(IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD); - whiteListedSettings.add(IndexSettings.ALLOW_UNMAPPED); - whiteListedSettings.add(IndexSettings.INDEX_SEARCH_IDLE_AFTER); - whiteListedSettings.add(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING); - 
whiteListedSettings.add(IndexSettings.MAX_SCRIPT_FIELDS_SETTING); - whiteListedSettings.add(IndexSettings.MAX_REGEX_LENGTH_SETTING); - whiteListedSettings.add(IndexSettings.MAX_TERMS_COUNT_SETTING); - whiteListedSettings.add(IndexSettings.MAX_ANALYZED_OFFSET_SETTING); - whiteListedSettings.add(IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING); - whiteListedSettings.add(IndexSettings.MAX_TOKEN_COUNT_SETTING); - whiteListedSettings.add(IndexSettings.MAX_SLICES_PER_SCROLL); - whiteListedSettings.add(IndexSettings.MAX_ADJACENCY_MATRIX_FILTERS_SETTING); - whiteListedSettings.add(IndexSettings.DEFAULT_PIPELINE); - whiteListedSettings.add(IndexSettings.INDEX_SEARCH_THROTTLED); - whiteListedSettings.add(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING); - whiteListedSettings.add(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING); - whiteListedSettings.add(IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING); - whiteListedSettings.add(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING); - whiteListedSettings.add(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING); - whiteListedSettings.add(IndexSettings.INDEX_GC_DELETES_SETTING); - whiteListedSettings.add(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD); - - whiteListedSettings.add(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING); - whiteListedSettings.add(BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING); - - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING); - - whiteListedSettings.add(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING); - whiteListedSettings.add(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING); - whiteListedSettings.add(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING); - whiteListedSettings.add(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING); - whiteListedSettings.add(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING); - whiteListedSettings.add(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING); - whiteListedSettings.add(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING); - 
whiteListedSettings.add(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING); - whiteListedSettings.add(MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING); - - whiteListedSettings.add(MergeSchedulerConfig.AUTO_THROTTLE_SETTING); - whiteListedSettings.add(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING); - whiteListedSettings.add(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING); - whiteListedSettings.add(EngineConfig.INDEX_CODEC_SETTING); - - WHITE_LISTED_SETTINGS = Collections.unmodifiableSet(whiteListedSettings); + final Set> nonReplicatedSettings = new HashSet<>(); + nonReplicatedSettings.add(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING); + nonReplicatedSettings.add(IndexMetaData.INDEX_AUTO_EXPAND_REPLICAS_SETTING); + nonReplicatedSettings.add(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING); + nonReplicatedSettings.add(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING); + nonReplicatedSettings.add(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING); + nonReplicatedSettings.add(IndexMetaData.INDEX_READ_ONLY_SETTING); + nonReplicatedSettings.add(IndexMetaData.INDEX_BLOCKS_READ_SETTING); + nonReplicatedSettings.add(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING); + nonReplicatedSettings.add(IndexMetaData.INDEX_BLOCKS_METADATA_SETTING); + nonReplicatedSettings.add(IndexMetaData.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING); + nonReplicatedSettings.add(IndexMetaData.INDEX_PRIORITY_SETTING); + nonReplicatedSettings.add(IndexMetaData.SETTING_WAIT_FOR_ACTIVE_SHARDS); + + nonReplicatedSettings.add(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING); + nonReplicatedSettings.add(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING); + nonReplicatedSettings.add(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING); + nonReplicatedSettings.add(MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY); + nonReplicatedSettings.add(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING); + + nonReplicatedSettings.add(IndexSettings.MAX_RESULT_WINDOW_SETTING); + nonReplicatedSettings.add(IndexSettings.INDEX_WARMER_ENABLED_SETTING); + nonReplicatedSettings.add(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING); + nonReplicatedSettings.add(IndexSettings.MAX_RESCORE_WINDOW_SETTING); + nonReplicatedSettings.add(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING); + nonReplicatedSettings.add(IndexSettings.DEFAULT_FIELD_SETTING); + nonReplicatedSettings.add(IndexSettings.QUERY_STRING_LENIENT_SETTING); + nonReplicatedSettings.add(IndexSettings.QUERY_STRING_ANALYZE_WILDCARD); + nonReplicatedSettings.add(IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD); + nonReplicatedSettings.add(IndexSettings.ALLOW_UNMAPPED); + nonReplicatedSettings.add(IndexSettings.INDEX_SEARCH_IDLE_AFTER); + nonReplicatedSettings.add(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING); + nonReplicatedSettings.add(IndexSettings.MAX_SCRIPT_FIELDS_SETTING); + nonReplicatedSettings.add(IndexSettings.MAX_REGEX_LENGTH_SETTING); + nonReplicatedSettings.add(IndexSettings.MAX_TERMS_COUNT_SETTING); + nonReplicatedSettings.add(IndexSettings.MAX_ANALYZED_OFFSET_SETTING); + nonReplicatedSettings.add(IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING); + nonReplicatedSettings.add(IndexSettings.MAX_TOKEN_COUNT_SETTING); + nonReplicatedSettings.add(IndexSettings.MAX_SLICES_PER_SCROLL); + nonReplicatedSettings.add(IndexSettings.MAX_ADJACENCY_MATRIX_FILTERS_SETTING); + nonReplicatedSettings.add(IndexSettings.DEFAULT_PIPELINE); + nonReplicatedSettings.add(IndexSettings.INDEX_SEARCH_THROTTLED); + 
nonReplicatedSettings.add(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING); + nonReplicatedSettings.add(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING); + nonReplicatedSettings.add(IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING); + nonReplicatedSettings.add(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING); + nonReplicatedSettings.add(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING); + nonReplicatedSettings.add(IndexSettings.INDEX_GC_DELETES_SETTING); + nonReplicatedSettings.add(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD); + + nonReplicatedSettings.add(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING); + nonReplicatedSettings.add(BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING); + + nonReplicatedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING); + nonReplicatedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING); + nonReplicatedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING); + nonReplicatedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING); + nonReplicatedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING); + nonReplicatedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING); + nonReplicatedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING); + nonReplicatedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING); + nonReplicatedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL); + nonReplicatedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING); + nonReplicatedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING); + nonReplicatedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING); + nonReplicatedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING); + nonReplicatedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING); + nonReplicatedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING); + nonReplicatedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING); + + nonReplicatedSettings.add(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING); + nonReplicatedSettings.add(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING); + nonReplicatedSettings.add(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING); + nonReplicatedSettings.add(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING); + nonReplicatedSettings.add(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING); + nonReplicatedSettings.add(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING); + nonReplicatedSettings.add(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING); + nonReplicatedSettings.add(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING); + nonReplicatedSettings.add(MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING); + + nonReplicatedSettings.add(MergeSchedulerConfig.AUTO_THROTTLE_SETTING); + nonReplicatedSettings.add(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING); + nonReplicatedSettings.add(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING); + nonReplicatedSettings.add(EngineConfig.INDEX_CODEC_SETTING); + + NON_REPLICATED_SETTINGS = Collections.unmodifiableSet(nonReplicatedSettings); } static Settings filter(Settings originalSettings) { @@ -455,7 +455,7 @@ static Settings 
filter(Settings originalSettings) { Iterator iterator = settings.keys().iterator(); while (iterator.hasNext()) { String key = iterator.next(); - for (Setting whitelistedSetting : WHITE_LISTED_SETTINGS) { + for (Setting whitelistedSetting : NON_REPLICATED_SETTINGS) { if (whitelistedSetting.match(key)) { iterator.remove(); break; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionAction.java index 33b8b415d8362..81cde2984f500 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionAction.java @@ -7,24 +7,19 @@ package org.elasticsearch.xpack.ccr.action.repositories; import org.elasticsearch.action.Action; -import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.nodes.BaseNodeResponse; -import org.elasticsearch.action.support.nodes.BaseNodesResponse; -import org.elasticsearch.action.support.nodes.TransportNodesAction; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportActionProxy; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.ccr.repository.CcrRestoreSourceService; -import java.io.IOException; -import java.util.List; - public class ClearCcrRestoreSessionAction extends Action { public static final ClearCcrRestoreSessionAction INSTANCE = new ClearCcrRestoreSessionAction(); @@ -36,86 +31,47 @@ private ClearCcrRestoreSessionAction() { @Override public ClearCcrRestoreSessionResponse newResponse() { - return new ClearCcrRestoreSessionResponse(); + throw new UnsupportedOperationException(); } - public static class TransportDeleteCcrRestoreSessionAction extends TransportNodesAction { + @Override + public Writeable.Reader getResponseReader() { + return ClearCcrRestoreSessionResponse::new; + } + + public static class TransportDeleteCcrRestoreSessionAction + extends HandledTransportAction { private final CcrRestoreSourceService ccrRestoreService; + private final ThreadPool threadPool; @Inject - public TransportDeleteCcrRestoreSessionAction(ThreadPool threadPool, ClusterService clusterService, ActionFilters actionFilters, - TransportService transportService, CcrRestoreSourceService ccrRestoreService) { - super(NAME, threadPool, clusterService, transportService, actionFilters, ClearCcrRestoreSessionRequest::new, - ClearCcrRestoreSessionRequest.Request::new, ThreadPool.Names.GENERIC, Response.class); + public TransportDeleteCcrRestoreSessionAction(ActionFilters actionFilters, TransportService transportService, + CcrRestoreSourceService ccrRestoreService) { + super(NAME, transportService, actionFilters, 
ClearCcrRestoreSessionRequest::new); + TransportActionProxy.registerProxyAction(transportService, NAME, ClearCcrRestoreSessionResponse::new); this.ccrRestoreService = ccrRestoreService; + this.threadPool = transportService.getThreadPool(); } @Override - protected ClearCcrRestoreSessionResponse newResponse(ClearCcrRestoreSessionRequest request, List responses, - List failures) { - return new ClearCcrRestoreSessionResponse(clusterService.getClusterName(), responses, failures); - } - - @Override - protected ClearCcrRestoreSessionRequest.Request newNodeRequest(String nodeId, ClearCcrRestoreSessionRequest request) { - return request.getRequest(); - } - - @Override - protected Response newNodeResponse() { - return new Response(); - } - - @Override - protected Response nodeOperation(ClearCcrRestoreSessionRequest.Request request) { - ccrRestoreService.closeSession(request.getSessionUUID()); - return new Response(clusterService.localNode()); + protected void doExecute(Task task, ClearCcrRestoreSessionRequest request, + ActionListener listener) { + // TODO: Currently blocking actions might occur in the session closed callbacks. This dispatch + // may be unnecessary when we remove these callbacks. + threadPool.generic().execute(() -> { + ccrRestoreService.closeSession(request.getSessionUUID()); + listener.onResponse(new ClearCcrRestoreSessionResponse()); + }); } } - public static class Response extends BaseNodeResponse { - - private Response() { - } - - private Response(StreamInput in) throws IOException { - readFrom(in); - } - - private Response(DiscoveryNode node) { - super(node); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - } - } - - public static class ClearCcrRestoreSessionResponse extends BaseNodesResponse { + public static class ClearCcrRestoreSessionResponse extends ActionResponse { ClearCcrRestoreSessionResponse() { } - ClearCcrRestoreSessionResponse(ClusterName clusterName, List chunkResponses, List failures) { - super(clusterName, chunkResponses, failures); - } - - @Override - protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(Response::new); - } - - @Override - protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + ClearCcrRestoreSessionResponse(StreamInput in) { } } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionRequest.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionRequest.java index 11605970736b0..b9d277ca1b49a 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionRequest.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionRequest.java @@ -6,68 +6,52 @@ package org.elasticsearch.xpack.ccr.action.repositories; -import org.elasticsearch.action.support.nodes.BaseNodeRequest; -import org.elasticsearch.action.support.nodes.BaseNodesRequest; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.transport.RemoteClusterAwareRequest; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import 
java.io.IOException; -public class ClearCcrRestoreSessionRequest extends BaseNodesRequest { +public class ClearCcrRestoreSessionRequest extends ActionRequest implements RemoteClusterAwareRequest { - private Request request; + private DiscoveryNode node; + private String sessionUUID; - ClearCcrRestoreSessionRequest() { + ClearCcrRestoreSessionRequest(StreamInput in) throws IOException { + super.readFrom(in); + sessionUUID = in.readString(); } - public ClearCcrRestoreSessionRequest(String nodeId, Request request) { - super(nodeId); - this.request = request; + public ClearCcrRestoreSessionRequest(String sessionUUID, DiscoveryNode node) { + this.sessionUUID = sessionUUID; + this.node = node; } @Override - public void readFrom(StreamInput streamInput) throws IOException { - super.readFrom(streamInput); - request = new Request(); - request.readFrom(streamInput); + public ActionRequestValidationException validate() { + return null; } @Override - public void writeTo(StreamOutput streamOutput) throws IOException { - super.writeTo(streamOutput); - request.writeTo(streamOutput); + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException(); } - public Request getRequest() { - return request; + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(sessionUUID); } - public static class Request extends BaseNodeRequest { - - private String sessionUUID; - - Request() { - } - - public Request(String nodeId, String sessionUUID) { - super(nodeId); - this.sessionUUID = sessionUUID; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - sessionUUID = in.readString(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(sessionUUID); - } + String getSessionUUID() { + return sessionUUID; + } - public String getSessionUUID() { - return sessionUUID; - } + @Override + public DiscoveryNode getPreferredTargetNode() { + return node; } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java index 7f362aa3b766c..2a1b354f5d8ea 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; @@ -70,7 +71,7 @@ protected PutCcrRestoreSessionResponse shardOperation(PutCcrRestoreSessionReques throw new ShardNotFoundException(shardId); } ccrRestoreService.openSession(request.getSessionUUID(), indexShard); - return new PutCcrRestoreSessionResponse(indexShard.routingEntry().currentNodeId()); + return new PutCcrRestoreSessionResponse(clusterService.localNode()); } @Override @@ -93,34 +94,34 @@ protected ShardsIterator shards(ClusterState state, InternalRequest request) { public static class PutCcrRestoreSessionResponse extends ActionResponse 
{ - private String nodeId; + private DiscoveryNode node; PutCcrRestoreSessionResponse() { } - PutCcrRestoreSessionResponse(String nodeId) { - this.nodeId = nodeId; + PutCcrRestoreSessionResponse(DiscoveryNode node) { + this.node = node; } PutCcrRestoreSessionResponse(StreamInput in) throws IOException { super(in); - nodeId = in.readString(); + node = new DiscoveryNode(in); } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - nodeId = in.readString(); + node = new DiscoveryNode(in); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(nodeId); + node.writeTo(out); } - public String getNodeId() { - return nodeId; + public DiscoveryNode getNode() { + return node; } } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index aeaa7fc5eaf57..934f064d9e0fd 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -8,10 +8,13 @@ import org.apache.lucene.index.IndexCommit; import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -21,6 +24,7 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardRecoveryException; @@ -37,6 +41,7 @@ import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.ccr.CcrLicenseChecker; +import org.elasticsearch.xpack.ccr.action.CcrRequests; import org.elasticsearch.xpack.ccr.action.repositories.ClearCcrRestoreSessionAction; import org.elasticsearch.xpack.ccr.action.repositories.ClearCcrRestoreSessionRequest; import org.elasticsearch.xpack.ccr.action.repositories.PutCcrRestoreSessionAction; @@ -111,15 +116,10 @@ public SnapshotInfo getSnapshotInfo(SnapshotId snapshotId) { public MetaData getSnapshotGlobalMetaData(SnapshotId snapshotId) { assert SNAPSHOT_ID.equals(snapshotId) : "RemoteClusterRepository only supports " + SNAPSHOT_ID + " as the SnapshotId"; Client remoteClient = client.getRemoteClusterClient(remoteClusterAlias); - ClusterStateResponse response = remoteClient - .admin() - .cluster() - .prepareState() - .clear() - .setMetaData(true) - .setIndices("dummy_index_name") // We set a single dummy index name to avoid fetching all the index data - .get(); - return response.getState().metaData(); + // We set a single dummy index name to avoid fetching all the index data + ClusterStateRequest clusterStateRequest = 
CcrRequests.metaDataRequest("dummy_index_name"); + ClusterStateResponse clusterState = remoteClient.admin().cluster().state(clusterStateRequest).actionGet(); + return clusterState.getState().metaData(); } @Override @@ -128,18 +128,12 @@ public IndexMetaData getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId ind String leaderIndex = index.getName(); Client remoteClient = client.getRemoteClusterClient(remoteClusterAlias); - ClusterStateResponse response = remoteClient - .admin() - .cluster() - .prepareState() - .clear() - .setMetaData(true) - .setIndices(leaderIndex) - .get(); + ClusterStateRequest clusterStateRequest = CcrRequests.metaDataRequest(leaderIndex); + ClusterStateResponse clusterState = remoteClient.admin().cluster().state(clusterStateRequest).actionGet(); // Validates whether the leader cluster has been configured properly: PlainActionFuture future = PlainActionFuture.newFuture(); - IndexMetaData leaderIndexMetaData = response.getState().metaData().index(leaderIndex); + IndexMetaData leaderIndexMetaData = clusterState.getState().metaData().index(leaderIndex); ccrLicenseChecker.fetchLeaderHistoryUUIDs(remoteClient, leaderIndexMetaData, future::onFailure, future::onResponse); String[] leaderHistoryUUIDs = future.actionGet(); @@ -252,15 +246,17 @@ public void restoreShard(IndexShard indexShard, SnapshotId snapshotId, Version v Map ccrMetaData = indexShard.indexSettings().getIndexMetaData().getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY); String leaderUUID = ccrMetaData.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY); - ShardId leaderShardId = new ShardId(shardId.getIndexName(), leaderUUID, shardId.getId()); + Index leaderIndex = new Index(shardId.getIndexName(), leaderUUID); + ShardId leaderShardId = new ShardId(leaderIndex, shardId.getId()); Client remoteClient = client.getRemoteClusterClient(remoteClusterAlias); String sessionUUID = UUIDs.randomBase64UUID(); PutCcrRestoreSessionAction.PutCcrRestoreSessionResponse response = remoteClient.execute(PutCcrRestoreSessionAction.INSTANCE, new PutCcrRestoreSessionRequest(sessionUUID, leaderShardId, recoveryMetadata)).actionGet(); - String nodeId = response.getNodeId(); + DiscoveryNode node = response.getNode(); // TODO: Implement file restore - closeSession(remoteClient, nodeId, sessionUUID); + closeSession(remoteClient, node, sessionUUID); + maybeUpdateMappings(client, remoteClient, leaderIndex, indexShard.indexSettings()); } @Override @@ -268,13 +264,23 @@ public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Ve throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); } - private void closeSession(Client remoteClient, String nodeId, String sessionUUID) { - ClearCcrRestoreSessionRequest clearRequest = new ClearCcrRestoreSessionRequest(nodeId, - new ClearCcrRestoreSessionRequest.Request(nodeId, sessionUUID)); + private void maybeUpdateMappings(Client localClient, Client remoteClient, Index leaderIndex, IndexSettings followerIndexSettings) { + ClusterStateRequest clusterStateRequest = CcrRequests.metaDataRequest(leaderIndex.getName()); + ClusterStateResponse clusterState = remoteClient.admin().cluster().state(clusterStateRequest).actionGet(); + IndexMetaData leaderIndexMetadata = clusterState.getState().metaData().getIndexSafe(leaderIndex); + long leaderMappingVersion = leaderIndexMetadata.getMappingVersion(); + + if (leaderMappingVersion > followerIndexSettings.getIndexMetaData().getMappingVersion()) { + Index followerIndex = followerIndexSettings.getIndex(); + MappingMetaData 
mappingMetaData = leaderIndexMetadata.mapping(); + PutMappingRequest putMappingRequest = CcrRequests.putMappingRequest(followerIndex.getName(), mappingMetaData); + localClient.admin().indices().putMapping(putMappingRequest).actionGet(); + } + } + + private void closeSession(Client remoteClient, DiscoveryNode node, String sessionUUID) { + ClearCcrRestoreSessionRequest clearRequest = new ClearCcrRestoreSessionRequest(sessionUUID, node); ClearCcrRestoreSessionAction.ClearCcrRestoreSessionResponse response = remoteClient.execute(ClearCcrRestoreSessionAction.INSTANCE, clearRequest).actionGet(); - if (response.hasFailures()) { - throw response.failures().get(0); - } } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index 1d705934cce35..01e51ea94f255 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -65,6 +65,7 @@ import org.elasticsearch.test.TestCluster; import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.ccr.CcrSettings; import org.elasticsearch.xpack.ccr.LocalStateCcr; import org.elasticsearch.xpack.ccr.index.engine.FollowingEngine; import org.elasticsearch.xpack.core.XPackSettings; @@ -199,6 +200,8 @@ private NodeConfigurationSource createNodeConfigurationSource(String leaderSeedA builder.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false); builder.put(XPackSettings.LOGSTASH_ENABLED.getKey(), false); builder.put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial"); + // Let cluster state api return quickly in order to speed up auto follow tests: + builder.put(CcrSettings.CCR_AUTO_FOLLOW_WAIT_FOR_METADATA_TIMEOUT.getKey(), TimeValue.timeValueMillis(100)); if (leaderSeedAddress != null) { builder.put("cluster.remote.leader_cluster.seeds", leaderSeedAddress); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java index 2b1c8a8ef2edc..2fb1f868dd7fb 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java @@ -15,6 +15,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.ccr.CcrSettings; import org.elasticsearch.xpack.ccr.LocalStateCcr; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; @@ -42,6 +43,8 @@ protected Settings nodeSettings() { builder.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false); builder.put(XPackSettings.LOGSTASH_ENABLED.getKey(), false); builder.put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial"); + // Let cluster state api return quickly in order to speed up auto follow tests: + builder.put(CcrSettings.CCR_AUTO_FOLLOW_WAIT_FOR_METADATA_TIMEOUT.getKey(), TimeValue.timeValueMillis(100)); return builder.build(); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java index f1802315e4760..6c85b2cb4891e 100644 --- 
a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java @@ -296,6 +296,43 @@ public void testConflictingPatterns() throws Exception { assertFalse(followerClient().admin().indices().exists(request).actionGet().isExists()); } + public void testAutoFollowSoftDeletesDisabled() throws Exception { + putAutoFollowPatterns("my-pattern1", new String[] {"logs-*"}); + + // Soft deletes are disabled: + Settings leaderIndexSettings = Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) + .build(); + createLeaderIndex("logs-20200101", leaderIndexSettings); + assertBusy(() -> { + AutoFollowStats autoFollowStats = getAutoFollowStats(); + assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(0L)); + assertThat(autoFollowStats.getNumberOfFailedFollowIndices(), equalTo(1L)); + assertThat(autoFollowStats.getRecentAutoFollowErrors().size(), equalTo(1)); + ElasticsearchException failure = autoFollowStats.getRecentAutoFollowErrors().firstEntry().getValue(); + assertThat(failure.getMessage(), equalTo("index [logs-20200101] cannot be followed, " + + "because soft deletes are not enabled")); + IndicesExistsRequest request = new IndicesExistsRequest("copy-logs-20200101"); + assertFalse(followerClient().admin().indices().exists(request).actionGet().isExists()); + }); + + // Soft deletes are enabled: + leaderIndexSettings = Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) + .build(); + createLeaderIndex("logs-20200102", leaderIndexSettings); + assertBusy(() -> { + AutoFollowStats autoFollowStats = getAutoFollowStats(); + assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(1L)); + IndicesExistsRequest request = new IndicesExistsRequest("copy-logs-20200102"); + assertTrue(followerClient().admin().indices().exists(request).actionGet().isExists()); + }); + } + private void putAutoFollowPatterns(String name, String[] patterns) { PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(); request.setName(name); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java index f711dd4303f2a..2d3ca857ff848 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java @@ -8,6 +8,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; @@ -15,12 +16,14 @@ import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.service.ClusterService; import 
org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.RepositoriesService; @@ -35,6 +38,7 @@ import org.elasticsearch.xpack.ccr.repository.CcrRestoreSourceService; import java.io.IOException; +import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -42,6 +46,7 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.snapshots.RestoreService.restoreInProgress; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; // TODO: Fold this integration test into a more expansive integration test as more bootstrap from remote work // TODO: is completed. @@ -195,6 +200,60 @@ public void testThatSessionIsRegisteredWithPrimaryShard() throws IOException { assertEquals(0, restoreInfo.failedShards()); } + public void testFollowerMappingIsUpdated() throws IOException { + String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster"; + String leaderIndex = "index1"; + String followerIndex = "index2"; + + final int numberOfPrimaryShards = randomIntBetween(1, 3); + final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1), + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON)); + ensureLeaderGreen(leaderIndex); + + final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class); + final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class); + + Settings.Builder settingsBuilder = Settings.builder() + .put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followerIndex) + .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); + RestoreService.RestoreRequest restoreRequest = new RestoreService.RestoreRequest(leaderClusterRepoName, + CcrRepository.LATEST, new String[]{leaderIndex}, indicesOptions, + "^(.*)$", followerIndex, Settings.EMPTY, new TimeValue(1, TimeUnit.HOURS), false, + false, true, settingsBuilder.build(), new String[0], + "restore_snapshot[" + leaderClusterRepoName + ":" + leaderIndex + "]"); + + // TODO: Eventually when the file recovery work is complete, we should test updated mappings by + // indexing to the leader while the recovery is happening. However, in order to test that mappings + // are updated prior to that work, we index documents in the clear session callback. This will + // ensure a mapping change prior to the final mapping check on the follower side.
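The callback trick relies on the ordering inside CcrRepository#restoreShard as changed earlier in this diff: the restore session on the leader is cleared (which fires the close-session listeners registered just below) before the follower's mapping is compared against the leader's. A rough sketch of that tail end of restoreShard, with a hypothetical method name and only the two relevant calls shown (imports, file restore, and error handling omitted):

    // Sketch only: condensed tail of CcrRepository#restoreShard from this change set.
    void finishRestoreSketch(Client localClient, Client remoteClient, Index leaderIndex,
                             IndexShard followerShard, DiscoveryNode leaderNode, String sessionUUID) {
        // Clearing the session runs the leader-side close-session listeners, so a document indexed
        // from such a listener bumps the leader mapping version before the next step reads it.
        closeSession(remoteClient, leaderNode, sessionUUID);
        // Fetch the leader IndexMetaData and push its mapping to the follower only if it is newer.
        maybeUpdateMappings(localClient, remoteClient, leaderIndex, followerShard.indexSettings());
    }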
+ for (CcrRestoreSourceService restoreSourceService : getLeaderCluster().getDataNodeInstances(CcrRestoreSourceService.class)) { + restoreSourceService.addCloseSessionListener(s -> { + final String source = String.format(Locale.ROOT, "{\"k\":%d}", 1); + leaderClient().prepareIndex("index1", "doc", Long.toString(1)).setSource(source, XContentType.JSON).get(); + }); + } + + PlainActionFuture future = PlainActionFuture.newFuture(); + restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future)); + RestoreInfo restoreInfo = future.actionGet(); + + assertEquals(restoreInfo.totalShards(), restoreInfo.successfulShards()); + assertEquals(0, restoreInfo.failedShards()); + + ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.clear(); + clusterStateRequest.metaData(true); + clusterStateRequest.indices(followerIndex); + ClusterStateResponse clusterState = followerClient().admin().cluster().state(clusterStateRequest).actionGet(); + IndexMetaData followerIndexMetadata = clusterState.getState().metaData().index(followerIndex); + assertEquals(2, followerIndexMetadata.getMappingVersion()); + + MappingMetaData mappingMetaData = followerClient().admin().indices().prepareGetMappings("index2").get().getMappings() + .get("index2").get("doc"); + assertThat(XContentMapValues.extractValue("properties.k.type", mappingMetaData.sourceAsMap()), equalTo("long")); + } + private ActionListener waitForRestore(ClusterService clusterService, ActionListener listener) { return new ActionListener() { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java index 7228acaacf1a9..3acdde52a443e 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -19,11 +19,13 @@ import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ccr.CcrLicenseChecker; +import org.elasticsearch.xpack.ccr.CcrSettings; import org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator.AutoFollower; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; @@ -62,7 +64,7 @@ public void testAutoFollower() { Client client = mock(Client.class); when(client.getRemoteClusterClient(anyString())).thenReturn(client); - ClusterState remoteState = createRemoteClusterState("logs-20190101"); + ClusterState remoteState = createRemoteClusterState("logs-20190101", true); AutoFollowPattern autoFollowPattern = new AutoFollowPattern("remote", Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null, null, null, null); @@ -183,7 +185,7 @@ void updateAutoFollowMetadata(Function updateFunctio public void testAutoFollowerUpdateClusterStateFailure() { Client client = mock(Client.class); when(client.getRemoteClusterClient(anyString())).thenReturn(client); - ClusterState remoteState = createRemoteClusterState("logs-20190101"); + 
ClusterState remoteState = createRemoteClusterState("logs-20190101", true); AutoFollowPattern autoFollowPattern = new AutoFollowPattern("remote", Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null, null, null, null); @@ -240,7 +242,7 @@ void updateAutoFollowMetadata(Function updateFunctio public void testAutoFollowerCreateAndFollowApiCallFailure() { Client client = mock(Client.class); when(client.getRemoteClusterClient(anyString())).thenReturn(client); - ClusterState remoteState = createRemoteClusterState("logs-20190101"); + ClusterState remoteState = createRemoteClusterState("logs-20190101", true); AutoFollowPattern autoFollowPattern = new AutoFollowPattern("remote", Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null, null, null, null); @@ -315,8 +317,7 @@ public void testGetLeaderIndicesToFollow() { String indexName = "metrics-" + i; Settings.Builder builder = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_INDEX_UUID, indexName) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), i % 2 == 0); + .put(IndexMetaData.SETTING_INDEX_UUID, indexName); imdBuilder.put(IndexMetaData.builder("metrics-" + i) .settings(builder) .numberOfShards(1) @@ -347,17 +348,21 @@ public void testGetLeaderIndicesToFollow() { List result = AutoFollower.getLeaderIndicesToFollow(autoFollowPattern, remoteState, clusterState, Collections.emptyList()); result.sort(Comparator.comparing(Index::getName)); - assertThat(result.size(), equalTo(3)); + assertThat(result.size(), equalTo(5)); assertThat(result.get(0).getName(), equalTo("metrics-0")); - assertThat(result.get(1).getName(), equalTo("metrics-2")); - assertThat(result.get(2).getName(), equalTo("metrics-4")); + assertThat(result.get(1).getName(), equalTo("metrics-1")); + assertThat(result.get(2).getName(), equalTo("metrics-2")); + assertThat(result.get(3).getName(), equalTo("metrics-3")); + assertThat(result.get(4).getName(), equalTo("metrics-4")); List followedIndexUUIDs = Collections.singletonList(remoteState.metaData().index("metrics-2").getIndexUUID()); result = AutoFollower.getLeaderIndicesToFollow(autoFollowPattern, remoteState, clusterState, followedIndexUUIDs); result.sort(Comparator.comparing(Index::getName)); - assertThat(result.size(), equalTo(2)); + assertThat(result.size(), equalTo(4)); assertThat(result.get(0).getName(), equalTo("metrics-0")); - assertThat(result.get(1).getName(), equalTo("metrics-4")); + assertThat(result.get(1).getName(), equalTo("metrics-1")); + assertThat(result.get(2).getName(), equalTo("metrics-3")); + assertThat(result.get(3).getName(), equalTo("metrics-4")); } public void testGetLeaderIndicesToFollow_shardsNotStarted() { @@ -370,7 +375,7 @@ public void testGetLeaderIndicesToFollow_shardsNotStarted() { .build(); // 1 shard started and another not started: - ClusterState remoteState = createRemoteClusterState("index1"); + ClusterState remoteState = createRemoteClusterState("index1", true); MetaData.Builder mBuilder= MetaData.builder(remoteState.metaData()); mBuilder.put(IndexMetaData.builder("index2") .settings(settings(Version.CURRENT).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)) @@ -530,8 +535,9 @@ public void testGetFollowerIndexName() { public void testStats() { AutoFollowCoordinator autoFollowCoordinator = new AutoFollowCoordinator( + Settings.EMPTY, null, - mock(ClusterService.class), + mockClusterService(), new CcrLicenseChecker(() -> true, () -> false), () -> 1L); @@ -586,7 
+592,7 @@ public void testStats() { } public void testUpdateAutoFollowers() { - ClusterService clusterService = mock(ClusterService.class); + ClusterService clusterService = mockClusterService(); // Return a cluster state with no patterns so that the auto followers never really execute: ClusterState followerState = ClusterState.builder(new ClusterName("remote")) .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE, @@ -594,6 +600,7 @@ public void testUpdateAutoFollowers() { .build(); when(clusterService.state()).thenReturn(followerState); AutoFollowCoordinator autoFollowCoordinator = new AutoFollowCoordinator( + Settings.EMPTY, null, clusterService, new CcrLicenseChecker(() -> true, () -> false), @@ -648,8 +655,9 @@ public void testUpdateAutoFollowers() { public void testUpdateAutoFollowersNoPatterns() { AutoFollowCoordinator autoFollowCoordinator = new AutoFollowCoordinator( + Settings.EMPTY, null, - mock(ClusterService.class), + mockClusterService(), new CcrLicenseChecker(() -> true, () -> false), () -> 1L); ClusterState clusterState = ClusterState.builder(new ClusterName("remote")) @@ -662,8 +670,9 @@ public void testUpdateAutoFollowersNoPatterns() { public void testUpdateAutoFollowersNoAutoFollowMetadata() { AutoFollowCoordinator autoFollowCoordinator = new AutoFollowCoordinator( + Settings.EMPTY, null, - mock(ClusterService.class), + mockClusterService(), new CcrLicenseChecker(() -> true, () -> false), () -> 1L); ClusterState clusterState = ClusterState.builder(new ClusterName("remote")).build(); @@ -692,7 +701,8 @@ public void testWaitForMetadataVersion() { .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata)) .build(); String indexName = "logs-" + i; - leaderStates.add(i == 0 ? createRemoteClusterState(indexName) : createRemoteClusterState(leaderStates.get(i - 1), indexName)); + leaderStates.add(i == 0 ? createRemoteClusterState(indexName, true) : + createRemoteClusterState(leaderStates.get(i - 1), indexName)); } List allResults = new ArrayList<>(); @@ -787,9 +797,83 @@ void updateAutoFollowMetadata(Function updateFunctio assertThat(counter.get(), equalTo(states.length)); } - private static ClusterState createRemoteClusterState(String indexName) { + public void testAutoFollowerSoftDeletesDisabled() { + Client client = mock(Client.class); + when(client.getRemoteClusterClient(anyString())).thenReturn(client); + + ClusterState remoteState = randomBoolean() ? 
createRemoteClusterState("logs-20190101", false) : + createRemoteClusterState("logs-20190101", null); + + AutoFollowPattern autoFollowPattern = new AutoFollowPattern("remote", Collections.singletonList("logs-*"), + null, null, null, null, null, null, null, null, null, null, null); + Map patterns = new HashMap<>(); + patterns.put("remote", autoFollowPattern); + Map> followedLeaderIndexUUIDS = new HashMap<>(); + followedLeaderIndexUUIDS.put("remote", new ArrayList<>()); + Map> autoFollowHeaders = new HashMap<>(); + autoFollowHeaders.put("remote", Collections.singletonMap("key", "val")); + AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(patterns, followedLeaderIndexUUIDS, autoFollowHeaders); + + ClusterState currentState = ClusterState.builder(new ClusterName("name")) + .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata)) + .build(); + + List results = new ArrayList<>(); + Consumer> handler = results::addAll; + AutoFollower autoFollower = new AutoFollower("remote", handler, localClusterStateSupplier(currentState), () -> 1L) { + @Override + void getRemoteClusterState(String remoteCluster, + long metadataVersion, + BiConsumer handler) { + assertThat(remoteCluster, equalTo("remote")); + handler.accept(new ClusterStateResponse(new ClusterName("name"), remoteState, 1L, false), null); + } + + @Override + void createAndFollow(Map headers, + PutFollowAction.Request followRequest, + Runnable successHandler, + Consumer failureHandler) { + fail("soft deletes are disabled; index should not be followed"); + } + + @Override + void updateAutoFollowMetadata(Function updateFunction, + Consumer handler) { + ClusterState resultCs = updateFunction.apply(currentState); + AutoFollowMetadata result = resultCs.metaData().custom(AutoFollowMetadata.TYPE); + assertThat(result.getFollowedLeaderIndexUUIDs().size(), equalTo(1)); + assertThat(result.getFollowedLeaderIndexUUIDs().get("remote").size(), equalTo(1)); + handler.accept(null); + } + + @Override + void cleanFollowedRemoteIndices(ClusterState remoteClusterState, List patterns) { + // Ignore, to avoid invoking updateAutoFollowMetadata(...) twice + } + }; + autoFollower.start(); + + assertThat(results.size(), equalTo(1)); + assertThat(results.get(0).clusterStateFetchException, nullValue()); + List> entries = new ArrayList<>(results.get(0).autoFollowExecutionResults.entrySet()); + assertThat(entries.size(), equalTo(1)); + assertThat(entries.get(0).getKey().getName(), equalTo("logs-20190101")); + assertThat(entries.get(0).getValue(), notNullValue()); + assertThat(entries.get(0).getValue().getMessage(), equalTo("index [logs-20190101] cannot be followed, " + + "because soft deletes are not enabled")); + } + + private static ClusterState createRemoteClusterState(String indexName, Boolean enableSoftDeletes) { + Settings.Builder indexSettings; + if (enableSoftDeletes != null) { + indexSettings = settings(Version.CURRENT).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), enableSoftDeletes); + } else { + indexSettings = settings(Version.V_6_6_0); + } + IndexMetaData indexMetaData = IndexMetaData.builder(indexName) - .settings(settings(Version.CURRENT).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)) + .settings(indexSettings) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -840,4 +924,12 @@ private static Supplier localClusterStateSupplier(ClusterState... 
}; } + private ClusterService mockClusterService() { + ClusterService clusterService = mock(ClusterService.class); + ClusterSettings clusterSettings = + new ClusterSettings(Settings.EMPTY, Collections.singleton(CcrSettings.CCR_AUTO_FOLLOW_WAIT_FOR_METADATA_TIMEOUT)); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + return clusterService; + } + } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java index fd4b51bec5749..85e3a2fb874ea 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java @@ -234,7 +234,7 @@ public void testDynamicIndexSettingsAreClassified() { for (Setting setting : IndexScopedSettings.BUILT_IN_INDEX_SETTINGS) { if (setting.isDynamic()) { - boolean notReplicated = TransportResumeFollowAction.WHITE_LISTED_SETTINGS.contains(setting); + boolean notReplicated = TransportResumeFollowAction.NON_REPLICATED_SETTINGS.contains(setting); boolean replicated = replicatedSettings.contains(setting); assertThat("setting [" + setting.getKey() + "] is not classified as replicated xor not replicated", notReplicated ^ replicated, is(true)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java index e7460d5a2eb38..b50d38c73c827 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java @@ -7,6 +7,7 @@ package org.elasticsearch.license; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.client.Client; @@ -159,6 +160,10 @@ public void checkRemoteClusterLicenses(final List clusterAliases, final @Override public void onResponse(final XPackInfoResponse xPackInfoResponse) { final XPackInfoResponse.LicenseInfo licenseInfo = xPackInfoResponse.getLicenseInfo(); + if (licenseInfo == null) { + listener.onFailure(new ResourceNotFoundException("license info is missing for cluster [" + clusterAlias.get() + "]")); + return; + } if ((licenseInfo.getStatus() == LicenseStatus.ACTIVE) == false || predicate.test(License.OperationMode.resolve(licenseInfo.getMode())) == false) { listener.onResponse(LicenseCheck.failure(new RemoteClusterLicenseInfo(clusterAlias.get(), licenseInfo))); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForRolloverReadyStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForRolloverReadyStep.java index 03bf905290ffd..46504dd305d7d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForRolloverReadyStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForRolloverReadyStep.java @@ -53,18 +53,30 @@ public void evaluateCondition(IndexMetaData indexMetaData, Listener listener) { return; } - if (indexMetaData.getAliases().containsKey(rolloverAlias) == false) { - listener.onFailure(new 
IllegalArgumentException(String.format(Locale.ROOT, - "%s [%s] does not point to index [%s]", RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, rolloverAlias, - indexMetaData.getIndex().getName()))); - return; + // The order of the following checks is important in ways which may not be obvious. + + // First, figure out if 1) The configured alias points to this index, and if so, + // whether this index is the write alias for this index + boolean aliasPointsToThisIndex = indexMetaData.getAliases().containsKey(rolloverAlias); + + Boolean isWriteIndex = null; + if (aliasPointsToThisIndex) { + // The writeIndex() call returns a tri-state boolean: + // true -> this index is the write index for this alias + // false -> this index is not the write index for this alias + // null -> this alias is a "classic-style" alias and does not have a write index configured, but only points to one index + // and is thus the write index by default + isWriteIndex = indexMetaData.getAliases().get(rolloverAlias).writeIndex(); } boolean indexingComplete = LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE_SETTING.get(indexMetaData.getSettings()); if (indexingComplete) { logger.trace(indexMetaData.getIndex() + " has lifecycle complete set, skipping " + WaitForRolloverReadyStep.NAME); - Boolean isWriteIndex = indexMetaData.getAliases().get(rolloverAlias).writeIndex(); - if (Boolean.TRUE.equals(isWriteIndex)) { + // If this index is still the write index for this alias, skipping rollover and continuing with the policy almost certainly + // isn't what we want, as something likely still expects to be writing to this index. + // If the alias doesn't point to this index, that's okay as that will be the result if this index is using a + // "classic-style" alias and has already rolled over, and we want to continue with the policy. + if (aliasPointsToThisIndex && Boolean.TRUE.equals(isWriteIndex)) { listener.onFailure(new IllegalStateException(String.format(Locale.ROOT, "index [%s] has [%s] set to [true], but is still the write index for alias [%s]", indexMetaData.getIndex().getName(), LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, rolloverAlias))); @@ -75,6 +87,20 @@ public void evaluateCondition(IndexMetaData indexMetaData, Listener listener) { return; } + // If indexing_complete is *not* set, and the alias does not point to this index, we can't roll over this index, so error out. + if (aliasPointsToThisIndex == false) { + listener.onFailure(new IllegalArgumentException(String.format(Locale.ROOT, + "%s [%s] does not point to index [%s]", RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, rolloverAlias, + indexMetaData.getIndex().getName()))); + return; + } + + // Similarly, if isWriteIndex is false (see note above on false vs. null), we can't roll over this index, so error out. 
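Because readiness now depends on three inputs (the indexing_complete setting, whether the rollover alias points at this index, and the tri-state writeIndex flag), it may help to see the whole decision in one place. The following side-effect-free restatement is only an illustration, with a hypothetical helper and enum that are not part of this change; the FALSE branch reflects the intended error-out described in the comment above:

    enum RolloverReadiness {
        SKIP_AND_CONTINUE_POLICY, STILL_WRITE_INDEX_ERROR, ALIAS_MISSING_ERROR,
        NOT_WRITE_INDEX_ERROR, ATTEMPT_DRY_RUN_ROLLOVER
    }

    static RolloverReadiness classify(boolean indexingComplete, boolean aliasPointsToThisIndex, Boolean isWriteIndex) {
        if (indexingComplete) {
            return aliasPointsToThisIndex && Boolean.TRUE.equals(isWriteIndex)
                ? RolloverReadiness.STILL_WRITE_INDEX_ERROR   // indexing_complete set, but still the write index
                : RolloverReadiness.SKIP_AND_CONTINUE_POLICY; // already rolled over, skip this step
        }
        if (aliasPointsToThisIndex == false) {
            return RolloverReadiness.ALIAS_MISSING_ERROR;     // the rollover alias does not point to this index
        }
        if (Boolean.FALSE.equals(isWriteIndex)) {
            return RolloverReadiness.NOT_WRITE_INDEX_ERROR;   // another index is the write index for this alias
        }
        return RolloverReadiness.ATTEMPT_DRY_RUN_ROLLOVER;    // TRUE, or null for a classic single-index alias
    }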
+ if (Boolean.FALSE.equals(isWriteIndex)) { + listener.onFailure(new IllegalArgumentException(String.format(Locale.ROOT, + "index [%s] is not the write index for alias [%s]", rolloverAlias, indexMetaData.getIndex().getName()))); + } + RolloverRequest rolloverRequest = new RolloverRequest(rolloverAlias, null); rolloverRequest.dryRun(true); if (maxAge != null) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java index 300b55a5e6394..418ae16d3e632 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java @@ -39,7 +39,6 @@ public class OpenJobAction extends Action { public static final OpenJobAction INSTANCE = new OpenJobAction(); public static final String NAME = "cluster:admin/xpack/ml/job/open"; - private OpenJobAction() { super(NAME); } @@ -132,15 +131,12 @@ public String toString() { public static class JobParams implements XPackPlugin.XPackPersistentTaskParams { - /** TODO Remove in 7.0.0 */ - public static final ParseField IGNORE_DOWNTIME = new ParseField("ignore_downtime"); public static final ParseField TIMEOUT = new ParseField("timeout"); public static final ParseField JOB = new ParseField("job"); public static ObjectParser PARSER = new ObjectParser<>(MlTasks.JOB_TASK_NAME, true, JobParams::new); static { PARSER.declareString(JobParams::setJobId, Job.ID); - PARSER.declareBoolean((p, v) -> {}, IGNORE_DOWNTIME); PARSER.declareString((params, val) -> params.setTimeout(TimeValue.parseTimeValue(val, TIMEOUT.getPreferredName())), TIMEOUT); PARSER.declareObject(JobParams::setJob, (p, c) -> Job.LENIENT_PARSER.apply(p, c).build(), JOB); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index d8a841e9ee979..02f76c54a2739 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -130,7 +130,6 @@ public class DatafeedConfig extends AbstractDiffable implements public static final ParseField AGGREGATIONS = new ParseField("aggregations"); public static final ParseField AGGS = new ParseField("aggs"); public static final ParseField SCRIPT_FIELDS = new ParseField("script_fields"); - public static final ParseField SOURCE = new ParseField("_source"); public static final ParseField CHUNKING_CONFIG = new ParseField("chunking_config"); public static final ParseField HEADERS = new ParseField("headers"); public static final ParseField DELAYED_DATA_CHECK_CONFIG = new ParseField("delayed_data_check_config"); @@ -185,9 +184,6 @@ private static ObjectParser createParser(boolean ignoreUnknownFie return parsedScriptFields; }, SCRIPT_FIELDS); parser.declareInt(Builder::setScrollSize, SCROLL_SIZE); - // TODO this is to read former _source field. Remove in v7.0.0 - parser.declareBoolean((builder, value) -> { - }, SOURCE); parser.declareObject(Builder::setChunkingConfig, ignoreUnknownFields ? 
ChunkingConfig.LENIENT_PARSER : ChunkingConfig.STRICT_PARSER, CHUNKING_CONFIG); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java index 27e9dcbe86c47..7d462bd153371 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java @@ -136,30 +136,7 @@ public AnalysisConfig(StreamInput in) throws IOException { detectors = Collections.unmodifiableList(in.readList(Detector::new)); influencers = Collections.unmodifiableList(in.readList(StreamInput::readString)); - // BWC for result_finalization_window and overlapping_buckets - // TODO Remove in 7.0.0 - if (in.getVersion().before(Version.V_6_6_0)) { - in.readOptionalBoolean(); - in.readOptionalLong(); - } multivariateByFields = in.readOptionalBoolean(); - - // BWC for removed multiple_bucket_spans - // TODO Remove in 7.0.0 - if (in.getVersion().before(Version.V_6_5_0)) { - if (in.readBoolean()) { - final int arraySize = in.readVInt(); - for (int i = 0; i < arraySize; i++) { - in.readTimeValue(); - } - } - } - - // BWC for removed per-partition normalization - // TODO Remove in 7.0.0 - if (in.getVersion().before(Version.V_6_5_0)) { - in.readBoolean(); - } } @Override @@ -180,25 +157,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeList(detectors); out.writeStringList(influencers); - // BWC for result_finalization_window and overlapping_buckets - // TODO Remove in 7.0.0 - if (out.getVersion().before(Version.V_6_6_0)) { - out.writeOptionalBoolean(null); - out.writeOptionalLong(null); - } out.writeOptionalBoolean(multivariateByFields); - - // BWC for removed multiple_bucket_spans - // TODO Remove in 7.0.0 - if (out.getVersion().before(Version.V_6_5_0)) { - out.writeBoolean(false); - } - - // BWC for removed per-partition normalization - // TODO Remove in 7.0.0 - if (out.getVersion().before(Version.V_6_5_0)) { - out.writeBoolean(false); - } } /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java index 1a7641ef64b88..79ccaeaae1a9e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java @@ -619,12 +619,19 @@ private static Map getRealmsSSLSettings(Settings settings) { final Map sslSettings = new HashMap<>(); final String prefix = "xpack.security.authc.realms."; final Map settingsByRealmType = settings.getGroups(prefix); - settingsByRealmType.forEach((realmType, typeSettings) -> - typeSettings.getAsGroups().forEach((realmName, realmSettings) -> { - Settings realmSSLSettings = realmSettings.getByPrefix("ssl."); - // Put this even if empty, so that the name will be mapped to the global SSL configuration - sslSettings.put(prefix + realmType + "." 
+ realmName + ".ssl", realmSSLSettings); - }) + settingsByRealmType.forEach((realmType, typeSettings) -> { + final Optional nonDottedSetting = typeSettings.keySet().stream().filter(k -> k.indexOf('.') == -1).findAny(); + if (nonDottedSetting.isPresent()) { + logger.warn("Skipping any SSL configuration from realm [{}{}] because the key [{}] is not in the correct format", + prefix, realmType, nonDottedSetting.get()); + } else { + typeSettings.getAsGroups().forEach((realmName, realmSettings) -> { + Settings realmSSLSettings = realmSettings.getByPrefix("ssl."); + // Put this even if empty, so that the name will be mapped to the global SSL configuration + sslSettings.put(prefix + realmType + "." + realmName + ".ssl", realmSSLSettings); + }); + } + } ); return sslSettings; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java index 58ca42c7f681e..24a6960da4833 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.license; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.common.settings.Settings; @@ -349,6 +350,41 @@ public void testBuildErrorMessageForInactiveLicense() { equalTo("the license on cluster [expired-cluster] is not active")); } + public void testCheckRemoteClusterLicencesNoLicenseMetadata() { + final ThreadPool threadPool = createMockThreadPool(); + final Client client = createMockClient(threadPool); + doAnswer(invocationMock -> { + @SuppressWarnings("unchecked") ActionListener listener = + (ActionListener) invocationMock.getArguments()[2]; + listener.onResponse(new XPackInfoResponse(null, null, null)); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, XPackLicenseState::isPlatinumOrTrialOperationMode); + final AtomicReference exception = new AtomicReference<>(); + + licenseChecker.checkRemoteClusterLicenses( + Collections.singletonList("remote"), + doubleInvocationProtectingListener(new ActionListener() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + fail(); + } + + @Override + public void onFailure(final Exception e) { + exception.set(e); + } + + })); + + assertNotNull(exception.get()); + assertThat(exception.get(), instanceOf(ResourceNotFoundException.class)); + assertThat(exception.get().getMessage(), equalTo("license info is missing for cluster [remote]")); + } + private ActionListener doubleInvocationProtectingListener( final ActionListener listener) { final AtomicBoolean listenerInvoked = new AtomicBoolean(); diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java index cb14f78461dec..58300f36c2eb2 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java +++ 
b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java @@ -46,6 +46,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import static java.util.Collections.singletonMap; @@ -99,7 +100,7 @@ public void testFullPolicy() throws Exception { // asserts that rollover was called assertBusy(() -> assertTrue(indexExists(secondIndex))); // asserts that shrink deleted the original index - assertBusy(() -> assertFalse(indexExists(originalIndex))); + assertBusy(() -> assertFalse(indexExists(originalIndex)), 20, TimeUnit.SECONDS); // asserts that the delete phase completed for the managed shrunken index assertBusy(() -> assertFalse(indexExists(shrunkenOriginalIndex))); } @@ -177,7 +178,7 @@ public void testMoveToRolloverStep() throws Exception { // asserts that rollover was called assertBusy(() -> assertTrue(indexExists(secondIndex))); // asserts that shrink deleted the original index - assertBusy(() -> assertFalse(indexExists(originalIndex))); + assertBusy(() -> assertFalse(indexExists(originalIndex)), 20, TimeUnit.SECONDS); // asserts that the delete phase completed for the managed shrunken index assertBusy(() -> assertFalse(indexExists(shrunkenOriginalIndex))); } @@ -531,6 +532,58 @@ public void testDeletePolicyInUse() throws IOException { not(containsString(managedByOtherPolicyIndex)))); } + public void testRemoveAndReaddPolicy() throws Exception { + String originalIndex = index + "-000001"; + String secondIndex = index + "-000002"; + // Set up a policy with rollover + createNewSingletonPolicy("hot", new RolloverAction(null, null, 1L)); + createIndexWithSettings( + originalIndex, + Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(LifecycleSettings.LIFECYCLE_NAME, policy) + .put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, "alias")); + + // Index a document + index(client(), originalIndex, "_id", "foo", "bar"); + + // Wait for rollover to happen + assertBusy(() -> assertTrue(indexExists(secondIndex))); + + // Remove the policy from the original index + Request removeRequest = new Request("POST", "/" + originalIndex + "/_ilm/remove"); + removeRequest.setJsonEntity(""); + client().performRequest(removeRequest); + + // Add the policy again + Request addPolicyRequest = new Request("PUT", "/" + originalIndex + "/_settings"); + addPolicyRequest.setJsonEntity("{\n" + + " \"settings\": {\n" + + " \"index.lifecycle.name\": \"" + policy + "\",\n" + + " \"index.lifecycle.rollover_alias\": \"alias\"\n" + + " }\n" + + "}"); + client().performRequest(addPolicyRequest); + assertBusy(() -> assertTrue((boolean) explainIndex(originalIndex).getOrDefault("managed", false))); + + // Wait for rollover to error + assertBusy(() -> assertThat(getStepKeyForIndex(originalIndex), equalTo(new StepKey("hot", RolloverAction.NAME, ErrorStep.NAME)))); + + // Set indexing complete + Request setIndexingCompleteRequest = new Request("PUT", "/" + originalIndex + "/_settings"); + setIndexingCompleteRequest.setJsonEntity("{\n" + + " \"index.lifecycle.indexing_complete\": true\n" + + "}"); + client().performRequest(setIndexingCompleteRequest); + + // Retry policy + Request retryRequest = new Request("POST", "/" + originalIndex + "/_ilm/retry"); + client().performRequest(retryRequest); + + // Wait for everything to be copacetic + assertBusy(() -> assertThat(getStepKeyForIndex(originalIndex), 
equalTo(TerminalPolicyStep.KEY))); + } + private void createFullPolicy(TimeValue hotTime) throws IOException { Map warmActions = new HashMap<>(); warmActions.put(ForceMergeAction.NAME, new ForceMergeAction(1)); @@ -580,10 +633,18 @@ private void createIndexWithSettingsNoAlias(String index, Settings.Builder setti } private void createIndexWithSettings(String index, Settings.Builder settings) throws IOException { + createIndexWithSettings(index, settings, randomBoolean()); + } + + private void createIndexWithSettings(String index, Settings.Builder settings, boolean useWriteIndex) throws IOException { Request request = new Request("PUT", "/" + index); + String writeIndexSnippet = ""; + if (useWriteIndex) { + writeIndexSnippet = "\"is_write_index\": true"; + } request.setJsonEntity("{\n \"settings\": " + Strings.toString(settings.build()) - + ", \"aliases\" : { \"alias\": { \"is_write_index\": true } } }"); + + ", \"aliases\" : { \"alias\": { " + writeIndexSnippet + " } } }"); client().performRequest(request); // wait for the shards to initialize ensureGreen(index); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java index ef22d2c84010f..7ba7bcafe55f8 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java @@ -500,6 +500,7 @@ private static IndexMetaData.Builder removePolicyForIndex(IndexMetaData indexMet boolean notChanged = true; notChanged &= Strings.isNullOrEmpty(newSettings.remove(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey())); + notChanged &= Strings.isNullOrEmpty(newSettings.remove(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE_SETTING.getKey())); notChanged &= Strings.isNullOrEmpty(newSettings.remove(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS_SETTING.getKey())); long newSettingsVersion = notChanged ? 
indexMetadata.getSettingsVersion() : 1 + indexMetadata.getSettingsVersion(); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java index 63ee9349043a8..7693a752b28a1 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java @@ -1134,6 +1134,32 @@ public void testRemovePolicyForIndexIndexInUnsafe() { assertIndexNotManagedByILM(newClusterState, index); } + public void testRemovePolicyWithIndexingComplete() { + String indexName = randomAlphaOfLength(10); + String oldPolicyName = "old_policy"; + StepKey currentStep = new StepKey(randomAlphaOfLength(10), MockAction.NAME, randomAlphaOfLength(10)); + LifecyclePolicy oldPolicy = createPolicy(oldPolicyName, null, currentStep); + Settings.Builder indexSettingsBuilder = Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, oldPolicyName) + .put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, true); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStep.getPhase()); + lifecycleState.setAction(currentStep.getAction()); + lifecycleState.setStep(currentStep.getName()); + List policyMetadatas = new ArrayList<>(); + policyMetadatas.add(new LifecyclePolicyMetadata(oldPolicy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas); + Index index = clusterState.metaData().index(indexName).getIndex(); + Index[] indices = new Index[] { index }; + List failedIndexes = new ArrayList<>(); + + ClusterState newClusterState = IndexLifecycleRunner.removePolicyForIndexes(indices, clusterState, failedIndexes); + + assertTrue(failedIndexes.isEmpty()); + assertIndexNotManagedByILM(newClusterState, index); + } + public void testIsReadyToTransition() { String policyName = "async_action_policy"; StepKey stepKey = new StepKey("phase", MockAction.NAME, MockAction.NAME); @@ -1186,6 +1212,7 @@ public static void assertIndexNotManagedByILM(ClusterState clusterState, Index i assertNotNull(indexSettings); assertFalse(LifecycleSettings.LIFECYCLE_NAME_SETTING.exists(indexSettings)); assertFalse(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS_SETTING.exists(indexSettings)); + assertFalse(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE_SETTING.exists(indexSettings)); } public static void assertClusterStateOnPolicy(ClusterState oldClusterState, Index index, String expectedPolicy, StepKey previousStep, diff --git a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java index e4e2972413e33..cf70c16cdb0b9 100644 --- a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java +++ b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java @@ -29,7 +29,7 @@ public class Logstash extends Plugin implements ActionPlugin { private static final String LOGSTASH_TEMPLATE_FILE_NAME = "logstash-management"; - private static final String LOGSTASH_INDEX_NAME = ".logstash-management"; + private static final String LOGSTASH_INDEX_TEMPLATE_NAME = ".logstash-management"; private static final String 
OLD_LOGSTASH_INDEX_NAME = "logstash-index-template"; private static final String TEMPLATE_VERSION_PATTERN = Pattern.quote("${logstash.template.version}"); @@ -61,7 +61,7 @@ public Collection createGuiceModules() { public UnaryOperator> getIndexTemplateMetaDataUpgrader() { return templates -> { templates.keySet().removeIf(OLD_LOGSTASH_INDEX_NAME::equals); - TemplateUtils.loadTemplateIntoMap("/" + LOGSTASH_TEMPLATE_FILE_NAME + ".json", templates, LOGSTASH_INDEX_NAME, + TemplateUtils.loadTemplateIntoMap("/" + LOGSTASH_TEMPLATE_FILE_NAME + ".json", templates, LOGSTASH_INDEX_TEMPLATE_NAME, Version.CURRENT.toString(), TEMPLATE_VERSION_PATTERN, LogManager.getLogger(Logstash.class)); return templates; }; diff --git a/x-pack/plugin/ml/qa/basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java b/x-pack/plugin/ml/qa/basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java index 38cf3be2ec163..4d3ec8e2b2470 100644 --- a/x-pack/plugin/ml/qa/basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java +++ b/x-pack/plugin/ml/qa/basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java @@ -266,7 +266,6 @@ private Response createDatafeed(String datafeedId, String jobId) throws Exceptio xContentBuilder.field("job_id", jobId); xContentBuilder.array("indexes", "airline-data"); xContentBuilder.array("types", "_doc"); - xContentBuilder.field("_source", true); xContentBuilder.endObject(); Request request = new Request("PUT", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId); request.setJsonEntity(Strings.toString(xContentBuilder)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 5310a92fc2063..7cb74c4df5eda 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.service.ClusterService; @@ -259,7 +260,8 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu public static final Setting ML_ENABLED = Setting.boolSetting("node.ml", XPackSettings.MACHINE_LEARNING_ENABLED, Property.NodeScope); - public static final String ML_ENABLED_NODE_ATTR = "ml.enabled"; + // This is not used in v7 and higher, but users are still prevented from setting it directly to avoid confusion + private static final String PRE_V7_ML_ENABLED_NODE_ATTR = "ml.enabled"; public static final String MAX_OPEN_JOBS_NODE_ATTR = "ml.max_open_jobs"; public static final String MACHINE_MEMORY_NODE_ATTR = "ml.machine_memory"; public static final Setting CONCURRENT_JOB_ALLOCATIONS = @@ -289,6 +291,14 @@ public MachineLearning(Settings settings, Path configPath) { protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } + public static boolean isMlNode(DiscoveryNode node) { + Map nodeAttributes = node.getAttributes(); + try { + return Integer.parseInt(nodeAttributes.get(MAX_OPEN_JOBS_NODE_ATTR)) > 0; 
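+        // ml.max_open_jobs is only added as a node attribute when ML is enabled on the node (see
+        // additionalSettings() below), so a positive, parseable value is what identifies an ML node;
+        // a missing or non-numeric value falls through to the catch below and the node is not treated
+        // as an ML node.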
+ } catch (NumberFormatException e) { + return false; + } + } public List> getSettings() { return Collections.unmodifiableList( @@ -299,16 +309,14 @@ public List> getSettings() { MAX_LAZY_ML_NODES, MAX_MACHINE_MEMORY_PERCENT, AutodetectBuilder.DONT_PERSIST_MODEL_STATE_SETTING, - AutodetectBuilder.MAX_ANOMALY_RECORDS_SETTING, AutodetectBuilder.MAX_ANOMALY_RECORDS_SETTING_DYNAMIC, - AutodetectProcessManager.MAX_RUNNING_JOBS_PER_NODE, AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE, AutodetectProcessManager.MIN_DISK_SPACE_OFF_HEAP, MlConfigMigrationEligibilityCheck.ENABLE_CONFIG_MIGRATION)); } public Settings additionalSettings() { - String mlEnabledNodeAttrName = "node.attr." + ML_ENABLED_NODE_ATTR; + String mlEnabledNodeAttrName = "node.attr." + PRE_V7_ML_ENABLED_NODE_ATTR; String maxOpenJobsPerNodeNodeAttrName = "node.attr." + MAX_OPEN_JOBS_NODE_ATTR; String machineMemoryAttrName = "node.attr." + MACHINE_MEMORY_NODE_ATTR; @@ -320,12 +328,12 @@ public Settings additionalSettings() { Settings.Builder additionalSettings = Settings.builder(); Boolean allocationEnabled = ML_ENABLED.get(settings); if (allocationEnabled != null && allocationEnabled) { - // TODO: the simple true/false flag will not be required once all supported versions have the number - consider removing in 7.0 - addMlNodeAttribute(additionalSettings, mlEnabledNodeAttrName, "true"); addMlNodeAttribute(additionalSettings, maxOpenJobsPerNodeNodeAttrName, String.valueOf(AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.get(settings))); addMlNodeAttribute(additionalSettings, machineMemoryAttrName, Long.toString(machineMemoryFromStats(OsProbe.getInstance().osStats()))); + // This is not used in v7 and higher, but users are still prevented from setting it directly to avoid confusion + disallowMlNodeAttributes(mlEnabledNodeAttrName); } else { disallowMlNodeAttributes(mlEnabledNodeAttrName, maxOpenJobsPerNodeNodeAttrName, machineMemoryAttrName); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java index e499f554641ce..5f937609e8cc9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java @@ -143,8 +143,7 @@ private int mlNodeCount(final ClusterState clusterState) { int mlNodeCount = 0; for (DiscoveryNode node : clusterState.getNodes()) { - String enabled = node.getAttributes().get(MachineLearning.ML_ENABLED_NODE_ATTR); - if (Boolean.parseBoolean(enabled)) { + if (MachineLearning.isMlNode(node)) { ++mlNodeCount; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java index 481a144d7fed0..b186ea2184845 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java @@ -399,9 +399,7 @@ private void deleteQuantiles(ParentTaskAssigningClient parentTaskClient, String // The quantiles type and doc ID changed in v5.5 so delete both the old and new format DeleteByQueryRequest request = new DeleteByQueryRequest(AnomalyDetectorsIndex.jobStateIndexName()); // Just use ID here, not type, as trying to delete different types spams the logs with an exception stack trace - 
IdsQueryBuilder query = new IdsQueryBuilder().addIds(Quantiles.documentId(jobId), - // TODO: remove in 7.0 - Quantiles.v54DocumentId(jobId)); + IdsQueryBuilder query = new IdsQueryBuilder().addIds(Quantiles.documentId(jobId)); request.setQuery(query); request.setIndicesOptions(MlIndicesUtils.addIgnoreUnavailable(IndicesOptions.lenientExpandOpen())); request.setAbortOnVersionConflict(false); @@ -436,9 +434,7 @@ private void deleteCategorizerState(ParentTaskAssigningClient parentTaskClient, // The categorizer state type and doc ID changed in v5.5 so delete both the old and new format DeleteByQueryRequest request = new DeleteByQueryRequest(AnomalyDetectorsIndex.jobStateIndexName()); // Just use ID here, not type, as trying to delete different types spams the logs with an exception stack trace - IdsQueryBuilder query = new IdsQueryBuilder().addIds(CategorizerState.documentId(jobId, docNum), - // TODO: remove in 7.0 - CategorizerState.v54DocumentId(jobId, docNum)); + IdsQueryBuilder query = new IdsQueryBuilder().addIds(CategorizerState.documentId(jobId, docNum)); request.setQuery(query); request.setIndicesOptions(MlIndicesUtils.addIgnoreUnavailable(IndicesOptions.lenientExpandOpen())); request.setAbortOnVersionConflict(false); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index 557ebd8f87b04..21f97cbb5dc99 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -69,7 +69,6 @@ import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.MlConfigMigrationEligibilityCheck; import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; -import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; @@ -104,7 +103,6 @@ public class TransportOpenJobAction extends TransportMasterNodeAction nodeAttributes = node.getAttributes(); - String enabled = nodeAttributes.get(MachineLearning.ML_ENABLED_NODE_ATTR); - if (Boolean.valueOf(enabled) == false) { + if (MachineLearning.isMlNode(node) == false) { String reason = "Not opening job [" + jobId + "] on node [" + nodeNameOrId(node) + "], because this node isn't a ml node."; logger.trace(reason); @@ -281,19 +274,17 @@ static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String j continue; } + Map nodeAttributes = node.getAttributes(); String maxNumberOfOpenJobsStr = nodeAttributes.get(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR); - int maxNumberOfOpenJobs = fallbackMaxNumberOfOpenJobs; - // TODO: remove leniency and reject the node if the attribute is null in 7.0 - if (maxNumberOfOpenJobsStr != null) { - try { - maxNumberOfOpenJobs = Integer.parseInt(maxNumberOfOpenJobsStr); - } catch (NumberFormatException e) { - String reason = "Not opening job [" + jobId + "] on node [" + nodeNameAndMlAttributes(node) + "], because " + - MachineLearning.MAX_OPEN_JOBS_NODE_ATTR + " attribute [" + maxNumberOfOpenJobsStr + "] is not an integer"; - logger.trace(reason); - reasons.add(reason); - continue; - } + int maxNumberOfOpenJobs; + try { + maxNumberOfOpenJobs = Integer.parseInt(maxNumberOfOpenJobsStr); + } catch (NumberFormatException e) { + String reason = "Not opening job [" + 
jobId + "] on node [" + nodeNameAndMlAttributes(node) + "], because " + + MachineLearning.MAX_OPEN_JOBS_NODE_ATTR + " attribute [" + maxNumberOfOpenJobsStr + "] is not an integer"; + logger.trace(reason); + reasons.add(reason); + continue; } long availableCount = maxNumberOfOpenJobs - numberOfAssignedJobs; if (availableCount == 0) { @@ -311,18 +302,15 @@ static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String j } String machineMemoryStr = nodeAttributes.get(MachineLearning.MACHINE_MEMORY_NODE_ATTR); - long machineMemory = -1; - // TODO: remove leniency and reject the node if the attribute is null in 7.0 - if (machineMemoryStr != null) { - try { - machineMemory = Long.parseLong(machineMemoryStr); - } catch (NumberFormatException e) { - String reason = "Not opening job [" + jobId + "] on node [" + nodeNameAndMlAttributes(node) + "], because " + - MachineLearning.MACHINE_MEMORY_NODE_ATTR + " attribute [" + machineMemoryStr + "] is not a long"; - logger.trace(reason); - reasons.add(reason); - continue; - } + long machineMemory; + try { + machineMemory = Long.parseLong(machineMemoryStr); + } catch (NumberFormatException e) { + String reason = "Not opening job [" + jobId + "] on node [" + nodeNameAndMlAttributes(node) + "], because " + + MachineLearning.MACHINE_MEMORY_NODE_ATTR + " attribute [" + machineMemoryStr + "] is not a long"; + logger.trace(reason); + reasons.add(reason); + continue; } if (allocateByMemory) { @@ -735,13 +723,6 @@ public static class OpenJobPersistentTasksExecutor extends PersistentTasksExecut private final MlMemoryTracker memoryTracker; private final Client client; - /** - * The maximum number of open jobs can be different on each node. However, nodes on older versions - * won't add their setting to the cluster state, so for backwards compatibility with these nodes we - * assume the older node's setting is the same as that of the node running this code. 
- * TODO: remove this member in 7.0 - */ - private final int fallbackMaxNumberOfOpenJobs; private volatile int maxConcurrentJobAllocations; private volatile int maxMachineMemoryPercent; private volatile int maxLazyMLNodes; @@ -753,7 +734,6 @@ public OpenJobPersistentTasksExecutor(Settings settings, ClusterService clusterS this.autodetectProcessManager = autodetectProcessManager; this.memoryTracker = memoryTracker; this.client = client; - this.fallbackMaxNumberOfOpenJobs = AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.get(settings); this.maxConcurrentJobAllocations = MachineLearning.CONCURRENT_JOB_ALLOCATIONS.get(settings); this.maxMachineMemoryPercent = MachineLearning.MAX_MACHINE_MEMORY_PERCENT.get(settings); this.maxLazyMLNodes = MachineLearning.MAX_LAZY_ML_NODES.get(settings); @@ -770,14 +750,13 @@ public PersistentTasksCustomMetaData.Assignment getAssignment(OpenJobAction.JobP params.getJob(), clusterState, maxConcurrentJobAllocations, - fallbackMaxNumberOfOpenJobs, maxMachineMemoryPercent, memoryTracker, logger); if (assignment.getExecutorNode() == null) { int numMlNodes = 0; for (DiscoveryNode node : clusterState.getNodes()) { - if (Boolean.valueOf(node.getAttributes().get(MachineLearning.ML_ENABLED_NODE_ATTR))) { + if (MachineLearning.isMlNode(node)) { numMlNodes++; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPersistJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPersistJobAction.java index 67b93ac0c9766..91a6727172676 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPersistJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPersistJobAction.java @@ -5,21 +5,13 @@ */ package org.elasticsearch.xpack.ml.action; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.PersistJobAction; -import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; public class TransportPersistJobAction extends TransportJobTaskAction { @@ -44,33 +36,4 @@ protected void taskOperation(PersistJobAction.Request request, TransportOpenJobA } }); } - - @Override - protected void doExecute(Task task, PersistJobAction.Request request, ActionListener listener) { - // TODO Remove this overridden method in 7.0.0 - DiscoveryNodes nodes = clusterService.state().nodes(); - PersistentTasksCustomMetaData tasks = clusterService.state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE); - PersistentTasksCustomMetaData.PersistentTask jobTask = MlTasks.getJobTask(request.getJobId(), tasks); - if (jobTask == null || jobTask.getExecutorNode() == null) { - logger.debug("[{}] Cannot persist the job because the job is not open", request.getJobId()); - listener.onResponse(new PersistJobAction.Response(false)); - return; - } - - 
DiscoveryNode executorNode = nodes.get(jobTask.getExecutorNode()); - if (executorNode == null) { - listener.onFailure(ExceptionsHelper.conflictStatusException("Cannot persist job [{}] as" + - "executor node [{}] cannot be found", request.getJobId(), jobTask.getExecutorNode())); - return; - } - - Version nodeVersion = executorNode.getVersion(); - if (nodeVersion.before(Version.V_6_3_0)) { - listener.onFailure( - new ElasticsearchException("Cannot persist job [" + request.getJobId() + "] on node with version " + nodeVersion)); - return; - } - - super.doExecute(task, request, listener); - } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java index 400ca28d97419..b65feb68da056 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java @@ -24,7 +24,6 @@ import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; -import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelState; import org.elasticsearch.xpack.core.ml.job.results.Result; import java.util.List; @@ -59,45 +58,6 @@ public void deleteModelSnapshots(List modelSnapshots, ActionListe String stateIndexName = AnomalyDetectorsIndex.jobStateIndexName(); - // TODO: remove in 7.0 - ActionListener docDeleteListener = ActionListener.wrap( - response -> { - // if the doc delete worked then don't bother trying the old types - if (response.hasFailures() == false) { - listener.onResponse(response); - return; - } - BulkRequestBuilder bulkRequestBuilder = client.prepareBulk(); - for (ModelSnapshot modelSnapshot : modelSnapshots) { - for (String stateDocId : modelSnapshot.legacyStateDocumentIds()) { - bulkRequestBuilder.add(client.prepareDelete(stateIndexName, ModelState.TYPE, stateDocId)); - } - - bulkRequestBuilder.add(client.prepareDelete(AnomalyDetectorsIndex.jobResultsAliasedName(modelSnapshot.getJobId()), - ModelSnapshot.TYPE.getPreferredName(), ModelSnapshot.v54DocumentId(modelSnapshot))); - } - - bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - try { - bulkRequestBuilder.execute(ActionListener.wrap( - listener::onResponse, - // ignore problems relating to single type indices - if we're running against a single type - // index then it must be type doc, so just return the response from deleting that type - e -> { - if (e instanceof IllegalArgumentException - && e.getMessage().contains("as the final mapping would have more than 1 type")) { - listener.onResponse(response); - } - listener.onFailure(e); - } - )); - } catch (Exception e) { - listener.onFailure(e); - } - }, - listener::onFailure - ); - BulkRequestBuilder bulkRequestBuilder = client.prepareBulk(); for (ModelSnapshot modelSnapshot : modelSnapshots) { for (String stateDocId : modelSnapshot.stateDocumentIds()) { @@ -110,7 +70,7 @@ public void deleteModelSnapshots(List modelSnapshots, ActionListe bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); try { - executeAsyncWithOrigin(client, ML_ORIGIN, BulkAction.INSTANCE, bulkRequestBuilder.request(), docDeleteListener); + executeAsyncWithOrigin(client, ML_ORIGIN, BulkAction.INSTANCE, 
bulkRequestBuilder.request(), listener); } catch (Exception e) { listener.onFailure(e); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java index 5465a11d149ab..755152cab5e48 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java @@ -82,16 +82,9 @@ public class AutodetectBuilder { /** * The maximum number of anomaly records that will be written each bucket */ - @Deprecated - public static final Setting MAX_ANOMALY_RECORDS_SETTING = Setting.intSetting("max.anomaly.records", DEFAULT_MAX_NUM_RECORDS, - Setting.Property.NodeScope, Setting.Property.Deprecated); - // Though this setting is dynamic, it is only set when a new job is opened. So, already runnin jobs will not get the updated value. - public static final Setting MAX_ANOMALY_RECORDS_SETTING_DYNAMIC = Setting.intSetting( - "xpack.ml.max_anomaly_records", - MAX_ANOMALY_RECORDS_SETTING, - 1, - Setting.Property.NodeScope, - Setting.Property.Dynamic); + // Though this setting is dynamic, it is only set when a new job is opened. So, already running jobs will not get the updated value. + public static final Setting MAX_ANOMALY_RECORDS_SETTING_DYNAMIC = Setting.intSetting("xpack.ml.max_anomaly_records", + DEFAULT_MAX_NUM_RECORDS, Setting.Property.NodeScope, Setting.Property.Dynamic); /** * Config setting storing the flag that disables model persistence diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index a1c8cadce60ee..251a2a5224ae9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -95,15 +95,11 @@ public class AutodetectProcessManager { // available resources on that node: https://github.com/elastic/x-pack-elasticsearch/issues/546 // However, it is useful to also be able to apply a hard limit. - // WARNING: These settings cannot be made DYNAMIC, because they are tied to several threadpools + // WARNING: This setting cannot be made DYNAMIC, because it is tied to several threadpools // and a threadpool's size can't be changed at runtime. // See MachineLearning#getExecutorBuilders(...) 
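    // With the deprecated max_running_jobs fallback removed below, the default of 20 and the
    // 1..512 bounds are now declared directly on xpack.ml.max_open_jobs.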
- // TODO: Remove the deprecated setting in 7.0 and move the default value to the replacement setting - @Deprecated - public static final Setting MAX_RUNNING_JOBS_PER_NODE = - Setting.intSetting("max_running_jobs", 20, 1, 512, Property.NodeScope, Property.Deprecated); public static final Setting MAX_OPEN_JOBS_PER_NODE = - Setting.intSetting("xpack.ml.max_open_jobs", MAX_RUNNING_JOBS_PER_NODE, 1, Property.NodeScope); + Setting.intSetting("xpack.ml.max_open_jobs", 20, 1, 512, Property.NodeScope); // Undocumented setting for integration test purposes public static final Setting MIN_DISK_SPACE_OFF_HEAP = diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java index 6a2f25f6a3046..4ac5ce45dc227 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java @@ -342,7 +342,7 @@ private void givenNodeCount(int nodeCount) { DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); for (int i = 0; i < nodeCount; i++) { Map attrs = new HashMap<>(); - attrs.put(MachineLearning.ML_ENABLED_NODE_ATTR, Boolean.toString(true)); + attrs.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, Integer.toString(20)); Set roles = new HashSet<>(); roles.add(DiscoveryNode.Role.DATA); roles.add(DiscoveryNode.Role.MASTER); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java index 84d2ecaf918f9..cfb16254a9dde 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java @@ -108,8 +108,9 @@ public void testValidate_givenValidJob() { public void testSelectLeastLoadedMlNode_byCount() { Map nodeAttr = new HashMap<>(); - nodeAttr.put(MachineLearning.ML_ENABLED_NODE_ATTR, "true"); - // MachineLearning.MACHINE_MEMORY_NODE_ATTR not set, so this will fall back to allocating by count + nodeAttr.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "10"); + nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "-1"); + // MachineLearning.MACHINE_MEMORY_NODE_ATTR negative, so this will fall back to allocating by count DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), nodeAttr, Collections.emptySet(), Version.CURRENT)) @@ -135,7 +136,7 @@ public void testSelectLeastLoadedMlNode_byCount() { jobBuilder.setJobVersion(Version.CURRENT); Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id4", jobBuilder.build(), - cs.build(), 2, 10, 30, memoryTracker, logger); + cs.build(), 2, 30, memoryTracker, logger); assertEquals("", result.getExplanation()); assertEquals("_node_id3", result.getExecutorNode()); } @@ -146,7 +147,8 @@ public void testSelectLeastLoadedMlNode_maxCapacity() { int maxRunningJobsPerNode = randomIntBetween(1, 100); Map nodeAttr = new HashMap<>(); - nodeAttr.put(MachineLearning.ML_ENABLED_NODE_ATTR, "true"); + nodeAttr.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, Integer.toString(maxRunningJobsPerNode)); + nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); DiscoveryNodes.Builder nodes = 
DiscoveryNodes.builder(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); String[] jobIds = new String[numNodes * maxRunningJobsPerNode]; @@ -171,7 +173,7 @@ public void testSelectLeastLoadedMlNode_maxCapacity() { Job job = BaseMlIntegTestCase.createFareQuoteJob("job_id0", new ByteSizeValue(150, ByteSizeUnit.MB)).build(new Date()); Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id0", job, cs.build(), 2, - maxRunningJobsPerNode, 30, memoryTracker, logger); + 30, memoryTracker, logger); assertNull(result.getExecutorNode()); assertTrue(result.getExplanation().contains("because this node is full. Number of opened jobs [" + maxRunningJobsPerNode + "], xpack.ml.max_open_jobs [" + maxRunningJobsPerNode + "]")); @@ -197,14 +199,15 @@ public void testSelectLeastLoadedMlNode_noMlNodes() { Job job = BaseMlIntegTestCase.createFareQuoteJob("job_id2", new ByteSizeValue(2, ByteSizeUnit.MB)).build(new Date()); - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id2", job, cs.build(), 2, 10, 30, memoryTracker, logger); + Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id2", job, cs.build(), 2, 30, memoryTracker, logger); assertTrue(result.getExplanation().contains("because this node isn't a ml node")); assertNull(result.getExecutorNode()); } public void testSelectLeastLoadedMlNode_maxConcurrentOpeningJobs() { Map nodeAttr = new HashMap<>(); - nodeAttr.put(MachineLearning.ML_ENABLED_NODE_ATTR, "true"); + nodeAttr.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "10"); + nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), nodeAttr, Collections.emptySet(), Version.CURRENT)) @@ -231,7 +234,7 @@ public void testSelectLeastLoadedMlNode_maxConcurrentOpeningJobs() { Job job = BaseMlIntegTestCase.createFareQuoteJob("job_id6", new ByteSizeValue(2, ByteSizeUnit.MB)).build(new Date()); ClusterState cs = csBuilder.build(); - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id6", job, cs, 2, 10, 30, memoryTracker, logger); + Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id6", job, cs, 2, 30, memoryTracker, logger); assertEquals("_node_id3", result.getExecutorNode()); tasksBuilder = PersistentTasksCustomMetaData.builder(tasks); @@ -241,7 +244,7 @@ public void testSelectLeastLoadedMlNode_maxConcurrentOpeningJobs() { csBuilder = ClusterState.builder(cs); csBuilder.metaData(MetaData.builder(cs.metaData()).putCustom(PersistentTasksCustomMetaData.TYPE, tasks)); cs = csBuilder.build(); - result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id7", job, cs, 2, 10, 30, memoryTracker, logger); + result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id7", job, cs, 2, 30, memoryTracker, logger); assertNull("no node selected, because OPENING state", result.getExecutorNode()); assertTrue(result.getExplanation().contains("because node exceeds [2] the maximum number of jobs [2] in opening state")); @@ -252,7 +255,7 @@ public void testSelectLeastLoadedMlNode_maxConcurrentOpeningJobs() { csBuilder = ClusterState.builder(cs); csBuilder.metaData(MetaData.builder(cs.metaData()).putCustom(PersistentTasksCustomMetaData.TYPE, tasks)); cs = csBuilder.build(); - result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id7", job, cs, 2, 10, 30, memoryTracker, logger); + result = 
TransportOpenJobAction.selectLeastLoadedMlNode("job_id7", job, cs, 2, 30, memoryTracker, logger); assertNull("no node selected, because stale task", result.getExecutorNode()); assertTrue(result.getExplanation().contains("because node exceeds [2] the maximum number of jobs [2] in opening state")); @@ -263,14 +266,15 @@ public void testSelectLeastLoadedMlNode_maxConcurrentOpeningJobs() { csBuilder = ClusterState.builder(cs); csBuilder.metaData(MetaData.builder(cs.metaData()).putCustom(PersistentTasksCustomMetaData.TYPE, tasks)); cs = csBuilder.build(); - result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id7", job, cs, 2, 10, 30, memoryTracker, logger); + result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id7", job, cs, 2, 30, memoryTracker, logger); assertNull("no node selected, because null state", result.getExecutorNode()); assertTrue(result.getExplanation().contains("because node exceeds [2] the maximum number of jobs [2] in opening state")); } public void testSelectLeastLoadedMlNode_concurrentOpeningJobsAndStaleFailedJob() { Map nodeAttr = new HashMap<>(); - nodeAttr.put(MachineLearning.ML_ENABLED_NODE_ATTR, "true"); + nodeAttr.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "10"); + nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), nodeAttr, Collections.emptySet(), Version.CURRENT)) @@ -301,7 +305,7 @@ public void testSelectLeastLoadedMlNode_concurrentOpeningJobsAndStaleFailedJob() Job job = BaseMlIntegTestCase.createFareQuoteJob("job_id7", new ByteSizeValue(2, ByteSizeUnit.MB)).build(new Date()); // Allocation won't be possible if the stale failed job is treated as opening - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id7", job, cs, 2, 10, 30, memoryTracker, logger); + Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id7", job, cs, 2, 30, memoryTracker, logger); assertEquals("_node_id1", result.getExecutorNode()); tasksBuilder = PersistentTasksCustomMetaData.builder(tasks); @@ -311,14 +315,15 @@ public void testSelectLeastLoadedMlNode_concurrentOpeningJobsAndStaleFailedJob() csBuilder = ClusterState.builder(cs); csBuilder.metaData(MetaData.builder(cs.metaData()).putCustom(PersistentTasksCustomMetaData.TYPE, tasks)); cs = csBuilder.build(); - result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id8", job, cs, 2, 10, 30, memoryTracker, logger); + result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id8", job, cs, 2, 30, memoryTracker, logger); assertNull("no node selected, because OPENING state", result.getExecutorNode()); assertTrue(result.getExplanation().contains("because node exceeds [2] the maximum number of jobs [2] in opening state")); } public void testSelectLeastLoadedMlNode_noCompatibleJobTypeNodes() { Map nodeAttr = new HashMap<>(); - nodeAttr.put(MachineLearning.ML_ENABLED_NODE_ATTR, "true"); + nodeAttr.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "10"); + nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), nodeAttr, Collections.emptySet(), Version.CURRENT)) @@ -342,7 +347,7 @@ public void testSelectLeastLoadedMlNode_noCompatibleJobTypeNodes() { cs.nodes(nodes); metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); 
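        // Note that selectLeastLoadedMlNode no longer takes the fallback max-open-jobs argument (the "10"
        // removed from the calls below); the per-node limit now comes solely from the ml.max_open_jobs
        // node attribute.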
cs.metaData(metaData); - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("incompatible_type_job", job, cs.build(), 2, 10, 30, + Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("incompatible_type_job", job, cs.build(), 2, 30, memoryTracker, logger); assertThat(result.getExplanation(), containsString("because this node does not support jobs of type [incompatible_type]")); assertNull(result.getExecutorNode()); @@ -350,7 +355,8 @@ public void testSelectLeastLoadedMlNode_noCompatibleJobTypeNodes() { public void testSelectLeastLoadedMlNode_noNodesMatchingModelSnapshotMinVersion() { Map nodeAttr = new HashMap<>(); - nodeAttr.put(MachineLearning.ML_ENABLED_NODE_ATTR, "true"); + nodeAttr.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "10"); + nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), nodeAttr, Collections.emptySet(), Version.V_6_2_0)) @@ -373,7 +379,7 @@ public void testSelectLeastLoadedMlNode_noNodesMatchingModelSnapshotMinVersion() metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); cs.metaData(metaData); Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_with_incompatible_model_snapshot", job, cs.build(), - 2, 10, 30, memoryTracker, logger); + 2, 30, memoryTracker, logger); assertThat(result.getExplanation(), containsString( "because the job's model snapshot requires a node of version [6.3.0] or higher")); assertNull(result.getExecutorNode()); @@ -381,7 +387,8 @@ public void testSelectLeastLoadedMlNode_noNodesMatchingModelSnapshotMinVersion() public void testSelectLeastLoadedMlNode_jobWithRulesButNoNodeMeetsRequiredVersion() { Map nodeAttr = new HashMap<>(); - nodeAttr.put(MachineLearning.ML_ENABLED_NODE_ATTR, "true"); + nodeAttr.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "10"); + nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), nodeAttr, Collections.emptySet(), Version.V_6_2_0)) @@ -400,7 +407,7 @@ public void testSelectLeastLoadedMlNode_jobWithRulesButNoNodeMeetsRequiredVersio cs.metaData(metaData); Job job = jobWithRules("job_with_rules"); - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_with_rules", job, cs.build(), 2, 10, 30, memoryTracker, + Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_with_rules", job, cs.build(), 2, 30, memoryTracker, logger); assertThat(result.getExplanation(), containsString( "because jobs using custom_rules require a node of version [6.4.0] or higher")); @@ -409,7 +416,8 @@ public void testSelectLeastLoadedMlNode_jobWithRulesButNoNodeMeetsRequiredVersio public void testSelectLeastLoadedMlNode_jobWithRulesAndNodeMeetsRequiredVersion() { Map nodeAttr = new HashMap<>(); - nodeAttr.put(MachineLearning.ML_ENABLED_NODE_ATTR, "true"); + nodeAttr.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "10"); + nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), nodeAttr, Collections.emptySet(), Version.V_6_2_0)) @@ -428,7 +436,7 @@ public void testSelectLeastLoadedMlNode_jobWithRulesAndNodeMeetsRequiredVersion( 
cs.metaData(metaData); Job job = jobWithRules("job_with_rules"); - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_with_rules", job, cs.build(), 2, 10, 30, memoryTracker, + Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_with_rules", job, cs.build(), 2, 30, memoryTracker, logger); assertNotNull(result.getExecutorNode()); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java index 720f46fe95673..77041f40e0f4b 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java @@ -209,7 +209,6 @@ public void testDedicatedMlNode() throws Exception { PersistentTask task = tasks.getTask(MlTasks.jobTaskId(jobId)); DiscoveryNode node = clusterState.nodes().resolveNode(task.getExecutorNode()); - assertThat(node.getAttributes(), hasEntry(MachineLearning.ML_ENABLED_NODE_ATTR, "true")); assertThat(node.getAttributes(), hasEntry(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "20")); JobTaskState jobTaskState = (JobTaskState) task.getState(); assertNotNull(jobTaskState); @@ -425,7 +424,6 @@ private void assertJobTask(String jobId, JobState expectedState, boolean hasExec assertNotNull(task.getExecutorNode()); assertFalse(needsReassignment(task.getAssignment(), clusterState.nodes())); DiscoveryNode node = clusterState.nodes().resolveNode(task.getExecutorNode()); - assertThat(node.getAttributes(), hasEntry(MachineLearning.ML_ENABLED_NODE_ATTR, "true")); assertThat(node.getAttributes(), hasEntry(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "20")); JobTaskState jobTaskState = (JobTaskState) task.getState(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java index 50092a766e7d9..d68fe5225fb16 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java @@ -15,12 +15,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.persistent.PersistentTasksClusterService; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -72,7 +74,6 @@ public void testFailOver() throws Exception { }); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32905") public void testLoseDedicatedMasterNode() throws Exception { internalCluster().ensureAtMostNumDataNodes(0); logger.info("Starting dedicated master node..."); @@ -290,6 +291,17 @@ private void run(String jobId, CheckedRunnable disrupt) 
throws Except client().admin().indices().prepareSyncedFlush().get(); disrupt.run(); + + PersistentTasksClusterService persistentTasksClusterService = + internalCluster().getInstance(PersistentTasksClusterService.class, internalCluster().getMasterName()); + // Speed up rechecks to a rate that is quicker than what settings would allow. + // The tests would work eventually without doing this, but the assertBusy() below + // would need to wait 30 seconds, which would make the suite run very slowly. + // The 200ms refresh puts a greater burden on the master node to recheck + // persistent tasks, but it will cope in these tests as it's not doing anything + // else. + persistentTasksClusterService.setRecheckInterval(TimeValue.timeValueMillis(200)); + assertBusy(() -> { ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); PersistentTasksCustomMetaData tasks = clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java index 96c3176272f46..346e9aa5d5dbc 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java @@ -153,25 +153,6 @@ public void testMaxOpenJobsSetting_givenNewSettingOnly() { assertEquals(7, maxOpenJobs); } - public void testMaxOpenJobsSetting_givenOldSettingOnly() { - Settings.Builder settings = Settings.builder(); - settings.put(AutodetectProcessManager.MAX_RUNNING_JOBS_PER_NODE.getKey(), 9); - int maxOpenJobs = AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.get(settings.build()); - assertEquals(9, maxOpenJobs); - assertWarnings("[max_running_jobs] setting was deprecated in Elasticsearch and will be removed in a future release! " - + "See the breaking changes documentation for the next major version."); - } - - public void testMaxOpenJobsSetting_givenOldAndNewSettings() { - Settings.Builder settings = Settings.builder(); - settings.put(AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.getKey(), 7); - settings.put(AutodetectProcessManager.MAX_RUNNING_JOBS_PER_NODE.getKey(), 9); - int maxOpenJobs = AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.get(settings.build()); - assertEquals(7, maxOpenJobs); - assertWarnings("[max_running_jobs] setting was deprecated in Elasticsearch and will be removed in a future release! 
" - + "See the breaking changes documentation for the next major version."); - } - public void testOpenJob() { Client client = mock(Client.class); AutodetectCommunicator communicator = mock(AutodetectCommunicator.class); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 3a735332a6b8b..ad9f1d7aa948c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -115,6 +115,8 @@ import org.elasticsearch.xpack.core.security.authc.DefaultAuthenticationFailureHandler; import org.elasticsearch.xpack.core.security.authc.InternalRealmsSettings; import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; @@ -244,6 +246,7 @@ import java.util.function.Predicate; import java.util.function.Supplier; import java.util.function.UnaryOperator; +import java.util.stream.Collectors; import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; @@ -296,7 +299,7 @@ public Security(Settings settings, final Path configPath) { this.env = transportClientMode ? null : new Environment(settings, configPath); this.enabled = XPackSettings.SECURITY_ENABLED.get(settings); if (enabled && transportClientMode == false) { - validateAutoCreateIndex(settings); + runStartupChecks(settings); // we load them all here otherwise we can't access secure settings since they are closed once the checks are // fetched final List checks = new ArrayList<>(); @@ -315,6 +318,12 @@ public Security(Settings settings, final Path configPath) { this.bootstrapChecks = Collections.emptyList(); } this.securityExtensions.addAll(extensions); + + } + + private static void runStartupChecks(Settings settings) { + validateAutoCreateIndex(settings); + validateRealmSettings(settings); } @Override @@ -781,6 +790,40 @@ public Map getProcessors(Processor.Parameters paramet return Collections.singletonMap(SetSecurityUserProcessor.TYPE, new SetSecurityUserProcessor.Factory(parameters.threadContext)); } + /** + * Realm settings were changed in 7.0. This method validates that the settings in use on this node match the new style of setting. + * In 6.x a realm config would be + *
+     *   xpack.security.authc.realms.file1.type: file
+     *   xpack.security.authc.realms.file1.order: 0
+     * 
+ * In 7.x this realm should be + *
+     *   xpack.security.authc.realms.file.file1.order: 0
+     * 
+ * If confronted with an old style config, the ES Settings validation would simply fail with an error such as + * unknown setting [xpack.security.authc.realms.file1.order]. This validation method provides an error that is easier to + * understand and take action on. + */ + static void validateRealmSettings(Settings settings) { + final Set badRealmSettings = settings.keySet().stream() + .filter(k -> k.startsWith(RealmSettings.PREFIX)) + .filter(key -> { + final String suffix = key.substring(RealmSettings.PREFIX.length()); + // suffix-part, only contains a single '.' + return suffix.indexOf('.') == suffix.lastIndexOf('.'); + }) + .collect(Collectors.toSet()); + if (badRealmSettings.isEmpty() == false) { + String sampleRealmSetting = RealmSettings.realmSettingPrefix(new RealmConfig.RealmIdentifier("file", "my_file")) + "order"; + throw new IllegalArgumentException("Incorrect realm settings found. " + + "Realm settings have been changed to include the type as part of the setting key.\n" + + "For example '" + sampleRealmSetting + "'\n" + + "Found invalid config: " + Strings.collectionToDelimitedString(badRealmSettings, ", ") + "\n" + + "Please see the breaking changes documentation." + ); + } + } static boolean indexAuditLoggingEnabled(Settings settings) { if (XPackSettings.AUDIT_ENABLED.get(settings)) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index e9924b9d85245..8674a5b295085 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.xpack.core.security.SecurityExtension; import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; @@ -66,8 +67,8 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING; import static org.elasticsearch.discovery.DiscoveryModule.ZEN2_DISCOVERY_TYPE; import static org.elasticsearch.discovery.DiscoveryModule.ZEN_DISCOVERY_TYPE; -import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_INDEX_FORMAT; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -99,9 +100,9 @@ private Collection createComponents(Settings testSettings, SecurityExten throw new IllegalStateException("Security object already exists (" + security + ")"); } Settings settings = Settings.builder() - .put("xpack.security.enabled", true) - .put(testSettings) - .put("path.home", createTempDir()).build(); + .put("xpack.security.enabled", true) + .put(testSettings) + .put("path.home", createTempDir()).build(); Environment env = TestEnvironment.newEnvironment(settings); licenseState = new TestUtils.UpdatableLicenseState(settings); SSLService sslService = new SSLService(settings, 
env); @@ -159,7 +160,7 @@ public void testCustomRealmExtension() throws Exception { public void testCustomRealmExtensionConflict() throws Exception { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> createComponents(Settings.EMPTY, new DummyExtension(FileRealmSettings.TYPE))); + () -> createComponents(Settings.EMPTY, new DummyExtension(FileRealmSettings.TYPE))); assertEquals("Realm type [" + FileRealmSettings.TYPE + "] is already registered", e.getMessage()); } @@ -181,8 +182,8 @@ public void testDisabledByDefault() throws Exception { public void testIndexAuditTrail() throws Exception { Settings settings = Settings.builder() - .put(XPackSettings.AUDIT_ENABLED.getKey(), true) - .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), "index").build(); + .put(XPackSettings.AUDIT_ENABLED.getKey(), true) + .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), "index").build(); Collection components = createComponents(settings); AuditTrailService service = findComponent(AuditTrailService.class, components); assertNotNull(service); @@ -192,8 +193,8 @@ public void testIndexAuditTrail() throws Exception { public void testIndexAndLoggingAuditTrail() throws Exception { Settings settings = Settings.builder() - .put(XPackSettings.AUDIT_ENABLED.getKey(), true) - .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), "index,logfile").build(); + .put(XPackSettings.AUDIT_ENABLED.getKey(), true) + .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), "index,logfile").build(); Collection components = createComponents(settings); AuditTrailService service = findComponent(AuditTrailService.class, components); assertNotNull(service); @@ -204,8 +205,8 @@ public void testIndexAndLoggingAuditTrail() throws Exception { public void testUnknownOutput() { Settings settings = Settings.builder() - .put(XPackSettings.AUDIT_ENABLED.getKey(), true) - .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), "foo").build(); + .put(XPackSettings.AUDIT_ENABLED.getKey(), true) + .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), "foo").build(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> createComponents(settings)); assertEquals("Unknown audit trail output [foo]", e.getMessage()); } @@ -218,9 +219,9 @@ public void testHttpSettingDefaults() throws Exception { public void testTransportSettingNetty4Both() { Settings both4 = Security.additionalSettings(Settings.builder() - .put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4) - .put(NetworkModule.HTTP_TYPE_KEY, SecurityField.NAME4) - .build(), true, false); + .put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4) + .put(NetworkModule.HTTP_TYPE_KEY, SecurityField.NAME4) + .build(), true, false); assertFalse(NetworkModule.TRANSPORT_TYPE_SETTING.exists(both4)); assertFalse(NetworkModule.HTTP_TYPE_SETTING.exists(both4)); } @@ -229,13 +230,13 @@ public void testTransportSettingValidation() { final String badType = randomFrom("netty4", "other", "security1"); Settings settingsTransport = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, badType).build(); IllegalArgumentException badTransport = expectThrows(IllegalArgumentException.class, - () -> Security.additionalSettings(settingsTransport, true, false)); + () -> Security.additionalSettings(settingsTransport, true, false)); assertThat(badTransport.getMessage(), containsString(SecurityField.NAME4)); assertThat(badTransport.getMessage(), containsString(NetworkModule.TRANSPORT_TYPE_KEY)); Settings settingsHttp = Settings.builder().put(NetworkModule.HTTP_TYPE_KEY, badType).build(); IllegalArgumentException 
badHttp = expectThrows(IllegalArgumentException.class, - () -> Security.additionalSettings(settingsHttp, true, false)); + () -> Security.additionalSettings(settingsHttp, true, false)); assertThat(badHttp.getMessage(), containsString(SecurityField.NAME4)); assertThat(badHttp.getMessage(), containsString(NetworkModule.HTTP_TYPE_KEY)); } @@ -249,21 +250,21 @@ public void testSettingFilter() throws Exception { public void testFilteredSettings() throws Exception { createComponents(Settings.EMPTY); final List> realmSettings = security.getSettings().stream() - .filter(s -> s.getKey().startsWith("xpack.security.authc.realms")) - .collect(Collectors.toList()); + .filter(s -> s.getKey().startsWith("xpack.security.authc.realms")) + .collect(Collectors.toList()); Arrays.asList( - "bind_dn", "bind_password", - "hostname_verification", - "truststore.password", "truststore.path", "truststore.algorithm", - "keystore.key_password").forEach(suffix -> { + "bind_dn", "bind_password", + "hostname_verification", + "truststore.password", "truststore.path", "truststore.algorithm", + "keystore.key_password").forEach(suffix -> { final List> matching = realmSettings.stream() - .filter(s -> s.getKey().endsWith("." + suffix)) - .collect(Collectors.toList()); + .filter(s -> s.getKey().endsWith("." + suffix)) + .collect(Collectors.toList()); assertThat("For suffix " + suffix, matching, Matchers.not(empty())); matching.forEach(setting -> assertThat("For setting " + setting, - setting.getProperties(), Matchers.hasItem(Setting.Property.Filtered))); + setting.getProperties(), Matchers.hasItem(Setting.Property.Filtered))); }); } @@ -290,7 +291,7 @@ public void testTLSJoinValidator() throws Exception { TestUtils.putLicense(builder, license); ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metaData(builder.build()).build(); EnumSet productionModes = EnumSet.of(License.OperationMode.GOLD, License.OperationMode.PLATINUM, - License.OperationMode.STANDARD); + License.OperationMode.STANDARD); if (productionModes.contains(license.operationMode()) && tlsOn == false && "single-node".equals(discoveryType) == false) { IllegalStateException ise = expectThrows(IllegalStateException.class, () -> validator.accept(node, state)); assertEquals("TLS setup is required for license type [" + license.operationMode().name() + "]", ise.getMessage()); @@ -340,18 +341,18 @@ public void testIndexJoinValidator_Old_And_Rolling() throws Exception { assertNotNull(joinValidator); DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); IndexMetaData indexMetaData = IndexMetaData.builder(SECURITY_INDEX_NAME) - .settings(settings(Version.V_6_1_0).put(INDEX_FORMAT_SETTING.getKey(), INTERNAL_INDEX_FORMAT - 1)) - .numberOfShards(1).numberOfReplicas(0) - .build(); + .settings(settings(Version.V_6_1_0).put(INDEX_FORMAT_SETTING.getKey(), INTERNAL_INDEX_FORMAT - 1)) + .numberOfShards(1).numberOfReplicas(0) + .build(); DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), Version.V_6_1_0); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(existingOtherNode).build(); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) - .nodes(discoveryNodes) - .metaData(MetaData.builder().put(indexMetaData, true).build()).build(); + .nodes(discoveryNodes) + .metaData(MetaData.builder().put(indexMetaData, true).build()).build(); IllegalStateException e = expectThrows(IllegalStateException.class, - () -> joinValidator.accept(node, clusterState)); + () -> 
joinValidator.accept(node, clusterState)); assertThat(e.getMessage(), equalTo("Security index is not on the current version [6] - " + - "The Upgrade API must be run for 7.x nodes to join the cluster")); + "The Upgrade API must be run for 7.x nodes to join the cluster")); } public void testIndexJoinValidator_FullyCurrentCluster() throws Exception { @@ -361,14 +362,14 @@ public void testIndexJoinValidator_FullyCurrentCluster() throws Exception { DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); int indexFormat = randomBoolean() ? INTERNAL_INDEX_FORMAT : INTERNAL_INDEX_FORMAT - 1; IndexMetaData indexMetaData = IndexMetaData.builder(SECURITY_INDEX_NAME) - .settings(settings(Version.V_6_1_0).put(INDEX_FORMAT_SETTING.getKey(), indexFormat)) - .numberOfShards(1).numberOfReplicas(0) - .build(); + .settings(settings(Version.V_6_1_0).put(INDEX_FORMAT_SETTING.getKey(), indexFormat)) + .numberOfShards(1).numberOfReplicas(0) + .build(); DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(existingOtherNode).build(); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) - .nodes(discoveryNodes) - .metaData(MetaData.builder().put(indexMetaData, true).build()).build(); + .nodes(discoveryNodes) + .metaData(MetaData.builder().put(indexMetaData, true).build()).build(); joinValidator.accept(node, clusterState); } @@ -379,14 +380,14 @@ public void testIndexUpgradeValidatorWithUpToDateIndex() throws Exception { Version version = randomBoolean() ? Version.CURRENT : Version.V_6_1_0; DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); IndexMetaData indexMetaData = IndexMetaData.builder(SECURITY_INDEX_NAME) - .settings(settings(version).put(INDEX_FORMAT_SETTING.getKey(), INTERNAL_INDEX_FORMAT)) - .numberOfShards(1).numberOfReplicas(0) - .build(); + .settings(settings(version).put(INDEX_FORMAT_SETTING.getKey(), INTERNAL_INDEX_FORMAT)) + .numberOfShards(1).numberOfReplicas(0) + .build(); DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), version); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(existingOtherNode).build(); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) - .nodes(discoveryNodes) - .metaData(MetaData.builder().put(indexMetaData, true).build()).build(); + .nodes(discoveryNodes) + .metaData(MetaData.builder().put(indexMetaData, true).build()).build(); joinValidator.accept(node, clusterState); } @@ -398,7 +399,7 @@ public void testIndexUpgradeValidatorWithMissingIndex() throws Exception { DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), Version.V_6_1_0); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(existingOtherNode).build(); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) - .nodes(discoveryNodes).build(); + .nodes(discoveryNodes).build(); joinValidator.accept(node, clusterState); } @@ -409,15 +410,15 @@ public void testGetFieldFilterSecurityEnabled() throws Exception { Map permissionsMap = new HashMap<>(); FieldPermissions permissions = new FieldPermissions( - new FieldPermissionsDefinition(new String[]{"field_granted"}, Strings.EMPTY_ARRAY)); + new FieldPermissionsDefinition(new String[] { "field_granted" }, Strings.EMPTY_ARRAY)); IndicesAccessControl.IndexAccessControl indexGrantedAccessControl = new 
IndicesAccessControl.IndexAccessControl(true, permissions, - Collections.emptySet()); + Collections.emptySet()); permissionsMap.put("index_granted", indexGrantedAccessControl); IndicesAccessControl.IndexAccessControl indexAccessControl = new IndicesAccessControl.IndexAccessControl(false, - FieldPermissions.DEFAULT, Collections.emptySet()); + FieldPermissions.DEFAULT, Collections.emptySet()); permissionsMap.put("index_not_granted", indexAccessControl); IndicesAccessControl.IndexAccessControl nullFieldPermissions = - new IndicesAccessControl.IndexAccessControl(true, null, Collections.emptySet()); + new IndicesAccessControl.IndexAccessControl(true, null, Collections.emptySet()); permissionsMap.put("index_null", nullFieldPermissions); IndicesAccessControl index = new IndicesAccessControl(true, permissionsMap); threadContext.putTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, index); @@ -446,4 +447,25 @@ public void testGetFieldFilterSecurityEnabledLicenseNoFLS() throws Exception { assertNotSame(MapperPlugin.NOOP_FIELD_FILTER, fieldFilter); assertSame(MapperPlugin.NOOP_FIELD_PREDICATE, fieldFilter.apply(randomAlphaOfLengthBetween(3, 6))); } + + public void testValidateRealmsWhenSettingsAreInvalid() { + final Settings settings = Settings.builder() + .put(RealmSettings.PREFIX + "my_pki.type", "pki") + .put(RealmSettings.PREFIX + "ldap1.type", "ldap") + .build(); + final IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Security.validateRealmSettings(settings)); + assertThat(iae.getMessage(), containsString("Incorrect realm settings")); + assertThat(iae.getMessage(), containsString("breaking changes doc")); + assertThat(iae.getMessage(), containsString(RealmSettings.PREFIX + "my_pki.type")); + assertThat(iae.getMessage(), containsString(RealmSettings.PREFIX + "ldap1.type")); + } + + public void testValidateRealmsWhenSettingsAreCorrect() { + final Settings settings = Settings.builder() + .put(RealmSettings.PREFIX + "pki.my_pki.order", 0) + .put(RealmSettings.PREFIX + "ldap.ldap1.order", 1) + .build(); + Security.validateRealmSettings(settings); + // no-exception + } } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/CliIntegrationTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/CliIntegrationTestCase.java index fe717d23a2cb8..e7a73cd12d524 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/CliIntegrationTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/CliIntegrationTestCase.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.rest.action.document.RestIndexAction; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.sql.qa.cli.EmbeddedCli.SecurityConfig; import org.junit.After; @@ -58,9 +57,8 @@ protected SecurityConfig securityConfig() { } protected void index(String index, CheckedConsumer body) throws IOException { - Request request = new Request("PUT", "/" + index + "/doc/1"); + Request request = new Request("PUT", "/" + index + "/_doc/1"); request.addParameter("refresh", "true"); - request.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); XContentBuilder builder = JsonXContent.contentBuilder().startObject(); body.accept(builder); builder.endObject(); diff --git 
a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java index 039c15359eb8c..7832f7d35d7a0 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.rest.action.document.RestIndexAction; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.sql.jdbc.EsDataSource; import org.junit.After; @@ -85,9 +84,8 @@ public static void index(String index, CheckedConsumer body) throws IOException { - Request request = new Request("PUT", "/" + index + "/doc/" + documentId); + Request request = new Request("PUT", "/" + index + "/_doc/" + documentId); request.addParameter("refresh", "true"); - request.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); XContentBuilder builder = JsonXContent.contentBuilder().startObject(); body.accept(builder); builder.endObject(); diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java index 3cb870852583e..3bb710c998290 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java @@ -150,7 +150,7 @@ public void testGettingInvalidByte() throws Exception { double doubleNotByte = randomDoubleBetween(Byte.MAX_VALUE + 1, Double.MAX_VALUE, true); float floatNotByte = randomFloatBetween(Byte.MAX_VALUE + 1, Float.MAX_VALUE); String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); - long randomDate = randomLong(); + long randomDate = randomNonNegativeLong(); String doubleErrorMessage = (doubleNotByte > Long.MAX_VALUE || doubleNotByte < Long.MIN_VALUE) ? Double.toString(doubleNotByte) : Long.toString(Math.round(doubleNotByte)); @@ -279,7 +279,7 @@ public void testGettingInvalidShort() throws Exception { double doubleNotShort = randomDoubleBetween(Short.MAX_VALUE + 1, Double.MAX_VALUE, true); float floatNotShort = randomFloatBetween(Short.MAX_VALUE + 1, Float.MAX_VALUE); String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); - long randomDate = randomLong(); + long randomDate = randomNonNegativeLong(); String doubleErrorMessage = (doubleNotShort > Long.MAX_VALUE || doubleNotShort < Long.MIN_VALUE) ? Double.toString(doubleNotShort) : Long.toString(Math.round(doubleNotShort)); @@ -400,7 +400,7 @@ public void testGettingInvalidInteger() throws Exception { double doubleNotInt = randomDoubleBetween(getMaxIntPlusOne().doubleValue(), Double.MAX_VALUE, true); float floatNotInt = randomFloatBetween(getMaxIntPlusOne().floatValue(), Float.MAX_VALUE); String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); - long randomDate = randomLong(); + long randomDate = randomNonNegativeLong(); String doubleErrorMessage = (doubleNotInt > Long.MAX_VALUE || doubleNotInt < Long.MIN_VALUE) ? 
Double.toString(doubleNotInt) : Long.toString(Math.round(doubleNotInt)); @@ -511,7 +511,7 @@ public void testGettingInvalidLong() throws Exception { double doubleNotLong = randomDoubleBetween(getMaxLongPlusOne().doubleValue(), Double.MAX_VALUE, true); float floatNotLong = randomFloatBetween(getMaxLongPlusOne().floatValue(), Float.MAX_VALUE); String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); - long randomDate = randomLong(); + long randomDate = randomNonNegativeLong(); index("test", "1", builder -> { builder.field("test_double", doubleNotLong); @@ -606,7 +606,7 @@ public void testGettingInvalidDouble() throws Exception { }); String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); - long randomDate = randomLong(); + long randomDate = randomNonNegativeLong(); index("test", "1", builder -> { builder.field("test_keyword", randomString); @@ -689,7 +689,7 @@ public void testGettingInvalidFloat() throws Exception { }); String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); - long randomDate = randomLong(); + long randomDate = randomNonNegativeLong(); index("test", "1", builder -> { builder.field("test_keyword", randomString); @@ -722,8 +722,8 @@ public void testGettingBooleanValues() throws Exception { builder.startObject("test_boolean").field("type", "boolean").endObject(); builder.startObject("test_date").field("type", "date").endObject(); }); - long randomDate1 = randomLong(); - long randomDate2 = randomLong(); + long randomDate1 = randomNonNegativeLong(); + long randomDate2 = randomNonNegativeLong(); // true values indexSimpleDocumentWithTrueValues(randomDate1); @@ -805,7 +805,7 @@ public void testGettingDateWithoutCalendar() throws Exception { builder.startObject("test_boolean").field("type", "boolean").endObject(); builder.startObject("test_date").field("type", "date").endObject(); }); - Long randomLongDate = randomLong(); + Long randomLongDate = randomNonNegativeLong(); indexSimpleDocumentWithTrueValues(randomLongDate); String timeZoneId = randomKnownTimeZone(); @@ -838,7 +838,7 @@ public void testGettingDateWithCalendar() throws Exception { builder.startObject("test_boolean").field("type", "boolean").endObject(); builder.startObject("test_date").field("type", "date").endObject(); }); - Long randomLongDate = randomLong(); + Long randomLongDate = randomNonNegativeLong(); indexSimpleDocumentWithTrueValues(randomLongDate); index("test", "2", builder -> { builder.timeField("test_date", null); @@ -874,7 +874,7 @@ public void testGettingTimeWithoutCalendar() throws Exception { builder.startObject("test_boolean").field("type", "boolean").endObject(); builder.startObject("test_date").field("type", "date").endObject(); }); - Long randomLongDate = randomLong(); + Long randomLongDate = randomNonNegativeLong(); indexSimpleDocumentWithTrueValues(randomLongDate); String timeZoneId = randomKnownTimeZone(); @@ -906,7 +906,7 @@ public void testGettingTimeWithCalendar() throws Exception { builder.startObject("test_boolean").field("type", "boolean").endObject(); builder.startObject("test_date").field("type", "date").endObject(); }); - Long randomLongDate = randomLong(); + Long randomLongDate = randomNonNegativeLong(); indexSimpleDocumentWithTrueValues(randomLongDate); index("test", "2", builder -> { builder.timeField("test_date", null); @@ -940,7 +940,7 @@ public void testGettingTimestampWithoutCalendar() throws Exception { builder.startObject("release_date").field("type", "date").endObject(); builder.startObject("republish_date").field("type", 
"date").endObject(); }); - long randomMillis = randomLong(); + long randomMillis = randomNonNegativeLong(); index("library", "1", builder -> { builder.field("name", "Don Quixote"); @@ -951,7 +951,7 @@ public void testGettingTimestampWithoutCalendar() throws Exception { index("library", "2", builder -> { builder.field("name", "1984"); builder.field("page_count", 328); - builder.field("release_date", -649036800000L); + builder.field("release_date", 649036800000L); builder.field("republish_date", 599616000000L); }); @@ -970,7 +970,7 @@ public void testGettingTimestampWithoutCalendar() throws Exception { assertTrue(results.next()); assertEquals(599616000000L, results.getTimestamp("republish_date").getTime()); - assertEquals(-649036800000L, ((Timestamp) results.getObject(2)).getTime()); + assertEquals(649036800000L, ((Timestamp) results.getObject(2)).getTime()); assertFalse(results.next()); }); @@ -983,7 +983,7 @@ public void testGettingTimestampWithCalendar() throws Exception { builder.startObject("test_boolean").field("type", "boolean").endObject(); builder.startObject("test_date").field("type", "date").endObject(); }); - Long randomLongDate = randomLong(); + Long randomLongDate = randomNonNegativeLong(); indexSimpleDocumentWithTrueValues(randomLongDate); index("test", "2", builder -> { builder.timeField("test_date", null); @@ -1022,7 +1022,7 @@ public void testValidGetObjectCalls() throws Exception { double d = randomDouble(); float f = randomFloat(); boolean randomBool = randomBoolean(); - Long randomLongDate = randomLong(); + Long randomLongDate = randomNonNegativeLong(); String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); index("test", "1", builder -> { @@ -1273,7 +1273,7 @@ private void createIndex(String index) throws Exception { createIndex.endObject(); createIndex.startObject("mappings"); { - createIndex.startObject("doc"); + createIndex.startObject("_doc"); { createIndex.startObject("properties"); {} @@ -1287,7 +1287,7 @@ private void createIndex(String index) throws Exception { } private void updateMapping(String index, CheckedConsumer body) throws Exception { - Request request = new Request("PUT", "/" + index + "/_mapping/doc"); + Request request = new Request("PUT", "/" + index + "/_mapping/_doc"); XContentBuilder updateMapping = JsonXContent.contentBuilder().startObject(); updateMapping.startObject("properties"); { @@ -1546,4 +1546,4 @@ private Connection useDataSource(String timeZoneId) throws SQLException { assertNotNull("The timezone should be specified", connectionProperties.getProperty(JDBC_TIMEZONE)); return connection; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/qa/src/main/resources/agg.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/agg.csv-spec index d4837bfdafc60..f9576c7b859a6 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/agg.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/agg.csv-spec @@ -262,9 +262,51 @@ SELECT HISTOGRAM(birth_date, INTERVAL 1 YEAR) AS h, COUNT(*) as c FROM test_emp null |10 ; -histogramDateWithDateFunction-Ignore -SELECT YEAR(HISTOGRAM(birth_date, INTERVAL 1 YEAR)) AS h, COUNT(*) as c FROM test_emp GROUP BY h ORDER BY h DESC; +histogramDateWithMonthOnTop +schema::h:i|c:l +SELECT HISTOGRAM(MONTH(birth_date), 2) AS h, COUNT(*) as c FROM test_emp GROUP BY h ORDER BY h DESC; + + h | c +---------------+--------------- +12 |7 +10 |17 +8 |16 +6 |16 +4 |18 +2 |10 +0 |6 +null |10 +; + +histogramDateWithYearOnTop +schema::h:i|c:l +SELECT HISTOGRAM(YEAR(birth_date), 2) AS h, COUNT(*) as c FROM 
test_emp GROUP BY h ORDER BY h DESC; + h | c +---------------+--------------- +1964 |5 +1962 |13 +1960 |16 +1958 |16 +1956 |9 +1954 |12 +1952 |19 +null |10 +; - - +histogramNumericWithExpression +schema::h:i|c:l +SELECT HISTOGRAM(emp_no % 100, 10) AS h, COUNT(*) as c FROM test_emp GROUP BY h ORDER BY h DESC; + + h | c +---------------+--------------- +90 |10 +80 |10 +70 |10 +60 |10 +50 |10 +40 |10 +30 |10 +20 |10 +10 |10 +0 |10 ; \ No newline at end of file diff --git a/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec index 03d412b2ab536..fb7207d4c5cf4 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec @@ -725,6 +725,27 @@ SELECT HISTOGRAM(salary, 5000) AS h FROM emp GROUP BY h; // end::histogramNumeric ; +histogramNumericExpression +schema::h:i|c:l +// tag::histogramNumericExpression +SELECT HISTOGRAM(salary % 100, 10) AS h, COUNT(*) AS c FROM emp GROUP BY h; + + h | c +---------------+--------------- +0 |10 +10 |15 +20 |10 +30 |14 +40 |9 +50 |9 +60 |8 +70 |13 +80 |3 +90 |9 + +// end::histogramNumericExpression +; + histogramDate schema::h:ts|c:l // tag::histogramDate @@ -752,6 +773,30 @@ null |10 // end::histogramDate ; +expressionOnHistogramNotAllowed-Ignore +// tag::expressionOnHistogramNotAllowed +SELECT MONTH(HISTOGRAM(birth_date), 2)) AS h, COUNT(*) as c FROM emp GROUP BY h ORDER BY h DESC; +// end::expressionOnHistogramNotAllowed + +histogramDateExpression +schema::h:i|c:l +// tag::histogramDateExpression +SELECT HISTOGRAM(MONTH(birth_date), 2) AS h, COUNT(*) as c FROM emp GROUP BY h ORDER BY h DESC; + + h | c +---------------+--------------- +12 |7 +10 |17 +8 |16 +6 |16 +4 |18 +2 |10 +0 |6 +null |10 + +// end::histogramDateExpression +; + /////////////////////////////// // // Date/Time diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java index 47f68a640c76c..189509e95114c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java @@ -18,6 +18,8 @@ import org.elasticsearch.xpack.sql.expression.function.FunctionAttribute; import org.elasticsearch.xpack.sql.expression.function.Functions; import org.elasticsearch.xpack.sql.expression.function.Score; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.grouping.GroupingFunctionAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.sql.expression.predicate.conditional.ConditionalFunction; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.In; @@ -224,6 +226,7 @@ Collection verify(LogicalPlan plan) { validateConditional(p, localFailures); checkFilterOnAggs(p, localFailures); + checkFilterOnGrouping(p, localFailures); if (!groupingFailures.contains(p)) { checkGroupBy(p, localFailures, resolvedFunctions, groupingFailures); @@ -419,7 +422,7 @@ private static boolean checkGroupByHavingHasOnlyAggs(Expression e, Node sourc return true; } // skip aggs (allowed to refer to non-group columns) - if (Functions.isAggregate(e)) { + if (Functions.isAggregate(e) || Functions.isGrouping(e)) { return true; } @@ -448,6 +451,21 @@ private static boolean 
checkGroupByAgg(LogicalPlan p, Set localFailures } })); + a.groupings().forEach(e -> { + if (Functions.isGrouping(e) == false) { + e.collectFirstChildren(c -> { + if (Functions.isGrouping(c)) { + localFailures.add(fail(c, + "Cannot combine [%s] grouping function inside GROUP BY, found [%s];" + + " consider moving the expression inside the histogram", + Expressions.name(c), Expressions.name(e))); + return true; + } + return false; + }); + } + }); + if (!localFailures.isEmpty()) { return false; } @@ -547,19 +565,30 @@ private static void checkFilterOnAggs(LogicalPlan p, Set localFailures) if (p instanceof Filter) { Filter filter = (Filter) p; if ((filter.child() instanceof Aggregate) == false) { - filter.condition().forEachDown(f -> { - if (Functions.isAggregate(f) || Functions.isGrouping(f)) { - String type = Functions.isAggregate(f) ? "aggregate" : "grouping"; - localFailures.add(fail(f, - "Cannot use WHERE filtering on %s function [%s], use HAVING instead", type, Expressions.name(f))); + filter.condition().forEachDown(e -> { + if (Functions.isAggregate(e) || e instanceof AggregateFunctionAttribute) { + localFailures.add( + fail(e, "Cannot use WHERE filtering on aggregate function [%s], use HAVING instead", Expressions.name(e))); } - - }, Function.class); + }, Expression.class); } } } + private static void checkFilterOnGrouping(LogicalPlan p, Set localFailures) { + if (p instanceof Filter) { + Filter filter = (Filter) p; + filter.condition().forEachDown(e -> { + if (Functions.isGrouping(e) || e instanceof GroupingFunctionAttribute) { + localFailures + .add(fail(e, "Cannot filter on grouping function [%s], use its argument instead", Expressions.name(e))); + } + }, Expression.class); + } + } + + private static void checkForScoreInsideFunctions(LogicalPlan p, Set localFailures) { // Make sure that SCORE is only used in "top level" functions p.forEachExpressions(e -> @@ -647,4 +676,4 @@ private static boolean areTypesCompatible(DataType left, DataType right) { (left.isNumeric() && right.isNumeric()); } } -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java index a67da8d6efd0b..6d39fa6fbc226 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java @@ -346,6 +346,9 @@ public static Integer weekOfYear(Object dateTime, String tzId) { } public static ZonedDateTime asDateTime(Object dateTime) { + if (dateTime == null) { + return null; + } if (dateTime instanceof JodaCompatibleZonedDateTime) { return ((JodaCompatibleZonedDateTime) dateTime).getZonedDateTime(); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/Grouping.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/Grouping.java new file mode 100644 index 0000000000000..e11f82a842ee0 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/Grouping.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.gen.script; + +import org.elasticsearch.xpack.sql.expression.function.grouping.GroupingFunctionAttribute; + +class Grouping extends Param { + + Grouping(GroupingFunctionAttribute groupRef) { + super(groupRef); + } + + String groupName() { + return value().functionId(); + } + + @Override + public String prefix() { + return "g"; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/Params.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/Params.java index 0fc85b3241f99..ed00160dbc3d5 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/Params.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/Params.java @@ -85,25 +85,15 @@ Map asAggPaths() { String s = a.aggProperty() != null ? a.aggProperty() : a.aggName(); map.put(p.prefix() + aggs++, s); } - } - - return map; - } - - // return the agg refs - List asAggRefs() { - List refs = new ArrayList<>(); - - for (Param p : params) { - if (p instanceof Agg) { - refs.add(((Agg) p).aggName()); + if (p instanceof Grouping) { + Grouping g = (Grouping) p; + map.put(p.prefix() + aggs++, g.groupName()); } } - return refs; + return map; } - private static List> flatten(List> params) { List> flatten = emptyList(); @@ -116,6 +106,9 @@ private static List> flatten(List> params) { else if (p instanceof Agg) { flatten.add(p); } + else if (p instanceof Grouping) { + flatten.add(p); + } else if (p instanceof Var) { flatten.add(p); } @@ -131,4 +124,4 @@ else if (p instanceof Var) { public String toString() { return params.toString(); } -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ParamsBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ParamsBuilder.java index 6719776c84a57..25e92103cccf5 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ParamsBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ParamsBuilder.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.expression.gen.script; import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.grouping.GroupingFunctionAttribute; import java.util.ArrayList; import java.util.List; @@ -28,6 +29,11 @@ public ParamsBuilder agg(AggregateFunctionAttribute agg) { return this; } + public ParamsBuilder grouping(GroupingFunctionAttribute grouping) { + params.add(new Grouping(grouping)); + return this; + } + public ParamsBuilder script(Params ps) { params.add(new Script(ps)); return this; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptTemplate.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptTemplate.java index 9279cdcc1b8aa..aeefa5c78f0e3 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptTemplate.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptTemplate.java @@ -44,10 +44,6 @@ public Params params() { return params; } - public List aggRefs() { - return 
params.asAggRefs(); - } - public Map aggPaths() { return params.asAggPaths(); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java index faa7985b654f9..074518f6b7d7c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.grouping.GroupingFunctionAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunctionAttribute; import org.elasticsearch.xpack.sql.expression.literal.IntervalDayTime; import org.elasticsearch.xpack.sql.expression.literal.IntervalYearMonth; @@ -37,6 +38,9 @@ default ScriptTemplate asScript(Expression exp) { if (attr instanceof AggregateFunctionAttribute) { return scriptWithAggregate((AggregateFunctionAttribute) attr); } + if (attr instanceof GroupingFunctionAttribute) { + return scriptWithGrouping((GroupingFunctionAttribute) attr); + } if (attr instanceof FieldAttribute) { return scriptWithField((FieldAttribute) attr); } @@ -83,6 +87,16 @@ default ScriptTemplate scriptWithAggregate(AggregateFunctionAttribute aggregate) dataType()); } + default ScriptTemplate scriptWithGrouping(GroupingFunctionAttribute grouping) { + String template = "{}"; + if (grouping.dataType() == DataType.DATE) { + template = "{sql}.asDateTime({})"; + } + return new ScriptTemplate(processScript(template), + paramsBuilder().grouping(grouping).build(), + dataType()); + } + default ScriptTemplate scriptWithField(FieldAttribute field) { return new ScriptTemplate(processScript("doc[{}].value"), paramsBuilder().variable(field.name()).build(), diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java index ffd12ea6fb99d..3dc894bda3fff 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java @@ -68,6 +68,7 @@ import org.elasticsearch.xpack.sql.plan.logical.OrderBy; import org.elasticsearch.xpack.sql.plan.logical.Project; import org.elasticsearch.xpack.sql.plan.logical.SubQueryAlias; +import org.elasticsearch.xpack.sql.plan.logical.UnaryPlan; import org.elasticsearch.xpack.sql.rule.Rule; import org.elasticsearch.xpack.sql.rule.RuleExecutor; import org.elasticsearch.xpack.sql.session.EmptyExecutable; @@ -1849,14 +1850,15 @@ protected LogicalPlan rule(LogicalPlan plan) { if (plan instanceof Project) { Project p = (Project) plan; List values = extractConstants(p.projections()); - if (values.size() == p.projections().size() && !(p.child() instanceof EsRelation)) { + if (values.size() == p.projections().size() && !(p.child() instanceof EsRelation) && + isNotQueryWithFromClauseAndFilterFoldedToFalse(p)) { return new LocalRelation(p.location(), new SingletonExecutable(p.output(), values.toArray())); } } if (plan instanceof Aggregate) { Aggregate a = (Aggregate) plan; List values = extractConstants(a.aggregates()); - if (values.size() 
== a.aggregates().size()) { + if (values.size() == a.aggregates().size() && isNotQueryWithFromClauseAndFilterFoldedToFalse(a)) { return new LocalRelation(a.location(), new SingletonExecutable(a.output(), values.toArray())); } } @@ -1875,6 +1877,15 @@ private List extractConstants(List named) { } return values; } + + /** + * Check if the plan doesn't model a query with FROM clause on a table + * that its filter (WHERE clause) is folded to FALSE. + */ + private static boolean isNotQueryWithFromClauseAndFilterFoldedToFalse(UnaryPlan plan) { + return (!(plan.child() instanceof LocalRelation) || (plan.child() instanceof LocalRelation && + !(((LocalRelation) plan.child()).executable() instanceof EmptyExecutable))); + } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java index 23d2c20d3059a..14654f7e50d10 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java @@ -41,7 +41,7 @@ import org.elasticsearch.xpack.sql.plan.logical.UnresolvedRelation; import org.elasticsearch.xpack.sql.plan.logical.With; import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; -import org.elasticsearch.xpack.sql.session.EmptyExecutable; +import org.elasticsearch.xpack.sql.session.SingletonExecutable; import org.elasticsearch.xpack.sql.type.DataType; import java.util.LinkedHashMap; @@ -104,7 +104,7 @@ public LogicalPlan visitQueryNoWith(QueryNoWithContext ctx) { public LogicalPlan visitQuerySpecification(QuerySpecificationContext ctx) { LogicalPlan query; if (ctx.fromClause() == null) { - query = new LocalRelation(source(ctx), new EmptyExecutable(emptyList())); + query = new LocalRelation(source(ctx), new SingletonExecutable()); } else { query = plan(ctx.fromClause()); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java index 20aad3f2f9a48..96c267b3ba6fd 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java @@ -281,7 +281,7 @@ protected PhysicalPlan rule(AggregateExec a) { // found match for expression; if it's an attribute or scalar, end the processing chain with // the reference to the backing agg if (matchingGroup != null) { - if (exp instanceof Attribute || exp instanceof ScalarFunction) { + if (exp instanceof Attribute || exp instanceof ScalarFunction || exp instanceof GroupingFunction) { Processor action = null; ZoneId zi = DataType.DATE == exp.dataType() ? 
DateUtils.UTC : null; /* diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java index af180aae90bdc..4f071ee50f4f1 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java @@ -277,7 +277,7 @@ else if (exp instanceof GroupingFunction) { if (h.dataType() == DataType.DATE) { long intervalAsMillis = Intervals.inMillis(h.interval()); // TODO: set timezone - if (field instanceof FieldAttribute || field instanceof DateTimeHistogramFunction) { + if (field instanceof FieldAttribute) { key = new GroupByDateHistogram(aggId, nameOf(field), intervalAsMillis, h.zoneId()); } else if (field instanceof Function) { key = new GroupByDateHistogram(aggId, ((Function) field).asScript(), intervalAsMillis, h.zoneId()); @@ -285,7 +285,7 @@ else if (exp instanceof GroupingFunction) { } // numeric histogram else { - if (field instanceof FieldAttribute || field instanceof DateTimeHistogramFunction) { + if (field instanceof FieldAttribute) { key = new GroupByNumericHistogram(aggId, nameOf(field), Foldables.doubleValueOf(h.interval())); } else if (field instanceof Function) { key = new GroupByNumericHistogram(aggId, ((Function) field).asScript(), diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AggFilter.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AggFilter.java index 47ab30c976941..1f972989e3782 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AggFilter.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AggFilter.java @@ -11,7 +11,6 @@ import org.elasticsearch.xpack.sql.expression.gen.script.Scripts; import org.elasticsearch.xpack.sql.util.Check; -import java.util.Collection; import java.util.Map; import java.util.Objects; @@ -32,14 +31,6 @@ public AggFilter(String name, ScriptTemplate scriptTemplate) { this.aggPaths = scriptTemplate.aggPaths(); } - public Map aggPaths() { - return aggPaths; - } - - public Collection aggRefs() { - return scriptTemplate.aggRefs(); - } - public ScriptTemplate scriptTemplate() { return scriptTemplate; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SingletonExecutable.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SingletonExecutable.java index 129c30a0df3b8..8a526fac6dfe4 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SingletonExecutable.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SingletonExecutable.java @@ -9,6 +9,7 @@ import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.util.Check; +import java.util.Collections; import java.util.List; public class SingletonExecutable implements Executable { @@ -16,6 +17,10 @@ public class SingletonExecutable implements Executable { private final List output; private final Object[] values; + public SingletonExecutable() { + this(Collections.emptyList()); + } + public SingletonExecutable(List output, Object... 
values) { Check.isTrue(output.size() == values.length, "Attributes {} and values {} are out of sync", output, values); this.output = output; diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/TestUtils.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/TestUtils.java index cd6fa79cb552c..c704285f4eba0 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/TestUtils.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/TestUtils.java @@ -11,6 +11,10 @@ import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.util.DateUtils; +import java.time.Clock; +import java.time.Duration; +import java.time.ZonedDateTime; + public class TestUtils { private TestUtils() {} @@ -18,4 +22,16 @@ private TestUtils() {} public static final Configuration TEST_CFG = new Configuration(DateUtils.UTC, Protocol.FETCH_SIZE, Protocol.REQUEST_TIMEOUT, Protocol.PAGE_TIMEOUT, null, Mode.PLAIN, null, null); + /** + * Returns the current UTC date-time with milliseconds precision. + * In Java 9+ (as opposed to Java 8) the {@code Clock} implementation uses system's best clock implementation (which could mean + * that the precision of the clock can be milliseconds, microseconds or nanoseconds), whereas in Java 8 + * {@code System.currentTimeMillis()} is always used. To account for these differences, this method defines a new {@code Clock} + * which will offer a value for {@code ZonedDateTime.now()} set to always have milliseconds precision. + * + * @return {@link ZonedDateTime} instance for the current date-time with milliseconds precision in UTC + */ + public static final ZonedDateTime now() { + return ZonedDateTime.now(Clock.tick(Clock.system(DateUtils.UTC), Duration.ofMillis(1))); + } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java index a3fd459bf3c3b..5a786441d3300 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -481,8 +481,27 @@ public void testAggsInWhere() { } public void testHistogramInFilter() { - assertEquals("1:63: Cannot use WHERE filtering on grouping function [HISTOGRAM(date)], use HAVING instead", + assertEquals("1:63: Cannot filter on grouping function [HISTOGRAM(date)], use its argument instead", error("SELECT HISTOGRAM(date, INTERVAL 1 MONTH) AS h FROM test WHERE " + "HISTOGRAM(date, INTERVAL 1 MONTH) > CAST('2000-01-01' AS DATE) GROUP BY h")); } + + // related https://github.com/elastic/elasticsearch/issues/36853 + public void testHistogramInHaving() { + assertEquals("1:75: Cannot filter on grouping function [h], use its argument instead", + error("SELECT HISTOGRAM(date, INTERVAL 1 MONTH) AS h FROM test GROUP BY h HAVING " + + "h > CAST('2000-01-01' AS DATE)")); + } + + public void testGroupByScalarOnTopOfGrouping() { + assertEquals( + "1:14: Cannot combine [HISTOGRAM(date)] grouping function inside GROUP BY, " + + "found [MONTH_OF_YEAR(HISTOGRAM(date) [Z])]; consider moving the expression inside the histogram", + error("SELECT MONTH(HISTOGRAM(date, INTERVAL 1 MONTH)) AS h FROM test GROUP BY h")); + } + + public void testAggsInHistogram() { + assertEquals("1:47: Cannot use an aggregate [MAX] for grouping", + error("SELECT 
MAX(date) FROM test GROUP BY HISTOGRAM(MAX(int), 1)")); + } } \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java index bb85921369ac6..c20f4e9d632af 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.xpack.sql.plan.physical.LocalExec; import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.sql.session.EmptyExecutable; +import org.elasticsearch.xpack.sql.session.SingletonExecutable; import org.elasticsearch.xpack.sql.stats.Metrics; import org.elasticsearch.xpack.sql.type.EsField; import org.elasticsearch.xpack.sql.type.TypesTests; @@ -68,6 +69,48 @@ public void testFoldingToLocalExecWithProject() { assertThat(ee.output().get(0).toString(), startsWith("keyword{f}#")); } + public void testLocalExecWithPrunedFilterWithFunction() { + PhysicalPlan p = plan("SELECT E() FROM test WHERE PI() = 5"); + assertEquals(LocalExec.class, p.getClass()); + LocalExec le = (LocalExec) p; + assertEquals(EmptyExecutable.class, le.executable().getClass()); + EmptyExecutable ee = (EmptyExecutable) le.executable(); + assertEquals(1, ee.output().size()); + assertThat(ee.output().get(0).toString(), startsWith("E{c}#")); + } + + public void testLocalExecWithPrunedFilterWithFunctionAndAggregation() { + PhysicalPlan p = plan("SELECT E() FROM test WHERE PI() = 5 GROUP BY 1"); + assertEquals(LocalExec.class, p.getClass()); + LocalExec le = (LocalExec) p; + assertEquals(EmptyExecutable.class, le.executable().getClass()); + EmptyExecutable ee = (EmptyExecutable) le.executable(); + assertEquals(1, ee.output().size()); + assertThat(ee.output().get(0).toString(), startsWith("E{c}#")); + } + + public void testLocalExecWithoutFromClause() { + PhysicalPlan p = plan("SELECT E(), 'foo', abs(10)"); + assertEquals(LocalExec.class, p.getClass()); + LocalExec le = (LocalExec) p; + assertEquals(SingletonExecutable.class, le.executable().getClass()); + SingletonExecutable ee = (SingletonExecutable) le.executable(); + assertEquals(3, ee.output().size()); + assertThat(ee.output().get(0).toString(), startsWith("E{c}#")); + assertThat(ee.output().get(1).toString(), startsWith("foo{c}#")); + assertThat(ee.output().get(2).toString(), startsWith("ABS(10){c}#")); + } + + public void testLocalExecWithoutFromClauseWithPrunedFilter() { + PhysicalPlan p = plan("SELECT E() WHERE PI() = 5"); + assertEquals(LocalExec.class, p.getClass()); + LocalExec le = (LocalExec) p; + assertEquals(EmptyExecutable.class, le.executable().getClass()); + EmptyExecutable ee = (EmptyExecutable) le.executable(); + assertEquals(1, ee.output().size()); + assertThat(ee.output().get(0).toString(), startsWith("E{c}#")); + } + public void testFoldingOfIsNull() { PhysicalPlan p = plan("SELECT keyword FROM test WHERE (keyword IS NOT NULL) IS NULL"); assertEquals(LocalExec.class, p.getClass()); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeSubclassTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeSubclassTests.java index cc91cdf6eabd7..a8145d9f3bf58 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeSubclassTests.java +++ 
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeSubclassTests.java @@ -92,8 +92,9 @@ */ public class NodeSubclassTests> extends ESTestCase { - private static final List>> CLASSES_WITH_MIN_TWO_CHILDREN = Arrays.asList( - IfNull.class, In.class, InPipe.class, Percentile.class, Percentiles.class, PercentileRanks.class); + + private static final List> CLASSES_WITH_MIN_TWO_CHILDREN = Arrays.> asList(IfNull.class, In.class, InPipe.class, + Percentile.class, Percentiles.class, PercentileRanks.class); private final Class subclass; diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java index ffe68e1765f1c..a301c1218c492 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java @@ -7,9 +7,9 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.TestUtils; import org.elasticsearch.xpack.sql.expression.Literal; import org.elasticsearch.xpack.sql.type.DataTypeConversion.Conversion; -import org.elasticsearch.xpack.sql.util.DateUtils; import java.time.ZonedDateTime; @@ -79,7 +79,6 @@ public void testConversionToLong() { assertEquals("cannot cast [0xff] to [Long]", e.getMessage()); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35683") public void testConversionToDate() { DataType to = DATE; { @@ -111,7 +110,7 @@ public void testConversionToDate() { assertEquals(dateTime(18000000L), conversion.convert("1970-01-01T00:00:00-05:00")); // double check back and forth conversion - ZonedDateTime dt = ZonedDateTime.now(DateUtils.UTC); + ZonedDateTime dt = TestUtils.now(); Conversion forward = conversionFor(DATE, KEYWORD); Conversion back = conversionFor(KEYWORD, DATE); assertEquals(dt, back.convert(forward.convert(dt))); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeed_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeed_stats.yml index 046044ed38b63..0c89c444d00c6 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeed_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeed_stats.yml @@ -160,7 +160,7 @@ setup: - match: { datafeeds.0.state: "started"} - is_true: datafeeds.0.node.name - is_true: datafeeds.0.node.transport_address - - match: { datafeeds.0.node.attributes.ml\.enabled: "true"} + - match: { datafeeds.0.node.attributes.ml\.max_open_jobs: "20"} --- "Test implicit get all datafeed stats given started datafeeds": diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml index a764775301824..aea80c69f4988 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml @@ -97,7 +97,7 @@ setup: - match: { jobs.0.state: opened } - is_true: jobs.0.node.name - is_true: jobs.0.node.transport_address - - match: { jobs.0.node.attributes.ml\.enabled: "true"} + - match: { jobs.0.node.attributes.ml\.max_open_jobs: "20"} - is_true: jobs.0.open_time --- diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/ScriptCondition.java 
b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/ScriptCondition.java index 19307569ce06b..3a936df86be17 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/ScriptCondition.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/ScriptCondition.java @@ -12,10 +12,8 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; -import org.elasticsearch.xpack.watcher.support.Variables; import java.io.IOException; -import java.util.Map; /** * This class executes a script against the ctx payload and returns a boolean @@ -58,10 +56,6 @@ public Result execute(WatchExecutionContext ctx) { } public Result doExecute(WatchExecutionContext ctx) { - Map parameters = Variables.createCtxParamsMap(ctx, ctx.payload()); - if (script.getParams() != null && !script.getParams().isEmpty()) { - parameters.putAll(script.getParams()); - } WatcherConditionScript conditionScript = scriptFactory.newInstance(script.getParams(), ctx); return conditionScript.execute() ? MET : UNMET; } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/WatcherConditionScript.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/WatcherConditionScript.java index 1148cc6a58eb5..1a5c8718bbd45 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/WatcherConditionScript.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/WatcherConditionScript.java @@ -5,43 +5,26 @@ */ package org.elasticsearch.xpack.watcher.condition; -import org.elasticsearch.script.ParameterMap; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; import org.elasticsearch.xpack.watcher.support.Variables; -import java.util.Collections; -import java.util.HashMap; import java.util.Map; /** * A script to determine whether a watch should be run. */ public abstract class WatcherConditionScript { - public static final String[] PARAMETERS = {}; - - private static final Map DEPRECATIONS; - static { - Map deprecations = new HashMap<>(); - deprecations.put( - "ctx", - "Accessing variable [ctx] via [params.ctx] from within a watcher_condition script " + - "is deprecated in favor of directly accessing [ctx]." 
- ); - DEPRECATIONS = Collections.unmodifiableMap(deprecations); - } + public static final String[] PARAMETERS = {}; private final Map params; // TODO: ctx should have its members extracted into execute parameters, but it needs to be a member for bwc access in params private final Map ctx; public WatcherConditionScript(Map params, WatchExecutionContext watcherContext) { - Map paramsWithCtx = new HashMap<>(params); - Map ctx = Variables.createCtx(watcherContext, watcherContext.payload()); - paramsWithCtx.put("ctx", ctx); - this.params = new ParameterMap(Collections.unmodifiableMap(paramsWithCtx), DEPRECATIONS); - this.ctx = ctx; + this.params = params; + this.ctx = Variables.createCtx(watcherContext, watcherContext.payload()); } public abstract boolean execute(); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/script/WatcherTransformScript.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/script/WatcherTransformScript.java index 6d84c32578bc0..57ee1e9f35c5d 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/script/WatcherTransformScript.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/script/WatcherTransformScript.java @@ -5,44 +5,27 @@ */ package org.elasticsearch.xpack.watcher.transform.script; -import org.elasticsearch.script.ParameterMap; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; import org.elasticsearch.xpack.core.watcher.watch.Payload; import org.elasticsearch.xpack.watcher.support.Variables; -import java.util.Collections; -import java.util.HashMap; import java.util.Map; /** * A script to transform the results of a watch execution. */ public abstract class WatcherTransformScript { - public static final String[] PARAMETERS = {}; - - private static final Map DEPRECATIONS; - static { - Map deprecations = new HashMap<>(); - deprecations.put( - "ctx", - "Accessing variable [ctx] via [params.ctx] from within a watcher_transform script " + - "is deprecated in favor of directly accessing [ctx]." 
- ); - DEPRECATIONS = Collections.unmodifiableMap(deprecations); - } + public static final String[] PARAMETERS = {}; private final Map params; // TODO: ctx should have its members extracted into execute parameters, but it needs to be a member bwc access in params private final Map ctx; public WatcherTransformScript(Map params, WatchExecutionContext watcherContext, Payload payload) { - Map paramsWithCtx = new HashMap<>(params); - Map ctx = Variables.createCtx(watcherContext, payload); - paramsWithCtx.put("ctx", ctx); - this.params = new ParameterMap(Collections.unmodifiableMap(paramsWithCtx), DEPRECATIONS); - this.ctx = ctx; + this.params = params; + this.ctx = Variables.createCtx(watcherContext, payload); } public abstract Object execute(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java index 1787904e98f0a..72ab01009bb99 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java @@ -30,10 +30,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; -import org.elasticsearch.xpack.core.watcher.execution.Wid; -import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; import org.elasticsearch.xpack.core.watcher.watch.Payload; -import org.elasticsearch.xpack.core.watcher.watch.Watch; import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; import org.elasticsearch.xpack.watcher.test.WatcherMockScriptPlugin; import org.joda.time.DateTime; @@ -41,7 +38,6 @@ import org.junit.Before; import java.io.IOException; -import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.Map; @@ -54,8 +50,6 @@ import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContext; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class ScriptConditionTests extends ESTestCase { @@ -206,25 +200,6 @@ public void testScriptConditionAccessCtx() throws Exception { assertThat(condition.execute(ctx).met(), is(true)); } - public void testParamsCtxDeprecated() throws Exception { - WatchExecutionContext watcherContext = mock(WatchExecutionContext.class); - when(watcherContext.id()).thenReturn(mock(Wid.class)); - when(watcherContext.watch()).thenReturn(mock(Watch.class)); - when(watcherContext.triggerEvent()).thenReturn(mock(TriggerEvent.class)); - DateTime now = DateTime.now(DateTimeZone.UTC); - when(watcherContext.executionTime()).thenReturn(now); - WatcherConditionScript watcherScript = new WatcherConditionScript(Collections.emptyMap(), watcherContext) { - @Override - public boolean execute() { - assertThat(getParams().get("ctx"), is(getCtx())); - return true; - } - }; - watcherScript.execute(); - assertWarnings("Accessing variable [ctx] via [params.ctx] from within a watcher_condition script " + - "is deprecated in favor of directly accessing [ctx]."); - } - private static XContentBuilder createConditionContent(String script, String scriptLang, ScriptType scriptType) throws IOException { XContentBuilder builder = jsonBuilder(); if 
(scriptType == null) { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/script/ScriptTransformTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/script/ScriptTransformTests.java index 077e168c547ee..3e2ec780fa4c0 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/script/ScriptTransformTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/script/ScriptTransformTests.java @@ -16,14 +16,9 @@ import org.elasticsearch.script.ScriptType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; -import org.elasticsearch.xpack.core.watcher.execution.Wid; import org.elasticsearch.xpack.core.watcher.transform.Transform; -import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; import org.elasticsearch.xpack.core.watcher.watch.Payload; -import org.elasticsearch.xpack.core.watcher.watch.Watch; import org.elasticsearch.xpack.watcher.Watcher; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.util.Collections; import java.util.HashMap; @@ -188,25 +183,6 @@ public void testScriptConditionParserBadLang() throws Exception { assertThat(e.getMessage(), containsString("script_lang not supported [not_a_valid_lang]")); } - public void testParamsCtxDeprecated() throws Exception { - WatchExecutionContext watcherContext = mock(WatchExecutionContext.class); - when(watcherContext.id()).thenReturn(mock(Wid.class)); - when(watcherContext.watch()).thenReturn(mock(Watch.class)); - when(watcherContext.triggerEvent()).thenReturn(mock(TriggerEvent.class)); - DateTime now = DateTime.now(DateTimeZone.UTC); - when(watcherContext.executionTime()).thenReturn(now); - Payload payload = mock(Payload.class); - WatcherTransformScript watcherScript = new WatcherTransformScript(Collections.emptyMap(), watcherContext, payload) { - @Override - public Object execute() { - return getParams().get("ctx"); - } - }; - assertThat(watcherScript.execute(), is(watcherScript.getCtx())); - assertWarnings("Accessing variable [ctx] via [params.ctx] from within a watcher_transform script " + - "is deprecated in favor of directly accessing [ctx]."); - } - static String scriptTypeField(ScriptType type) { switch (type) { case INLINE: return "source";