diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 9a6d82b54ef8e..00a6a5dfa46d6 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -9,4 +9,5 @@ BWC_VERSION: - "7.3.1" - "7.3.2" - "7.4.0" + - "7.5.0" - "8.0.0" diff --git a/.ci/java-versions.properties b/.ci/java-versions.properties index 16b760507324d..a63e17527daee 100644 --- a/.ci/java-versions.properties +++ b/.ci/java-versions.properties @@ -5,7 +5,7 @@ # are 'java' or 'openjdk' followed by the major release number. ES_BUILD_JAVA=openjdk12 -ES_RUNTIME_JAVA=java11 +ES_RUNTIME_JAVA=openjdk11 GRADLE_TASK=build diff --git a/.ci/matrix-runtime-javas.yml b/.ci/matrix-runtime-javas.yml index 6c661ac3c06e8..29e3c95004fdc 100644 --- a/.ci/matrix-runtime-javas.yml +++ b/.ci/matrix-runtime-javas.yml @@ -14,3 +14,4 @@ ES_RUNTIME_JAVA: - zulu11 - zulu12 - corretto11 + - adoptopenjdk11 diff --git a/build.gradle b/build.gradle index 1719f70367bbc..a826ecb4475e2 100644 --- a/build.gradle +++ b/build.gradle @@ -33,7 +33,7 @@ import static org.elasticsearch.gradle.tool.Boilerplate.maybeConfigure plugins { id 'com.gradle.build-scan' version '2.4' - id 'base' + id 'lifecycle-base' id 'elasticsearch.global-build-info' } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 75adcbea2f166..d6710d7828c81 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -48,7 +48,6 @@ import org.gradle.api.artifacts.ModuleVersionIdentifier import org.gradle.api.artifacts.ProjectDependency import org.gradle.api.artifacts.ResolvedArtifact import org.gradle.api.artifacts.dsl.RepositoryHandler -import org.gradle.api.artifacts.repositories.ArtifactRepository import org.gradle.api.artifacts.repositories.IvyArtifactRepository import org.gradle.api.artifacts.repositories.IvyPatternRepositoryLayout import org.gradle.api.artifacts.repositories.MavenArtifactRepository @@ -85,12 +84,10 @@ import org.gradle.util.GradleVersion import java.nio.charset.StandardCharsets import java.nio.file.Files -import java.time.ZoneOffset -import java.time.ZonedDateTime import java.util.regex.Matcher -import static org.elasticsearch.gradle.tool.Boilerplate.maybeConfigure import static org.elasticsearch.gradle.tool.Boilerplate.findByName +import static org.elasticsearch.gradle.tool.Boilerplate.maybeConfigure /** * Encapsulates build configuration for elasticsearch projects. 
@@ -411,11 +408,11 @@ class BuildPlugin implements Plugin { project.getRepositories().all { repository -> if (repository instanceof MavenArtifactRepository) { final MavenArtifactRepository maven = (MavenArtifactRepository) repository - assertRepositoryURIUsesHttps(maven, project, maven.getUrl()) - repository.getArtifactUrls().each { uri -> assertRepositoryURIUsesHttps(maven, project, uri) } + assertRepositoryURIIsSecure(maven.name, project.path, maven.getUrl()) + repository.getArtifactUrls().each { uri -> assertRepositoryURIIsSecure(maven.name, project.path, uri) } } else if (repository instanceof IvyArtifactRepository) { final IvyArtifactRepository ivy = (IvyArtifactRepository) repository - assertRepositoryURIUsesHttps(ivy, project, ivy.getUrl()) + assertRepositoryURIIsSecure(ivy.name, project.path, ivy.getUrl()) } } RepositoryHandler repos = project.repositories @@ -455,9 +452,15 @@ class BuildPlugin implements Plugin { } } - private static void assertRepositoryURIUsesHttps(final ArtifactRepository repository, final Project project, final URI uri) { - if (uri != null && uri.toURL().getProtocol().equals("http")) { - throw new GradleException("repository [${repository.name}] on project with path [${project.path}] is using http for artifacts on [${uri.toURL()}]") + static void assertRepositoryURIIsSecure(final String repositoryName, final String projectPath, final URI uri) { + if (uri != null && ["file", "https", "s3"].contains(uri.getScheme()) == false) { + final String message = String.format( + Locale.ROOT, + "repository [%s] on project with path [%s] is not using a secure protocol for artifacts on [%s]", + repositoryName, + projectPath, + uri.toURL()) + throw new GradleException(message) } } @@ -884,6 +887,12 @@ class BuildPlugin implements Plugin { // TODO: remove this once cname is prepended to transport.publish_address by default in 8.0 test.systemProperty 'es.transport.cname_in_publish_address', 'true' + // Set netty system properties to the properties we configure in jvm.options + test.systemProperty('io.netty.noUnsafe', 'true') + test.systemProperty('io.netty.noKeySetOptimization', 'true') + test.systemProperty('io.netty.recycler.maxCapacityPerThread', '0') + test.systemProperty('io.netty.allocator.numDirectArenas', '0') + test.testLogging { TestLoggingContainer logging -> logging.showExceptions = true logging.showCauses = true diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy index c1dbddd9e9d52..520fed560aa19 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy @@ -123,7 +123,7 @@ public class SnippetsTask extends DefaultTask { } } if (snippet.testResponse - && 'js' == snippet.language + && ('js' == snippet.language || 'console-result' == snippet.language) && null == snippet.skip) { String quoted = snippet.contents // quote values starting with $ @@ -162,7 +162,7 @@ public class SnippetsTask extends DefaultTask { } return } - matcher = line =~ /\["?source"?,\s*"?(\w+)"?(,.*)?].*/ + matcher = line =~ /\["?source"?,\s*"?([-\w]+)"?(,.*)?].*/ if (matcher.matches()) { lastLanguage = matcher.group(1) lastLanguageLine = lineNumber diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index aef41d0a16ae8..8c849a3d987e7 100644 --- 
a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -236,7 +236,7 @@ class RestIntegTestTask extends DefaultTask { } project.dependencies { restSpec ClasspathUtils.isElasticsearchProject() ? project.project(':rest-api-spec') : - "org.elasticsearch.rest-api-spec:${VersionProperties.elasticsearch}" + "org.elasticsearch:rest-api-spec:${VersionProperties.elasticsearch}" } Task copyRestSpec = project.tasks.findByName('copyRestSpec') if (copyRestSpec != null) { diff --git a/buildSrc/src/test/groovy/org/elasticsearch/gradle/BuildPluginTests.java b/buildSrc/src/test/groovy/org/elasticsearch/gradle/BuildPluginTests.java index 90af9a2401ace..c61a0a3935898 100644 --- a/buildSrc/src/test/groovy/org/elasticsearch/gradle/BuildPluginTests.java +++ b/buildSrc/src/test/groovy/org/elasticsearch/gradle/BuildPluginTests.java @@ -22,6 +22,9 @@ import org.gradle.api.GradleException; import org.junit.Test; +import java.net.URI; +import java.net.URISyntaxException; + public class BuildPluginTests extends GradleUnitTestCase { @@ -36,4 +39,25 @@ public void testFailingDockerVersions() { BuildPlugin.checkDockerVersionRecent("Docker version 17.04.0, build e68fc7a"); } + @Test(expected = GradleException.class) + public void testRepositoryURIThatUsesHttpScheme() throws URISyntaxException { + final URI uri = new URI("http://s3.amazonaws.com/artifacts.elastic.co/maven"); + BuildPlugin.assertRepositoryURIIsSecure("test", "test", uri); + } + + public void testRepositoryThatUsesFileScheme() throws URISyntaxException { + final URI uri = new URI("file:/tmp/maven"); + BuildPlugin.assertRepositoryURIIsSecure("test", "test", uri); + } + + public void testRepositoryURIThatUsesHttpsScheme() throws URISyntaxException { + final URI uri = new URI("https://s3.amazonaws.com/artifacts.elastic.co/maven"); + BuildPlugin.assertRepositoryURIIsSecure("test", "test", uri); + } + + public void testRepositoryURIThatUsesS3Scheme() throws URISyntaxException { + final URI uri = new URI("s3://artifacts.elastic.co/maven"); + BuildPlugin.assertRepositoryURIIsSecure("test", "test", uri); + } + } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildPluginIT.java index dd0dbb25208dc..2e0e594b0f51c 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildPluginIT.java @@ -18,19 +18,29 @@ */ package org.elasticsearch.gradle; +import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; +import org.gradle.testkit.runner.GradleRunner; +import org.junit.Rule; +import org.junit.rules.TemporaryFolder; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.util.Arrays; +import java.util.List; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; public class BuildPluginIT extends GradleIntegrationTestCase { + @Rule + public TemporaryFolder tmpDir = new TemporaryFolder(); + public void testPluginCanBeApplied() { BuildResult result = getGradleRunner("elasticsearch.build") .withArguments("hello", "-s") @@ -46,6 +56,51 @@ public void testCheckTask() { assertTaskSuccessful(result, ":check"); } + public void testInsecureMavenRepository() throws 
IOException { + final String name = "elastic-maven"; + final String url = "http://s3.amazonaws.com/artifacts.elastic.co/maven"; + // add an insecure maven repository to the build.gradle + final List<String> lines = Arrays.asList( + "repositories {", + " maven {", + " name \"elastic-maven\"", + " url \"" + url + "\"\n", + " }", + "}"); + runInsecureArtifactRepositoryTest(name, url, lines); + } + + public void testInsecureIvyRepository() throws IOException { + final String name = "elastic-ivy"; + final String url = "http://s3.amazonaws.com/artifacts.elastic.co/ivy"; + // add an insecure ivy repository to the build.gradle + final List<String> lines = Arrays.asList( + "repositories {", + " ivy {", + " name \"elastic-ivy\"", + " url \"" + url + "\"\n", + " }", + "}"); + runInsecureArtifactRepositoryTest(name, url, lines); + } + + private void runInsecureArtifactRepositoryTest(final String name, final String url, final List<String> lines) throws IOException { + final File projectDir = getProjectDir("elasticsearch.build"); + FileUtils.copyDirectory(projectDir, tmpDir.getRoot(), pathname -> pathname.getPath().contains("/build/") == false); + final List<String> buildGradleLines = + Files.readAllLines(tmpDir.getRoot().toPath().resolve("build.gradle"), StandardCharsets.UTF_8); + buildGradleLines.addAll(lines); + Files.write(tmpDir.getRoot().toPath().resolve("build.gradle"), buildGradleLines, StandardCharsets.UTF_8); + final BuildResult result = GradleRunner.create() + .withProjectDir(tmpDir.getRoot()) + .withArguments("clean", "hello", "-s", "-i", "--warning-mode=all", "--scan") + .withPluginClasspath() + .buildAndFail(); + assertOutputContains( + result.getOutput(), + "repository [" + name + "] on project with path [:] is not using a secure protocol for artifacts on [" + url + "]"); + } + public void testLicenseAndNotice() throws IOException { BuildResult result = getGradleRunner("elasticsearch.build") .withArguments("clean", "assemble", "-s", "-Dlocal.repo.path=" + getLocalTestRepoPath()) diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 2480f94720e1f..05ff486c74491 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -85,6 +85,7 @@ processTestResources { from({ zipTree(configurations.restSpec.singleFile) }) { include 'rest-api-spec/api/**' } + from(project(':client:rest-high-level').file('src/test/resources')) } dependencyLicenses { @@ -117,6 +118,7 @@ if (isEclipse) { File nodeCert = file("./testnode.crt") File nodeTrustStore = file("./testnode.jks") +File pkiTrustCert = file("./src/test/resources/org/elasticsearch/client/security/delegate_pki/testRootCA.crt") integTest.runner { systemProperty 'tests.rest.cluster.username', System.getProperty('tests.rest.cluster.username', 'test_user') @@ -134,6 +136,12 @@ testClusters.integTest { // Truststore settings are not used since TLS is not enabled.
Included for testing the get certificates API setting 'xpack.security.http.ssl.certificate_authorities', 'testnode.crt' setting 'xpack.security.transport.ssl.truststore.path', 'testnode.jks' + setting 'xpack.security.authc.realms.file.default_file.order', '0' + setting 'xpack.security.authc.realms.native.default_native.order', '1' + setting 'xpack.security.authc.realms.pki.pki1.order', '2' + setting 'xpack.security.authc.realms.pki.pki1.certificate_authorities', '[ "testRootCA.crt" ]' + setting 'xpack.security.authc.realms.pki.pki1.delegation.enabled', 'true' + setting 'indices.lifecycle.poll_interval', '1000ms' keystore 'xpack.security.transport.ssl.truststore.secure_password', 'testnode' user username: System.getProperty('tests.rest.cluster.username', 'test_user'), @@ -141,4 +149,5 @@ testClusters.integTest { extraConfigFile nodeCert.name, nodeCert extraConfigFile nodeTrustStore.name, nodeTrustStore + extraConfigFile pkiTrustCert.name, pkiTrustCert } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java index c97d1fae57519..be84d790f229e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java @@ -31,6 +31,8 @@ import org.elasticsearch.client.security.CreateApiKeyResponse; import org.elasticsearch.client.security.CreateTokenRequest; import org.elasticsearch.client.security.CreateTokenResponse; +import org.elasticsearch.client.security.DelegatePkiAuthenticationRequest; +import org.elasticsearch.client.security.DelegatePkiAuthenticationResponse; import org.elasticsearch.client.security.DeletePrivilegesRequest; import org.elasticsearch.client.security.DeletePrivilegesResponse; import org.elasticsearch.client.security.DeleteRoleMappingRequest; @@ -1005,4 +1007,39 @@ public Cancellable invalidateApiKeyAsync(final InvalidateApiKeyRequest request, return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::invalidateApiKey, options, InvalidateApiKeyResponse::fromXContent, listener, emptySet()); } + + /** + * Get an Elasticsearch access token from an {@code X509Certificate} chain. The certificate chain is that of the client from a mutually + * authenticated TLS session, and it is validated by the PKI realms with {@code delegation.enabled} toggled to {@code true}.
+ * See the + * docs for more details. + * + * @param request the request containing the certificate chain + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response from the delegate-pki-authentication API key call + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public DelegatePkiAuthenticationResponse delegatePkiAuthentication(DelegatePkiAuthenticationRequest request, RequestOptions options) + throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, SecurityRequestConverters::delegatePkiAuthentication, options, + DelegatePkiAuthenticationResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously get an Elasticsearch access token from an {@code X509Certificate} chain. The certificate chain is that of the client + * from a mutually authenticated TLS session, and it is validated by the PKI realms with {@code delegation.enabled} toggled to + * {@code true}.
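For context, a minimal usage sketch of the delegate PKI API added in this change, as it might be called through the high-level REST client. This is illustrative only and not part of the patch: the example class, helper method and certificate path are made up, while DelegatePkiAuthenticationRequest, DelegatePkiAuthenticationResponse and SecurityClient#delegatePkiAuthentication are the types introduced here.

import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.security.DelegatePkiAuthenticationRequest;
import org.elasticsearch.client.security.DelegatePkiAuthenticationResponse;

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.List;

public class DelegatePkiExample {

    // Loads the client certificate (an ordered chain of one) and exchanges it for an access token.
    static String exchangeCertificateForToken(RestHighLevelClient client, String certificatePath) throws Exception {
        CertificateFactory factory = CertificateFactory.getInstance("X.509");
        List<X509Certificate> chain = new ArrayList<>();
        try (InputStream in = Files.newInputStream(Paths.get(certificatePath))) {
            chain.add((X509Certificate) factory.generateCertificate(in));
        }
        DelegatePkiAuthenticationRequest request = new DelegatePkiAuthenticationRequest(chain);
        DelegatePkiAuthenticationResponse response =
            client.security().delegatePkiAuthentication(request, RequestOptions.DEFAULT);
        return response.getAccessToken(); // bearer token for subsequent Authorization headers
    }
}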
+ * See the + * docs for more details. + * + * @param request the request containing the certificate chain + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request + */ + public Cancellable delegatePkiAuthenticationAsync(DelegatePkiAuthenticationRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::delegatePkiAuthentication, options, + DelegatePkiAuthenticationResponse::fromXContent, listener, emptySet()); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java index 18ecc2cea281a..c88d1d180fcc8 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java @@ -28,6 +28,7 @@ import org.elasticsearch.client.security.ClearRolesCacheRequest; import org.elasticsearch.client.security.CreateApiKeyRequest; import org.elasticsearch.client.security.CreateTokenRequest; +import org.elasticsearch.client.security.DelegatePkiAuthenticationRequest; import org.elasticsearch.client.security.DeletePrivilegesRequest; import org.elasticsearch.client.security.DeleteRoleMappingRequest; import org.elasticsearch.client.security.DeleteRoleRequest; @@ -221,6 +222,12 @@ static Request createToken(CreateTokenRequest createTokenRequest) throws IOExcep return request; } + static Request delegatePkiAuthentication(DelegatePkiAuthenticationRequest delegatePkiAuthenticationRequest) throws IOException { + Request request = new Request(HttpPost.METHOD_NAME, "/_security/delegate_pki"); + request.setEntity(createEntity(delegatePkiAuthenticationRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request invalidateToken(InvalidateTokenRequest invalidateTokenRequest) throws IOException { Request request = new Request(HttpDelete.METHOD_NAME, "/_security/oauth2/token"); request.setEntity(createEntity(invalidateTokenRequest, REQUEST_BODY_CONTENT_TYPE)); @@ -294,7 +301,7 @@ static Request getApiKey(final GetApiKeyRequest getApiKeyRequest) throws IOExcep if (Strings.hasText(getApiKeyRequest.getRealmName())) { request.addParameter("realm_name", getApiKeyRequest.getRealmName()); } - + request.addParameter("owner", Boolean.toString(getApiKeyRequest.ownedByAuthenticatedUser())); return request; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/MlDataFrameAnalysisNamedXContentProvider.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/MlDataFrameAnalysisNamedXContentProvider.java index 3b78c60be91fd..809317d735b54 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/MlDataFrameAnalysisNamedXContentProvider.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/MlDataFrameAnalysisNamedXContentProvider.java @@ -32,6 +32,10 @@ public List getNamedXContentParsers() { new NamedXContentRegistry.Entry( DataFrameAnalysis.class, OutlierDetection.NAME, - (p, c) -> OutlierDetection.fromXContent(p))); + (p, c) -> OutlierDetection.fromXContent(p)), + new NamedXContentRegistry.Entry( + 
DataFrameAnalysis.class, + Regression.NAME, + (p, c) -> Regression.fromXContent(p))); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/Regression.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/Regression.java new file mode 100644 index 0000000000000..450da1a3e0c94 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/Regression.java @@ -0,0 +1,242 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.dataframe; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class Regression implements DataFrameAnalysis { + + public static Regression fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public static Builder builder(String dependentVariable) { + return new Builder(dependentVariable); + } + + public static final ParseField NAME = new ParseField("regression"); + + static final ParseField DEPENDENT_VARIABLE = new ParseField("dependent_variable"); + static final ParseField LAMBDA = new ParseField("lambda"); + static final ParseField GAMMA = new ParseField("gamma"); + static final ParseField ETA = new ParseField("eta"); + static final ParseField MAXIMUM_NUMBER_TREES = new ParseField("maximum_number_trees"); + static final ParseField FEATURE_BAG_FRACTION = new ParseField("feature_bag_fraction"); + static final ParseField PREDICTION_FIELD_NAME = new ParseField("prediction_field_name"); + static final ParseField TRAINING_PERCENT = new ParseField("training_percent"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME.getPreferredName(), true, + a -> new Regression( + (String) a[0], + (Double) a[1], + (Double) a[2], + (Double) a[3], + (Integer) a[4], + (Double) a[5], + (String) a[6], + (Double) a[7])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), DEPENDENT_VARIABLE); + PARSER.declareDouble(ConstructingObjectParser.optionalConstructorArg(), LAMBDA); + PARSER.declareDouble(ConstructingObjectParser.optionalConstructorArg(), GAMMA); + PARSER.declareDouble(ConstructingObjectParser.optionalConstructorArg(), ETA); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAXIMUM_NUMBER_TREES); + PARSER.declareDouble(ConstructingObjectParser.optionalConstructorArg(), FEATURE_BAG_FRACTION); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), 
PREDICTION_FIELD_NAME); + PARSER.declareDouble(ConstructingObjectParser.optionalConstructorArg(), TRAINING_PERCENT); + } + + private final String dependentVariable; + private final Double lambda; + private final Double gamma; + private final Double eta; + private final Integer maximumNumberTrees; + private final Double featureBagFraction; + private final String predictionFieldName; + private final Double trainingPercent; + + private Regression(String dependentVariable, @Nullable Double lambda, @Nullable Double gamma, @Nullable Double eta, + @Nullable Integer maximumNumberTrees, @Nullable Double featureBagFraction, @Nullable String predictionFieldName, + @Nullable Double trainingPercent) { + this.dependentVariable = Objects.requireNonNull(dependentVariable); + this.lambda = lambda; + this.gamma = gamma; + this.eta = eta; + this.maximumNumberTrees = maximumNumberTrees; + this.featureBagFraction = featureBagFraction; + this.predictionFieldName = predictionFieldName; + this.trainingPercent = trainingPercent; + } + + @Override + public String getName() { + return NAME.getPreferredName(); + } + + public String getDependentVariable() { + return dependentVariable; + } + + public Double getLambda() { + return lambda; + } + + public Double getGamma() { + return gamma; + } + + public Double getEta() { + return eta; + } + + public Integer getMaximumNumberTrees() { + return maximumNumberTrees; + } + + public Double getFeatureBagFraction() { + return featureBagFraction; + } + + public String getPredictionFieldName() { + return predictionFieldName; + } + + public Double getTrainingPercent() { + return trainingPercent; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(DEPENDENT_VARIABLE.getPreferredName(), dependentVariable); + if (lambda != null) { + builder.field(LAMBDA.getPreferredName(), lambda); + } + if (gamma != null) { + builder.field(GAMMA.getPreferredName(), gamma); + } + if (eta != null) { + builder.field(ETA.getPreferredName(), eta); + } + if (maximumNumberTrees != null) { + builder.field(MAXIMUM_NUMBER_TREES.getPreferredName(), maximumNumberTrees); + } + if (featureBagFraction != null) { + builder.field(FEATURE_BAG_FRACTION.getPreferredName(), featureBagFraction); + } + if (predictionFieldName != null) { + builder.field(PREDICTION_FIELD_NAME.getPreferredName(), predictionFieldName); + } + if (trainingPercent != null) { + builder.field(TRAINING_PERCENT.getPreferredName(), trainingPercent); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(dependentVariable, lambda, gamma, eta, maximumNumberTrees, featureBagFraction, predictionFieldName, + trainingPercent); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Regression that = (Regression) o; + return Objects.equals(dependentVariable, that.dependentVariable) + && Objects.equals(lambda, that.lambda) + && Objects.equals(gamma, that.gamma) + && Objects.equals(eta, that.eta) + && Objects.equals(maximumNumberTrees, that.maximumNumberTrees) + && Objects.equals(featureBagFraction, that.featureBagFraction) + && Objects.equals(predictionFieldName, that.predictionFieldName) + && Objects.equals(trainingPercent, that.trainingPercent); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + public static class Builder { + private String dependentVariable; + private 
Double lambda; + private Double gamma; + private Double eta; + private Integer maximumNumberTrees; + private Double featureBagFraction; + private String predictionFieldName; + private Double trainingPercent; + + private Builder(String dependentVariable) { + this.dependentVariable = Objects.requireNonNull(dependentVariable); + } + + public Builder setLambda(Double lambda) { + this.lambda = lambda; + return this; + } + + public Builder setGamma(Double gamma) { + this.gamma = gamma; + return this; + } + + public Builder setEta(Double eta) { + this.eta = eta; + return this; + } + + public Builder setMaximumNumberTrees(Integer maximumNumberTrees) { + this.maximumNumberTrees = maximumNumberTrees; + return this; + } + + public Builder setFeatureBagFraction(Double featureBagFraction) { + this.featureBagFraction = featureBagFraction; + return this; + } + + public Builder setPredictionFieldName(String predictionFieldName) { + this.predictionFieldName = predictionFieldName; + return this; + } + + public Builder setTrainingPercent(Double trainingPercent) { + this.trainingPercent = trainingPercent; + return this; + } + + public Regression build() { + return new Regression(dependentVariable, lambda, gamma, eta, maximumNumberTrees, featureBagFraction, predictionFieldName, + trainingPercent); + } + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/DelegatePkiAuthenticationRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/DelegatePkiAuthenticationRequest.java new file mode 100644 index 0000000000000..c67e692c14d44 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/DelegatePkiAuthenticationRequest.java @@ -0,0 +1,107 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.ValidationException; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.security.cert.CertificateEncodingException; +import java.security.cert.X509Certificate; +import java.util.Base64; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import static java.util.Collections.unmodifiableList; + +public final class DelegatePkiAuthenticationRequest implements Validatable, ToXContentObject { + + private final List<X509Certificate> x509CertificateChain; + + public DelegatePkiAuthenticationRequest(final List<X509Certificate> x509CertificateChain) { + if (x509CertificateChain == null || x509CertificateChain.isEmpty()) { + throw new IllegalArgumentException("certificate chain must not be empty or null"); + } + this.x509CertificateChain = unmodifiableList(x509CertificateChain); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject().startArray("x509_certificate_chain"); + try { + for (X509Certificate cert : x509CertificateChain) { + builder.value(Base64.getEncoder().encodeToString(cert.getEncoded())); + } + } catch (CertificateEncodingException e) { + throw new IOException(e); + } + return builder.endArray().endObject(); + } + + public List<X509Certificate> getCertificateChain() { + return this.x509CertificateChain; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final DelegatePkiAuthenticationRequest that = (DelegatePkiAuthenticationRequest) o; + return Objects.equals(x509CertificateChain, that.x509CertificateChain); + } + + @Override + public int hashCode() { + return Objects.hash(x509CertificateChain); + } + + @Override + public Optional<ValidationException> validate() { + ValidationException validationException = new ValidationException(); + if (false == isOrderedCertificateChain(x509CertificateChain)) { + validationException.addValidationError("certificates chain must be an ordered chain"); + } + return validationException.validationErrors().isEmpty() ? Optional.empty() : Optional.of(validationException); + } + + /** + * Checks that the {@code X509Certificate} list is ordered, such that the end-entity certificate is first and it is followed by any + * certificate authorities'. The check validates that the {@code issuer} of every certificate is the {@code subject} of the certificate + * in the next array position. No other certificate attributes are checked. + */ + private static boolean isOrderedCertificateChain(List<X509Certificate> chain) { + for (int i = 1; i < chain.size(); i++) { + X509Certificate cert = chain.get(i - 1); + X509Certificate issuer = chain.get(i); + if (false == cert.getIssuerX500Principal().equals(issuer.getSubjectX500Principal())) { + return false; + } + } + return true; + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/DelegatePkiAuthenticationResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/DelegatePkiAuthenticationResponse.java new file mode 100644 index 0000000000000..064a5a9a4e293 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/DelegatePkiAuthenticationResponse.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +public final class DelegatePkiAuthenticationResponse { + + private final String accessToken; + private final String type; + private final TimeValue expiresIn; + + public DelegatePkiAuthenticationResponse(String accessToken, String type, TimeValue expiresIn) { + this.accessToken = accessToken; + this.type = type; + this.expiresIn = expiresIn; + } + + public String getAccessToken() { + return accessToken; + } + + public String getType() { + return type; + } + + public TimeValue getExpiresIn() { + return expiresIn; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final DelegatePkiAuthenticationResponse that = (DelegatePkiAuthenticationResponse) o; + return Objects.equals(accessToken, that.accessToken) && + Objects.equals(type, that.type) && + Objects.equals(expiresIn, that.expiresIn); + } + + @Override + public int hashCode() { + return Objects.hash(accessToken, type, expiresIn); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "delegate_pki_response", true, + args -> new DelegatePkiAuthenticationResponse((String) args[0], (String) args[1], TimeValue.timeValueSeconds((Long) args[2]))); + + static { + PARSER.declareString(constructorArg(), new ParseField("access_token")); + PARSER.declareString(constructorArg(), new ParseField("type")); + PARSER.declareLong(constructorArg(), new ParseField("expires_in")); + } + + public static DelegatePkiAuthenticationResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetApiKeyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetApiKeyRequest.java index 6fa98ec549b07..9427489786387 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetApiKeyRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetApiKeyRequest.java @@ -36,13 +36,14 @@ public final class GetApiKeyRequest implements Validatable, ToXContentObject { private final String userName; private final String id; private final String name; + private final boolean ownedByAuthenticatedUser; // pkg scope for testing GetApiKeyRequest(@Nullable String realmName, @Nullable String userName, 
@Nullable String apiKeyId, - @Nullable String apiKeyName) { + @Nullable String apiKeyName, boolean ownedByAuthenticatedUser) { if (Strings.hasText(realmName) == false && Strings.hasText(userName) == false && Strings.hasText(apiKeyId) == false - && Strings.hasText(apiKeyName) == false) { - throwValidationError("One of [api key id, api key name, username, realm name] must be specified"); + && Strings.hasText(apiKeyName) == false && ownedByAuthenticatedUser == false) { + throwValidationError("One of [api key id, api key name, username, realm name] must be specified if [owner] flag is false"); } if (Strings.hasText(apiKeyId) || Strings.hasText(apiKeyName)) { if (Strings.hasText(realmName) || Strings.hasText(userName)) { @@ -50,6 +51,11 @@ public final class GetApiKeyRequest implements Validatable, ToXContentObject { "username or realm name must not be specified when the api key id or api key name is specified"); } } + if (ownedByAuthenticatedUser) { + if (Strings.hasText(realmName) || Strings.hasText(userName)) { + throwValidationError("neither username nor realm-name may be specified when retrieving owned API keys"); + } + } if (Strings.hasText(apiKeyId) && Strings.hasText(apiKeyName)) { throwValidationError("only one of [api key id, api key name] can be specified"); } @@ -57,6 +63,7 @@ public final class GetApiKeyRequest implements Validatable, ToXContentObject { this.userName = userName; this.id = apiKeyId; this.name = apiKeyName; + this.ownedByAuthenticatedUser = ownedByAuthenticatedUser; } private void throwValidationError(String message) { @@ -79,13 +86,17 @@ public String getName() { return name; } + public boolean ownedByAuthenticatedUser() { + return ownedByAuthenticatedUser; + } + /** * Creates get API key request for given realm name * @param realmName realm name * @return {@link GetApiKeyRequest} */ public static GetApiKeyRequest usingRealmName(String realmName) { - return new GetApiKeyRequest(realmName, null, null, null); + return new GetApiKeyRequest(realmName, null, null, null, false); } /** @@ -94,7 +105,7 @@ public static GetApiKeyRequest usingRealmName(String realmName) { * @return {@link GetApiKeyRequest} */ public static GetApiKeyRequest usingUserName(String userName) { - return new GetApiKeyRequest(null, userName, null, null); + return new GetApiKeyRequest(null, userName, null, null, false); } /** @@ -104,25 +115,36 @@ public static GetApiKeyRequest usingUserName(String userName) { * @return {@link GetApiKeyRequest} */ public static GetApiKeyRequest usingRealmAndUserName(String realmName, String userName) { - return new GetApiKeyRequest(realmName, userName, null, null); + return new GetApiKeyRequest(realmName, userName, null, null, false); } /** * Creates get API key request for given api key id * @param apiKeyId api key id + * @param ownedByAuthenticatedUser set {@code true} if the request is only for the API keys owned by current + * authenticated user else{@code false} * @return {@link GetApiKeyRequest} */ - public static GetApiKeyRequest usingApiKeyId(String apiKeyId) { - return new GetApiKeyRequest(null, null, apiKeyId, null); + public static GetApiKeyRequest usingApiKeyId(String apiKeyId, boolean ownedByAuthenticatedUser) { + return new GetApiKeyRequest(null, null, apiKeyId, null, ownedByAuthenticatedUser); } /** * Creates get API key request for given api key name * @param apiKeyName api key name + * @param ownedByAuthenticatedUser set {@code true} if the request is only for the API keys owned by current + * authenticated user else{@code false} * @return {@link 
GetApiKeyRequest} */ - public static GetApiKeyRequest usingApiKeyName(String apiKeyName) { - return new GetApiKeyRequest(null, null, null, apiKeyName); + public static GetApiKeyRequest usingApiKeyName(String apiKeyName, boolean ownedByAuthenticatedUser) { + return new GetApiKeyRequest(null, null, null, apiKeyName, ownedByAuthenticatedUser); + } + + /** + * Creates get api key request to retrieve api key information for the api keys owned by the current authenticated user. + */ + public static GetApiKeyRequest forOwnedApiKeys() { + return new GetApiKeyRequest(null, null, null, null, true); } @Override diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/InvalidateApiKeyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/InvalidateApiKeyRequest.java index d3203354b7ab1..351294e36d38b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/InvalidateApiKeyRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/InvalidateApiKeyRequest.java @@ -36,13 +36,14 @@ public final class InvalidateApiKeyRequest implements Validatable, ToXContentObj private final String userName; private final String id; private final String name; + private final boolean ownedByAuthenticatedUser; // pkg scope for testing InvalidateApiKeyRequest(@Nullable String realmName, @Nullable String userName, @Nullable String apiKeyId, - @Nullable String apiKeyName) { + @Nullable String apiKeyName, boolean ownedByAuthenticatedUser) { if (Strings.hasText(realmName) == false && Strings.hasText(userName) == false && Strings.hasText(apiKeyId) == false - && Strings.hasText(apiKeyName) == false) { - throwValidationError("One of [api key id, api key name, username, realm name] must be specified"); + && Strings.hasText(apiKeyName) == false && ownedByAuthenticatedUser == false) { + throwValidationError("One of [api key id, api key name, username, realm name] must be specified if [owner] flag is false"); } if (Strings.hasText(apiKeyId) || Strings.hasText(apiKeyName)) { if (Strings.hasText(realmName) || Strings.hasText(userName)) { @@ -50,6 +51,11 @@ public final class InvalidateApiKeyRequest implements Validatable, ToXContentObj "username or realm name must not be specified when the api key id or api key name is specified"); } } + if (ownedByAuthenticatedUser) { + if (Strings.hasText(realmName) || Strings.hasText(userName)) { + throwValidationError("neither username nor realm-name may be specified when invalidating owned API keys"); + } + } if (Strings.hasText(apiKeyId) && Strings.hasText(apiKeyName)) { throwValidationError("only one of [api key id, api key name] can be specified"); } @@ -57,6 +63,7 @@ public final class InvalidateApiKeyRequest implements Validatable, ToXContentObj this.userName = userName; this.id = apiKeyId; this.name = apiKeyName; + this.ownedByAuthenticatedUser = ownedByAuthenticatedUser; } private void throwValidationError(String message) { @@ -79,13 +86,17 @@ public String getName() { return name; } + public boolean ownedByAuthenticatedUser() { + return ownedByAuthenticatedUser; + } + /** * Creates invalidate API key request for given realm name * @param realmName realm name * @return {@link InvalidateApiKeyRequest} */ public static InvalidateApiKeyRequest usingRealmName(String realmName) { - return new InvalidateApiKeyRequest(realmName, null, null, null); + return new InvalidateApiKeyRequest(realmName, null, null, null, false); } /** @@ -94,7 +105,7 @@ public static InvalidateApiKeyRequest 
usingRealmName(String realmName) { * @return {@link InvalidateApiKeyRequest} */ public static InvalidateApiKeyRequest usingUserName(String userName) { - return new InvalidateApiKeyRequest(null, userName, null, null); + return new InvalidateApiKeyRequest(null, userName, null, null, false); } /** @@ -104,25 +115,36 @@ public static InvalidateApiKeyRequest usingUserName(String userName) { * @return {@link InvalidateApiKeyRequest} */ public static InvalidateApiKeyRequest usingRealmAndUserName(String realmName, String userName) { - return new InvalidateApiKeyRequest(realmName, userName, null, null); + return new InvalidateApiKeyRequest(realmName, userName, null, null, false); } /** * Creates invalidate API key request for given api key id * @param apiKeyId api key id + * @param ownedByAuthenticatedUser set {@code true} if the request is only for the API keys owned by current authenticated user else + * {@code false} * @return {@link InvalidateApiKeyRequest} */ - public static InvalidateApiKeyRequest usingApiKeyId(String apiKeyId) { - return new InvalidateApiKeyRequest(null, null, apiKeyId, null); + public static InvalidateApiKeyRequest usingApiKeyId(String apiKeyId, boolean ownedByAuthenticatedUser) { + return new InvalidateApiKeyRequest(null, null, apiKeyId, null, ownedByAuthenticatedUser); } /** * Creates invalidate API key request for given api key name * @param apiKeyName api key name + * @param ownedByAuthenticatedUser set {@code true} if the request is only for the API keys owned by current authenticated user else + * {@code false} * @return {@link InvalidateApiKeyRequest} */ - public static InvalidateApiKeyRequest usingApiKeyName(String apiKeyName) { - return new InvalidateApiKeyRequest(null, null, null, apiKeyName); + public static InvalidateApiKeyRequest usingApiKeyName(String apiKeyName, boolean ownedByAuthenticatedUser) { + return new InvalidateApiKeyRequest(null, null, null, apiKeyName, ownedByAuthenticatedUser); + } + + /** + * Creates invalidate api key request to invalidate api keys owned by the current authenticated user. + */ + public static InvalidateApiKeyRequest forOwnedApiKeys() { + return new InvalidateApiKeyRequest(null, null, null, null, true); } @Override @@ -140,6 +162,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (name != null) { builder.field("name", name); } + builder.field("owner", ownedByAuthenticatedUser); return builder.endObject(); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractRequestTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractRequestTestCase.java index 8a10831c47749..5436cdf1c379f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractRequestTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractRequestTestCase.java @@ -56,10 +56,24 @@ public final void testFromXContent() throws IOException { assertInstances(serverInstance, clientTestInstance); } + /** + * @return The client test instance to be serialized to xcontent as bytes + */ protected abstract C createClientTestInstance(); + /** + * @param parser The xcontent parser + * @return The server side instance that is parsed from the xcontent which originates from the client side test instance + */ protected abstract S doParseToServerInstance(XContentParser parser) throws IOException; + /** + * Assert that the server instance and client test instance contain the same content. 
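As an aside, a small illustrative sketch (not part of this patch) of the new owner-aware API key requests from the high-level REST client; the example class and the keyId parameter are assumed, while GetApiKeyRequest#forOwnedApiKeys and the boolean ownedByAuthenticatedUser flag come from the changes above.

import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.security.GetApiKeyRequest;
import org.elasticsearch.client.security.GetApiKeyResponse;
import org.elasticsearch.client.security.InvalidateApiKeyRequest;
import org.elasticsearch.client.security.InvalidateApiKeyResponse;

import java.io.IOException;

public class OwnedApiKeysExample {

    // Lists the API keys owned by the authenticated user, then invalidates one of them by id.
    static InvalidateApiKeyResponse cleanUpOwnKey(RestHighLevelClient client, String keyId) throws IOException {
        GetApiKeyResponse ownedKeys = client.security().getApiKey(GetApiKeyRequest.forOwnedApiKeys(), RequestOptions.DEFAULT);
        // With the owner flag set, neither a realm name nor a user name may be supplied on the request.
        return client.security().invalidateApiKey(InvalidateApiKeyRequest.usingApiKeyId(keyId, true), RequestOptions.DEFAULT);
    }
}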
+ * Typically by asserting whether each property of both instances is equal to each other. + * + * @param serverInstance The server side instance that was created by {@link #doParseToServerInstance(XContentParser)} + * @param clientTestInstance The client side test instance that was created by {@link #createClientTestInstance()} + */ protected abstract void assertInstances(S serverInstance, C clientTestInstance); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractResponseTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractResponseTestCase.java index 2a91a639a5ac3..ea110589a4f21 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractResponseTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractResponseTestCase.java @@ -33,7 +33,7 @@ /** * Base class for HLRC response parsing tests. * - * This case class facilitates generating server side reponse test instances and + * This case class facilitates generating server side response test instances and * verifies that they are correctly parsed into HLRC response instances. * * @param <S> The class representing the response on the server side. @@ -42,9 +42,8 @@ public abstract class AbstractResponseTestCase extends ESTestCase { public final void testFromXContent() throws IOException { - final S serverTestInstance = createServerTestInstance(); - final XContentType xContentType = randomFrom(XContentType.values()); + final S serverTestInstance = createServerTestInstance(xContentType); final BytesReference bytes = toShuffledXContent(serverTestInstance, xContentType, getParams(), randomBoolean()); final XContent xContent = XContentFactory.xContent(xContentType); @@ -56,12 +55,32 @@ public final void testFromXContent() throws IOException { assertInstances(serverTestInstance, clientInstance); } - protected abstract S createServerTestInstance(); + /** + * @param xContentType The xcontent type that will be used to serialize the test instance. + * This parameter is needed if the test instance contains serialized xcontent as bytes or string. + * + * @return The server side test instance that will be serialized as xcontent and used to parse the client side response class. + */ + protected abstract S createServerTestInstance(XContentType xContentType); + /** + * @param parser The xcontent parser + * @return The client side instance that is parsed from the xcontent generated from the server side test instance. + */ protected abstract C doParseToClientInstance(XContentParser parser) throws IOException; + /** + * Assert that the server instance and client instance contain the same content. + * Typically by asserting whether each property of both instances is equal to each other. 
+ * + * @param serverTestInstance The server side instance that was created by {@link #createServerTestInstance(XContentType)} + * @param clientInstance The client side instance that was created by {@link #doParseToClientInstance(XContentParser)} + */ protected abstract void assertInstances(S serverTestInstance, C clientInstance); + /** + * @return The params used when generated the xcontent from server side test instance as bytes + */ protected ToXContent.Params getParams() { return ToXContent.EMPTY_PARAMS; } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 6c2439e23c345..85bd59a570c19 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -1215,9 +1215,9 @@ public void testDeleteCalendarEvent() throws IOException { assertThat(remainingIds, not(hasItem(deletedEvent))); } - public void testPutDataFrameAnalyticsConfig() throws Exception { + public void testPutDataFrameAnalyticsConfig_GivenOutlierDetectionAnalysis() throws Exception { MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); - String configId = "put-test-config"; + String configId = "test-put-df-analytics-outlier-detection"; DataFrameAnalyticsConfig config = DataFrameAnalyticsConfig.builder() .setId(configId) .setSource(DataFrameAnalyticsSource.builder() @@ -1247,6 +1247,41 @@ public void testPutDataFrameAnalyticsConfig() throws Exception { assertThat(createdConfig.getDescription(), equalTo("some description")); } + public void testPutDataFrameAnalyticsConfig_GivenRegression() throws Exception { + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + String configId = "test-put-df-analytics-regression"; + DataFrameAnalyticsConfig config = DataFrameAnalyticsConfig.builder() + .setId(configId) + .setSource(DataFrameAnalyticsSource.builder() + .setIndex("put-test-source-index") + .build()) + .setDest(DataFrameAnalyticsDest.builder() + .setIndex("put-test-dest-index") + .build()) + .setAnalysis(org.elasticsearch.client.ml.dataframe.Regression + .builder("my_dependent_variable") + .setTrainingPercent(80.0) + .build()) + .setDescription("this is a regression") + .build(); + + createIndex("put-test-source-index", defaultMappingForTest()); + + PutDataFrameAnalyticsResponse putDataFrameAnalyticsResponse = execute( + new PutDataFrameAnalyticsRequest(config), + machineLearningClient::putDataFrameAnalytics, machineLearningClient::putDataFrameAnalyticsAsync); + DataFrameAnalyticsConfig createdConfig = putDataFrameAnalyticsResponse.getConfig(); + assertThat(createdConfig.getId(), equalTo(config.getId())); + assertThat(createdConfig.getSource().getIndex(), equalTo(config.getSource().getIndex())); + assertThat(createdConfig.getSource().getQueryConfig(), equalTo(new QueryConfig(new MatchAllQueryBuilder()))); // default value + assertThat(createdConfig.getDest().getIndex(), equalTo(config.getDest().getIndex())); + assertThat(createdConfig.getDest().getResultsField(), equalTo("ml")); // default value + assertThat(createdConfig.getAnalysis(), equalTo(config.getAnalysis())); + assertThat(createdConfig.getAnalyzedFields(), equalTo(config.getAnalyzedFields())); + assertThat(createdConfig.getModelMemoryLimit(), equalTo(ByteSizeValue.parseBytesSizeValue("1gb", ""))); // default value + 
assertThat(createdConfig.getDescription(), equalTo("this is a regression")); + } + public void testGetDataFrameAnalyticsConfig_SingleConfig() throws Exception { MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); String configId = "get-test-config"; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index 8354be4130957..d0d6f674064ab 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.client; import com.fasterxml.jackson.core.JsonParseException; - import org.apache.http.HttpEntity; import org.apache.http.HttpHost; import org.apache.http.HttpResponse; @@ -677,7 +676,7 @@ public void testDefaultNamedXContents() { public void testProvidedNamedXContents() { List namedXContents = RestHighLevelClient.getProvidedNamedXContents(); - assertEquals(36, namedXContents.size()); + assertEquals(37, namedXContents.size()); Map, Integer> categories = new HashMap<>(); List names = new ArrayList<>(); for (NamedXContentRegistry.Entry namedXContent : namedXContents) { @@ -711,8 +710,9 @@ public void testProvidedNamedXContents() { assertTrue(names.contains(ShrinkAction.NAME)); assertTrue(names.contains(FreezeAction.NAME)); assertTrue(names.contains(SetPriorityAction.NAME)); - assertEquals(Integer.valueOf(1), categories.get(DataFrameAnalysis.class)); + assertEquals(Integer.valueOf(2), categories.get(DataFrameAnalysis.class)); assertTrue(names.contains(OutlierDetection.NAME.getPreferredName())); + assertTrue(names.contains(org.elasticsearch.client.ml.dataframe.Regression.NAME.getPreferredName())); assertEquals(Integer.valueOf(1), categories.get(SyncConfig.class)); assertTrue(names.contains(TimeSyncConfig.NAME)); assertEquals(Integer.valueOf(2), categories.get(org.elasticsearch.client.ml.dataframe.evaluation.Evaluation.class)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java index 4c99cb323969e..7804c59d686b5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.client.security.ChangePasswordRequest; import org.elasticsearch.client.security.CreateApiKeyRequest; import org.elasticsearch.client.security.CreateTokenRequest; +import org.elasticsearch.client.security.DelegatePkiAuthenticationRequest; import org.elasticsearch.client.security.DeletePrivilegesRequest; import org.elasticsearch.client.security.DeleteRoleMappingRequest; import org.elasticsearch.client.security.DeleteRoleRequest; @@ -58,6 +59,7 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.security.cert.X509Certificate; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -67,6 +69,8 @@ import static org.elasticsearch.client.RequestConvertersTests.assertToXContentBody; import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class SecurityRequestConvertersTests extends ESTestCase { @@ 
-304,6 +308,18 @@ public void testCreateTokenWithClientCredentialsGrant() throws Exception { assertToXContentBody(createTokenRequest, request.getEntity()); } + public void testDelegatePkiAuthentication() throws Exception { + X509Certificate mockCertificate = mock(X509Certificate.class); + when(mockCertificate.getEncoded()).thenReturn(new byte[0]); + DelegatePkiAuthenticationRequest delegatePkiAuthenticationRequest = new DelegatePkiAuthenticationRequest( + Arrays.asList(mockCertificate)); + Request request = SecurityRequestConverters.delegatePkiAuthentication(delegatePkiAuthenticationRequest); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_security/delegate_pki", request.getEndpoint()); + assertEquals(0, request.getParameters().size()); + assertToXContentBody(delegatePkiAuthenticationRequest, request.getEntity()); + } + public void testGetApplicationPrivilege() throws Exception { final String application = randomAlphaOfLength(6); final String privilege = randomAlphaOfLength(4); @@ -446,10 +462,11 @@ public void testGetApiKey() throws IOException { final Request request = SecurityRequestConverters.getApiKey(getApiKeyRequest); assertEquals(HttpGet.METHOD_NAME, request.getMethod()); assertEquals("/_security/api_key", request.getEndpoint()); - Map mapOfParameters = new HashMap<>(); - mapOfParameters.put("realm_name", realmName); - mapOfParameters.put("username", userName); - assertThat(request.getParameters(), equalTo(mapOfParameters)); + Map expectedMapOfParameters = new HashMap<>(); + expectedMapOfParameters.put("realm_name", realmName); + expectedMapOfParameters.put("username", userName); + expectedMapOfParameters.put("owner", Boolean.FALSE.toString()); + assertThat(request.getParameters(), equalTo(expectedMapOfParameters)); } public void testInvalidateApiKey() throws IOException { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/XPackInfoResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/XPackInfoResponseTests.java index 3eaa1e157dc90..03806d7b98ec8 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/XPackInfoResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/XPackInfoResponseTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.client; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.protocol.xpack.XPackInfoResponse; import org.elasticsearch.protocol.xpack.XPackInfoResponse.BuildInfo; import org.elasticsearch.protocol.xpack.XPackInfoResponse.FeatureSetsInfo; @@ -85,7 +86,7 @@ private FeatureSet randomFeatureSet() { } @Override - protected XPackInfoResponse createServerTestInstance() { + protected XPackInfoResponse createServerTestInstance(XContentType xContentType) { return new XPackInfoResponse( randomBoolean() ? null : randomBuildInfo(), randomBoolean() ? 
null : randomLicenseInfo(), diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/CcrStatsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/CcrStatsResponseTests.java index eaf6103a0ecfe..a687ea45f2023 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/CcrStatsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/CcrStatsResponseTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.client.ccr.IndicesFollowStats.ShardFollowStats; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; @@ -44,7 +45,7 @@ public class CcrStatsResponseTests extends AbstractResponseTestCase { @Override - protected CcrStatsAction.Response createServerTestInstance() { + protected CcrStatsAction.Response createServerTestInstance(XContentType xContentType) { org.elasticsearch.xpack.core.ccr.AutoFollowStats autoFollowStats = new org.elasticsearch.xpack.core.ccr.AutoFollowStats( randomNonNegativeLong(), randomNonNegativeLong(), diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowInfoResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowInfoResponseTests.java index 2c5bfba5025f7..55fa037422ae3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowInfoResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowInfoResponseTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.ccr.action.FollowInfoAction; import org.elasticsearch.xpack.core.ccr.action.FollowParameters; @@ -37,7 +38,7 @@ public class FollowInfoResponseTests extends AbstractResponseTestCase { @Override - protected FollowInfoAction.Response createServerTestInstance() { + protected FollowInfoAction.Response createServerTestInstance(XContentType xContentType) { int numInfos = randomIntBetween(0, 32); List infos = new ArrayList<>(numInfos); for (int i = 0; i < numInfos; i++) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowStatsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowStatsResponseTests.java index ff93c8df33eda..b5c645709a663 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowStatsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowStatsResponseTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.client.ccr.IndicesFollowStats.ShardFollowStats; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; @@ -41,7 +42,7 @@ public class FollowStatsResponseTests extends AbstractResponseTestCase { @Override - protected FollowStatsAction.StatsResponses createServerTestInstance() { + protected 
FollowStatsAction.StatsResponses createServerTestInstance(XContentType xContentType) { return createStatsResponse(); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java index 65ef3aa062d84..820640635786d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.action.GetAutoFollowPatternAction; @@ -41,7 +42,7 @@ public class GetAutoFollowPatternResponseTests extends AbstractResponseTestCase< GetAutoFollowPatternResponse> { @Override - protected GetAutoFollowPatternAction.Response createServerTestInstance() { + protected GetAutoFollowPatternAction.Response createServerTestInstance(XContentType xContentType) { int numPatterns = randomIntBetween(0, 16); NavigableMap patterns = new TreeMap<>(); for (int i = 0; i < numPatterns; i++) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutFollowResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutFollowResponseTests.java index 52fe70b3a3990..15dd064469390 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutFollowResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutFollowResponseTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; import java.io.IOException; @@ -30,7 +31,7 @@ public class PutFollowResponseTests extends AbstractResponseTestCase { @Override - protected PutFollowAction.Response createServerTestInstance() { + protected PutFollowAction.Response createServerTestInstance(XContentType xContentType) { return new PutFollowAction.Response(randomBoolean(), randomBoolean(), randomBoolean()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/AcknowledgedResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/AcknowledgedResponseTests.java index 996d0c1f306b0..004beccbb67b3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/AcknowledgedResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/AcknowledgedResponseTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; @@ -30,7 +31,7 @@ public class AcknowledgedResponseTests extends AbstractResponseTestCase { @Override - protected org.elasticsearch.action.support.master.AcknowledgedResponse createServerTestInstance() { + protected org.elasticsearch.action.support.master.AcknowledgedResponse createServerTestInstance(XContentType xContentType) 
{ return new org.elasticsearch.action.support.master.AcknowledgedResponse(randomBoolean()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/BroadcastResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/BroadcastResponseTests.java index 3a67a8d7a4feb..0f541597da204 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/BroadcastResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/BroadcastResponseTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; import java.io.IOException; @@ -43,7 +44,7 @@ public class BroadcastResponseTests extends AbstractResponseTestCase shardIds; @Override - protected org.elasticsearch.action.support.broadcast.BroadcastResponse createServerTestInstance() { + protected org.elasticsearch.action.support.broadcast.BroadcastResponse createServerTestInstance(XContentType xContentType) { index = randomAlphaOfLength(8); id = randomAlphaOfLength(8); final int total = randomIntBetween(1, 16); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/MainResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/MainResponseTests.java index 3a551a0e17620..0549cab10d329 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/MainResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/MainResponseTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.VersionUtils; import java.io.IOException; @@ -33,7 +34,7 @@ public class MainResponseTests extends AbstractResponseTestCase { @Override - protected org.elasticsearch.action.main.MainResponse createServerTestInstance() { + protected org.elasticsearch.action.main.MainResponse createServerTestInstance(XContentType xContentType) { String clusterUuid = randomAlphaOfLength(10); ClusterName clusterName = new ClusterName(randomAlphaOfLength(10)); String nodeName = randomAlphaOfLength(10); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerPositionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerPositionTests.java index bf0680a2dc247..620629f1760ea 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerPositionTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerPositionTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerPosition; import java.util.LinkedHashMap; @@ -45,7 +46,7 @@ public static DataFrameIndexerPosition randomDataFrameIndexerPosition() { } @Override - protected DataFrameIndexerPosition 
createServerTestInstance() { + protected DataFrameIndexerPosition createServerTestInstance(XContentType xContentType) { return randomDataFrameIndexerPosition(); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformCheckpointStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformCheckpointStatsTests.java index 90dec41467a70..02bb3331bd571 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformCheckpointStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformCheckpointStatsTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStats; import java.io.IOException; @@ -48,7 +49,7 @@ public static DataFrameTransformCheckpointStats randomDataFrameTransformCheckpoi } @Override - protected DataFrameTransformCheckpointStats createServerTestInstance() { + protected DataFrameTransformCheckpointStats createServerTestInstance(XContentType xContentType) { return randomDataFrameTransformCheckpointStats(); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformProgressTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformProgressTests.java index c3f8479dd4146..83858bb43cf16 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformProgressTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformProgressTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; import static org.hamcrest.Matchers.equalTo; @@ -51,7 +52,7 @@ public static DataFrameTransformProgress randomDataFrameTransformProgress() { } @Override - protected DataFrameTransformProgress createServerTestInstance() { + protected DataFrameTransformProgress createServerTestInstance(XContentType xContentType) { return randomDataFrameTransformProgress(); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/TimeSyncConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/TimeSyncConfigTests.java index 0c6a0350882a4..1c0c3f9f7f7c5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/TimeSyncConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/TimeSyncConfigTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.client.dataframe.transforms.TimeSyncConfig; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; @@ -41,7 +42,7 @@ public static void assertHlrcEquals(org.elasticsearch.xpack.core.dataframe.trans } @Override - protected 
org.elasticsearch.xpack.core.dataframe.transforms.TimeSyncConfig createServerTestInstance() { + protected org.elasticsearch.xpack.core.dataframe.transforms.TimeSyncConfig createServerTestInstance(XContentType xContentType) { return randomTimeSyncConfig(); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java index fd98e52a1527e..e9f01ae527886 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.DateHistogramGroupSource; @@ -48,7 +49,7 @@ public static DateHistogramGroupSource randomDateHistogramGroupSource() { } @Override - protected DateHistogramGroupSource createServerTestInstance() { + protected DateHistogramGroupSource createServerTestInstance(XContentType xContentType) { return randomDateHistogramGroupSource(); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index f8e63ecc81323..f1017e86bd063 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -139,6 +139,7 @@ import org.elasticsearch.client.ml.dataframe.DataFrameAnalyticsStats; import org.elasticsearch.client.ml.dataframe.OutlierDetection; import org.elasticsearch.client.ml.dataframe.QueryConfig; +import org.elasticsearch.client.ml.dataframe.Regression; import org.elasticsearch.client.ml.dataframe.evaluation.EvaluationMetric; import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.AucRocMetric; import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.BinarySoftClassification; @@ -2923,16 +2924,28 @@ public void testPutDataFrameAnalytics() throws Exception { .build(); // end::put-data-frame-analytics-dest-config - // tag::put-data-frame-analytics-analysis-default + // tag::put-data-frame-analytics-outlier-detection-default DataFrameAnalysis outlierDetection = OutlierDetection.createDefault(); // <1> - // end::put-data-frame-analytics-analysis-default + // end::put-data-frame-analytics-outlier-detection-default - // tag::put-data-frame-analytics-analysis-customized + // tag::put-data-frame-analytics-outlier-detection-customized DataFrameAnalysis outlierDetectionCustomized = OutlierDetection.builder() // <1> .setMethod(OutlierDetection.Method.DISTANCE_KNN) // <2> .setNNeighbors(5) // <3> .build(); - // end::put-data-frame-analytics-analysis-customized + // end::put-data-frame-analytics-outlier-detection-customized + + // tag::put-data-frame-analytics-regression + DataFrameAnalysis regression = Regression.builder("my_dependent_variable") // <1> + .setLambda(1.0) // <2> + 
.setGamma(5.5) // <3> + .setEta(5.5) // <4> + .setMaximumNumberTrees(50) // <5> + .setFeatureBagFraction(0.4) // <6> + .setPredictionFieldName("my_prediction_field_name") // <7> + .setTrainingPercent(50.0) // <8> + .build(); + // end::put-data-frame-analytics-regression // tag::put-data-frame-analytics-analyzed-fields FetchSourceContext analyzedFields = diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java index 9bbc3b2ea9072..88234f80e8fde 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -37,6 +37,8 @@ import org.elasticsearch.client.security.CreateApiKeyResponse; import org.elasticsearch.client.security.CreateTokenRequest; import org.elasticsearch.client.security.CreateTokenResponse; +import org.elasticsearch.client.security.DelegatePkiAuthenticationRequest; +import org.elasticsearch.client.security.DelegatePkiAuthenticationResponse; import org.elasticsearch.client.security.DeletePrivilegesRequest; import org.elasticsearch.client.security.DeletePrivilegesResponse; import org.elasticsearch.client.security.DeleteRoleMappingRequest; @@ -77,6 +79,7 @@ import org.elasticsearch.client.security.PutUserResponse; import org.elasticsearch.client.security.RefreshPolicy; import org.elasticsearch.client.security.TemplateRoleName; +import org.elasticsearch.client.security.AuthenticateResponse.RealmInfo; import org.elasticsearch.client.security.support.ApiKey; import org.elasticsearch.client.security.support.CertificateInfo; import org.elasticsearch.client.security.support.expressiondsl.RoleMapperExpression; @@ -99,6 +102,11 @@ import javax.crypto.SecretKeyFactory; import javax.crypto.spec.PBEKeySpec; import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.cert.CertificateFactory; +import java.security.cert.X509Certificate; import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; @@ -1917,7 +1925,7 @@ public void testGetApiKey() throws Exception { Instant.now().plusMillis(expiration.getMillis()), false, "test_user", "default_file"); { // tag::get-api-key-id-request - GetApiKeyRequest getApiKeyRequest = GetApiKeyRequest.usingApiKeyId(createApiKeyResponse1.getId()); + GetApiKeyRequest getApiKeyRequest = GetApiKeyRequest.usingApiKeyId(createApiKeyResponse1.getId(), false); // end::get-api-key-id-request // tag::get-api-key-execute @@ -1931,7 +1939,7 @@ public void testGetApiKey() throws Exception { { // tag::get-api-key-name-request - GetApiKeyRequest getApiKeyRequest = GetApiKeyRequest.usingApiKeyName(createApiKeyResponse1.getName()); + GetApiKeyRequest getApiKeyRequest = GetApiKeyRequest.usingApiKeyName(createApiKeyResponse1.getName(), false); // end::get-api-key-name-request GetApiKeyResponse getApiKeyResponse = client.security().getApiKey(getApiKeyRequest, RequestOptions.DEFAULT); @@ -1965,6 +1973,18 @@ public void testGetApiKey() throws Exception { verifyApiKey(getApiKeyResponse.getApiKeyInfos().get(0), expectedApiKeyInfo); } + { + // tag::get-api-keys-owned-by-authenticated-user-request + GetApiKeyRequest getApiKeyRequest = GetApiKeyRequest.forOwnedApiKeys(); + // end::get-api-keys-owned-by-authenticated-user-request + + GetApiKeyResponse 
getApiKeyResponse = client.security().getApiKey(getApiKeyRequest, RequestOptions.DEFAULT); + + assertThat(getApiKeyResponse.getApiKeyInfos(), is(notNullValue())); + assertThat(getApiKeyResponse.getApiKeyInfos().size(), is(1)); + verifyApiKey(getApiKeyResponse.getApiKeyInfos().get(0), expectedApiKeyInfo); + } + { // tag::get-user-realm-api-keys-request GetApiKeyRequest getApiKeyRequest = GetApiKeyRequest.usingRealmAndUserName("default_file", "test_user"); @@ -1980,7 +2000,7 @@ public void testGetApiKey() throws Exception { } { - GetApiKeyRequest getApiKeyRequest = GetApiKeyRequest.usingApiKeyId(createApiKeyResponse1.getId()); + GetApiKeyRequest getApiKeyRequest = GetApiKeyRequest.usingApiKeyId(createApiKeyResponse1.getId(), false); ActionListener listener; // tag::get-api-key-execute-listener @@ -2041,7 +2061,7 @@ public void testInvalidateApiKey() throws Exception { { // tag::invalidate-api-key-id-request - InvalidateApiKeyRequest invalidateApiKeyRequest = InvalidateApiKeyRequest.usingApiKeyId(createApiKeyResponse1.getId()); + InvalidateApiKeyRequest invalidateApiKeyRequest = InvalidateApiKeyRequest.usingApiKeyId(createApiKeyResponse1.getId(), false); // end::invalidate-api-key-id-request // tag::invalidate-api-key-execute @@ -2066,7 +2086,8 @@ public void testInvalidateApiKey() throws Exception { assertNotNull(createApiKeyResponse2.getKey()); // tag::invalidate-api-key-name-request - InvalidateApiKeyRequest invalidateApiKeyRequest = InvalidateApiKeyRequest.usingApiKeyName(createApiKeyResponse2.getName()); + InvalidateApiKeyRequest invalidateApiKeyRequest = InvalidateApiKeyRequest.usingApiKeyName(createApiKeyResponse2.getName(), + false); // end::invalidate-api-key-name-request InvalidateApiKeyResponse invalidateApiKeyResponse = client.security().invalidateApiKey(invalidateApiKeyRequest, @@ -2159,7 +2180,7 @@ public void testInvalidateApiKey() throws Exception { assertThat(createApiKeyResponse6.getName(), equalTo("k6")); assertNotNull(createApiKeyResponse6.getKey()); - InvalidateApiKeyRequest invalidateApiKeyRequest = InvalidateApiKeyRequest.usingApiKeyId(createApiKeyResponse6.getId()); + InvalidateApiKeyRequest invalidateApiKeyRequest = InvalidateApiKeyRequest.usingApiKeyId(createApiKeyResponse6.getId(), false); ActionListener listener; // tag::invalidate-api-key-execute-listener @@ -2195,5 +2216,109 @@ public void onFailure(Exception e) { assertThat(invalidatedApiKeyIds, containsInAnyOrder(expectedInvalidatedApiKeyIds.toArray(Strings.EMPTY_ARRAY))); assertThat(response.getPreviouslyInvalidatedApiKeys().size(), equalTo(0)); } + + { + createApiKeyRequest = new CreateApiKeyRequest("k7", roles, expiration, refreshPolicy); + CreateApiKeyResponse createApiKeyResponse7 = client.security().createApiKey(createApiKeyRequest, RequestOptions.DEFAULT); + assertThat(createApiKeyResponse7.getName(), equalTo("k7")); + assertNotNull(createApiKeyResponse7.getKey()); + + // tag::invalidate-api-keys-owned-by-authenticated-user-request + InvalidateApiKeyRequest invalidateApiKeyRequest = InvalidateApiKeyRequest.forOwnedApiKeys(); + // end::invalidate-api-keys-owned-by-authenticated-user-request + + InvalidateApiKeyResponse invalidateApiKeyResponse = client.security().invalidateApiKey(invalidateApiKeyRequest, + RequestOptions.DEFAULT); + + final List errors = invalidateApiKeyResponse.getErrors(); + final List invalidatedApiKeyIds = invalidateApiKeyResponse.getInvalidatedApiKeys(); + final List previouslyInvalidatedApiKeyIds = invalidateApiKeyResponse.getPreviouslyInvalidatedApiKeys(); + + 
assertTrue(errors.isEmpty()); + List expectedInvalidatedApiKeyIds = Arrays.asList(createApiKeyResponse7.getId()); + assertThat(invalidatedApiKeyIds, containsInAnyOrder(expectedInvalidatedApiKeyIds.toArray(Strings.EMPTY_ARRAY))); + assertThat(previouslyInvalidatedApiKeyIds.size(), equalTo(0)); + } + + } + + public void testDelegatePkiAuthentication() throws Exception { + final RestHighLevelClient client = highLevelClient(); + X509Certificate clientCertificate = readCertForPkiDelegation("testClient.crt"); + X509Certificate intermediateCA = readCertForPkiDelegation("testIntermediateCA.crt"); + { + //tag::delegate-pki-request + DelegatePkiAuthenticationRequest request = new DelegatePkiAuthenticationRequest( + Arrays.asList(clientCertificate, intermediateCA)); + //end::delegate-pki-request + //tag::delegate-pki-execute + DelegatePkiAuthenticationResponse response = client.security().delegatePkiAuthentication(request, RequestOptions.DEFAULT); + //end::delegate-pki-execute + //tag::delegate-pki-response + String accessToken = response.getAccessToken(); // <1> + //end::delegate-pki-response + + RequestOptions.Builder optionsBuilder = RequestOptions.DEFAULT.toBuilder(); + optionsBuilder.addHeader("Authorization", "Bearer " + accessToken); + AuthenticateResponse resp = client.security().authenticate(optionsBuilder.build()); + User user = resp.getUser(); + assertThat(user, is(notNullValue())); + assertThat(user.getUsername(), is("Elasticsearch Test Client")); + RealmInfo authnRealm = resp.getAuthenticationRealm(); + assertThat(authnRealm, is(notNullValue())); + assertThat(authnRealm.getName(), is("pki1")); + assertThat(authnRealm.getType(), is("pki")); + } + + { + DelegatePkiAuthenticationRequest request = new DelegatePkiAuthenticationRequest( + Arrays.asList(clientCertificate, intermediateCA)); + ActionListener listener; + + //tag::delegate-pki-execute-listener + listener = new ActionListener() { + @Override + public void onResponse(DelegatePkiAuthenticationResponse getRolesResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + //end::delegate-pki-execute-listener + + assertNotNull(listener); + + // Replace the empty listener by a blocking listener in test + final PlainActionFuture future = new PlainActionFuture<>(); + listener = future; + + //tag::delegate-pki-execute-async + client.security().delegatePkiAuthenticationAsync(request, RequestOptions.DEFAULT, listener); // <1> + //end::delegate-pki-execute-async + + final DelegatePkiAuthenticationResponse response = future.get(30, TimeUnit.SECONDS); + String accessToken = response.getAccessToken(); + RequestOptions.Builder optionsBuilder = RequestOptions.DEFAULT.toBuilder(); + optionsBuilder.addHeader("Authorization", "Bearer " + accessToken); + AuthenticateResponse resp = client.security().authenticate(optionsBuilder.build()); + User user = resp.getUser(); + assertThat(user, is(notNullValue())); + assertThat(user.getUsername(), is("Elasticsearch Test Client")); + RealmInfo authnRealm = resp.getAuthenticationRealm(); + assertThat(authnRealm, is(notNullValue())); + assertThat(authnRealm.getName(), is("pki1")); + assertThat(authnRealm.getType(), is("pki")); + } + } + + private X509Certificate readCertForPkiDelegation(String certificateName) throws Exception { + Path path = getDataPath("/org/elasticsearch/client/security/delegate_pki/" + certificateName); + try (InputStream in = Files.newInputStream(path)) { + CertificateFactory factory = CertificateFactory.getInstance("X.509"); + return (X509Certificate) 
factory.generateCertificate(in); + } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeResponseTests.java index c6614c1751e7e..3512a88ac550d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeResponseTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.RandomObjects; import java.io.IOException; @@ -30,7 +31,7 @@ public class AnalyzeResponseTests extends AbstractResponseTestCase { @Override - protected AnalyzeAction.Response createServerTestInstance() { + protected AnalyzeAction.Response createServerTestInstance(XContentType xContentType) { int tokenCount = randomIntBetween(1, 30); AnalyzeAction.AnalyzeToken[] tokens = new AnalyzeAction.AnalyzeToken[tokenCount]; for (int i = 0; i < tokenCount; i++) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/CloseIndexResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/CloseIndexResponseTests.java index b66927eff4479..c02845bbef93a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/CloseIndexResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/CloseIndexResponseTests.java @@ -51,7 +51,7 @@ public class CloseIndexResponseTests extends AbstractResponseTestCase { @Override - protected org.elasticsearch.action.admin.indices.close.CloseIndexResponse createServerTestInstance() { + protected org.elasticsearch.action.admin.indices.close.CloseIndexResponse createServerTestInstance(XContentType xContentType) { boolean acknowledged = true; final String[] indicesNames = generateRandomStringArray(10, 10, false, true); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/ReloadAnalyzersResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/ReloadAnalyzersResponseTests.java index 6719e10808e42..928bf0fb8b086 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/ReloadAnalyzersResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/ReloadAnalyzersResponseTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; import org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse.ReloadDetails; @@ -48,7 +49,7 @@ public class ReloadAnalyzersResponseTests private Set shardIds; @Override - protected org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse createServerTestInstance() { + protected org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse createServerTestInstance(XContentType xContentType) { index = randomAlphaOfLength(8); id = randomAlphaOfLength(8); final int total = randomIntBetween(1, 16); diff --git 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetBasicStatusResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetBasicStatusResponseTests.java index 5629ba88c7517..2bf209fe1c8af 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetBasicStatusResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetBasicStatusResponseTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; @@ -27,7 +28,7 @@ public class GetBasicStatusResponseTests extends AbstractResponseTestCase { @Override - protected org.elasticsearch.license.GetBasicStatusResponse createServerTestInstance() { + protected org.elasticsearch.license.GetBasicStatusResponse createServerTestInstance(XContentType xContentType) { return new org.elasticsearch.license.GetBasicStatusResponse(randomBoolean()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetTrialStatusResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetTrialStatusResponseTests.java index d046c01859824..60b5230e9f4bc 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetTrialStatusResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetTrialStatusResponseTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; @@ -27,7 +28,7 @@ public class GetTrialStatusResponseTests extends AbstractResponseTestCase { @Override - protected org.elasticsearch.license.GetTrialStatusResponse createServerTestInstance() { + protected org.elasticsearch.license.GetTrialStatusResponse createServerTestInstance(XContentType xContentType) { return new org.elasticsearch.license.GetTrialStatusResponse(randomBoolean()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/MlInfoActionResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/MlInfoActionResponseTests.java index 2fa57aa9dd952..1d20e5efe62ce 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/MlInfoActionResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/MlInfoActionResponseTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.action.MlInfoAction.Response; import java.io.IOException; @@ -31,7 +32,7 @@ public class MlInfoActionResponseTests extends AbstractResponseTestCase { @Override - protected Response createServerTestInstance() { + protected Response createServerTestInstance(XContentType xContentType) { int size = randomInt(10); Map info = new HashMap<>(); for (int j = 0; j < size; j++) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutCalendarActionResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutCalendarActionResponseTests.java index d5ef3dbcc0b1d..2a8ce4abaabca 100644 --- 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutCalendarActionResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutCalendarActionResponseTests.java @@ -21,6 +21,7 @@ import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.action.PutCalendarAction; import org.elasticsearch.xpack.core.ml.calendars.Calendar; @@ -33,7 +34,7 @@ public class PutCalendarActionResponseTests extends AbstractResponseTestCase { @Override - protected PutCalendarAction.Response createServerTestInstance() { + protected PutCalendarAction.Response createServerTestInstance(XContentType xContentType) { String calendarId = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz".toCharArray()).ofCodePointsLength(random(), 10, 10); int size = randomInt(10); List items = new ArrayList<>(size); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/RegressionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/RegressionTests.java new file mode 100644 index 0000000000000..02e41ecdff333 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/RegressionTests.java @@ -0,0 +1,54 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.dataframe; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class RegressionTests extends AbstractXContentTestCase { + + public static Regression randomRegression() { + return Regression.builder(randomAlphaOfLength(10)) + .setLambda(randomBoolean() ? null : randomDoubleBetween(0.0, Double.MAX_VALUE, true)) + .setGamma(randomBoolean() ? null : randomDoubleBetween(0.0, Double.MAX_VALUE, true)) + .setEta(randomBoolean() ? null : randomDoubleBetween(0.001, 1.0, true)) + .setMaximumNumberTrees(randomBoolean() ? null : randomIntBetween(1, 2000)) + .setFeatureBagFraction(randomBoolean() ? null : randomDoubleBetween(0.0, 1.0, false)) + .setPredictionFieldName(randomBoolean() ? null : randomAlphaOfLength(10)) + .setTrainingPercent(randomBoolean() ? 
null : randomDoubleBetween(1.0, 100.0, true)) + .build(); + } + + @Override + protected Regression createTestInstance() { + return randomRegression(); + } + + @Override + protected Regression doParseInstance(XContentParser parser) throws IOException { + return Regression.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/DelegatePkiAuthenticationRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/DelegatePkiAuthenticationRequestTests.java new file mode 100644 index 0000000000000..08c7055e8f4bb --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/DelegatePkiAuthenticationRequestTests.java @@ -0,0 +1,107 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.AbstractRequestTestCase; +import org.elasticsearch.client.ValidationException; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.cert.CertificateFactory; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Optional; + +import javax.security.auth.x500.X500Principal; + +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class DelegatePkiAuthenticationRequestTests extends AbstractRequestTestCase { + + public void testEmptyOrNullCertificateChain() throws Exception { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { + new DelegatePkiAuthenticationRequest((List)null); + }); + assertThat(e.getMessage(), is("certificate chain must not be empty or null")); + e = expectThrows(IllegalArgumentException.class, () -> { + new DelegatePkiAuthenticationRequest(Collections.emptyList()); + }); + assertThat(e.getMessage(), is("certificate chain must not be empty or null")); + } + + public void testUnorderedCertificateChain() throws Exception { + List mockCertChain = new ArrayList<>(2); + mockCertChain.add(mock(X509Certificate.class)); + when(mockCertChain.get(0).getIssuerX500Principal()).thenReturn(new X500Principal("CN=Test, OU=elasticsearch, O=org")); + mockCertChain.add(mock(X509Certificate.class)); + when(mockCertChain.get(1).getSubjectX500Principal()).thenReturn(new X500Principal("CN=Not Test, OU=elasticsearch, O=org")); + DelegatePkiAuthenticationRequest request = new DelegatePkiAuthenticationRequest(mockCertChain); + Optional ve = 
request.validate(); + assertThat(ve.isPresent(), is(true)); + assertThat(ve.get().validationErrors().size(), is(1)); + assertThat(ve.get().validationErrors().get(0), is("certificates chain must be an ordered chain")); + } + + @Override + protected DelegatePkiAuthenticationRequest createClientTestInstance() { + List certificates = randomCertificateList(); + return new DelegatePkiAuthenticationRequest(certificates); + } + + @Override + protected org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationRequest doParseToServerInstance(XContentParser parser) + throws IOException { + return org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationRequest.fromXContent(parser); + } + + @Override + protected void assertInstances(org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationRequest serverInstance, + DelegatePkiAuthenticationRequest clientTestInstance) { + assertThat(serverInstance.getCertificateChain(), is(clientTestInstance.getCertificateChain())); + } + + private List randomCertificateList() { + List certificates = Arrays.asList(randomArray(1, 3, X509Certificate[]::new, () -> { + try { + return readCertForPkiDelegation(randomFrom("testClient.crt", "testIntermediateCA.crt", "testRootCA.crt")); + } catch (Exception e) { + throw new RuntimeException(e); + } + })); + return certificates; + } + + private X509Certificate readCertForPkiDelegation(String certificateName) throws Exception { + Path path = getDataPath("/org/elasticsearch/client/security/delegate_pki/" + certificateName); + try (InputStream in = Files.newInputStream(path)) { + CertificateFactory factory = CertificateFactory.getInstance("X.509"); + return (X509Certificate) factory.generateCertificate(in); + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/DelegatePkiAuthenticationResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/DelegatePkiAuthenticationResponseTests.java new file mode 100644 index 0000000000000..8a5ecb16e540e --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/DelegatePkiAuthenticationResponseTests.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.AbstractResponseTestCase; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.client.security.DelegatePkiAuthenticationResponse; +import org.elasticsearch.common.xcontent.XContentType; + +import java.io.IOException; + +import static org.hamcrest.Matchers.is; + +public class DelegatePkiAuthenticationResponseTests extends + AbstractResponseTestCase { + + @Override + protected org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationResponse createServerTestInstance( + XContentType xContentType) { + return new org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationResponse(randomAlphaOfLength(6), + TimeValue.parseTimeValue(randomTimeValue(), getClass().getSimpleName() + ".expiresIn")); + } + + @Override + protected DelegatePkiAuthenticationResponse doParseToClientInstance(XContentParser parser) throws IOException { + return DelegatePkiAuthenticationResponse.fromXContent(parser); + } + + @Override + protected void assertInstances(org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationResponse serverTestInstance, + DelegatePkiAuthenticationResponse clientInstance) { + assertThat(serverTestInstance.getAccessToken(), is(clientInstance.getAccessToken())); + assertThat(serverTestInstance.getExpiresIn(), is(clientInstance.getExpiresIn())); + assertThat(clientInstance.getType(), is("Bearer")); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetApiKeyRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetApiKeyRequestTests.java index 79551e1e73e92..cbd05ae4c5ac3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetApiKeyRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetApiKeyRequestTests.java @@ -30,10 +30,10 @@ public class GetApiKeyRequestTests extends ESTestCase { public void testRequestValidation() { - GetApiKeyRequest request = GetApiKeyRequest.usingApiKeyId(randomAlphaOfLength(5)); + GetApiKeyRequest request = GetApiKeyRequest.usingApiKeyId(randomAlphaOfLength(5), randomBoolean()); Optional ve = request.validate(); assertFalse(ve.isPresent()); - request = GetApiKeyRequest.usingApiKeyName(randomAlphaOfLength(5)); + request = GetApiKeyRequest.usingApiKeyName(randomAlphaOfLength(5), randomBoolean()); ve = request.validate(); assertFalse(ve.isPresent()); request = GetApiKeyRequest.usingRealmName(randomAlphaOfLength(5)); @@ -45,28 +45,40 @@ public void testRequestValidation() { request = GetApiKeyRequest.usingRealmAndUserName(randomAlphaOfLength(5), randomAlphaOfLength(7)); ve = request.validate(); assertFalse(ve.isPresent()); + request = GetApiKeyRequest.forOwnedApiKeys(); + ve = request.validate(); + assertFalse(ve.isPresent()); } public void testRequestValidationFailureScenarios() throws IOException { String[][] inputs = new String[][] { - { randomFrom(new String[] { null, "" }), randomFrom(new String[] { null, "" }), randomFrom(new String[] { null, "" }), - randomFrom(new String[] { null, "" }) }, - { randomFrom(new String[] { null, "" }), "user", "api-kid", "api-kname" }, - { "realm", randomFrom(new String[] { null, "" }), "api-kid", "api-kname" }, - { "realm", "user", "api-kid", randomFrom(new String[] { null, "" }) }, - { randomFrom(new String[] { null, "" }), randomFrom(new String[] { null, "" }), "api-kid", "api-kname" } }; 
- String[] expectedErrorMessages = new String[] { "One of [api key id, api key name, username, realm name] must be specified", + { randomNullOrEmptyString(), randomNullOrEmptyString(), randomNullOrEmptyString(), randomNullOrEmptyString(), "false" }, + { randomNullOrEmptyString(), "user", "api-kid", "api-kname", "false" }, + { "realm", randomNullOrEmptyString(), "api-kid", "api-kname", "false" }, + { "realm", "user", "api-kid", randomNullOrEmptyString(), "false" }, + { randomNullOrEmptyString(), randomNullOrEmptyString(), "api-kid", "api-kname", "false" }, + { "realm", randomNullOrEmptyString(), randomNullOrEmptyString(), randomNullOrEmptyString(), "true"}, + { randomNullOrEmptyString(), "user", randomNullOrEmptyString(), randomNullOrEmptyString(), "true"} }; + String[] expectedErrorMessages = new String[] { + "One of [api key id, api key name, username, realm name] must be specified if [owner] flag is false", "username or realm name must not be specified when the api key id or api key name is specified", "username or realm name must not be specified when the api key id or api key name is specified", "username or realm name must not be specified when the api key id or api key name is specified", - "only one of [api key id, api key name] can be specified" }; + "only one of [api key id, api key name] can be specified", + "neither username nor realm-name may be specified when retrieving owned API keys", + "neither username nor realm-name may be specified when retrieving owned API keys" }; for (int i = 0; i < inputs.length; i++) { final int caseNo = i; IllegalArgumentException ve = expectThrows(IllegalArgumentException.class, - () -> new GetApiKeyRequest(inputs[caseNo][0], inputs[caseNo][1], inputs[caseNo][2], inputs[caseNo][3])); + () -> new GetApiKeyRequest(inputs[caseNo][0], inputs[caseNo][1], inputs[caseNo][2], inputs[caseNo][3], + Boolean.valueOf(inputs[caseNo][4]))); assertNotNull(ve); assertThat(ve.getMessage(), equalTo(expectedErrorMessages[caseNo])); } } + + private static String randomNullOrEmptyString() { + return randomBoolean() ? 
"" : null; + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/InvalidateApiKeyRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/InvalidateApiKeyRequestTests.java index 25ee4bb05bcc4..a29adb9ea382b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/InvalidateApiKeyRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/InvalidateApiKeyRequestTests.java @@ -31,10 +31,10 @@ public class InvalidateApiKeyRequestTests extends ESTestCase { public void testRequestValidation() { - InvalidateApiKeyRequest request = InvalidateApiKeyRequest.usingApiKeyId(randomAlphaOfLength(5)); + InvalidateApiKeyRequest request = InvalidateApiKeyRequest.usingApiKeyId(randomAlphaOfLength(5), randomBoolean()); Optional ve = request.validate(); assertThat(ve.isPresent(), is(false)); - request = InvalidateApiKeyRequest.usingApiKeyName(randomAlphaOfLength(5)); + request = InvalidateApiKeyRequest.usingApiKeyName(randomAlphaOfLength(5), randomBoolean()); ve = request.validate(); assertThat(ve.isPresent(), is(false)); request = InvalidateApiKeyRequest.usingRealmName(randomAlphaOfLength(5)); @@ -46,28 +46,40 @@ public void testRequestValidation() { request = InvalidateApiKeyRequest.usingRealmAndUserName(randomAlphaOfLength(5), randomAlphaOfLength(7)); ve = request.validate(); assertThat(ve.isPresent(), is(false)); + request = InvalidateApiKeyRequest.forOwnedApiKeys(); + ve = request.validate(); + assertFalse(ve.isPresent()); } public void testRequestValidationFailureScenarios() throws IOException { String[][] inputs = new String[][] { - { randomFrom(new String[] { null, "" }), randomFrom(new String[] { null, "" }), randomFrom(new String[] { null, "" }), - randomFrom(new String[] { null, "" }) }, - { randomFrom(new String[] { null, "" }), "user", "api-kid", "api-kname" }, - { "realm", randomFrom(new String[] { null, "" }), "api-kid", "api-kname" }, - { "realm", "user", "api-kid", randomFrom(new String[] { null, "" }) }, - { randomFrom(new String[] { null, "" }), randomFrom(new String[] { null, "" }), "api-kid", "api-kname" } }; - String[] expectedErrorMessages = new String[] { "One of [api key id, api key name, username, realm name] must be specified", + { randomNullOrEmptyString(), randomNullOrEmptyString(), randomNullOrEmptyString(), randomNullOrEmptyString(), "false" }, + { randomNullOrEmptyString(), "user", "api-kid", "api-kname", "false" }, + { "realm", randomNullOrEmptyString(), "api-kid", "api-kname", "false" }, + { "realm", "user", "api-kid", randomNullOrEmptyString(), "false" }, + { randomNullOrEmptyString(), randomNullOrEmptyString(), "api-kid", "api-kname", "false" }, + { "realm", randomNullOrEmptyString(), randomNullOrEmptyString(), randomNullOrEmptyString(), "true" }, + { randomNullOrEmptyString(), "user", randomNullOrEmptyString(), randomNullOrEmptyString(), "true" } }; + String[] expectedErrorMessages = new String[] { + "One of [api key id, api key name, username, realm name] must be specified if [owner] flag is false", "username or realm name must not be specified when the api key id or api key name is specified", "username or realm name must not be specified when the api key id or api key name is specified", "username or realm name must not be specified when the api key id or api key name is specified", - "only one of [api key id, api key name] can be specified" }; + "only one of [api key id, api key name] can be specified", + "neither username nor realm-name 
may be specified when invalidating owned API keys", + "neither username nor realm-name may be specified when invalidating owned API keys" }; for (int i = 0; i < inputs.length; i++) { final int caseNo = i; IllegalArgumentException ve = expectThrows(IllegalArgumentException.class, - () -> new InvalidateApiKeyRequest(inputs[caseNo][0], inputs[caseNo][1], inputs[caseNo][2], inputs[caseNo][3])); + () -> new InvalidateApiKeyRequest(inputs[caseNo][0], inputs[caseNo][1], inputs[caseNo][2], inputs[caseNo][3], + Boolean.valueOf(inputs[caseNo][4]))); assertNotNull(ve); assertThat(ve.getMessage(), equalTo(expectedErrorMessages[caseNo])); } } + + private static String randomNullOrEmptyString() { + return randomBoolean() ? "" : null; + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/hlrc/HasPrivilegesResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/hlrc/HasPrivilegesResponseTests.java index 59228989e6949..83f3e4e4c3cf3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/hlrc/HasPrivilegesResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/hlrc/HasPrivilegesResponseTests.java @@ -82,7 +82,7 @@ public void testToXContent() throws Exception { } @Override - protected org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse createServerTestInstance() { + protected org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse createServerTestInstance(XContentType xContentType) { return randomResponse(); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/GetWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/GetWatchResponseTests.java index b69ea90a49e4a..6c65196f20421 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/GetWatchResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/GetWatchResponseTests.java @@ -43,7 +43,7 @@ public class GetWatchResponseTests extends AbstractResponseTestCase { @Override - protected GetWatchResponse createServerTestInstance() { + protected GetWatchResponse createServerTestInstance(XContentType xContentType) { String id = randomAlphaOfLength(10); if (LuceneTestCase.rarely()) { return new GetWatchResponse(id); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/DeleteWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/DeleteWatchResponseTests.java index 493375c451745..16a31f09377dd 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/DeleteWatchResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/DeleteWatchResponseTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.client.watcher.DeleteWatchResponse; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; @@ -30,7 +31,7 @@ public class DeleteWatchResponseTests extends AbstractResponseTestCase< org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse, DeleteWatchResponse> { @Override - protected org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse createServerTestInstance() { + protected org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse createServerTestInstance(XContentType 
xContentType) { String id = randomAlphaOfLength(10); long version = randomLongBetween(1, 10); boolean found = randomBoolean(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/ExecuteWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/ExecuteWatchResponseTests.java index c1492eb53020f..593e39d942dae 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/ExecuteWatchResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/ExecuteWatchResponseTests.java @@ -35,7 +35,7 @@ public class ExecuteWatchResponseTests extends AbstractResponseTestCase< ExecuteWatchResponse, org.elasticsearch.client.watcher.ExecuteWatchResponse> { @Override - protected ExecuteWatchResponse createServerTestInstance() { + protected ExecuteWatchResponse createServerTestInstance(XContentType xContentType) { String id = "my_watch_0-2015-06-02T23:17:55.124Z"; try { XContentBuilder builder = XContentFactory.jsonBuilder(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/PutWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/PutWatchResponseTests.java index a47de0d15fda6..0d8097afdd9de 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/PutWatchResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/PutWatchResponseTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.client.watcher.PutWatchResponse; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; @@ -30,7 +31,7 @@ public class PutWatchResponseTests extends AbstractResponseTestCase< org.elasticsearch.protocol.xpack.watcher.PutWatchResponse, PutWatchResponse> { @Override - protected org.elasticsearch.protocol.xpack.watcher.PutWatchResponse createServerTestInstance() { + protected org.elasticsearch.protocol.xpack.watcher.PutWatchResponse createServerTestInstance(XContentType xContentType) { String id = randomAlphaOfLength(10); long seqNo = randomNonNegativeLong(); long primaryTerm = randomLongBetween(1, 20); diff --git a/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/README.asciidoc b/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/README.asciidoc new file mode 100644 index 0000000000000..3230bdde7e2ce --- /dev/null +++ b/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/README.asciidoc @@ -0,0 +1,35 @@ += Certificate Chain details +This document details the steps used to create the certificate chain in this directory. +The chain has a length of 3: the Root CA, the Intermediate CA and the Client Certificate. +All openssl commands use the same configuration file, albeit different sections of it. +The OpenSSL Configuration file is located in this directory as `openssl_config.cnf`. + +== Instructions on generating self-signed Root CA +The self-signed Root CA, 'testRootCA.crt', and its associated private key in this directory +have been generated using the following openssl commands. 
+ +[source,shell] +----------------------------------------------------------------------------------------------------------- +openssl genrsa -out testRootCA.key 2048 +openssl req -x509 -new -key testRootCA.key -days 1460 -subj "/CN=Elasticsearch Test Root CA/OU=elasticsearch/O=org" -out testRootCA.crt -config ./openssl_config.cnf +----------------------------------------------------------------------------------------------------------- + +== Instructions on generating the Intermediate CA +The `testIntermediateCA.crt` CA certificate is "issued" by the `testRootCA.crt`. + +[source,shell] +----------------------------------------------------------------------------------------------------------- +openssl genrsa -out testIntermediateCA.key 2048 +openssl req -new -key testIntermediateCA.key -subj "/CN=Elasticsearch Test Intermediate CA/OU=Elasticsearch/O=org" -out testIntermediateCA.csr -config ./openssl_config.cnf +openssl x509 -req -in testIntermediateCA.csr -CA testRootCA.crt -CAkey testRootCA.key -CAcreateserial -out testIntermediateCA.crt -days 1460 -sha256 -extensions v3_ca -extfile ./openssl_config.cnf +----------------------------------------------------------------------------------------------------------- + +== Instructions on generating the Client Certificate +The `testClient.crt` end entity certificate is "issued" by the `testIntermediateCA.crt`. + +[source,shell] +----------------------------------------------------------------------------------------------------------- +openssl genrsa -out testClient.key 2048 +openssl req -new -key testClient.key -subj "/CN=Elasticsearch Test Client/OU=Elasticsearch/O=org" -out testClient.csr -config ./openssl_config.cnf +openssl x509 -req -in testClient.csr -CA testIntermediateCA.crt -CAkey testIntermediateCA.key -CAcreateserial -out testClient.crt -days 1460 -sha256 -extensions usr_cert -extfile ./openssl_config.cnf +----------------------------------------------------------------------------------------------------------- diff --git a/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/openssl_config.cnf b/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/openssl_config.cnf new file mode 100644 index 0000000000000..64ff556f35219 --- /dev/null +++ b/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/openssl_config.cnf @@ -0,0 +1,185 @@ +#################################################################### +# CA Definition +[ ca ] +default_ca = CA_default # The default ca section + +#################################################################### +# Per the above, this is where we define CA values +[ CA_default ] + +# By default we use "user certificate" extensions when signing +x509_extensions = usr_cert # The extentions to add to the cert + +# Honor extensions requested of us +copy_extensions = copy + +# Comment out the following two lines for the "traditional" +# (and highly broken) format. +name_opt = ca_default # Subject Name options +cert_opt = ca_default # Certificate field options + +# Extensions to add to a CRL. Note: Netscape communicator chokes on V2 CRLs +# so this is commented out by default to leave a V1 CRL. +# crlnumber must also be commented out to leave a V1 CRL. +#crl_extensions = crl_ext +default_days = 1460 # how long to certify for +default_md = sha256 # which md to use. 
+preserve = no # keep passed DN ordering + +# A few difference way of specifying how similar the request should look +# For type CA, the listed attributes must be the same, and the optional +# and supplied fields are just that :-) +policy = policy_anything + +#################################################################### +# The default policy for the CA when signing requests, requires some +# resemblence to the CA cert +# +[ policy_match ] +countryName = match # Must be the same as the CA +stateOrProvinceName = match # Must be the same as the CA +organizationName = match # Must be the same as the CA +organizationalUnitName = optional # not required +commonName = supplied # must be there, whatever it is +emailAddress = optional # not required + +#################################################################### +# An alternative policy not referred to anywhere in this file. Can +# be used by specifying '-policy policy_anything' to ca(8). +# +[ policy_anything ] +countryName = optional +stateOrProvinceName = optional +localityName = optional +organizationName = optional +organizationalUnitName = optional +commonName = supplied +emailAddress = optional + +#################################################################### +# This is where we define how to generate CSRs +[ req ] +default_bits = 2048 +default_keyfile = privkey.pem +distinguished_name = req_distinguished_name # where to get DN for reqs +attributes = req_attributes # req attributes +x509_extensions = v3_ca # The extentions to add to self signed certs +req_extensions = v3_req # The extensions to add to req's + +# This sets a mask for permitted string types. There are several options. +# default: PrintableString, T61String, BMPString. +# pkix : PrintableString, BMPString. +# utf8only: only UTF8Strings. +# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings). +# MASK:XXXX a literal mask value. +# WARNING: current versions of Netscape crash on BMPStrings or UTF8Strings +# so use this option with caution! +string_mask = nombstr + + +#################################################################### +# Per "req" section, this is where we define DN info +[ req_distinguished_name ] + +0.organizationName = Organization Name (company) +0.organizationName_default = org + +organizationalUnitName = Organizational Unit Name (eg, section) +organizationalUnitName_default = elasticsearch + +commonName = Common Name (hostname, IP, or your name) +commonName_default = Elasticsearch Test Certificate +commonName_max = 64 + +#################################################################### +# We don't want these, but the section must exist +[ req_attributes ] +#challengePassword = A challenge password +#challengePassword_min = 4 +#challengePassword_max = 20 +#unstructuredName = An optional company name + + +#################################################################### +# Extensions for when we sign normal certs (specified as default) +[ usr_cert ] + +# User certs aren't CAs, by definition +basicConstraints=CA:false + +# Here are some examples of the usage of nsCertType. If it is omitted +# the certificate can be used for anything *except* object signing. +# This is OK for an SSL server. +#nsCertType = server +# For an object signing certificate this would be used. +#nsCertType = objsign +# For normal client use this is typical +#nsCertType = client, email +# and for everything including object signing: +#nsCertType = client, email, objsign +# This is typical in keyUsage for a client certificate. 
+#keyUsage = nonRepudiation, digitalSignature, keyEncipherment + +# PKIX recommendations harmless if included in all certificates. +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid,issuer + +# This stuff is for subjectAltName and issuerAltname. +# Import the email address. +#subjectAltName=email:copy +# An alternative to produce certificates that aren't +# deprecated according to PKIX. +#subjectAltName=email:move + + +#################################################################### +# Extension for requests +[ v3_req ] +basicConstraints = CA:FALSE + +# PKIX recommendation. +subjectKeyIdentifier = hash + +subjectAltName = @alt_names + +#################################################################### +# An alternative section of extensions, not referred to anywhere +# else in the config. We'll use this via '-extensions v3_ca' when +# using ca(8) to sign another CA. +# +[ v3_ca ] + +# PKIX recommendation. +subjectKeyIdentifier=hash +authorityKeyIdentifier = keyid,issuer + +# This is what PKIX recommends but some broken software chokes on critical +# extensions. +#basicConstraints = critical,CA:true +# So we do this instead. +basicConstraints = CA:true + +# Key usage: this is typical for a CA certificate. However since it will +# prevent it being used as an test self-signed certificate it is best +# left out by default. +# keyUsage = cRLSign, keyCertSign + +# Some might want this also +# nsCertType = sslCA, emailCA + +# Include email address in subject alt name: another PKIX recommendation +#subjectAltName=email:move +# Copy issuer details +#issuerAltName=issuer:copy + +subjectAltName = @alt_names + +[ alt_names ] +DNS.1 = localhost +DNS.2 = localhost.localdomain +DNS.3 = localhost4 +DNS.4 = localhost4.localdomain4 +DNS.5 = localhost6 +DNS.6 = localhost6.localdomain6 +IP.1 = 127.0.0.1 +IP.2 = ::1 diff --git a/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/testClient.crt b/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/testClient.crt new file mode 100644 index 0000000000000..45efce91ef33a --- /dev/null +++ b/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/testClient.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIJAIxTS7Qdho9jMA0GCSqGSIb3DQEBCwUAMFMxKzApBgNV +BAMTIkVsYXN0aWNzZWFyY2ggVGVzdCBJbnRlcm1lZGlhdGUgQ0ExFjAUBgNVBAsT +DUVsYXN0aWNzZWFyY2gxDDAKBgNVBAoTA29yZzAeFw0xOTA3MTkxMzMzNDFaFw0y +MzA3MTgxMzMzNDFaMEoxIjAgBgNVBAMTGUVsYXN0aWNzZWFyY2ggVGVzdCBDbGll +bnQxFjAUBgNVBAsTDUVsYXN0aWNzZWFyY2gxDDAKBgNVBAoTA29yZzCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBANHgMX2aX8t0nj4sGLNuKISmmXIYCj9R +wRqS7L03l9Nng7kOKnhHu/nXDt7zMRJyHj+q6FAt5khlavYSVCQyrDybRuA5z31g +OdqXerrjs2OXS5HSHNvoDAnHFsaYX/5geMewVTtc/vqpd7Ph/QtaKfmG2FK0JNQo +0k24tcgCIcyMtBh6BA70yGBM0OT8GdOgd/d/mA7mRhaxIUMNYQzRYRsp4hMnnWoO +TkR5Q8KSO3MKw9dPSpPe8EnwtJE10S3s5aXmgytru/xQqrFycPBNj4KbKVmqMP0G +60CzXik5pr2LNvOFz3Qb6sYJtqeZF+JKgGWdaTC89m63+TEnUHqk0lcCAwEAAaNN +MEswCQYDVR0TBAIwADAdBgNVHQ4EFgQU/+aAD6Q4mFq1vpHorC25/OY5zjcwHwYD +VR0jBBgwFoAU8siFCiMiYZZm/95qFC75AG/LRE0wDQYJKoZIhvcNAQELBQADggEB +AIRpCgDLpvXcgDHUk10uhxev21mlIbU+VP46ANnCuj0UELhTrdTuWvO1PAI4z+Wb +DUxryQfOOXO9R6D0dE5yR56L/J7d+KayW34zU7yRDZM7+rXpocdQ1Ex8mjP9HJ/B +f56YZTBQJpXeDrKow4FvtkI3bcIMkqmbG16LHQXeG3RS4ds4S4wCnE2nA6vIn9y+ +4R999q6y1VSBORrYULcDWxS54plHLEdiMr1vVallg82AGobS9GMcTL2U4Nx5IYZG +7sbTk3LrDxVpVg/S2wLofEdOEwqCeHug/iOihNLJBabEW6z4TDLJAVW5KCY1Dfhk +YlBfHn7vxKkfKoCUK/yLWWI= +-----END CERTIFICATE----- diff --git 
a/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/testClient.key b/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/testClient.key new file mode 100644 index 0000000000000..186e6f86745f1 --- /dev/null +++ b/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/testClient.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA0eAxfZpfy3SePiwYs24ohKaZchgKP1HBGpLsvTeX02eDuQ4q +eEe7+dcO3vMxEnIeP6roUC3mSGVq9hJUJDKsPJtG4DnPfWA52pd6uuOzY5dLkdIc +2+gMCccWxphf/mB4x7BVO1z++ql3s+H9C1op+YbYUrQk1CjSTbi1yAIhzIy0GHoE +DvTIYEzQ5PwZ06B393+YDuZGFrEhQw1hDNFhGyniEyedag5ORHlDwpI7cwrD109K +k97wSfC0kTXRLezlpeaDK2u7/FCqsXJw8E2PgpspWaow/QbrQLNeKTmmvYs284XP +dBvqxgm2p5kX4kqAZZ1pMLz2brf5MSdQeqTSVwIDAQABAoIBAQDAjP767Ioc4LZZ +9h0HafaUlUDMs4+bPkd7OPcoNnv+AceRHZULW0zz0EIdfGM2OCrWYNfYz/Op0hpK +/s/hkfgBdriU+ZUKwyDxEu8Pzd6EbYdwlqPRgdihk92qgJv5hsro8jeQSibJFHf1 +Ok3tf2BpRTTs08fCOl2P3vowMPyPa5Ho9bf4lzP8IsR2BZvoaev3za9ZWR6ZDzE6 +EWkBBNgIU4aPn1IJ6dz2+rVtN6+xXET0eYSBEac3xMQaPWLEX0EDBYPW1d+mUva/ +3lJvTrs3g8oyiTyVu0l9Yxdgox1mtgmrqqwxJ6XuouzImuXMMDXaz0K/E/+u2yPF +V6kRvWuJAoGBAPOnEgBC3ezl+x+47cgbwpy97uZhZmV9HkMrSH9DKDwC+t57TdGX +ypt2S/IS/vbPupFv0aHaWmJ6SN/HyTN4znwuulV3kE8mEpQzIPbluWfgQzT6ukJe ++YFI/+IXwIRBLA7khtfo01LGHSmLTENsnd/aoRySY3K6zJz36Ys3vFdjAoGBANyC +7rF5YjPdgsAgOT7EboNGkc8UuW/Sh3xRp0c4Y+PBenf60yA5XkRJLYR4sZDjWTr0 +aKBY7Y8r+59U+bBrwUuhhoW08JZ/SBWja05+4DhH0ToA3vtbPv9lRyQfkF1DdBkn +XpyM2vaJE5M454acwnKJ81AyoueYtZ8pD3Q7c219AoGAJ+F1wdMwDgGKvCOB0Boz +HYK9IrpYj04OcQIZqLLuV/xI4befAiptQEr5nVLcprtTl1CNKIfb+Xh4iyBhX2pr +qcngN/MNDNd3fQhtYdwyH72GYpqTeB+hiTbQo0ot+bfNJVbkd1ylkkvZJB6nyfVy +VdysOEgBvRq0OREfCemCi28CgYEAoF1EE6NQDKICTZDhsMkQCb5PmcbbmPwFdh63 +xW64DlGNrCWoVt4BtS12wck4cUM1iE9oq3wgv6df5Z7ZuziSKVt9xk0xTnGgTcQ7 +7KkOjT+FZGZvw2K3bOsNkrK1vW2pyAU+pCE3uGU17DJNBjOIod27Kk649C61ntsw +lvoJVs0CgYBLr9pzBRPyD5/lM9hm2EI7ITa+fVcu3V3bJfXENHKzpb0lB2fhl0PI +swpiU8RUEKWyjBuHsdQdxg7AgFi/7s+SX7KLo4cudDRd73iiXYdNGB7R0/MAG8Jl +/lMXn14noS4trA8fNGGg/2fANTBtLTbOX9i4s7clAo8ETywQ33owug== +-----END RSA PRIVATE KEY----- diff --git a/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/testIntermediateCA.crt b/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/testIntermediateCA.crt new file mode 100644 index 0000000000000..7d8781b888901 --- /dev/null +++ b/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/testIntermediateCA.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEBTCCAu2gAwIBAgIJAIx9twpbtGkCMA0GCSqGSIb3DQEBCwUAMEsxIzAhBgNV +BAMTGkVsYXN0aWNzZWFyY2ggVGVzdCBSb290IENBMRYwFAYDVQQLEw1lbGFzdGlj +c2VhcmNoMQwwCgYDVQQKEwNvcmcwHhcNMTkwNzE5MTMzMjM0WhcNMjMwNzE4MTMz +MjM0WjBTMSswKQYDVQQDEyJFbGFzdGljc2VhcmNoIFRlc3QgSW50ZXJtZWRpYXRl +IENBMRYwFAYDVQQLEw1FbGFzdGljc2VhcmNoMQwwCgYDVQQKEwNvcmcwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCnJ2KTJZnQzOt0uUf+5oLNcvDLnnWY +LzXZpOOX666Almwx+PVkDxkiGSe0QB9RWJqHSrsP1ryGIeCIzGMOctLt6QA7Peee +HdrKqOQgN620nDSd2EZ3s0Iddh1Ns/lfTtBJCP/03suaktm7j8EYKAyOlTIUhiKm +sTFlxPUSKjbtR4wR1ljnKN8X+j/ghr9mWhQrMR9rsGFObU8DQFho2Ti90C4HoMNU +dy4j+2G3VVpaq4he4/4CbPrWQQ3dKGpzVAngIuAv4eQ/y88EHAFwutxQZWAew4Va +5y3O112acSb9oC7g0NHQcBnos/WIChF5ki8V3LFnxN7jYvUUk9YxfA8hAgMBAAGj +geMwgeAwHQYDVR0OBBYEFPLIhQojImGWZv/eahQu+QBvy0RNMB8GA1UdIwQYMBaA +FM4SyNzpz82ihQ160zrLUVaWfI+1MAwGA1UdEwQFMAMBAf8wgY8GA1UdEQSBhzCB +hIIJbG9jYWxob3N0ghVsb2NhbGhvc3QubG9jYWxkb21haW6CCmxvY2FsaG9zdDSC 
+F2xvY2FsaG9zdDQubG9jYWxkb21haW40ggpsb2NhbGhvc3Q2ghdsb2NhbGhvc3Q2 +LmxvY2FsZG9tYWluNocEfwAAAYcQAAAAAAAAAAAAAAAAAAAAATANBgkqhkiG9w0B +AQsFAAOCAQEAMkh4nUi2yt5TX+ryBWaaA4/2ZOsxSeec5E1EjemPMUWGzFipV1YY +k/mpv51E+BbPgtmGMG8Win/PETKYuX8D+zPauFEmJmyJmm5B4mr1406RWERqNDql +36sOw89G0mDT/wIB4tkNdh830ml+d75aRVVB4X5pFAE8ZzI3g4OW4YxT3ZfUEhDl +QeGVatobvIaX8KpNSevjFAFuQzSgj61VXI+2+UIRV4tJP2xEqu5ISuArHcGhvNlS +bU3vZ80tTCa0tHyJrVqaqtQ23MDBzYPj6wJ/pvBQWAgZKnC3qJgXlJ9des117I1g +J98AXCDGu5LBW/p2C9VpSktpnfzsX4NHqg== +-----END CERTIFICATE----- diff --git a/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/testIntermediateCA.key b/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/testIntermediateCA.key new file mode 100644 index 0000000000000..5147725f4486a --- /dev/null +++ b/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/testIntermediateCA.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEApydikyWZ0MzrdLlH/uaCzXLwy551mC812aTjl+uugJZsMfj1 +ZA8ZIhkntEAfUViah0q7D9a8hiHgiMxjDnLS7ekAOz3nnh3ayqjkIDettJw0ndhG +d7NCHXYdTbP5X07QSQj/9N7LmpLZu4/BGCgMjpUyFIYiprExZcT1Eio27UeMEdZY +5yjfF/o/4Ia/ZloUKzEfa7BhTm1PA0BYaNk4vdAuB6DDVHcuI/tht1VaWquIXuP+ +Amz61kEN3Shqc1QJ4CLgL+HkP8vPBBwBcLrcUGVgHsOFWuctztddmnEm/aAu4NDR +0HAZ6LP1iAoReZIvFdyxZ8Te42L1FJPWMXwPIQIDAQABAoIBABp4z1C0dL6vpV5v +9Wn2AaMd3+qvZro6R9H3HiAyMAmnSO1FGz/EcFuJFlOikBMm8BobCLMCdAreFJw1 +mj5wit0ouGOpcyQEYGEWDELZ7oWa825IESjl18OosA1dQlIIvk3Cwh56pk4NkbP1 +mUQFG6/9CthbQeOaTlNqtNEypE5Bc+JGbQaUhRP6tF+Rxnpys2nIJt/Vp9khw0Du +K7Z6astunhfPDwLFGwHhflc6re1B+mxpLKTDHCcydJo2Kuh/LuuEtPkE5Ar4LwQk +D+/61iZHC4B8/4IkBlAsgCJ1B18L6JdTbSYeVlepkSkJML5t6z+cvt5VcObF7F8X +pPZn+kECgYEA2NaB0eshWNnHTMRv+sE92DCv0M7uV1eKtaopxOElAKJ/J2gpqcTh +GzdTVRg1M2LgVNk97ViL5bsXaVStRe085m8oA0bI9WbIoQRUFp40dRFRUjl+4TN0 +pdxXL4VmQMWuwlO6p8/JY8sInnHVCT+2z8lek8P3bdtTQZV9OZQTn0kCgYEAxVe8 +obJdnUSXuRDWg588TW35PNqOTJcerIU6eRKwafvCcrhMoX62Xbv6y6kKXndW/JuW +AbfSNiAOV+HGUbf8Xc54Xzk2mouoJA0S0tJ040jqOkFOaKIxYQudTU8y9bTXNsAk +oX3wOhlt2q9xffAK1gYffP5XPXnYnsb8qaMIeRkCgYBM9yaxOgJmJTbGmtscaEbp +W66sMScMPXhwruuQhFG7/fGgLSrMpaM5I9QiWitYB/qUY1/FxS4y5suSiYnPTjvV +lxLexttBr6/65yxpstHv06vHwby1dqwqyyDvLyxyRTiYpVuVgP18vG5cvw7c746W +BmXZkS9cAQN2Pfdq3pJwcQKBgEbCZd2owg5hCPIPyosZbpro4uRiDYIC8bm0b7n3 +7I+j+R3/XWLOt382pv+dlh03N1aORyRIkDReHCaAywaELRZJsTmbnyudBeYfVe+I +DOduPqYywnWcKo58hqOw0Tnu5Pg5vyi0qo16jrxKCiy5BHmnamT8IbXmWbjc6r28 +uo4JAoGAfAPvPJ2fV5vpzr4LPoVyaSiFj414D+5XYxX6CWpdTryelpP2Rs1VfJ1a +7EusUtWs26pAKwttDY4yoTvog7rrskgtXzisaoNMDbH/PfsoqjMnnIgakvKmHpUM +l6E1ecWFExEg5v6yvmxFC7JIUzIYOoysWu3X44G8rQ+vDQNRFZQ= +-----END RSA PRIVATE KEY----- diff --git a/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/testRootCA.crt b/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/testRootCA.crt new file mode 100644 index 0000000000000..50ba7a21727a6 --- /dev/null +++ b/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/testRootCA.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID/TCCAuWgAwIBAgIJAIAPVUXOUQDNMA0GCSqGSIb3DQEBCwUAMEsxIzAhBgNV +BAMTGkVsYXN0aWNzZWFyY2ggVGVzdCBSb290IENBMRYwFAYDVQQLEw1lbGFzdGlj +c2VhcmNoMQwwCgYDVQQKEwNvcmcwHhcNMTkwNzE5MTMzMjIwWhcNMjMwNzE4MTMz +MjIwWjBLMSMwIQYDVQQDExpFbGFzdGljc2VhcmNoIFRlc3QgUm9vdCBDQTEWMBQG +A1UECxMNZWxhc3RpY3NlYXJjaDEMMAoGA1UEChMDb3JnMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAzIgn8r2kirt90id0uoi6YEGBPx+XDzthLbLsN+M0 
+nXhj40OVcGPiww+cre14bJr0M6MG4CvFjRJc92RoVrE8+7XOKt0bgiHeVM+b0LEh +wVMH9koararPVMo0CjCMN4ChHMOWKBPUNZswvk+pFC+QbTcfgQLycqh+lTB1O6l3 +hPnmunEqhLIj9ke3FwA326igdb+16EbKYVL2c5unNoC5ZMc5Z9bnn4/GNXptkHhy ++SvG7IZKW2pAzei3Df/n47ZhJfQKERUCe9eO7b/ZmTEzAzYj9xucE5lYcpkOZd6g +IMU3vXe4FeD/BM4sOLkKTtMejiElEecxw8cLI9Nji/0y1wIDAQABo4HjMIHgMB0G +A1UdDgQWBBTOEsjc6c/NooUNetM6y1FWlnyPtTAfBgNVHSMEGDAWgBTOEsjc6c/N +ooUNetM6y1FWlnyPtTAMBgNVHRMEBTADAQH/MIGPBgNVHREEgYcwgYSCCWxvY2Fs +aG9zdIIVbG9jYWxob3N0LmxvY2FsZG9tYWluggpsb2NhbGhvc3Q0ghdsb2NhbGhv +c3Q0LmxvY2FsZG9tYWluNIIKbG9jYWxob3N0NoIXbG9jYWxob3N0Ni5sb2NhbGRv +bWFpbjaHBH8AAAGHEAAAAAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQELBQADggEB +ACHjwoDJILv77sQ5QN6SoAp6GYqiC9/doDIzDFCd/WP7G8EbaosHM6jM7NbrlK3g +PNTzuY1pLPoI3YJSO4Al/UfzEffaYSbZC2QZG9F6fUSWhvR+nxzPSXWkjzIInv1j +pPMgnUl6oJaUbsSR/evtvWNSxrM3LewkRTOoktkXM6SjTUHjdP6ikrkrarrWZgzr +K30BqGL6kDSv9LkyXe6RSgQDtQe51Yut+lKGCcy8AoEwG/3cjb7XnrWcFsJXjYbf +4m3QsS8yHU/O/xgyvVHOfki+uGVepzSjdzDMLE1GBkju05NR2eJZ8omj/QiJa0+z +1d/AOKExvWvo1yQ28ORcwo4= +-----END CERTIFICATE----- diff --git a/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/testRootCA.key b/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/testRootCA.key new file mode 100644 index 0000000000000..148bbd52bd76f --- /dev/null +++ b/client/rest-high-level/src/test/resources/org/elasticsearch/client/security/delegate_pki/testRootCA.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAzIgn8r2kirt90id0uoi6YEGBPx+XDzthLbLsN+M0nXhj40OV +cGPiww+cre14bJr0M6MG4CvFjRJc92RoVrE8+7XOKt0bgiHeVM+b0LEhwVMH9koa +rarPVMo0CjCMN4ChHMOWKBPUNZswvk+pFC+QbTcfgQLycqh+lTB1O6l3hPnmunEq +hLIj9ke3FwA326igdb+16EbKYVL2c5unNoC5ZMc5Z9bnn4/GNXptkHhy+SvG7IZK +W2pAzei3Df/n47ZhJfQKERUCe9eO7b/ZmTEzAzYj9xucE5lYcpkOZd6gIMU3vXe4 +FeD/BM4sOLkKTtMejiElEecxw8cLI9Nji/0y1wIDAQABAoIBAQC6LMnoPFW1brs1 ++3JWhTTZf2btlYzEcbGgjnhU2v0+xaJu8UrrFhEIq4JcE4gFm/rjsecFUPKu2eND +0eLj3st699+lxsRObRPbMWtMyJ/IQRNDTesA4DV/odtC1zQbJXwCGcrpyjrlXNE+ +unZWiIE32PBVV+BnHBa1KHneCAFiSRLrySAiDAnTIJxB6ufweoxevLoJPPNLlbo7 +H2jv6g1Som/Imjhof4KhD/1Q04Sed2wScSS/7Bz38eO68HG4NMFY+M2/cLzrbflg +QdeKHNhoIGnSFMEW5TCVlI4qrP8zvPPdZmLOMBT+Ocm3pc5xDAPwFYCe8wH1DVn+ +b3sVpwu5AoGBAOhFA7gUDZjRBkNAqJfbUdhdWSslePQsjeTKsu5rc4gk2aiL4bZ4 +fxG0Dq1hX7FjAmYrGqnsXsbxxDnCkhXGH1lY73kF0Zzwr2Pg1yRHyn1nCinhD4g4 +G2vBr37QtWn4wS/L7V//D3xrcCTG3QgAmvZZ99tYgqlmnUzmawdZ8kQ7AoGBAOFt +qg7sTSNWVpKkfkyX2NXvBMt5e3Qcwnge2pX+SBgljwjNUwSSMLwxdBDSyDXIhk8W +s4pJLtMDJsT/2WBKC9WJm9m3gc7yYZznLJ+5YPcieXHGGNXCRldPePhTIjnL591H +CSXoc3BZ2iKK745BYuPqSuLb2XfE3/hwoaFR4S4VAoGAQ6ywG7dECu2ELJ4vQSe2 +3hq8u1SMvGAq66mfntYR8G4EORagqkDLjUXwLNY9Qnr9nPUcLLxhFQgmS0oEtHFo +eujtxU5Lt7Vs9OXy6XA9cHJQRMl9dAwc+TWSw5ld8kV3TEzXmevAAFlxcFW82vMK +M5MdI3zTfTYXyOst7hNoAjcCgYAhz/cgAeWYFU0q9a1UA7qsbAuGEZSo1997cPVM +ZjWeGZQYt+Np3hudPrWwCE2rc4Zhun/3j/6L+/8GsXGDddfMkbVktJet2ME3bZ1N +39phdzRMEnCLL3aphewZIy8RCDqhABSpMPKPuYp0f+5qofgZQ300BdHamxcVBp/X +uJZT+QKBgQDdJQd+QxfCb8BZ11fWtyWJWQWZMmyX2EEbAIMvYQP3xh8PHmw2JoiQ +VQ103bCkegJ1S7ubrGltdt8pyjN4rrByXJmxCe1Y/LSHIp9w8D3jaiLCRSk1EmBw +jXjnZoiJn3GV5jmbV10hzrn7jqRcwhYA5zuoE7qb604V7cPZLzHtog== +-----END RSA PRIVATE KEY----- diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index 6fd17858c9fef..fa3de68cfb87b 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -79,6 +79,7 @@ -Dio.netty.noUnsafe=true -Dio.netty.noKeySetOptimization=true -Dio.netty.recycler.maxCapacityPerThread=0 +-Dio.netty.allocator.numDirectArenas=0 # log4j 2 
-Dlog4j.shutdownHookEnabled=false diff --git a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java index 5871525be2a5c..d0d5bef9cfcf4 100644 --- a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java +++ b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java @@ -63,9 +63,6 @@ static List choose(final List userDefinedJvmOptions) throws Inte ergonomicChoices.add("-Dio.netty.allocator.type=pooled"); } } - if (systemProperties.containsKey("io.netty.allocator.numDirectArenas") == false) { - ergonomicChoices.add("-Dio.netty.allocator.numDirectArenas=0"); - } final long maxDirectMemorySize = extractMaxDirectMemorySize(finalJvmOptions); if (maxDirectMemorySize == 0) { ergonomicChoices.add("-XX:MaxDirectMemorySize=" + heapSize / 2); diff --git a/docs/build.gradle b/docs/build.gradle index a2d13cd0d090b..b7e2f81e3d746 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -218,6 +218,42 @@ buildRestTests.setups['sales'] = ''' {"index":{}} {"date": "2015/03/01 00:00:00", "price": 175, "promoted": false, "rating": 2, "type": "t-shirt"}''' +// Used by cumulative cardinality aggregation docs +buildRestTests.setups['user_hits'] = ''' + - do: + indices.create: + index: user_hits + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + user_id: + type: keyword + timestamp: + type: date + - do: + bulk: + index: user_hits + refresh: true + body: | + {"index":{}} + {"timestamp": "2019-01-01T13:00:00", "user_id": "1"} + {"index":{}} + {"timestamp": "2019-01-01T13:00:00", "user_id": "2"} + {"index":{}} + {"timestamp": "2019-01-02T13:00:00", "user_id": "1"} + {"index":{}} + {"timestamp": "2019-01-02T13:00:00", "user_id": "3"} + {"index":{}} + {"timestamp": "2019-01-03T13:00:00", "user_id": "1"} + {"index":{}} + {"timestamp": "2019-01-03T13:00:00", "user_id": "2"} + {"index":{}} + {"timestamp": "2019-01-03T13:00:00", "user_id": "4"}''' + + // Dummy bank account data used by getting-started.asciidoc buildRestTests.setups['bank'] = ''' - do: diff --git a/docs/java-rest/high-level/getting-started.asciidoc b/docs/java-rest/high-level/getting-started.asciidoc index 89912cc2a4593..0f3b66e667401 100644 --- a/docs/java-rest/high-level/getting-started.asciidoc +++ b/docs/java-rest/high-level/getting-started.asciidoc @@ -154,3 +154,23 @@ executes the request. For example, this is the place where you'd specify a `NodeSelector` to control which node receives the request. See the <> for more examples of customizing the options. +=== Asynchronous usage + +All of the methods across the different clients exist in a traditional synchronous and +asynchronous variant. The difference is that the asynchronous ones use asynchronous requests +in the REST Low Level Client. This is useful if you are making multiple requests or are using, for example, +RxJava, Kotlin coroutines, or similar frameworks. + +The asynchronous methods are recognizable by the fact that they have the word "Async" in their name +and return a `Cancellable` instance. The asynchronous methods accept the same request object +as the synchronous variant and accept a generic `ActionListener<T>` where `T` is the return +type of the synchronous method. + +All asynchronous methods return a `Cancellable` object with a `cancel` method that you may call +in case you want to abort the request.
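A minimal sketch of that pattern, assuming an existing `RestHighLevelClient` named `client` and an already-built `IndexRequest` named `request` (neither is defined in this change), looks like this:

[source,java]
--------------------------------------------------
// Minimal sketch: issue the request asynchronously and keep the Cancellable handle.
Cancellable cancellable = client.indexAsync(request, RequestOptions.DEFAULT,
    new ActionListener<IndexResponse>() {
        @Override
        public void onResponse(IndexResponse indexResponse) {
            // invoked on a client thread when the request succeeds
        }

        @Override
        public void onFailure(Exception e) {
            // invoked on failure, including when the request is cancelled
        }
    });

// Later, if the result is no longer needed, abort the in-flight request:
cancellable.cancel();
--------------------------------------------------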
Cancelling +no longer needed requests is a good way to avoid putting unnecessary +load on Elasticsearch. + +Using the `Cancellable` instance is optional and you can safely ignore this if you have +no need for it. A use case for this would be using it with, for example, Kotlin's `suspendCancellableCoroutine`. + diff --git a/docs/java-rest/high-level/ml/put-data-frame-analytics.asciidoc b/docs/java-rest/high-level/ml/put-data-frame-analytics.asciidoc index e91d88e0499e9..4520026f16694 100644 --- a/docs/java-rest/high-level/ml/put-data-frame-analytics.asciidoc +++ b/docs/java-rest/high-level/ml/put-data-frame-analytics.asciidoc @@ -75,25 +75,45 @@ include-tagged::{doc-tests-file}[{api}-dest-config] ==== Analysis The analysis to be performed. -Currently, only one analysis is supported: +OutlierDetection+. +Currently, the supported analyses are +OutlierDetection+ and +Regression+. + +===== Outlier Detection +OutlierDetection+ analysis can be created in one of two ways: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-analysis-default] +include-tagged::{doc-tests-file}[{api}-outlier-detection-default] -------------------------------------------------- <1> Constructing a new OutlierDetection object with default strategy to determine outliers or ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-analysis-customized] +include-tagged::{doc-tests-file}[{api}-outlier-detection-customized] -------------------------------------------------- <1> Constructing a new OutlierDetection object <2> The method used to perform the analysis <3> Number of neighbors taken into account during analysis +===== Regression + ++Regression+ analysis requires setting the +dependent_variable+ and +has a number of other optional parameters: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-regression] +-------------------------------------------------- +<1> Constructing a new Regression builder object with the required dependent variable +<2> The lambda regularization parameter. A non-negative double. +<3> The gamma regularization parameter. A non-negative double. +<4> The applied shrinkage. A double in [0.001, 1]. +<5> The maximum number of trees the forest is allowed to contain. An integer in [1, 2000]. +<6> The fraction of features which will be used when selecting a random bag for each candidate split. A double in (0, 1]. +<7> The name of the prediction field in the results object. +<8> The percentage of training-eligible rows to be used in training. Defaults to 100%.
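Purely as an illustrative sketch of the builder pattern the callouts above describe, a +Regression+ analysis might be assembled roughly as follows; the setter names here are assumptions mirroring the documented parameters, not code taken from this change:

[source,java]
--------------------------------------------------
// Hypothetical sketch -- builder method names are assumed from the parameter list above.
DataFrameAnalysis regression = Regression.builder("my_dependent_variable") // required dependent variable
    .setLambda(1.0)                          // lambda regularization, non-negative
    .setGamma(5.5)                           // gamma regularization, non-negative
    .setEta(0.5)                             // shrinkage, in [0.001, 1]
    .setMaximumNumberTrees(50)               // maximum number of trees, in [1, 2000]
    .setFeatureBagFraction(0.4)              // feature bag fraction, in (0, 1]
    .setPredictionFieldName("my_prediction") // name of the prediction field in the results
    .setTrainingPercent(50.0)                // percentage of training-eligible rows
    .build();
--------------------------------------------------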
+ ==== Analyzed fields FetchContext object containing fields to be included in / excluded from the analysis @@ -113,4 +133,4 @@ The returned +{response}+ contains the newly created {dataframe-analytics-config ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-response] --------------------------------------------------- \ No newline at end of file +-------------------------------------------------- diff --git a/docs/java-rest/high-level/security/delegate-pki-authentication.asciidoc b/docs/java-rest/high-level/security/delegate-pki-authentication.asciidoc new file mode 100644 index 0000000000000..9cb667c24dc6e --- /dev/null +++ b/docs/java-rest/high-level/security/delegate-pki-authentication.asciidoc @@ -0,0 +1,62 @@ +-- +:api: delegate-pki +:request: DelegatePkiAuthenticationRequest +:response: DelegatePkiAuthenticationResponse +-- + +[id="{upid}-{api}"] +=== Delegate PKI Authentication API + +This API is called by *smart* proxies to Elasticsearch, such as Kibana, that +terminate the user's TLS session but that still wish to authenticate the user +on the Elasticsearch side using a PKI realm, which normally requires users to +authenticate over TLS directly to Elasticsearch. It implements the exchange of +the client's `X509Certificate` chain from the TLS authentication into an +Elasticsearch access token. + +IMPORTANT: The association between the subject public key in the target +certificate and the corresponding private key is *not* validated. This is part +of the TLS authentication process and it is delegated to the proxy calling this +API. The proxy is *trusted* to have performed the TLS authentication, and this +API translates that authentication into an Elasticsearch access token. + +[id="{upid}-{api}-request"] +==== Delegate PKI Authentication Request + +The request contains the client's `X509Certificate` chain. The +certificate chain is represented as a list where the first element is the +target certificate containing the subject distinguished name that is requesting +access. This may be followed by additional certificates, with each subsequent +certificate being the one used to certify the previous one. The certificate +chain is validated according to RFC 5280, by sequentially considering the trust +configuration of every installed `PkiRealm` that has +`PkiRealmSettings#DELEGATION_ENABLED_SETTING` set to `true` (default is +`false`). A successfully trusted target certificate is also subject to +the validation of the subject distinguished name according to that respective +realm's `PkiRealmSettings#USERNAME_PATTERN_SETTING`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SecurityDocumentationIT.java[delegate-pki-request] +-------------------------------------------------- + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Delegate PKI Authentication Response + +The returned +{response}+ contains the following properties: + +`accessToken`:: This is the newly created access token. + It can be used to authenticate to the Elasticsearch cluster. +`type`:: The type of the token, this is always `"Bearer"`. +`expiresIn`:: The length of time (in seconds) until the token will expire. + The token will be considered invalid after that time.
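As a hedged aside (not part of this change), a delegated access token is typically attached to subsequent requests as a `Bearer` credential through `RequestOptions`; the `getAccessToken()` accessor below is assumed from the property list above rather than verified against the client:

[source,java]
--------------------------------------------------
// Sketch: authenticate follow-up calls with the delegated access token.
String accessToken = response.getAccessToken(); // accessor name assumed from the properties above
RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder();
options.addHeader("Authorization", "Bearer " + accessToken);
MainResponse info = client.info(options.build()); // runs as the user named in the delegated certificate
--------------------------------------------------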
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SecurityDocumentationIT.java[delegate-pki-response] +-------------------------------------------------- +<1> The `accessToken` can be used to authenticate to Elasticsearch. + + diff --git a/docs/java-rest/high-level/security/get-api-key.asciidoc b/docs/java-rest/high-level/security/get-api-key.asciidoc index bb98b527d22ba..911acd3e92ef5 100644 --- a/docs/java-rest/high-level/security/get-api-key.asciidoc +++ b/docs/java-rest/high-level/security/get-api-key.asciidoc @@ -21,6 +21,8 @@ The +{request}+ supports retrieving API key information for . All API keys for a specific user in a specific realm +. A specific key or all API keys owned by the current authenticated user + ===== Retrieve a specific API key by its id ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -51,6 +53,12 @@ include-tagged::{doc-tests-file}[get-user-api-keys-request] include-tagged::{doc-tests-file}[get-user-realm-api-keys-request] -------------------------------------------------- +===== Retrieve all API keys for the current authenticated user +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[get-api-keys-owned-by-authenticated-user-request] +-------------------------------------------------- + include::../execution.asciidoc[] [id="{upid}-{api}-response"] diff --git a/docs/java-rest/high-level/security/invalidate-api-key.asciidoc b/docs/java-rest/high-level/security/invalidate-api-key.asciidoc index 7f9c43b3165a8..b8a99f932d93e 100644 --- a/docs/java-rest/high-level/security/invalidate-api-key.asciidoc +++ b/docs/java-rest/high-level/security/invalidate-api-key.asciidoc @@ -21,6 +21,8 @@ The +{request}+ supports invalidating . All API keys for a specific user in a specific realm +. A specific key or all API keys owned by the current authenticated user + ===== Specific API key by API key id ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -51,6 +53,12 @@ include-tagged::{doc-tests-file}[invalidate-user-api-keys-request] include-tagged::{doc-tests-file}[invalidate-user-realm-api-keys-request] -------------------------------------------------- +===== Invalidate all API keys for the current authenticated user +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[invalidate-api-keys-owned-by-authenticated-user-request] +-------------------------------------------------- + include::../execution.asciidoc[] [id="{upid}-{api}-response"] diff --git a/docs/java-rest/low-level/usage.asciidoc b/docs/java-rest/low-level/usage.asciidoc index d0f4b070a55d6..9d55ff79ce261 100644 --- a/docs/java-rest/low-level/usage.asciidoc +++ b/docs/java-rest/low-level/usage.asciidoc @@ -338,6 +338,12 @@ the underlying http client. On the server side, this does not automatically translate to the execution of that request being cancelled, which needs to be specifically implemented in the API itself. +The use of the `Cancellable` instance is optional and you can safely ignore this +if you don't need it. A typical use case for this would be using it together with +frameworks like RxJava or Kotlin's `suspendCancellableCoroutine`. Cancelling +no longer needed requests is a good way to avoid putting unnecessary +load on Elasticsearch.
+ ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-async-cancel] diff --git a/docs/plugins/plugin-script.asciidoc b/docs/plugins/plugin-script.asciidoc index 950a0f86e4149..f7906a0be50ec 100644 --- a/docs/plugins/plugin-script.asciidoc +++ b/docs/plugins/plugin-script.asciidoc @@ -106,6 +106,20 @@ sudo ES_JAVA_OPTS="-Djavax.net.ssl.trustStore=/path/to/trustStore.jks" bin/elast ----------------------------------- -- +[[mandatory-plugins]] +=== Mandatory Plugins + +If you rely on some plugins, you can define mandatory plugins by adding +`plugin.mandatory` setting to the `config/elasticsearch.yml` file, for +example: + +[source,yaml] +-------------------------------------------------- +plugin.mandatory: analysis-icu,lang-js +-------------------------------------------------- + +For safety reasons, a node will not start if it is missing a mandatory plugin. + [[listing-removing-updating]] === Listing, Removing and Updating Installed Plugins @@ -229,17 +243,3 @@ The default location of the `plugins` directory depends on which package you ins * {ref}/zip-windows.html#windows-layout[Directory layout of Windows `.zip` archives] * {ref}/deb.html#deb-layout[Directory layout of Debian package] * {ref}/rpm.html#rpm-layout[Directory layout of RPM] - -[float] -=== Mandatory Plugins - -If you rely on some plugins, you can define mandatory plugins by adding -`plugin.mandatory` setting to the `config/elasticsearch.yml` file, for -example: - -[source,yaml] --------------------------------------------------- -plugin.mandatory: analysis-icu,lang-js --------------------------------------------------- - -For safety reasons, a node will not start if it is missing a mandatory plugin. diff --git a/docs/reference/aggregations/pipeline/cumulative-cardinality-aggregation.asciidoc b/docs/reference/aggregations/pipeline/cumulative-cardinality-aggregation.asciidoc new file mode 100644 index 0000000000000..2e316164d64b9 --- /dev/null +++ b/docs/reference/aggregations/pipeline/cumulative-cardinality-aggregation.asciidoc @@ -0,0 +1,235 @@ +[role="xpack"] +[testenv="basic"] +[[search-aggregations-pipeline-cumulative-cardinality-aggregation]] +=== Cumulative Cardinality Aggregation + +A parent pipeline aggregation which calculates the Cumulative Cardinality in a parent histogram (or date_histogram) +aggregation. The specified metric must be a cardinality aggregation and the enclosing histogram +must have `min_doc_count` set to `0` (default for `histogram` aggregations). + +The `cumulative_cardinality` agg is useful for finding "total new items", like the number of new visitors to your +website each day. A regular cardinality aggregation will tell you how many unique visitors came each day, but doesn't +differentiate between "new" or "repeat" visitors. The Cumulative Cardinality aggregation can be used to determine +how many of each day's unique visitors are "new". 
+ +==== Syntax + +A `cumulative_cardinality` aggregation looks like this in isolation: + +[source,js] +-------------------------------------------------- +{ + "cumulative_cardinality": { + "buckets_path": "my_cardinality_agg" + } +} +-------------------------------------------------- +// NOTCONSOLE + +[[cumulative-cardinality-params]] +.`cumulative_cardinality` Parameters +[options="header"] +|=== +|Parameter Name |Description |Required |Default Value +|`buckets_path` |The path to the cardinality aggregation we wish to find the cumulative cardinality for (see <> for more + details) |Required | +|`format` |format to apply to the output value of this aggregation |Optional |`null` +|=== + +The following snippet calculates the cumulative cardinality of the total daily `users`: + +[source,js] +-------------------------------------------------- +GET /user_hits/_search +{ + "size": 0, + "aggs" : { + "users_per_day" : { + "date_histogram" : { + "field" : "timestamp", + "calendar_interval" : "day" + }, + "aggs": { + "distinct_users": { + "cardinality": { + "field": "user_id" + } + }, + "total_new_users": { + "cumulative_cardinality": { + "buckets_path": "distinct_users" <1> + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:user_hits] + +<1> `buckets_path` instructs this aggregation to use the output of the `distinct_users` aggregation for the cumulative cardinality + +And the following may be the response: + +[source,js] +-------------------------------------------------- +{ + "took": 11, + "timed_out": false, + "_shards": ..., + "hits": ..., + "aggregations": { + "users_per_day": { + "buckets": [ + { + "key_as_string": "2019-01-01T00:00:00.000Z", + "key": 1546300800000, + "doc_count": 2, + "distinct_users": { + "value": 2 + }, + "total_new_users": { + "value": 2 + } + }, + { + "key_as_string": "2019-01-02T00:00:00.000Z", + "key": 1546387200000, + "doc_count": 2, + "distinct_users": { + "value": 2 + }, + "total_new_users": { + "value": 3 + } + }, + { + "key_as_string": "2019-01-03T00:00:00.000Z", + "key": 1546473600000, + "doc_count": 3, + "distinct_users": { + "value": 3 + }, + "total_new_users": { + "value": 4 + } + } + ] + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 11/"took": $body.took/] +// TESTRESPONSE[s/"_shards": \.\.\./"_shards": $body._shards/] +// TESTRESPONSE[s/"hits": \.\.\./"hits": $body.hits/] + + +Note how the second day, `2019-01-02`, has two distinct users but the `total_new_users` metric generated by the +cumulative pipeline agg only increments to three. This means that only one of the two users that day were +new, the other had already been seen in the previous day. This happens again on the third day, where only +one of three users is completely new. + +==== Incremental cumulative cardinality + +The `cumulative_cardinality` agg will show you the total, distinct count since the beginning of the time period +being queried. Sometimes, however, it is useful to see the "incremental" count. Meaning, how many new users +are added each day, rather than the total cumulative count. 
+ +This can be accomplished by adding a `derivative` aggregation to our query: + +[source,js] +-------------------------------------------------- +GET /user_hits/_search +{ + "size": 0, + "aggs" : { + "users_per_day" : { + "date_histogram" : { + "field" : "timestamp", + "calendar_interval" : "day" + }, + "aggs": { + "distinct_users": { + "cardinality": { + "field": "user_id" + } + }, + "total_new_users": { + "cumulative_cardinality": { + "buckets_path": "distinct_users" + } + }, + "incremental_new_users": { + "derivative": { + "buckets_path": "total_new_users" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:user_hits] + + +And the following may be the response: + +[source,js] +-------------------------------------------------- +{ + "took": 11, + "timed_out": false, + "_shards": ..., + "hits": ..., + "aggregations": { + "users_per_day": { + "buckets": [ + { + "key_as_string": "2019-01-01T00:00:00.000Z", + "key": 1546300800000, + "doc_count": 2, + "distinct_users": { + "value": 2 + }, + "total_new_users": { + "value": 2 + } + }, + { + "key_as_string": "2019-01-02T00:00:00.000Z", + "key": 1546387200000, + "doc_count": 2, + "distinct_users": { + "value": 2 + }, + "total_new_users": { + "value": 3 + }, + "incremental_new_users": { + "value": 1.0 + } + }, + { + "key_as_string": "2019-01-03T00:00:00.000Z", + "key": 1546473600000, + "doc_count": 3, + "distinct_users": { + "value": 3 + }, + "total_new_users": { + "value": 4 + }, + "incremental_new_users": { + "value": 1.0 + } + } + ] + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 11/"took": $body.took/] +// TESTRESPONSE[s/"_shards": \.\.\./"_shards": $body._shards/] +// TESTRESPONSE[s/"hits": \.\.\./"hits": $body.hits/] diff --git a/docs/reference/analysis/analyzers/custom-analyzer.asciidoc b/docs/reference/analysis/analyzers/custom-analyzer.asciidoc index 92133822fa51f..4b601a0b9bd3a 100644 --- a/docs/reference/analysis/analyzers/custom-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/custom-analyzer.asciidoc @@ -162,26 +162,26 @@ PUT my_index "settings": { "analysis": { "analyzer": { - "my_custom_analyzer": { + "my_custom_analyzer": { <1> "type": "custom", "char_filter": [ - "emoticons" <1> + "emoticons" ], - "tokenizer": "punctuation", <1> + "tokenizer": "punctuation", "filter": [ "lowercase", - "english_stop" <1> + "english_stop" ] } }, "tokenizer": { - "punctuation": { <1> + "punctuation": { <2> "type": "pattern", "pattern": "[ .,!?]" } }, "char_filter": { - "emoticons": { <1> + "emoticons": { <3> "type": "mapping", "mappings": [ ":) => _happy_", @@ -190,7 +190,7 @@ PUT my_index } }, "filter": { - "english_stop": { <1> + "english_stop": { <4> "type": "stop", "stopwords": "_english_" } @@ -207,9 +207,12 @@ POST my_index/_analyze -------------------------------------------------- // CONSOLE -<1> The `emoticons` character filter, `punctuation` tokenizer and - `english_stop` token filter are custom implementations which are defined - in the same index settings. +<1> Assigns the index a default custom analyzer, `my_custom_analyzer`. This +analyzer uses a custom tokenizer, character filter, and token filter that +are defined later in the request. +<2> Defines the custom `punctuation` tokenizer. +<3> Defines the custom `emoticons` character filter. +<4> Defines the custom `english_stop` token filter. 
///////////////////// diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc index 518b8112d730a..b9fd677228683 100644 --- a/docs/reference/docs/delete-by-query.asciidoc +++ b/docs/reference/docs/delete-by-query.asciidoc @@ -1,14 +1,16 @@ [[docs-delete-by-query]] -=== Delete By Query API +=== Delete by query API +++++ +Delete by query +++++ -The simplest usage of `_delete_by_query` just performs a deletion on every -document that matches a query. Here is the API: +Deletes documents that match the specified query. [source,js] -------------------------------------------------- -POST twitter/_delete_by_query +POST /twitter/_delete_by_query { - "query": { <1> + "query": { "match": { "message": "some message" } @@ -18,11 +20,12 @@ POST twitter/_delete_by_query // CONSOLE // TEST[setup:big_twitter] -<1> The query must be passed as a value to the `query` key, in the same -way as the <>. You can also use the `q` -parameter in the same way as the search API. +[[docs-delete-by-query-api-request]] +==== {api-request-title} + +`POST //_delete_by_query` -That will return something like this: +//// [source,js] -------------------------------------------------- @@ -45,136 +48,76 @@ That will return something like this: } -------------------------------------------------- // TESTRESPONSE[s/"took" : 147/"took" : "$body.took"/] +//// -`_delete_by_query` gets a snapshot of the index when it starts and deletes what -it finds using `internal` versioning. That means that you'll get a version -conflict if the document changes between the time when the snapshot was taken -and when the delete request is processed. When the versions match the document -is deleted. - -NOTE: Since `internal` versioning does not support the value 0 as a valid -version number, documents with version equal to zero cannot be deleted using -`_delete_by_query` and will fail the request. - -During the `_delete_by_query` execution, multiple search requests are sequentially -executed in order to find all the matching documents to delete. Every time a batch -of documents is found, a corresponding bulk request is executed to delete all -these documents. In case a search or bulk request got rejected, `_delete_by_query` - relies on a default policy to retry rejected requests (up to 10 times, with - exponential back off). Reaching the maximum retries limit causes the `_delete_by_query` - to abort and all failures are returned in the `failures` of the response. - The deletions that have been performed still stick. In other words, the process - is not rolled back, only aborted. While the first failure causes the abort, all - failures that are returned by the failing bulk request are returned in the `failures` - element; therefore it's possible for there to be quite a few failed entities. - -If you'd like to count version conflicts rather than cause them to abort, then -set `conflicts=proceed` on the url or `"conflicts": "proceed"` in the request body. - -Back to the API format, this will delete tweets from the `twitter` index: +[[docs-delete-by-query-api-desc]] +==== {api-description-title} -[source,js] --------------------------------------------------- -POST twitter/_delete_by_query?conflicts=proceed -{ - "query": { - "match_all": {} - } -} --------------------------------------------------- -// CONSOLE -// TEST[setup:twitter] +You can specify the query criteria in the request URI or the request body +using the same syntax as the <>. 
-It's also possible to delete documents of multiple indexes at once, just like -the search API: +When you submit a delete by query request, {es} gets a snapshot of the index +when it begins processing the request and deletes matching documents using +`internal` versioning. If a document changes between the time that the +snapshot is taken and the delete operation is processed, it results in a version +conflict and the delete operation fails. -[source,js] -------------------------------------------------- -POST twitter,blog/_delete_by_query -{ - "query": { - "match_all": {} - } -} -------------------------------------------------- -// CONSOLE -// TEST[s/^/PUT twitter\nPUT blog\n/] +NOTE: Documents with a version equal to 0 cannot be deleted using delete by +query because `internal` versioning does not support 0 as a valid +version number. -If you provide `routing` then the routing is copied to the scroll query, -limiting the process to the shards that match that routing value: +While processing a delete by query request, {es} performs multiple search +requests sequentially to find all of the matching documents to delete. A bulk +delete request is performed for each batch of matching documents. If a +search or bulk request is rejected, the requests are retried up to 10 times, with +exponential back off. If the maximum retry limit is reached, processing halts +and all failed requests are returned in the response. Any delete requests that +completed successfully still stick; they are not rolled back. -[source,js] -------------------------------------------------- -POST twitter/_delete_by_query?routing=1 -{ - "query": { - "range" : { - "age" : { - "gte" : 10 - } - } - } -} -------------------------------------------------- -// CONSOLE -// TEST[setup:twitter] - -By default `_delete_by_query` uses scroll batches of 1000. You can change the -batch size with the `scroll_size` URL parameter: - -[source,js] -------------------------------------------------- -POST twitter/_delete_by_query?scroll_size=5000 -{ - "query": { - "term": { - "user": "kimchy" - } - } -} -------------------------------------------------- -// CONSOLE -// TEST[setup:twitter] +You can opt to count version conflicts instead of halting and returning by +setting `conflicts` to `proceed`. +===== Refreshing shards -[float] -==== URL Parameters +Specifying the `refresh` parameter refreshes all shards involved in the delete +by query once the request completes. This is different than the delete API's +`refresh` parameter, which causes just the shard that received the delete +request to be refreshed. Unlike the delete API, it does not support +`wait_for`. -In addition to the standard parameters like `pretty`, the delete by query API -also supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout`, -and `scroll`. +[[docs-delete-by-query-task-api]] +===== Running delete by query asynchronously -Sending the `refresh` will refresh all shards involved in the delete by query -once the request completes. This is different than the delete API's `refresh` -parameter which causes just the shard that received the delete request -to be refreshed. Also unlike the delete API it does not support `wait_for`. +If the request contains `wait_for_completion=false`, {es} +performs some preflight checks, launches the request, and returns a +<> +you can use to cancel or get the status of the task. {es} creates a +record of this task as a document at `.tasks/task/${taskId}`.
When you are +done with a task, you should delete the task document so {es} can reclaim the +space. -If the request contains `wait_for_completion=false` then Elasticsearch will -perform some preflight checks, launch the request, and then return a `task` -which can be used with <> -to cancel or get the status of the task. Elasticsearch will also create a -record of this task as a document at `.tasks/task/${taskId}`. This is yours -to keep or remove as you see fit. When you are done with it, delete it so -Elasticsearch can reclaim the space it uses. +===== Waiting for active shards `wait_for_active_shards` controls how many copies of a shard must be active -before proceeding with the request. See <> +before proceeding with the request. See <> for details. `timeout` controls how long each write request waits for unavailable -shards to become available. Both work exactly how they work in the -<>. As `_delete_by_query` uses scroll search, you can also specify -the `scroll` parameter to control how long it keeps the "search context" alive, -e.g. `?scroll=10m`. By default it's 5 minutes. - -`requests_per_second` can be set to any positive decimal number (`1.4`, `6`, -`1000`, etc.) and throttles the rate at which delete by query issues batches of -delete operations by padding each batch with a wait time. The throttling can be -disabled by setting `requests_per_second` to `-1`. - -The throttling is done by waiting between batches so that scroll that -`_delete_by_query` uses internally can be given a timeout that takes into -account the padding. The padding time is the difference between the batch size -divided by the `requests_per_second` and the time spent writing. By default the -batch size is `1000`, so if the `requests_per_second` is set to `500`: +shards to become available. Both work exactly the way they work in the +<>. Delete by query uses scrolled searches, so you can also +specify the `scroll` parameter to control how long it keeps the search context +alive, for example `?scroll=10m`. The default is 5 minutes. + +===== Throttling delete requests + +To control the rate at which delete by query issues batches of delete operations, +you can set `requests_per_second` to any positive decimal number. This pads each +batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` +to disable throttling. + +Throttling uses a wait time between batches so that the internal scroll requests +can be given a timeout that takes the request padding into account. The padding +time is the difference between the batch size divided by the +`requests_per_second` and the time spent writing. By default the batch size is +`1000`, so if `requests_per_second` is set to `500`: [source,txt] -------------------------------------------------- @@ -182,11 +125,120 @@ target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds -------------------------------------------------- -Since the batch is issued as a single `_bulk` request, large batch sizes will -cause Elasticsearch to create many requests and then wait for a while before -starting the next set. This is "bursty" instead of "smooth". The default is `-1`. +Since the batch is issued as a single `_bulk` request, large batch sizes +cause {es} to create many requests and wait before starting the next set. +This is "bursty" instead of "smooth". -[float] +[[docs-delete-by-query-slice]] +===== Slicing + +Delete by query supports <> to parallelize the +delete process. 
This can improve efficiency and provide a
+convenient way to break the request down into smaller parts.
+
+Setting `slices` to `auto` chooses a reasonable number for most indices.
+If you're slicing manually or otherwise tuning automatic slicing, keep in mind
+that:
+
+* Query performance is most efficient when the number of `slices` is equal to
+the number of shards in the index. If that number is large (for example,
+500), choose a lower number as too many `slices` hurts performance. Setting
+`slices` higher than the number of shards generally does not improve efficiency
+and adds overhead.
+
+* Delete performance scales linearly across available resources with the
+number of slices.
+
+Whether query or delete performance dominates the runtime depends on the
+documents being deleted and cluster resources.
+
+[[docs-delete-by-query-api-path-params]]
+==== {api-path-parms-title}
+
+``::
+(Optional, string) A comma-separated list of index names to search. Use `_all`
+or omit to search all indices.
+
+[[docs-delete-by-query-api-query-params]]
+==== {api-query-parms-title}
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=analyzer]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=analyze_wildcard]
+
+`conflicts`::
+  (Optional, string) What to do if delete by query hits version conflicts:
+  `abort` or `proceed`. Defaults to `abort`.
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=default_operator]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=df]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards]
++
+Defaults to `open`.
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=from]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=lenient]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=max_docs]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=preference]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=search-q]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=request_cache]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=refresh]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=requests_per_second]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=routing]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=scroll]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=scroll_size]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=search_type]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=search_timeout]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=slices]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=sort]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=source]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=source_excludes]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=source_includes]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=stats]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=terminate_after]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=timeout]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=version]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards]
+
+[[docs-delete-by-query-api-request-body]]
+==== {api-request-body-title}
+
+`query`::
+  (Optional, <>) Specifies the documents to delete
+  using the <>.
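+
+The following snippet is an illustrative sketch, not part of the examples
+below, that combines several of the parameters above. It reuses the `twitter`
+index and `user` field from the examples in this document; the parameter
+values are arbitrary. The request proceeds on version conflicts, throttles
+the operation to roughly 500 documents per second, and selects the documents
+to delete with a `query` in the request body:
+
+[source,js]
+--------------------------------------------------
+POST /twitter/_delete_by_query?conflicts=proceed&requests_per_second=500&scroll_size=1000
+{
+  "query": {
+    "term": {
+      "user": "kimchy"
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]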
+ + +[[docs-delete-by-quer-api-response-body]] ==== Response body ////////////////////////// @@ -289,141 +341,81 @@ this is non-empty then the request aborted because of those failures. Delete by query is implemented using batches, and any failure causes the entire process to abort but all failures in the current batch are collected into the array. You can use the `conflicts` option to prevent reindex from aborting on -version conflicts. - +version conflicts. -[float] -[[docs-delete-by-query-task-api]] -==== Works with the Task API +[[docs-delete-by-query-api-example]] +==== {api-examples-title} -You can fetch the status of any running delete by query requests with the -<>: +Delete all tweets from the `twitter` index: [source,js] -------------------------------------------------- -GET _tasks?detailed=true&actions=*/delete/byquery +POST twitter/_delete_by_query?conflicts=proceed +{ + "query": { + "match_all": {} + } +} -------------------------------------------------- // CONSOLE -// TEST[skip:No tasks to retrieve] +// TEST[setup:twitter] -The response looks like: +Delete documents from multiple indices: [source,js] -------------------------------------------------- +POST /twitter,blog/_delete_by_query { - "nodes" : { - "r1A2WoRbTwKZ516z6NEs5A" : { - "name" : "r1A2WoR", - "transport_address" : "127.0.0.1:9300", - "host" : "127.0.0.1", - "ip" : "127.0.0.1:9300", - "attributes" : { - "testattr" : "test", - "portsfile" : "true" - }, - "tasks" : { - "r1A2WoRbTwKZ516z6NEs5A:36619" : { - "node" : "r1A2WoRbTwKZ516z6NEs5A", - "id" : 36619, - "type" : "transport", - "action" : "indices:data/write/delete/byquery", - "status" : { <1> - "total" : 6154, - "updated" : 0, - "created" : 0, - "deleted" : 3500, - "batches" : 36, - "version_conflicts" : 0, - "noops" : 0, - "retries": 0, - "throttled_millis": 0 - }, - "description" : "" - } - } - } + "query": { + "match_all": {} } } -------------------------------------------------- -// TESTRESPONSE -<1> This object contains the actual status. It is just like the response JSON -with the important addition of the `total` field. `total` is the total number -of operations that the reindex expects to perform. You can estimate the -progress by adding the `updated`, `created`, and `deleted` fields. The request -will finish when their sum is equal to the `total` field. - -With the task id you can look up the task directly: - -[source,js] --------------------------------------------------- -GET /_tasks/r1A2WoRbTwKZ516z6NEs5A:36619 --------------------------------------------------- // CONSOLE -// TEST[catch:missing] - -The advantage of this API is that it integrates with `wait_for_completion=false` -to transparently return the status of completed tasks. If the task is completed -and `wait_for_completion=false` was set on it then it'll come back with -`results` or an `error` field. The cost of this feature is the document that -`wait_for_completion=false` creates at `.tasks/task/${taskId}`. It is up to -you to delete that document. 
-
-
-[float]
-[[docs-delete-by-query-cancel-task-api]]
-==== Works with the Cancel Task API
+// TEST[s/^/PUT twitter\nPUT blog\n/]
 
-Any delete by query can be canceled using the <>:
+Limit the delete by query operation to shards that match a particular routing
+value:
 
 [source,js]
 --------------------------------------------------
-POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
+POST twitter/_delete_by_query?routing=1
+{
+  "query": {
+    "range" : {
+        "age" : {
+           "gte" : 10
+        }
+    }
+  }
+}
 --------------------------------------------------
 // CONSOLE
+// TEST[setup:twitter]
 
-The task ID can be found using the <>.
-
-Cancellation should happen quickly but might take a few seconds. The task status
-API above will continue to list the delete by query task until this task checks that it
-has been cancelled and terminates itself.
-
-
-[float]
-[[docs-delete-by-query-rethrottle]]
-==== Rethrottling
-
-The value of `requests_per_second` can be changed on a running delete by query
-using the `_rethrottle` API:
+By default `_delete_by_query` uses scroll batches of 1000. You can change the
+batch size with the `scroll_size` URL parameter:
 
 [source,js]
 --------------------------------------------------
-POST _delete_by_query/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
+POST twitter/_delete_by_query?scroll_size=5000
+{
+  "query": {
+    "term": {
+      "user": "kimchy"
+    }
+  }
+}
 --------------------------------------------------
 // CONSOLE
-
-The task ID can be found using the <>.
-
-Just like when setting it on the delete by query API, `requests_per_second`
-can be either `-1` to disable throttling or any decimal number
-like `1.7` or `12` to throttle to that level. Rethrottling that speeds up the
-query takes effect immediately but rethrotting that slows down the query will
-take effect after completing the current batch. This prevents scroll
-timeouts.
-
-[float]
-[[docs-delete-by-query-slice]]
-==== Slicing
-
-Delete by query supports <> to parallelize the deleting process.
-This parallelization can improve efficiency and provide a convenient way to
-break the request down into smaller parts.
+// TEST[setup:twitter]
 
 [float]
 [[docs-delete-by-query-manual-slice]]
-===== Manual slicing
+===== Slice manually
 
 Slice a delete by query manually by providing a slice id and total number of
-slices to each request:
+slices:
 
 [source,js]
 ----------------------------------------------------------------
@@ -495,11 +487,11 @@ Which results in a sensible `total` like this one:
 
 [float]
 [[docs-delete-by-query-automatic-slice]]
-===== Automatic slicing
+===== Use automatic slicing
 
 You can also let delete-by-query automatically parallelize using
-<> to slice on `_id`. Use `slices` to specify the number of
-slices to use:
+<> to slice on `_id`. Use `slices` to specify
+the number of slices to use:
 
 [source,js]
 ----------------------------------------------------------------
@@ -550,7 +542,7 @@ Which results in a sensible `total` like this one:
 ----------------------------------------------------------------
 // TESTRESPONSE
 
-Setting `slices` to `auto` will let Elasticsearch choose the number of slices
+Setting `slices` to `auto` will let {es} choose the number of slices
 to use. This setting will use one slice per shard, up to a certain limit. If
 there are multiple source indices, it will choose the number of slices based
 on the index with the smallest number of shards.
@@ -580,21 +572,114 @@ being deleted.
 though these are all taken at approximately the same time.
 [float]
-[[docs-delete-by-query-picking-slices]]
-====== Picking the number of slices
+[[docs-delete-by-query-rethrottle]]
+===== Change throttling for a request
 
-If slicing automatically, setting `slices` to `auto` will choose a reasonable
-number for most indices. If you're slicing manually or otherwise tuning
-automatic slicing, use these guidelines.
+The value of `requests_per_second` can be changed on a running delete by query
+using the `_rethrottle` API. Rethrottling that speeds up the
+query takes effect immediately but rethrottling that slows down the query
+takes effect after completing the current batch to prevent scroll
+timeouts.
 
-Query performance is most efficient when the number of `slices` is equal to the
-number of shards in the index. If that number is large (for example,
-500), choose a lower number as too many `slices` will hurt performance. Setting
-`slices` higher than the number of shards generally does not improve efficiency
-and adds overhead.
+[source,js]
+--------------------------------------------------
+POST _delete_by_query/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
+--------------------------------------------------
+// CONSOLE
 
-Delete performance scales linearly across available resources with the
-number of slices.
+Use the <> to get the task ID. Set `requests_per_second`
+to any positive decimal value or `-1` to disable throttling.
 
-Whether query or delete performance dominates the runtime depends on the
-documents being reindexed and cluster resources.
+===== Get the status of a delete by query operation
+
+Use the <> to get the status of a delete by query
+operation:
+
+
+[source,js]
+--------------------------------------------------
+GET _tasks?detailed=true&actions=*/delete/byquery
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:No tasks to retrieve]
+
+The response looks like:
+
+[source,js]
+--------------------------------------------------
+{
+  "nodes" : {
+    "r1A2WoRbTwKZ516z6NEs5A" : {
+      "name" : "r1A2WoR",
+      "transport_address" : "127.0.0.1:9300",
+      "host" : "127.0.0.1",
+      "ip" : "127.0.0.1:9300",
+      "attributes" : {
+        "testattr" : "test",
+        "portsfile" : "true"
+      },
+      "tasks" : {
+        "r1A2WoRbTwKZ516z6NEs5A:36619" : {
+          "node" : "r1A2WoRbTwKZ516z6NEs5A",
+          "id" : 36619,
+          "type" : "transport",
+          "action" : "indices:data/write/delete/byquery",
+          "status" : {    <1>
+            "total" : 6154,
+            "updated" : 0,
+            "created" : 0,
+            "deleted" : 3500,
+            "batches" : 36,
+            "version_conflicts" : 0,
+            "noops" : 0,
+            "retries": 0,
+            "throttled_millis": 0
+          },
+          "description" : ""
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE
+<1> This object contains the actual status. It is just like the response JSON
+with the important addition of the `total` field. `total` is the total number
+of operations that the delete by query expects to perform. You can estimate the
+progress by adding the `updated`, `created`, and `deleted` fields. The request
+will finish when their sum is equal to the `total` field.
+
+With the task id you can look up the task directly:
+
+[source,js]
+--------------------------------------------------
+GET /_tasks/r1A2WoRbTwKZ516z6NEs5A:36619
+--------------------------------------------------
+// CONSOLE
+// TEST[catch:missing]
+
+The advantage of this API is that it integrates with `wait_for_completion=false`
+to transparently return the status of completed tasks.
If the task is completed +and `wait_for_completion=false` was set on it then it'll come back with +`results` or an `error` field. The cost of this feature is the document that +`wait_for_completion=false` creates at `.tasks/task/${taskId}`. It is up to +you to delete that document. + + +[float] +[[docs-delete-by-query-cancel-task-api]] +==== Cancel a delete by query operation + +Any delete by query can be canceled using the <>: + +[source,js] +-------------------------------------------------- +POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel +-------------------------------------------------- +// CONSOLE + +The task ID can be found using the <>. + +Cancellation should happen quickly but might take a few seconds. The task status +API above will continue to list the delete by query task until this task checks that it +has been cancelled and terminates itself. diff --git a/docs/reference/docs/delete.asciidoc b/docs/reference/docs/delete.asciidoc index 97a7f65896643..66f2daf03354c 100644 --- a/docs/reference/docs/delete.asciidoc +++ b/docs/reference/docs/delete.asciidoc @@ -144,23 +144,23 @@ DELETE /twitter/_doc/1?timeout=5m [[docs-delete-api-query-params]] ==== {api-query-parms-title} -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-seq-no] +include::{docdir}/rest-api/common-parms.asciidoc[tag=if_seq_no] -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-primary-term] +include::{docdir}/rest-api/common-parms.asciidoc[tag=if_primary_term] -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-pipeline] +include::{docdir}/rest-api/common-parms.asciidoc[tag=pipeline] -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-refresh] +include::{docdir}/rest-api/common-parms.asciidoc[tag=refresh] -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-routing] +include::{docdir}/rest-api/common-parms.asciidoc[tag=routing] include::{docdir}/rest-api/common-parms.asciidoc[tag=timeout] include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version] -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version-type] +include::{docdir}/rest-api/common-parms.asciidoc[tag=version_type] -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-wait-for-active-shards] +include::{docdir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards] [[docs-delete-api-example]] ==== {api-examples-title} diff --git a/docs/reference/docs/get.asciidoc b/docs/reference/docs/get.asciidoc index 14f37770fa57a..bee7e3700a906 100644 --- a/docs/reference/docs/get.asciidoc +++ b/docs/reference/docs/get.asciidoc @@ -164,9 +164,9 @@ be performed on (default: random). (Optional, boolean) Set to `false` to disable real time GET (default: `true`). See <>. -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-refresh] +include::{docdir}/rest-api/common-parms.asciidoc[tag=refresh] -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-routing] +include::{docdir}/rest-api/common-parms.asciidoc[tag=routing] `stored_fields`:: (Optional, boolean) Set to `true` to retrieve the document fields stored in the @@ -185,7 +185,7 @@ you want to retrieve. 
include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version] -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version-type] +include::{docdir}/rest-api/common-parms.asciidoc[tag=version_type] [[docs-get-api-response-body]] ==== {api-response-body-title} @@ -240,7 +240,7 @@ GET twitter/_doc/0 The API returns the following result: -[source,js] +[source,console-result] -------------------------------------------------- { "_index" : "twitter", @@ -359,7 +359,7 @@ GET twitter/_doc/1?stored_fields=tags,counter The API returns the following result: -[source,js] +[source,console-result] -------------------------------------------------- { "_index": "twitter", @@ -403,7 +403,7 @@ GET twitter/_doc/2?routing=user1&stored_fields=tags,counter The API returns the following result: -[source,js] +[source,console-result] -------------------------------------------------- { "_index": "twitter", diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index 5bbfaf4bf0c80..4aaa22fbe23cd 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -37,9 +37,9 @@ POST request. [[docs-index-api-query-params]] ==== {api-query-parms-title} -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-seq-no] +include::{docdir}/rest-api/common-parms.asciidoc[tag=if_seq_no] -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-primary-term] +include::{docdir}/rest-api/common-parms.asciidoc[tag=if_primary_term] `op_type`:: (Optional, enum) Set to `create` to only index the document @@ -47,19 +47,19 @@ if it does not already exist (_put if absent_). If a document with the specified `_id` already exists, the indexing operation will fail. Same as using the `/_create` endpoint. Valid values: `index`, `create`. Default: `index`. -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-pipeline] +include::{docdir}/rest-api/common-parms.asciidoc[tag=pipeline] -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-refresh] +include::{docdir}/rest-api/common-parms.asciidoc[tag=refresh] -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-routing] +include::{docdir}/rest-api/common-parms.asciidoc[tag=routing] include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version] -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version-type] +include::{docdir}/rest-api/common-parms.asciidoc[tag=version_type] -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-wait-for-active-shards] +include::{docdir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards] [[docs-index-api-request-body]] ==== {api-request-body-title} diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 1a732cf1246be..823c624c20e56 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -46,22 +46,20 @@ automatically if it doesn't exist. For more information, see <>. [[docs-update-api-query-params]] ==== {api-query-parms-title} -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-seq-no] +include::{docdir}/rest-api/common-parms.asciidoc[tag=if_seq_no] -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-primary-term] +include::{docdir}/rest-api/common-parms.asciidoc[tag=if_primary_term] `lang`:: (Optional, string) The script language. Default: `painless`. 
-include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-refresh] +include::{docdir}/rest-api/common-parms.asciidoc[tag=refresh] `retry_on_conflict`:: (Optional, integer) Specify how many times should the operation be retried when a conflict occurs. Default: 0. -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-refresh] - -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-routing] +include::{docdir}/rest-api/common-parms.asciidoc[tag=routing] `_source`:: (Optional, list) Set to `false` to disable source retrieval (default: `true`). @@ -75,7 +73,7 @@ You can also specify a comma-separated list of the fields you want to retrieve. include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] -include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-wait-for-active-shards] +include::{docdir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards] [[update-api-example]] ==== {api-examples-title} diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 3ca58d6922364..4c5b3535787b8 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -17,7 +17,7 @@ Follow this getting started tutorial to: Need more context? Check out the <> to learn the lingo and understand the basics of +{es} Introduction>> to learn the lingo and understand the basics of how {es} works. If you're already familiar with {es} and want to see how it works with the rest of the stack, you might want to jump to the {stack-gs}/get-started-elastic-stack.html[Elastic Stack @@ -26,29 +26,45 @@ Tutorial] to see how to set up a system monitoring solution with {es}, {kib}, TIP: The fastest way to get started with {es} is to https://www.elastic.co/cloud/elasticsearch-service/signup[start a free 14-day -trial of Elasticsearch Service] in the cloud. +trial of {ess}] in the cloud. -- [[getting-started-install]] == Get {es} up and running -To take {es} for a test drive, you can create a one-click cloud deployment -on the https://www.elastic.co/cloud/elasticsearch-service/signup[Elasticsearch Service], -or <> on your own +To take {es} for a test drive, you can create a +https://www.elastic.co/cloud/elasticsearch-service/signup[hosted deployment] on +the {ess} or set up a multi-node {es} cluster on your own Linux, macOS, or Windows machine. +[float] +[[run-elasticsearch-hosted]] +=== Run {es} on Elastic Cloud + +When you create a deployment on the {es} Service, the service provisions +a three-node {es} cluster along with Kibana and APM. + +To create a deployment: + +. Sign up for a https://www.elastic.co/cloud/elasticsearch-service/signup[free trial] +and verify your email address. +. Set a password for your account. +. Click **Create Deployment**. + +Once you've created a deployment, you're ready to <>. [float] [[run-elasticsearch-local]] === Run {es} locally on Linux, macOS, or Windows -When you create a cluster on the Elasticsearch Service, you automatically -get a three-node cluster. By installing from the tar or zip archive, you can -start multiple instances of {es} locally to see how a multi-node cluster behaves. +When you create a deployment on the {ess}, a master node and +two data nodes are provisioned automatically. By installing from the tar or zip +archive, you can start multiple instances of {es} locally to see how a multi-node +cluster behaves. To run a three-node {es} cluster locally: -. Download the Elasticsearch archive for your OS: +. 
Download the {es} archive for your OS: + Linux: https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-linux-x86_64.tar.gz[elasticsearch-{version}-linux-x86_64.tar.gz] + @@ -92,7 +108,7 @@ Windows PowerShell: Expand-Archive elasticsearch-{version}-windows-x86_64.zip -------------------------------------------------- -. Start elasticsearch from the `bin` directory: +. Start {es} from the `bin` directory: + Linux and macOS: + @@ -165,7 +181,7 @@ epoch timestamp cluster status node.total node.data shards pri relo i -------------------------------------------------- // TESTRESPONSE[s/1565052807 00:53:27 elasticsearch/\\d+ \\d+:\\d+:\\d+ integTest/] // TESTRESPONSE[s/3 3 6 3/\\d+ \\d+ \\d+ \\d+/] -// TESTRESPONSE[s/0 0 -/0 \\d+ -/] +// TESTRESPONSE[s/0 0 -/0 \\d+ (-|\\d+(micros|ms|s))/] // TESTRESPONSE[non_json] + NOTE: The cluster status will remain yellow if you are only running a single @@ -332,82 +348,14 @@ yellow open bank l7sSYV2cQXmu6_4rJWVIww 5 1 1000 0 12 [[getting-started-search]] == Start searching -Now let's start with some simple searches. There are two basic ways to run searches: one is by sending search parameters through the {ref}/search-uri-request.html[REST request URI] and the other by sending them through the {ref}/search-request-body.html[REST request body]. The request body method allows you to be more expressive and also to define your searches in a more readable JSON format. We'll try one example of the request URI method but for the remainder of this tutorial, we will exclusively be using the request body method. - -The REST API for search is accessible from the `_search` endpoint. This example returns all documents in the bank index: - -[source,js] --------------------------------------------------- -GET /bank/_search?q=*&sort=account_number:asc&pretty --------------------------------------------------- -// CONSOLE -// TEST[continued] - -Let's first dissect the search call. We are searching (`_search` endpoint) in the bank index, and the `q=*` parameter instructs Elasticsearch to match all documents in the index. The `sort=account_number:asc` parameter indicates to sort the results using the `account_number` field of each document in an ascending order. The `pretty` parameter, again, just tells Elasticsearch to return pretty-printed JSON results. - -And the response (partially shown): - -[source,js] --------------------------------------------------- -{ - "took" : 63, - "timed_out" : false, - "_shards" : { - "total" : 5, - "successful" : 5, - "skipped" : 0, - "failed" : 0 - }, - "hits" : { - "total" : { - "value": 1000, - "relation": "eq" - }, - "max_score" : null, - "hits" : [ { - "_index" : "bank", - "_type" : "_doc", - "_id" : "0", - "sort": [0], - "_score" : null, - "_source" : {"account_number":0,"balance":16623,"firstname":"Bradshaw","lastname":"Mckenzie","age":29,"gender":"F","address":"244 Columbus Place","employer":"Euron","email":"bradshawmckenzie@euron.com","city":"Hobucken","state":"CO"} - }, { - "_index" : "bank", - "_type" : "_doc", - "_id" : "1", - "sort": [1], - "_score" : null, - "_source" : {"account_number":1,"balance":39225,"firstname":"Amber","lastname":"Duke","age":32,"gender":"M","address":"880 Holmes Lane","employer":"Pyrami","email":"amberduke@pyrami.com","city":"Brogan","state":"IL"} - }, ... 
- ] - } -} --------------------------------------------------- -// TESTRESPONSE[s/"took" : 63/"took" : $body.took/] -// TESTRESPONSE[s/\.\.\./$body.hits.hits.2, $body.hits.hits.3, $body.hits.hits.4, $body.hits.hits.5, $body.hits.hits.6, $body.hits.hits.7, $body.hits.hits.8, $body.hits.hits.9/] +Once you have ingested some data into an {es} index, you can search it +by sending requests to the `_search` endpoint. To access the full suite of +search capabilities, you use the {es} Query DSL to specify the +search criteria in the request body. You specify the name of the index you +want to search in the request URI. -As for the response, we see the following parts: - -* `took` – time in milliseconds for Elasticsearch to execute the search -* `timed_out` – tells us if the search timed out or not -* `_shards` – tells us how many shards were searched, as well as a count of the successful/failed searched shards -* `hits` – search results -* `hits.total` – an object that contains information about the total number of documents matching our search criteria -** `hits.total.value` - the value of the total hit count (must be interpreted in the context of `hits.total.relation`). -** `hits.total.relation` - whether `hits.total.value` is the exact hit count, in which case it is equal to `"eq"` or a - lower bound of the total hit count (greater than or equals), in which case it is equal to `gte`. -* `hits.hits` – actual array of search results (defaults to first 10 documents) -* `hits.sort` - sort value of the sort key for each result (missing if sorting by score) -* `hits._score` and `max_score` - ignore these fields for now - -The accuracy of `hits.total` is controlled by the request parameter `track_total_hits`, when set to true -the request will track the total hits accurately (`"relation": "eq"`). It defaults to `10,000` -which means that the total hit count is accurately tracked up to `10,000` documents. -You can force an accurate count by setting `track_total_hits` to true explicitly. -See the <> documentation -for more details. - -Here is the same exact search above using the alternative request body method: +For example, the following request retrieves all documents in the `bank` +index sorted by account number: [source,js] -------------------------------------------------- @@ -422,11 +370,8 @@ GET /bank/_search // CONSOLE // TEST[continued] -The difference here is that instead of passing `q=*` in the URI, we provide a JSON-style query request body to the `_search` API. We'll discuss this JSON query in the next section. 
- -//// -Hidden response just so we can assert that it is indeed the same but don't have -to clutter the docs with it: +By default, the `hits` section of the response includes the first 10 documents +that match the search criteria: [source,js] -------------------------------------------------- @@ -441,23 +386,23 @@ to clutter the docs with it: }, "hits" : { "total" : { - "value": 1000, - "relation": "eq" + "value": 1000, + "relation": "eq" }, - "max_score": null, + "max_score" : null, "hits" : [ { "_index" : "bank", "_type" : "_doc", "_id" : "0", "sort": [0], - "_score": null, + "_score" : null, "_source" : {"account_number":0,"balance":16623,"firstname":"Bradshaw","lastname":"Mckenzie","age":29,"gender":"F","address":"244 Columbus Place","employer":"Euron","email":"bradshawmckenzie@euron.com","city":"Hobucken","state":"CO"} }, { "_index" : "bank", "_type" : "_doc", "_id" : "1", "sort": [1], - "_score": null, + "_score" : null, "_source" : {"account_number":1,"balance":39225,"firstname":"Amber","lastname":"Duke","age":32,"gender":"M","address":"880 Holmes Lane","employer":"Pyrami","email":"amberduke@pyrami.com","city":"Brogan","state":"IL"} }, ... ] @@ -467,54 +412,31 @@ to clutter the docs with it: // TESTRESPONSE[s/"took" : 63/"took" : $body.took/] // TESTRESPONSE[s/\.\.\./$body.hits.hits.2, $body.hits.hits.3, $body.hits.hits.4, $body.hits.hits.5, $body.hits.hits.6, $body.hits.hits.7, $body.hits.hits.8, $body.hits.hits.9/] -//// - -It is important to understand that once you get your search results back, Elasticsearch is completely done with the request and does not maintain any kind of server-side resources or open cursors into your results. This is in stark contrast to many other platforms such as SQL wherein you may initially get a partial subset of your query results up-front and then you have to continuously go back to the server if you want to fetch (or page through) the rest of the results using some kind of stateful server-side cursor. - -[float] -[[getting-started-query-lang]] -=== Introducing the Query Language - -Elasticsearch provides a JSON-style domain-specific language that you can use to execute queries. This is referred to as the {ref}/query-dsl.html[Query DSL]. The query language is quite comprehensive and can be intimidating at first glance but the best way to actually learn it is to start with a few basic examples. +The response also provides the following information about the search request: -Going back to our last example, we executed this query: - -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "query": { "match_all": {} } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -Dissecting the above, the `query` part tells us what our query definition is and the `match_all` part is simply the type of query that we want to run. The `match_all` query is simply a search for all documents in the specified index. - -In addition to the `query` parameter, we also can pass other parameters to -influence the search results. 
In the example in the section above we passed in -`sort`, here we pass in `size`: - -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "query": { "match_all": {} }, - "size": 1 -} --------------------------------------------------- -// CONSOLE -// TEST[continued] +* `took` – how long it took {es} to run the query, in milliseconds +* `timed_out` – whether or not the search request timed out +* `_shards` – how many shards were searched and a breakdown of how many shards +succeeded, failed, or were skipped. +* `max_score` – the score of the most relevant document found +* `hits.total.value` - how many matching documents were found +* `hits.sort` - the document's sort position (when not sorting by relevance score) +* `hits._score` - the document's relevance score (not applicable when using `match_all`) -Note that if `size` is not specified, it defaults to 10. +Each search request is self-contained: {es} does not maintain any +state information across requests. To page through the search hits, specify +the `from` and `size` parameters in your request. -This example does a `match_all` and returns documents 10 through 19: +For example, the following request gets hits 10 through 19: [source,js] -------------------------------------------------- GET /bank/_search { "query": { "match_all": {} }, + "sort": [ + { "account_number": "asc" } + ], "from": 10, "size": 10 } @@ -522,67 +444,12 @@ GET /bank/_search // CONSOLE // TEST[continued] -The `from` parameter (0-based) specifies which document index to start from and the `size` parameter specifies how many documents to return starting at the from parameter. This feature is useful when implementing paging of search results. Note that if `from` is not specified, it defaults to 0. - -This example does a `match_all` and sorts the results by account balance in descending order and returns the top 10 (default size) documents. - -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "query": { "match_all": {} }, - "sort": { "balance": { "order": "desc" } } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -Now that we have seen a few of the basic search parameters, let's dig in some more into the Query DSL. Let's first take a look at the returned document fields. By default, the full JSON document is returned as part of all searches. This is referred to as the source (`_source` field in the search hits). If we don't want the entire source document returned, we have the ability to request only a few fields from within source to be returned. - -This example shows how to return two fields, `account_number` and `balance` (inside of `_source`), from the search: - -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "query": { "match_all": {} }, - "_source": ["account_number", "balance"] -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -Note that the above example simply reduces the `_source` field. It will still only return one field named `_source` but within it, only the fields `account_number` and `balance` are included. +Now that you've seen how to submit a basic search request, you can start to +construct queries that are a bit more interesting than `match_all`. -If you come from a SQL background, the above is somewhat similar in concept to the `SQL SELECT FROM` field list. - -Now let's move on to the query part. 
Previously, we've seen how the `match_all` query is used to match all documents. Let's now introduce a new query called the {ref}/query-dsl-match-query.html[`match` query], which can be thought of as a basic fielded search query (i.e. a search done against a specific field or set of fields). - -This example returns the account numbered 20: - -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "query": { "match": { "account_number": 20 } } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -This example returns all accounts containing the term "mill" in the address: - -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "query": { "match": { "address": "mill" } } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -This example returns all accounts containing the term "mill" or "lane" in the address: +To search for specific terms within a field, you can use a `match` query. +For example, the following request searches the `address` field to find +customers whose addresses contain `mill` or `lane`: [source,js] -------------------------------------------------- @@ -594,7 +461,9 @@ GET /bank/_search // CONSOLE // TEST[continued] -This example is a variant of `match` (`match_phrase`) that returns all accounts containing the phrase "mill lane" in the address: +To perform a phrase search rather than matching individual terms, you use +`match_phrase` instead of `match`. For example, the following request only +matches addresses that contain the phrase `mill lane`: [source,js] -------------------------------------------------- @@ -606,74 +475,13 @@ GET /bank/_search // CONSOLE // TEST[continued] -Let's now introduce the {ref}/query-dsl-bool-query.html[`bool` query]. The `bool` query allows us to compose smaller queries into bigger queries using boolean logic. - -This example composes two `match` queries and returns all accounts containing "mill" and "lane" in the address: - -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "query": { - "bool": { - "must": [ - { "match": { "address": "mill" } }, - { "match": { "address": "lane" } } - ] - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -In the above example, the `bool must` clause specifies all the queries that must be true for a document to be considered a match. - -In contrast, this example composes two `match` queries and returns all accounts containing "mill" or "lane" in the address: - -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "query": { - "bool": { - "should": [ - { "match": { "address": "mill" } }, - { "match": { "address": "lane" } } - ] - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -In the above example, the `bool should` clause specifies a list of queries either of which must be true for a document to be considered a match. +To construct more complex queries, you can use a `bool` query to combine +multiple query criteria. You can designate criteria as required (must match), +desirable (should match), or undesirable (must not match). 
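+
+As a sketch of how these clauses combine (the field values here are
+illustrative), the following request requires the term `mill` in the
+`address` field and prefers, without requiring, accounts from the state
+`TX`. Because a `must` clause is present, matching the `should` clause only
+improves a document's relevance score:
+
+[source,js]
+--------------------------------------------------
+GET /bank/_search
+{
+  "query": {
+    "bool": {
+      "must": [
+        { "match": { "address": "mill" } }
+      ],
+      "should": [
+        { "match": { "state": "TX" } }
+      ]
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+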
-This example composes two `match` queries and returns all accounts that contain neither "mill" nor "lane" in the address: - -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "query": { - "bool": { - "must_not": [ - { "match": { "address": "mill" } }, - { "match": { "address": "lane" } } - ] - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -In the above example, the `bool must_not` clause specifies a list of queries none of which must be true for a document to be considered a match. - -We can combine `must`, `should`, and `must_not` clauses simultaneously inside a `bool` query. Furthermore, we can compose `bool` queries inside any of these `bool` clauses to mimic any complex multi-level boolean logic. - -This example returns all accounts of anybody who is 40 years old but doesn't live in ID(aho): +For example, the following request searches the `bank` index for accounts that +belong to customers who are 40 years old, but excludes anyone who lives in +Idaho (ID): [source,js] -------------------------------------------------- @@ -694,17 +502,19 @@ GET /bank/_search // CONSOLE // TEST[continued] -[float] -[[getting-started-filters]] -=== Executing filters - -In the previous section, we skipped over a little detail called the document score (`_score` field in the search results). The score is a numeric value that is a relative measure of how well the document matches the search query that we specified. The higher the score, the more relevant the document is, the lower the score, the less relevant the document is. - -But queries do not always need to produce scores, in particular when they are only used for "filtering" the document set. Elasticsearch detects these situations and automatically optimizes query execution in order not to compute useless scores. +Each `must`, `should`, and `must_not` element in a Boolean query is referred +to as a query clause. How well a document meets the criteria in each `must` or +`should` clause contributes to the document's _relevance score_. The higher the +score, the better the document matches your search criteria. By default, {es} +returns documents ranked by these relevance scores. -The {ref}/query-dsl-bool-query.html[`bool` query] that we introduced in the previous section also supports `filter` clauses which allow us to use a query to restrict the documents that will be matched by other clauses, without changing how scores are computed. As an example, let's introduce the {ref}/query-dsl-range-query.html[`range` query], which allows us to filter documents by a range of values. This is generally used for numeric or date filtering. +The criteria in a `must_not` clause is treated as a _filter_. It affects whether +or not the document is included in the results, but does not contribute to +how documents are scored. You can also explicitly specify arbitrary filters to +include or exclude documents based on structured data. -This example uses a bool query to return all accounts with balances between 20000 and 30000, inclusive. In other words, we want to find accounts with a balance that is greater than or equal to 20000 and less than or equal to 30000. +For example, the following request uses a range filter to limit the results to +accounts with a balance between $20,000 and $30,000 (inclusive). 
[source,js] -------------------------------------------------- @@ -728,16 +538,18 @@ GET /bank/_search // CONSOLE // TEST[continued] -Dissecting the above, the bool query contains a `match_all` query (the query part) and a `range` query (the filter part). We can substitute any other queries into the query and the filter parts. In the above case, the range query makes perfect sense since documents falling into the range all match "equally", i.e., no document is more relevant than another. - -In addition to the `match_all`, `match`, `bool`, and `range` queries, there are a lot of other query types that are available and we won't go into them here. Since we already have a basic understanding of how they work, it shouldn't be too difficult to apply this knowledge in learning and experimenting with the other query types. - [[getting-started-aggregations]] == Analyze results with aggregations -Aggregations provide the ability to group and extract statistics from your data. The easiest way to think about aggregations is by roughly equating it to the SQL GROUP BY and the SQL aggregate functions. In Elasticsearch, you have the ability to execute searches returning hits and at the same time return aggregated results separate from the hits all in one response. This is very powerful and efficient in the sense that you can run queries and multiple aggregations and get the results back of both (or either) operations in one shot avoiding network roundtrips using a concise and simplified API. +{es} aggregations enable you to get meta-information about your search results +and answer questions like, "How many account holders are in Texas?" or +"What's the average balance of accounts in Tennessee?" You can search +documents, filter hits, and use aggregations to analyze the results all in one +request. -To start with, this example groups all the accounts by state, and then returns the top 10 (default) states sorted by count descending (also default): +For example, the following request uses a `terms` aggregation to group +all of the accounts in the `bank` index by state, and returns the ten states +with the most accounts in descending order: [source,js] -------------------------------------------------- @@ -756,14 +568,10 @@ GET /bank/_search // CONSOLE // TEST[continued] -In SQL, the above aggregation is similar in concept to: - -[source,sh] --------------------------------------------------- -SELECT state, COUNT(*) FROM bank GROUP BY state ORDER BY COUNT(*) DESC LIMIT 10; --------------------------------------------------- - -And the response (partially shown): +The `buckets` in the response are the values of the `state` field. The +`doc_count` shows the number of accounts in each state. For example, you +can see that there are 27 accounts in `ID` (Idaho). Because the request +set `size=0`, the response only contains the aggregation results. [source,js] -------------------------------------------------- @@ -825,12 +633,11 @@ And the response (partially shown): -------------------------------------------------- // TESTRESPONSE[s/"took": 29/"took": $body.took/] -We can see that there are 27 accounts in `ID` (Idaho), followed by 27 accounts -in `TX` (Texas), followed by 25 accounts in `AL` (Alabama), and so forth. - -Note that we set `size=0` to not show search hits because we only want to see the aggregation results in the response. 
-Building on the previous aggregation, this example calculates the average account balance by state (again only for the top 10 states sorted by count in descending order): +You can combine aggregations to build more complex summaries of your data. For +example, the following request nests an `avg` aggregation within the previous +`group_by_state` aggregation to calculate the average account balances for +each state. [source,js] -------------------------------------------------- @@ -856,9 +663,8 @@ GET /bank/_search // CONSOLE // TEST[continued] -Notice how we nested the `average_balance` aggregation inside the `group_by_state` aggregation. This is a common pattern for all the aggregations. You can nest aggregations inside aggregations arbitrarily to extract pivoted summarizations that you require from your data. - -Building on the previous aggregation, let's now sort on the average balance in descending order: +Instead of sorting the results by count, you could sort using the result of +the nested aggregation by specifying the order within the `terms` aggregation: [source,js] -------------------------------------------------- @@ -887,54 +693,14 @@ GET /bank/_search // CONSOLE // TEST[continued] -This example demonstrates how we can group by age brackets (ages 20-29, 30-39, and 40-49), then by gender, and then finally get the average account balance, per age bracket, per gender: - -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "size": 0, - "aggs": { - "group_by_age": { - "range": { - "field": "age", - "ranges": [ - { - "from": 20, - "to": 30 - }, - { - "from": 30, - "to": 40 - }, - { - "from": 40, - "to": 50 - } - ] - }, - "aggs": { - "group_by_gender": { - "terms": { - "field": "gender.keyword" - }, - "aggs": { - "average_balance": { - "avg": { - "field": "balance" - } - } - } - } - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] +In addition to basic bucketing and metrics aggregations like these, {es} +provides specialized aggregations for operating on multiple fields and +analyzing particular types of data such as dates, IP addresses, and geo +data. You can also feed the results of individual aggregations into pipeline +aggregations for further analysis. -There are many other aggregations capabilities that we won't go into detail here. The {ref}/search-aggregations.html[aggregations reference guide] is a great starting point if you want to do further experimentation. +The core analysis capabilities provided by aggregations enable advanced +features such as using machine learning to detect anomalies. 
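+
+As an illustrative sketch (the aggregation names are arbitrary), the following
+request feeds the nested `average_balance` results into a `max_bucket`
+pipeline aggregation to find the state with the highest average balance:
+
+[source,js]
+--------------------------------------------------
+GET /bank/_search
+{
+  "size": 0,
+  "aggs": {
+    "group_by_state": {
+      "terms": {
+        "field": "state.keyword"
+      },
+      "aggs": {
+        "average_balance": {
+          "avg": {
+            "field": "balance"
+          }
+        }
+      }
+    },
+    "max_state_balance": {
+      "max_bucket": {
+        "buckets_path": "group_by_state>average_balance"
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+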
[[getting-started-next-steps]] == Where to go from here diff --git a/docs/reference/images/spatial/error_distance.png b/docs/reference/images/spatial/error_distance.png new file mode 100644 index 0000000000000..a3274d778c047 Binary files /dev/null and b/docs/reference/images/spatial/error_distance.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-1-driver-manager.png b/docs/reference/images/sql/client-apps/dbvis-1-driver-manager.png index cde4d9cc7cf26..d9290e63deacb 100644 Binary files a/docs/reference/images/sql/client-apps/dbvis-1-driver-manager.png and b/docs/reference/images/sql/client-apps/dbvis-1-driver-manager.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-2-driver.png b/docs/reference/images/sql/client-apps/dbvis-2-driver.png index cae3824547bc3..a5cbddfefbda5 100644 Binary files a/docs/reference/images/sql/client-apps/dbvis-2-driver.png and b/docs/reference/images/sql/client-apps/dbvis-2-driver.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-3-add-driver.png b/docs/reference/images/sql/client-apps/dbvis-3-add-driver.png new file mode 100644 index 0000000000000..bab82fae3f229 Binary files /dev/null and b/docs/reference/images/sql/client-apps/dbvis-3-add-driver.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-3-new-conn.png b/docs/reference/images/sql/client-apps/dbvis-3-new-conn.png deleted file mode 100644 index 332895a2c8a8b..0000000000000 Binary files a/docs/reference/images/sql/client-apps/dbvis-3-new-conn.png and /dev/null differ diff --git a/docs/reference/images/sql/client-apps/dbvis-4-conn-props.png b/docs/reference/images/sql/client-apps/dbvis-4-conn-props.png deleted file mode 100644 index d854dc826b1e1..0000000000000 Binary files a/docs/reference/images/sql/client-apps/dbvis-4-conn-props.png and /dev/null differ diff --git a/docs/reference/images/sql/client-apps/dbvis-4-new-conn.png b/docs/reference/images/sql/client-apps/dbvis-4-new-conn.png new file mode 100644 index 0000000000000..3001641b5314c Binary files /dev/null and b/docs/reference/images/sql/client-apps/dbvis-4-new-conn.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-5-conn-props.png b/docs/reference/images/sql/client-apps/dbvis-5-conn-props.png new file mode 100644 index 0000000000000..e59e8215ec608 Binary files /dev/null and b/docs/reference/images/sql/client-apps/dbvis-5-conn-props.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-5-data.png b/docs/reference/images/sql/client-apps/dbvis-5-data.png deleted file mode 100644 index c67336568edc0..0000000000000 Binary files a/docs/reference/images/sql/client-apps/dbvis-5-data.png and /dev/null differ diff --git a/docs/reference/images/sql/client-apps/dbvis-6-data.png b/docs/reference/images/sql/client-apps/dbvis-6-data.png new file mode 100644 index 0000000000000..65f8a04eb5d17 Binary files /dev/null and b/docs/reference/images/sql/client-apps/dbvis-6-data.png differ diff --git a/docs/reference/images/sql/client-apps/workbench-2-add-driver.png b/docs/reference/images/sql/client-apps/workbench-2-add-driver.png deleted file mode 100644 index 659cfd0c40760..0000000000000 Binary files a/docs/reference/images/sql/client-apps/workbench-2-add-driver.png and /dev/null differ diff --git a/docs/reference/images/sql/client-apps/workbench-2-select-driver.png b/docs/reference/images/sql/client-apps/workbench-2-select-driver.png new file mode 100644 index 0000000000000..94d26b2d2d36e Binary files /dev/null and 
b/docs/reference/images/sql/client-apps/workbench-2-select-driver.png differ diff --git a/docs/reference/images/sql/client-apps/workbench-3-add-jar.png b/docs/reference/images/sql/client-apps/workbench-3-add-jar.png new file mode 100644 index 0000000000000..b10aa9ad9f134 Binary files /dev/null and b/docs/reference/images/sql/client-apps/workbench-3-add-jar.png differ diff --git a/docs/reference/images/sql/client-apps/workbench-3-connection.png b/docs/reference/images/sql/client-apps/workbench-4-connection.png similarity index 100% rename from docs/reference/images/sql/client-apps/workbench-3-connection.png rename to docs/reference/images/sql/client-apps/workbench-4-connection.png diff --git a/docs/reference/images/sql/client-apps/workbench-4-data.png b/docs/reference/images/sql/client-apps/workbench-5-data.png similarity index 100% rename from docs/reference/images/sql/client-apps/workbench-4-data.png rename to docs/reference/images/sql/client-apps/workbench-5-data.png diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc index b3c9166437073..403b9c7903a7a 100644 --- a/docs/reference/indices.asciidoc +++ b/docs/reference/indices.asciidoc @@ -12,6 +12,7 @@ index settings, aliases, mappings, and index templates. * <> * <> * <> +* <> * <> * <> * <> @@ -33,6 +34,9 @@ index settings, aliases, mappings, and index templates. [float] [[alias-management]] === Alias management: +* <> +* <> +* <> * <> [float] @@ -41,6 +45,12 @@ index settings, aliases, mappings, and index templates. * <> * <> * <> + +[float] +[[index-templates]] +=== Index templates: +* <> +* <> * <> [float] @@ -67,6 +77,8 @@ include::indices/get-index.asciidoc[] include::indices/indices-exists.asciidoc[] +include::indices/close.asciidoc[] + include::indices/open-close.asciidoc[] include::indices/shrink-index.asciidoc[] @@ -89,6 +101,12 @@ include::indices/get-field-mapping.asciidoc[] include::indices/types-exists.asciidoc[] +include::indices/add-alias.asciidoc[] + +include::indices/get-alias.asciidoc[] + +include::indices/alias-exists.asciidoc[] + include::indices/aliases.asciidoc[] include::indices/update-settings.asciidoc[] @@ -97,6 +115,10 @@ include::indices/get-settings.asciidoc[] include::indices/analyze.asciidoc[] +include::indices/delete-index-template.asciidoc[] + +include::indices/template-exists.asciidoc[] + include::indices/templates.asciidoc[] include::indices/stats.asciidoc[] diff --git a/docs/reference/indices/add-alias.asciidoc b/docs/reference/indices/add-alias.asciidoc new file mode 100644 index 0000000000000..a5c7d4c49e90b --- /dev/null +++ b/docs/reference/indices/add-alias.asciidoc @@ -0,0 +1,139 @@ +[[indices-add-alias]] +=== Add index alias API +++++ +Add index alias +++++ + +Creates or updates an index alias. + +include::alias-exists.asciidoc[tag=index-alias-def] + +[source,js] +---- +PUT /twitter/_alias/alias1 +---- +// CONSOLE +// TEST[setup:twitter] + + +[[add-alias-api-request]] +==== {api-request-title} + +`PUT //_alias/` + +`POST //_alias/` + +`PUT //_aliases/` + +`POST //_aliases/` + + +[[add-alias-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) +Comma-separated list or wildcard expression of index names +to add to the alias. ++ +To add all indices in the cluster to the alias, +use a value of `_all`. + +``:: +(Required, string) +Name of the index alias to create or update. 
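+
+For example, a sketch that adds every index matching a wildcard expression to
+a single alias (the `logs_2030*` pattern is illustrative and assumes such
+indices already exist):
+
+[source,js]
+--------------------------------------------------
+PUT /logs_2030*/_alias/logs_2030
+--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT logs_20302801\n/]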
+ + +[[add-alias-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] + + +[[add-alias-api-request-body]] +==== {api-request-body-title} + +`filter`:: +(Required, query object) +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-alias-filter] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-routing] + +[[add-alias-api-example]] +==== {api-examples-title} + +[[alias-adding]] +===== Add a time-based alias + +The following request creates an alias, `2030`, +for the `logs_20302801` index. + +[source,js] +-------------------------------------------------- +PUT /logs_20302801/_alias/2030 +-------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT logs_20302801\n/] + +[[add-alias-api-user-ex]] +===== Add a user-based alias + +First, create an index, `users`, +with a mapping for the `user_id` field: + +[source,js] +-------------------------------------------------- +PUT /users +{ + "mappings" : { + "properties" : { + "user_id" : {"type" : "integer"} + } + } +} +-------------------------------------------------- +// CONSOLE + +Then add the index alias for a specific user, `user_12`: + +[source,js] +-------------------------------------------------- +PUT /users/_alias/user_12 +{ + "routing" : "12", + "filter" : { + "term" : { + "user_id" : 12 + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[[alias-index-creation]] +===== Add an alias during index creation + +You can use the <> +to add an index alias during index creation. + +[source,js] +-------------------------------------------------- +PUT /logs_20302801 +{ + "mappings" : { + "properties" : { + "year" : {"type" : "integer"} + } + }, + "aliases" : { + "current_day" : {}, + "2030" : { + "filter" : { + "term" : {"year" : 2030 } + } + } + } +} +-------------------------------------------------- +// CONSOLE diff --git a/docs/reference/indices/alias-exists.asciidoc b/docs/reference/indices/alias-exists.asciidoc new file mode 100644 index 0000000000000..e1398f079952e --- /dev/null +++ b/docs/reference/indices/alias-exists.asciidoc @@ -0,0 +1,78 @@ +[[indices-alias-exists]] +=== Index alias exists API +++++ +Index alias exists +++++ + +Checks if an index alias exists. + +//tag::index-alias-def[] +An index alias is a secondary name +used to refer to one or more existing indices. +//end::index-alias-def[] + +The returned HTTP status code indicates whether the index alias exists or not. +A `404` means it does not exist, +and `200` means it does. + +[source,js] +---- +HEAD /_alias/alias1 +---- +// CONSOLE +// TEST[setup:twitter] +// TEST[s/^/PUT twitter\/_alias\/alias1\n/] + + +[[alias-exists-api-request]] +==== {api-request-title} + +`HEAD /_alias/` + +`HEAD //_alias/` + + +[[alias-exists-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-alias] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index] + +[[alias-exists-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] ++ +Defaults to `all`. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=local] + + +[[alias-exists-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Indicates all specified index aliases exist. 
+ + `404`:: +Indicates one or more specified index aliases **do not** exist. + + +[[alias-exists-api-example]] +==== {api-examples-title} + +[source,js] +---- +HEAD /_alias/2030 +HEAD /_alias/20* +HEAD /logs_20302801/_alias/* +---- +// CONSOLE +// TEST[s/^/PUT logs_20302801\nPUT logs_20302801\/_alias\/2030\n/] diff --git a/docs/reference/indices/aliases.asciidoc b/docs/reference/indices/aliases.asciidoc index 03fa18bc8a448..9d3507f5ac8d6 100644 --- a/docs/reference/indices/aliases.asciidoc +++ b/docs/reference/indices/aliases.asciidoc @@ -1,5 +1,34 @@ [[indices-aliases]] -=== Index Aliases +=== Update index alias API +++++ +Update index alias +++++ + +Adds or removes index aliases. + +include::alias-exists.asciidoc[tag=index-alias-def] + +[source,js] +---- +POST /_aliases +{ + "actions" : [ + { "add" : { "index" : "twitter", "alias" : "alias1" } } + ] +} +---- +// CONSOLE +// TEST[setup:twitter] + + +[[indices-aliases-api-request]] +==== {api-request-title} + +`POST /_aliases` + + +[[indices-aliases-api-desc]] +==== {api-description-title} APIs in Elasticsearch accept an index name when working against a specific index, and several indices when applicable. The index aliases @@ -10,7 +39,119 @@ automatically expand to the aliased indices. An alias can also be associated with a filter that will automatically be applied when searching, and routing values. An alias cannot have the same name as an index. -Here is a sample of associating the alias `alias1` with index `test1`: + +[[indices-aliases-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] + + +[[indices-aliases-api-request-body]] +==== {api-request-body-title} + +`actions`:: ++ +-- +(Required, array of actions) +Set of actions to perform. +Valid actions include: + +`add`:: +Adds an alias to an index. + +`remove`:: +Removes an alias from an index. + +`remove_index`:: +Deletes an index or index alias, +like the <>. + +You can perform these actions on alias objects. +Valid parameters for alias objects include: + +`index`:: +(String) +Wildcard expression of index names +used to perform the action. ++ +If the `indices` parameter is not specified, +this parameter is required. + +`indices`:: +(Array) +Array of index names +used to perform the action. ++ +If the `index` parameter is not specified, +this parameter is required. + +`alias`:: +(String) +Comma-separated list or wildcard expression of index alias names to +add, remove, or delete. ++ +If the `aliases` parameter is not specified, +this parameter is required for the `add` or `remove` action. + +`aliases`:: +(String) +Comma-separated list or wildcard expression of index alias names to +add, remove, or delete. ++ +If the `alias` parameter is not specified, +this parameter is required for the `add` or `remove` action. + +`filter`:: +(Optional, query object) +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-alias-filter] ++ +See <> for an example. + +`is_write_index`:: +(Optional, boolean) +If `true`, assigns the index as an alias's write index. +Defaults to `false`. ++ +An alias can have one write index at a time. ++ +See <> for an example. ++ +[IMPORTANT] +==== +Aliases that do not explicitly set `is_write_index: true` for an index, and +only reference one index, will have that referenced index behave as if it is the write index +until an additional index is referenced. At that point, there will be no write index and +writes will be rejected. 
+==== + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-routing] ++ +See <> for an example. + +`index_routing`:: +(Optional, string) +Custom <> used +for the alias's indexing operations. ++ +See <> for an example. + +`search_routing`:: +`index_routing`:: +(Optional, string) +Custom <> used +for the alias's search operations. ++ +See <> for an example. +-- + + +[[indices-aliases-api-example]] +==== {api-examples-title} + +[[indices-aliases-api-add-alias-ex]] +===== Add an alias + +The following request adds the `alias1` alias to the `test1` index. [source,js] -------------------------------------------------- @@ -24,7 +165,10 @@ POST /_aliases // CONSOLE // TEST[s/^/PUT test1\nPUT test2\n/] -And here is removing that same alias: +[[indices-aliases-api-remove-alias-ex]] +===== Remove an alias + +The following request removes the `alias1` alias. [source,js] -------------------------------------------------- @@ -38,6 +182,9 @@ POST /_aliases // CONSOLE // TEST[continued] +[[indices-aliases-api-rename-alias-ex]] +===== Rename an alias + Renaming an alias is a simple `remove` then `add` operation within the same API. This operation is atomic, no need to worry about a short period of time where the alias does not point to an index: @@ -48,13 +195,16 @@ POST /_aliases { "actions" : [ { "remove" : { "index" : "test1", "alias" : "alias1" } }, - { "add" : { "index" : "test2", "alias" : "alias1" } } + { "add" : { "index" : "test1", "alias" : "alias2" } } ] } -------------------------------------------------- // CONSOLE // TEST[continued] +[[indices-aliases-api-add-multi-alias-ex]] +===== Add an alias to multiple indices + Associating an alias with more than one index is simply several `add` actions: @@ -128,9 +278,8 @@ POST /_aliases <2> The index we should have added <3> `remove_index` is just like <> -[float] [[filtered]] -==== Filtered Aliases +===== Filtered aliases Aliases with filters provide an easy way to create different "views" of the same index. The filter can be defined using Query DSL and is applied @@ -175,7 +324,6 @@ POST /_aliases // CONSOLE // TEST[continued] -[float] [[aliases-routing]] ===== Routing @@ -242,9 +390,8 @@ GET /alias2/_search?q=user:kimchy&routing=2,3 // CONSOLE // TEST[continued] -[float] [[aliases-write-index]] -===== Write Index +===== Write index It is possible to associate the index pointed to by an alias as the write index. When specified, all index and update requests against an alias that point to multiple @@ -331,260 +478,3 @@ POST /_aliases -------------------------------------------------- // CONSOLE // TEST[s/^/PUT test\nPUT test2\n/] - -[IMPORTANT] -===================================== -Aliases that do not explicitly set `is_write_index: true` for an index, and -only reference one index, will have that referenced index behave as if it is the write index -until an additional index is referenced. At that point, there will be no write index and -writes will be rejected. -===================================== - -[float] -[[alias-adding]] -==== Add a single alias - -An alias can also be added with the endpoint - -`PUT /{index}/_alias/{name}` - - -where - -[horizontal] -`index`:: The index the alias refers to. Can be any of `* | _all | glob pattern | name1, name2, …` -`name`:: The name of the alias. This is a required option. -`routing`:: An optional routing that can be associated with an alias. -`filter`:: An optional filter that can be associated with an alias. - -You can also use the plural `_aliases`. 
- -[float] -===== Examples: - -Adding time based alias:: -+ --- -[source,js] --------------------------------------------------- -PUT /logs_201305/_alias/2013 --------------------------------------------------- -// CONSOLE -// TEST[s/^/PUT logs_201305\n/] --- - -Adding a user alias:: -+ --- -First create the index and add a mapping for the `user_id` field: - -[source,js] --------------------------------------------------- -PUT /users -{ - "mappings" : { - "properties" : { - "user_id" : {"type" : "integer"} - } - } -} --------------------------------------------------- -// CONSOLE - -Then add the alias for a specific user: - -[source,js] --------------------------------------------------- -PUT /users/_alias/user_12 -{ - "routing" : "12", - "filter" : { - "term" : { - "user_id" : 12 - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - --- - -[float] -[[alias-index-creation]] -==== Aliases during index creation - -Aliases can also be specified during <>: - -[source,js] --------------------------------------------------- -PUT /logs_20162801 -{ - "mappings" : { - "properties" : { - "year" : {"type" : "integer"} - } - }, - "aliases" : { - "current_day" : {}, - "2016" : { - "filter" : { - "term" : {"year" : 2016 } - } - } - } -} --------------------------------------------------- -// CONSOLE - -[float] -[[deleting]] -==== Delete aliases - - -The rest endpoint is: `/{index}/_alias/{name}` - -where - -[horizontal] -`index`:: `* | _all | glob pattern | name1, name2, …` -`name`:: `* | _all | glob pattern | name1, name2, …` - -Alternatively you can use the plural `_aliases`. Example: - -[source,js] --------------------------------------------------- -DELETE /logs_20162801/_alias/current_day --------------------------------------------------- -// CONSOLE -// TEST[continued] - -[float] -[[alias-retrieving]] -==== Retrieving existing aliases - -The get index alias API allows to filter by -alias name and index name. This api redirects to the master and fetches -the requested index aliases, if available. This api only serialises the -found index aliases. - -Possible options: -[horizontal] -`index`:: - The index name to get aliases for. Partial names are - supported via wildcards, also multiple index names can be specified - separated with a comma. Also the alias name for an index can be used. - -`alias`:: - The name of alias to return in the response. Like the index - option, this option supports wildcards and the option the specify - multiple alias names separated by a comma. - -`ignore_unavailable`:: - What to do if an specified index name doesn't - exist. If set to `true` then those indices are ignored. - -The rest endpoint is: `/{index}/_alias/{alias}`. 
- -[float] -===== Examples: - -All aliases for the index `logs_20162801`: - -[source,js] --------------------------------------------------- -GET /logs_20162801/_alias/* --------------------------------------------------- -// CONSOLE -// TEST[continued] - -Response: - -[source,js] --------------------------------------------------- -{ - "logs_20162801" : { - "aliases" : { - "2016" : { - "filter" : { - "term" : { - "year" : 2016 - } - } - } - } - } -} --------------------------------------------------- -// TESTRESPONSE - -All aliases with the name 2016 in any index: - -[source,js] --------------------------------------------------- -GET /_alias/2016 --------------------------------------------------- -// CONSOLE -// TEST[continued] - -Response: - -[source,js] --------------------------------------------------- -{ - "logs_20162801" : { - "aliases" : { - "2016" : { - "filter" : { - "term" : { - "year" : 2016 - } - } - } - } - } -} --------------------------------------------------- -// TESTRESPONSE - -All aliases that start with 20 in any index: - -[source,js] --------------------------------------------------- -GET /_alias/20* --------------------------------------------------- -// CONSOLE -// TEST[continued] - -Response: - -[source,js] --------------------------------------------------- -{ - "logs_20162801" : { - "aliases" : { - "2016" : { - "filter" : { - "term" : { - "year" : 2016 - } - } - } - } - } -} --------------------------------------------------- -// TESTRESPONSE - -There is also a HEAD variant of the get indices aliases api to check if -index aliases exist. The indices aliases exists api supports the same -option as the get indices aliases api. Examples: - -[source,js] --------------------------------------------------- -HEAD /_alias/2016 -HEAD /_alias/20* -HEAD /logs_20162801/_alias/* --------------------------------------------------- -// CONSOLE -// TEST[continued] diff --git a/docs/reference/indices/analyze.asciidoc b/docs/reference/indices/analyze.asciidoc index 50dc88f3711d2..b48243c6b59f6 100644 --- a/docs/reference/indices/analyze.asciidoc +++ b/docs/reference/indices/analyze.asciidoc @@ -1,15 +1,145 @@ [[indices-analyze]] -=== Analyze +=== Analyze API +++++ +Analyze +++++ -Performs the analysis process on a text and return the tokens breakdown -of the text. +Performs <> on a text string +and returns the resulting tokens. -Can be used without specifying an index against one of the many built in -analyzers: +[source,js] +-------------------------------------------------- +GET /_analyze +{ + "analyzer" : "standard", + "text" : "Quick Brown Foxes!" +} +-------------------------------------------------- +// CONSOLE + + +[[analyze-api-request]] +==== {api-request-title} + +`GET /_analyze` + +`POST /_analyze` + +`GET //_analyze` + +`POST //_analyze` + + +[[analyze-api-path-params]] +==== {api-path-parms-title} + +``:: ++ +-- +(Optional, string) +Index used to derive the analyzer. + +If specified, +the `analyzer` or `` parameter overrides this value. + +If no analyzer or field are specified, +the analyze API uses the default analyzer for the index. + +If no index is specified +or the index does not have a default analyzer, +the analyze API uses the <>. +-- + + +[[analyze-api-query-params]] +==== {api-query-parms-title} + +`analyzer`:: ++ +-- +(Optional, string or <>) +Analyzer used to analyze for the provided `text`. + +See <> for a list of built-in analyzers. +You can also provide a <>. 
+ +If this parameter is not specified, +the analyze API uses the analyzer defined in the field's mapping. + +If no field is specified, +the analyze API uses the default analyzer for the index. + +If no index is specified, +or the index does not have a default analyzer, +the analyze API uses the <>. +-- + +`attributes`:: +(Optional, array of strings) +Array of token attributes used to filter the output of the `explain` parameter. + +`char_filter`:: +(Optional, array of strings) +Array of character filters used to preprocess characters before the tokenizer. +See <> for a list of character filters. + +`explain`:: +(Optional, boolean) +If `true`, the response includes token attributes and additional details. +Defaults to `false`. +experimental:[The format of the additional detail information is labelled as experimental in Lucene and it may change in the future.] + +`field`:: ++ +-- +(Optional, string) +Field used to derive the analyzer. +To use this parameter, +you must specify an index. + +If specified, +the `analyzer` parameter overrides this value. + +If no field is specified, +the analyze API uses the default analyzer for the index. + +If no index is specified +or the index does not have a default analyzer, +the analyze API uses the <>. +-- + +`filter`:: +(Optional, Array of strings) +Array of token filters used to apply after the tokenizer. +See <> for a list of token filters. + +`normalizer`:: +(Optional, string) +Normalizer to use to convert text into a single token. +See <> for a list of normalizers. + +`text`:: +(Required, string or array of strings) +Text to analyze. +If an array of strings is provided, it is analyzed as a multi-value field. + +`tokenizer`:: +(Optional, string) +Tokenizer to use to convert text into tokens. +See <> for a list of tokenizers. + +[[analyze-api-example]] +==== {api-examples-title} + +[[analyze-api-no-index-ex]] +===== No index specified + +You can apply any of the built-in analyzers to the text string without +specifying an index. [source,js] -------------------------------------------------- -GET _analyze +GET /_analyze { "analyzer" : "standard", "text" : "this is a test" @@ -17,11 +147,14 @@ GET _analyze -------------------------------------------------- // CONSOLE -If text parameter is provided as array of strings, it is analyzed as a multi-valued field. +[[analyze-api-text-array-ex]] +===== Array of text strings + +If the `text` parameter is provided as array of strings, it is analyzed as a multi-value field. [source,js] -------------------------------------------------- -GET _analyze +GET /_analyze { "analyzer" : "standard", "text" : ["this is a test", "the second text"] @@ -29,13 +162,16 @@ GET _analyze -------------------------------------------------- // CONSOLE -Or by building a custom transient analyzer out of tokenizers, -token filters and char filters. Token filters can use the shorter 'filter' -parameter name: +[[analyze-api-custom-analyzer-ex]] +===== Custom analyzer + +You can use the analyze API to test a custom transient analyzer built from +tokenizers, token filters, and char filters. 
Token filters use the `filter` +parameter: [source,js] -------------------------------------------------- -GET _analyze +GET /_analyze { "tokenizer" : "keyword", "filter" : ["lowercase"], @@ -46,7 +182,7 @@ GET _analyze [source,js] -------------------------------------------------- -GET _analyze +GET /_analyze { "tokenizer" : "keyword", "filter" : ["lowercase"], @@ -62,7 +198,7 @@ Custom tokenizers, token filters, and character filters can be specified in the [source,js] -------------------------------------------------- -GET _analyze +GET /_analyze { "tokenizer" : "whitespace", "filter" : ["lowercase", {"type": "stop", "stopwords": ["a", "is", "this"]}], @@ -71,11 +207,14 @@ GET _analyze -------------------------------------------------- // CONSOLE -It can also run against a specific index: +[[analyze-api-specific-index-ex]] +===== Specific index + +You can also run the analyze API against a specific index: [source,js] -------------------------------------------------- -GET analyze_sample/_analyze +GET /analyze_sample/_analyze { "text" : "this is a test" } @@ -89,7 +228,7 @@ can also be provided to use a different analyzer: [source,js] -------------------------------------------------- -GET analyze_sample/_analyze +GET /analyze_sample/_analyze { "analyzer" : "whitespace", "text" : "this is a test" @@ -98,11 +237,14 @@ GET analyze_sample/_analyze // CONSOLE // TEST[setup:analyze_sample] -Also, the analyzer can be derived based on a field mapping, for example: +[[analyze-api-field-ex]] +===== Derive analyzer from a field mapping + +The analyzer can be derived based on a field mapping, for example: [source,js] -------------------------------------------------- -GET analyze_sample/_analyze +GET /analyze_sample/_analyze { "field" : "obj1.field1", "text" : "this is a test" @@ -114,11 +256,14 @@ GET analyze_sample/_analyze Will cause the analysis to happen based on the analyzer configured in the mapping for `obj1.field1` (and if not, the default index analyzer). +[[analyze-api-normalizer-ex]] +===== Normalizer + A `normalizer` can be provided for keyword field with normalizer associated with the `analyze_sample` index. [source,js] -------------------------------------------------- -GET analyze_sample/_analyze +GET /analyze_sample/_analyze { "normalizer" : "my_normalizer", "text" : "BaR" @@ -131,7 +276,7 @@ Or by building a custom transient normalizer out of token filters and char filte [source,js] -------------------------------------------------- -GET _analyze +GET /_analyze { "filter" : ["lowercase"], "text" : "BaR" @@ -140,7 +285,7 @@ GET _analyze // CONSOLE [[explain-analyze-api]] -==== Explain Analyze +===== Explain analyze If you want to get more advanced details, set `explain` to `true` (defaults to `false`). It will output all token attributes for each token. You can filter token attributes you want to output by setting `attributes` option. @@ -149,7 +294,7 @@ NOTE: The format of the additional detail information is labelled as experimenta [source,js] -------------------------------------------------- -GET _analyze +GET /_analyze { "tokenizer" : "standard", "filter" : ["snowball"], @@ -210,8 +355,7 @@ The request returns the following result: <1> Output only "keyword" attribute, since specify "attributes" in the request. [[tokens-limit-settings]] -[float] -=== Settings to prevent tokens explosion +===== Setting a token limit Generating excessive amount of tokens may cause a node to run out of memory. 
The following setting allows to limit the number of tokens that can be produced: @@ -225,7 +369,7 @@ The following setting allows to limit the number of tokens that can be produced: [source,js] -------------------------------------------------- -PUT analyze_sample +PUT /analyze_sample { "settings" : { "index.analyze.max_token_count" : 20000 @@ -237,7 +381,7 @@ PUT analyze_sample [source,js] -------------------------------------------------- -GET analyze_sample/_analyze +GET /analyze_sample/_analyze { "text" : "this is a test" } diff --git a/docs/reference/indices/close.asciidoc b/docs/reference/indices/close.asciidoc new file mode 100644 index 0000000000000..71a5f8b634203 --- /dev/null +++ b/docs/reference/indices/close.asciidoc @@ -0,0 +1,85 @@ +[[indices-close]] +=== Close index API +++++ +Close index +++++ + +Closes an index. + +[source,js] +-------------------------------------------------- +POST /twitter/_close +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] + + +[[close-index-api-request]] +==== {api-request-title} + +`POST //_close` + + +[[close-index-api-desc]] +==== {api-description-title} + +You use the close index API to close open indices. + +include::{docdir}/indices/open-close.asciidoc[tag=closed-index] + + +[[close-index-api-path-params]] +==== {api-path-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index] ++ +To close all indices, use `_all` or `*`. +To disallow the closing of indices with `_all` or wildcard expressions, +change the `action.destructive_requires_name` cluster setting to `true`. +You can update this setting in the `elasticsearch.yml` file +or using the <> API. + + +[[close-index-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] ++ +Defaults to `open`. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] + + +[[close-index-api-example]] +==== {api-examples-title} + +The following example shows how to close an index: + +[source,js] +-------------------------------------------------- +POST /my_index/_close +-------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT my_index\n/] + +The API returns following response: + +[source,js] +-------------------------------------------------- +{ + "acknowledged" : true, + "shards_acknowledged" : true, + "indices" : { + "my_index" : { + "closed" : true + } + } +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/docs/reference/indices/create-index.asciidoc b/docs/reference/indices/create-index.asciidoc index f90ba08f939a1..17e629024b7b4 100644 --- a/docs/reference/indices/create-index.asciidoc +++ b/docs/reference/indices/create-index.asciidoc @@ -1,23 +1,42 @@ [[indices-create-index]] -=== Create Index +=== Create index API +++++ +Create index +++++ -The Create Index API is used to manually create an index in Elasticsearch. All documents in Elasticsearch -are stored inside of one index or another. - -The most basic command is the following: +Creates a new index. [source,js] -------------------------------------------------- -PUT twitter +PUT /twitter -------------------------------------------------- // CONSOLE -This creates an index named `twitter` with all default setting. 
-[NOTE] -.Index name limitations -====================================================== -There are several limitations to what you can name your index. The complete list of limitations are: +[[indices-create-api-request]] +==== {api-request-title} + +`PUT /` + +[[indices-create-api-desc]] +==== {api-description-title} +You can use the create index API to add a new index to an {es} cluster. When +creating an index, you can specify the following: + +* Settings for the index +* Mappings for fields in the index +* Index aliases + + +[[indices-create-api-path-params]] +==== {api-path-parms-title} + +``:: ++ +-- +(Optional, string) Name of the index you wish to create. + +Index names must meet the following criteria: - Lowercase only - Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, ` ` (space character), `,`, `#` @@ -25,19 +44,55 @@ There are several limitations to what you can name your index. The complete lis - Cannot start with `-`, `_`, `+` - Cannot be `.` or `..` - Cannot be longer than 255 bytes (note it is bytes, so multi-byte characters will count towards the 255 limit faster) +-- + + +[[indices-create-api-query-params]] +==== {api-query-parms-title} -====================================================== +include::{docdir}/rest-api/common-parms.asciidoc[tag=include-type-name] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] + + +[[indices-create-api-request-body]] +==== {api-request-body-title} + +`aliases`:: +(Optional, <>) Index aliases which include the +index. See <>. + +`mappings`:: ++ +-- +(Optional, <>) Mapping for fields in the index. If +specified, this mapping can include: + +* Field names +* <> +* <> + +See <>. +-- + +`settings`:: +(Optional, <>) Configuration +options for the index. See <>. + +[[indices-create-api-example]] +==== {api-examples-title} -[float] [[create-index-settings]] -==== Index Settings +===== Index settings Each index created can have specific settings associated with it, defined in the body: [source,js] -------------------------------------------------- -PUT twitter +PUT /twitter { "settings" : { "index" : { @@ -55,7 +110,7 @@ or more simplified [source,js] -------------------------------------------------- -PUT twitter +PUT /twitter { "settings" : { "number_of_shards" : 3, @@ -73,16 +128,14 @@ For more information regarding all the different index level settings that can be set when creating an index, please check the <> section. - -[float] [[mappings]] -==== Mappings +===== Mappings The create index API allows for providing a mapping definition: [source,js] -------------------------------------------------- -PUT test +PUT /test { "settings" : { "number_of_shards" : 1 @@ -100,15 +153,14 @@ NOTE: Before 7.0.0, the 'mappings' definition used to include a type name. Altho types in requests is now deprecated, a type can still be provided if the request parameter include_type_name is set. For more details, please see <>. 
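+
+The following example illustrates the deprecated typed format, using an example
+index named `test-typed`: the `include_type_name` parameter is set and the field
+mappings are nested under a `_doc` type name.
+
+[source,js]
+--------------------------------------------------
+PUT /test-typed?include_type_name=true
+{
+    "mappings" : {
+        "_doc" : {
+            "properties" : {
+                "field1" : { "type" : "text" }
+            }
+        }
+    }
+}
+--------------------------------------------------
+// CONSOLE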
-[float]
[[create-index-aliases]]
-==== Aliases
+===== Aliases

The create index API allows also to provide a set of <>:
[source,js]
--------------------------------------------------
-PUT test
+PUT /test
{
  "aliases" : {
    "alias_1" : {},
@@ -123,9 +175,8 @@ PUT test
--------------------------------------------------
// CONSOLE
-[float]
[[create-index-wait-for-active-shards]]
-==== Wait For Active Shards
+===== Wait For active shards

By default, index creation will only return a response to the client when the primary copies of
each shard have been started, or the request times out. The index creation response will indicate
@@ -158,7 +209,7 @@ the `wait_for_active_shards` value on all subsequent write operations):
[source,js]
--------------------------------------------------
-PUT test
+PUT /test
{
  "settings": {
    "index.write.wait_for_active_shards": "2"
@@ -172,7 +223,7 @@ or through the request parameter `wait_for_active_shards`:
[source,js]
--------------------------------------------------
-PUT test?wait_for_active_shards=2
+PUT /test?wait_for_active_shards=2
--------------------------------------------------
// CONSOLE
// TEST[skip:requires two nodes]
diff --git a/docs/reference/indices/delete-index-template.asciidoc b/docs/reference/indices/delete-index-template.asciidoc
new file mode 100644
index 0000000000000..9ee1158fa447f
--- /dev/null
+++ b/docs/reference/indices/delete-index-template.asciidoc
@@ -0,0 +1,54 @@
+[[indices-delete-template]]
+=== Delete index template API
+++++
+Delete index template
+++++
+
+Deletes an existing index template.
+
+////
+[source,js]
+--------------------------------------------------
+PUT _template/template_1
+{
+  "index_patterns" : ["te*"],
+  "settings": {
+    "number_of_shards": 1
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TESTSETUP
+////
+
+[source,js]
+--------------------------------------------------
+DELETE /_template/template_1
+--------------------------------------------------
+// CONSOLE
+
+
+[[delete-template-api-request]]
+==== {api-request-title}
+
+`DELETE /_template/`
+
+
+[[delete-template-api-desc]]
+==== {api-description-title}
+
+Use the delete index template API to delete one or more index templates.
+
+include::templates.asciidoc[tag=index-template-def]
+
+
+[[delete-template-api-path-params]]
+==== {api-path-parms-title}
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=index-template]
+
+
+[[delete-template-api-query-params]]
+==== {api-query-parms-title}
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
\ No newline at end of file
diff --git a/docs/reference/indices/get-alias.asciidoc b/docs/reference/indices/get-alias.asciidoc
new file mode 100644
index 0000000000000..9a66eb65d4d54
--- /dev/null
+++ b/docs/reference/indices/get-alias.asciidoc
@@ -0,0 +1,185 @@
+[[indices-get-alias]]
+=== Get index alias API
+++++
+Get index alias
+++++
+
+Returns information about one or more index aliases.
+
+include::alias-exists.asciidoc[tag=index-alias-def]
+
+[source,js]
+----
+GET /twitter/_alias/alias1
+----
+// CONSOLE
+// TEST[setup:twitter]
+// TEST[s/^/PUT twitter\/_alias\/alias1\n/]
+
+
+[[get-alias-api-request]]
+==== {api-request-title}
+
+`GET /_alias`
+
+`GET /_alias/`
+
+`GET //_alias/`
+
+
+[[get-alias-api-path-params]]
+==== {api-path-parms-title}
+
+``::
+(Optional, string)
+include::{docdir}/rest-api/common-parms.asciidoc[tag=index-alias]
++
+To retrieve information for all index aliases,
+use a value of `_all` or `*`.
+ +include::{docdir}/rest-api/common-parms.asciidoc[tag=index] + + +[[get-alias-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] ++ +Defaults to `all`. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=local] + + +[[get-alias-api-example]] +==== {api-examples-title} + +[[get-alias-api-all-ex]] +===== Get all aliases for an index + +You can add index aliases during index creation +using a <> request. + +The following create index API request creates the `logs_20302801` index +with two aliases: + +* `current_day` +* `2030`, which only returns documents +in the `logs_20302801` index +with a `year` field value of `2030` + +[source,js] +-------------------------------------------------- +PUT /logs_20302801 +{ + "aliases" : { + "current_day" : {}, + "2030" : { + "filter" : { + "term" : {"year" : 2030 } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +The following get index alias API request returns all aliases +for the index `logs_20302801`: + +[source,js] +-------------------------------------------------- +GET /logs_20302801/_alias/* +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +The API returns the following response: + +[source,js] +-------------------------------------------------- +{ + "logs_20302801" : { + "aliases" : { + "current_day" : { + }, + "2030" : { + "filter" : { + "term" : { + "year" : 2030 + } + } + } + } + } +} +-------------------------------------------------- +// TESTRESPONSE + + +[[get-alias-api-named-ex]] +===== Get a specific alias + +The following index alias API request returns the `2030` alias: + +[source,js] +-------------------------------------------------- +GET /_alias/2030 +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +The API returns the following response: + +[source,js] +-------------------------------------------------- +{ + "logs_20302801" : { + "aliases" : { + "2030" : { + "filter" : { + "term" : { + "year" : 2030 + } + } + } + } + } +} +-------------------------------------------------- +// TESTRESPONSE + +[[get-alias-api-wildcard-ex]] +===== Get aliases based on a wildcard + +The following index alias API request returns any alias that begin with `20`: + +[source,js] +-------------------------------------------------- +GET /_alias/20* +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +The API returns the following response: + +[source,js] +-------------------------------------------------- +{ + "logs_20302801" : { + "aliases" : { + "2030" : { + "filter" : { + "term" : { + "year" : 2030 + } + } + } + } + } +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/docs/reference/indices/get-field-mapping.asciidoc b/docs/reference/indices/get-field-mapping.asciidoc index 2223231436624..9397052b2871b 100644 --- a/docs/reference/indices/get-field-mapping.asciidoc +++ b/docs/reference/indices/get-field-mapping.asciidoc @@ -1,15 +1,70 @@ [[indices-get-field-mapping]] -=== Get Field Mapping +=== Get field mapping API +++++ +Get field mapping +++++ -The get field mapping API allows you to retrieve mapping definitions for one or more fields. -This is useful when you do not need the complete type mapping returned by -the <> API. 
+Retrieves <> for one or more fields. This is useful +if you don't need the <> of an index or +your index contains a large number of fields. -For example, consider the following mapping: +[source,js] +---- +GET /twitter/_mapping/field/user +---- +// CONSOLE +// TEST[setup:twitter] + + +[[get-field-mapping-api-request]] +==== {api-request-title} + +`GET /_mapping/field/` + +`GET //_mapping/field/` + + +[[get-field-mapping-api-path-params]] +==== {api-path-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index] + +``:: +(Optional, string) Comma-separated list or wildcard expression of fields used to +limit returned information. + + +[[get-field-mapping-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=include-type-name] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] + +`include_defaults`:: +(Optional, boolean) If `true`, the response includes default mapping values. +Defaults to `false`. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=local] + + +[[get-field-mapping-api-example]] +==== {api-examples-title} + +[[get-field-mapping-api-basic-ex]] +===== Example with index setup + +You can provide field mappings when creating a new index. The following +<> API request creates the `publications` +index with several field mappings. [source,js] -------------------------------------------------- -PUT publications +PUT /publications { "mappings": { "properties": { @@ -26,7 +81,6 @@ PUT publications } } -------------------------------------------------- -// TESTSETUP // CONSOLE The following returns the mapping of the field `title` only: @@ -36,8 +90,9 @@ The following returns the mapping of the field `title` only: GET publications/_mapping/field/title -------------------------------------------------- // CONSOLE +// TEST[continued] -For which the response is: +The API returns the following response: [source,js] -------------------------------------------------- @@ -58,30 +113,8 @@ For which the response is: -------------------------------------------------- // TESTRESPONSE -[float] -==== Multiple Indices and Fields - -The get field mapping API can be used to get the mapping of multiple fields from more than one index -with a single call. General usage of the API follows the -following syntax: `host:port/{index}/_mapping/field/{field}` where -`{index}` and `{field}` can stand for comma-separated list of names or wild cards. To -get mappings for all indices you can use `_all` for `{index}`. The -following are some examples: - -[source,js] --------------------------------------------------- -GET /twitter,kimchy/_mapping/field/message - -GET /_all/_mapping/field/message,user.id - -GET /_all/_mapping/field/*.id --------------------------------------------------- -// CONSOLE -// TEST[setup:twitter] -// TEST[s/^/PUT kimchy\nPUT book\n/] - -[float] -==== Specifying fields +[[get-field-mapping-api-specific-fields-ex]] +===== Specifying fields The get mapping api allows you to specify a comma-separated list of fields. @@ -92,6 +125,7 @@ For instance to select the `id` of the `author` field, you must use its full nam GET publications/_mapping/field/author.id,abstract,name -------------------------------------------------- // CONSOLE +// TEST[continued] returns: @@ -129,6 +163,7 @@ The get field mapping API also supports wildcard notation. 
GET publications/_mapping/field/a* -------------------------------------------------- // CONSOLE +// TEST[continued] returns: @@ -167,11 +202,24 @@ returns: -------------------------------------------------- // TESTRESPONSE -[float] -==== Other options +[[get-field-mapping-api-multi-index-ex]] +===== Multiple indices and fields -[horizontal] -`include_defaults`:: +The get field mapping API can be used to get the mapping of multiple fields from more than one index +with a single call. General usage of the API follows the +following syntax: `host:port//_mapping/field/` where +`` and `` can stand for comma-separated list of names or wild cards. To +get mappings for all indices you can use `_all` for ``. The +following are some examples: + +[source,js] +-------------------------------------------------- +GET /twitter,kimchy/_mapping/field/message - adding `include_defaults=true` to the query string will cause the response - to include default values, which are normally suppressed. +GET /_all/_mapping/field/message,user.id + +GET /_all/_mapping/field/*.id +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] +// TEST[s/^/PUT kimchy\nPUT book\n/] diff --git a/docs/reference/indices/get-settings.asciidoc b/docs/reference/indices/get-settings.asciidoc index 7be8760232b59..ce1ada57bd6d6 100644 --- a/docs/reference/indices/get-settings.asciidoc +++ b/docs/reference/indices/get-settings.asciidoc @@ -1,7 +1,10 @@ [[indices-get-settings]] -=== Get Settings +=== Get index settings API +++++ +Get index settings +++++ -The get settings API allows to retrieve settings of index/indices: +Returns setting information for an index. [source,js] -------------------------------------------------- @@ -10,14 +13,54 @@ GET /twitter/_settings // CONSOLE // TEST[setup:twitter] -[float] -==== Multiple Indices and Types -The get settings API can be used to get settings for more than one index -with a single call. General usage of the API follows the -following syntax: `host:port/{index}/_settings` where -`{index}` can stand for comma-separated list of index names and aliases. To -get settings for all indices you can use `_all` for `{index}`. +[[get-index-settings-api-request]] +==== {api-request-title} + +`GET //_settings` + +`GET //_settings/` + + +[[get-index-settings-api-path-params]] +==== {api-path-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index] ++ +Use a value of `_all` to retrieve information for all indices in the cluster. + +``:: +(Optional, string) Comma-separated list or wildcard expression of setting names +used to limit the request. + + +[[get-index-settings-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] ++ +Defaults to `all`. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=flat-settings] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=include-defaults] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=local] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout] + + +[[get-index-settings-api-example]] +==== {api-examples-title} + +===== Multiple indices + +The get settings API can be used to get settings for more than one index with a +single call. To get settings for all indices you can use `_all` for ``. Wildcard expressions are also supported. 
The following are some examples: [source,js] @@ -32,8 +75,7 @@ GET /log_2013_*/_settings // TEST[setup:twitter] // TEST[s/^/PUT kimchy\nPUT log_2013_01_01\n/] -[float] -==== Filtering settings by name +===== Filtering settings by name The settings that are returned can be filtered with wildcard matching as follows: diff --git a/docs/reference/indices/indices-exists.asciidoc b/docs/reference/indices/indices-exists.asciidoc index 4a1b9c36cac9c..b85210af96d15 100644 --- a/docs/reference/indices/indices-exists.asciidoc +++ b/docs/reference/indices/indices-exists.asciidoc @@ -1,17 +1,59 @@ [[indices-exists]] -=== Indices Exists +=== Indices exists API +++++ +Indices exists +++++ -Used to check if the index (indices) exists or not. For example: +Checks if an index exists. +The returned HTTP status code indicates if the index exists or not. +A `404` means it does not exist, and `200` means it does. [source,js] -------------------------------------------------- -HEAD twitter +HEAD /twitter -------------------------------------------------- // CONSOLE // TEST[setup:twitter] -The HTTP status code indicates if the index exists or not. A `404` means -it does not exist, and `200` means it does. -IMPORTANT: This request does not distinguish between an index and an alias, +[[indices-exists-api-request]] +==== {api-request-title} + +`HEAD /` + + +[[indices-exists-api-path-params]] +==== {api-path-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index] ++ +IMPORTANT: This parameter does not distinguish between an index name and <>, i.e. status code `200` is also returned if an alias exists with that name. + + +[[indices-exists-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] ++ +Defaults to `open`. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=flat-settings] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=include-defaults] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=local] + + +[[indices-exists-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Indicates all specified indices or index aliases exist. + + `404`:: +Indicates one or more specified indices or index aliases **do not** exist. diff --git a/docs/reference/indices/open-close.asciidoc b/docs/reference/indices/open-close.asciidoc index efbd289758a6b..49d73060458f6 100644 --- a/docs/reference/indices/open-close.asciidoc +++ b/docs/reference/indices/open-close.asciidoc @@ -1,8 +1,32 @@ [[indices-open-close]] -=== Open / Close Index API +=== Open index API +++++ +Open index +++++ -The open and close index APIs allow to close an index, and later on -opening it. +Opens a closed index. + +[source,js] +-------------------------------------------------- +POST /twitter/_open +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] +// TEST[s/^/POST \/twitter\/_close\n/] + + +[[open-index-api-request]] +==== {api-request-title} + +`POST //_open` + + +[[open-index-api-desc]] +==== {api-description-title} + +You use the open index API to re-open closed indices. + +// tag::closed-index[] A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. 
It is not possible to index @@ -18,34 +42,61 @@ data of opened/closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. -The REST endpoint is `/{index}/_close` and `/{index}/_open`. +You can open and close multiple indices. An error is thrown +if the request explicitly refers to a missing index. This behaviour can be +disabled using the `ignore_unavailable=true` parameter. -The following example shows how to close an index: +All indices can be opened or closed at once using `_all` as the index name +or specifying patterns that identify them all (e.g. `*`). -[source,js] --------------------------------------------------- -POST /my_index/_close --------------------------------------------------- -// CONSOLE -// TEST[s/^/PUT my_index\n/] +Identifying indices via wildcards or `_all` can be disabled by setting the +`action.destructive_requires_name` flag in the config file to `true`. +This setting can also be changed via the cluster update settings api. -This will return the following response: +Closed indices consume a significant amount of disk-space which can cause problems in managed environments. Closing indices can be disabled via the cluster settings +API by setting `cluster.indices.close.enable` to `false`. The default is `true`. -[source,js] --------------------------------------------------- -{ - "acknowledged" : true, - "shards_acknowledged" : true, - "indices" : { - "my_index" : { - "closed" : true - } - } -} --------------------------------------------------- -// TESTRESPONSE +===== Wait For active shards -A closed index can be reopened like this: +Because opening or closing an index allocates its shards, the +<> setting on +index creation applies to the `_open` and `_close` index actions as well. + +// end::closed-index[] + + +[[open-index-api-path-params]] +==== {api-path-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index] ++ +To open all indices, use `_all` or `*`. +To disallow the opening of indices with `_all` or wildcard expressions, +change the `action.destructive_requires_name` cluster setting to `true`. +You can update this setting in the `elasticsearch.yml` file +or using the <> API. + + +[[open-index-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] ++ +Defaults to `closed`. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] + + +[[open-index-api-example]] +==== {api-examples-title} + +A closed index can be re-opened like this: [source,js] -------------------------------------------------- @@ -54,7 +105,7 @@ POST /my_index/_open // CONSOLE // TEST[s/^/PUT my_index\nPOST my_index\/_close\n/] -which will yield the following response: +The API returns the following response: [source,js] -------------------------------------------------- @@ -64,24 +115,3 @@ which will yield the following response: } -------------------------------------------------- // TESTRESPONSE - -It is possible to open and close multiple indices. An error will be thrown -if the request explicitly refers to a missing index. This behaviour can be -disabled using the `ignore_unavailable=true` parameter. 
- -All indices can be opened or closed at once using `_all` as the index name -or specifying patterns that identify them all (e.g. `*`). - -Identifying indices via wildcards or `_all` can be disabled by setting the -`action.destructive_requires_name` flag in the config file to `true`. -This setting can also be changed via the cluster update settings api. - -Closed indices consume a significant amount of disk-space which can cause problems in managed environments. Closing indices can be disabled via the cluster settings -API by setting `cluster.indices.close.enable` to `false`. The default is `true`. - -[float] -==== Wait For Active Shards - -Because opening or closing an index allocates its shards, the -<> setting on -index creation applies to the `_open` and `_close` index actions as well. diff --git a/docs/reference/indices/template-exists.asciidoc b/docs/reference/indices/template-exists.asciidoc new file mode 100644 index 0000000000000..550297a505982 --- /dev/null +++ b/docs/reference/indices/template-exists.asciidoc @@ -0,0 +1,56 @@ +[[indices-template-exists]] +=== Index template exists API +++++ +Index template exists +++++ + +Checks if an index template exists. + + + +[source,js] +----------------------------------------------- +HEAD /_template/template_1 +----------------------------------------------- +// CONSOLE + + +[[template-exists-api-request]] +==== {api-request-title} + +`HEAD /_template/` + + +[[template-exists-api-desc]] +==== {api-description-title} + +Use the index template exists API +to determine whether one or more index templates exist. + +include::templates.asciidoc[tag=index-template-def] + + +[[template-exists-api-path-params]] +==== {api-path-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-template] + + +[[template-exists-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=flat-settings] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=local] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout] + + +[[template-exists-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Indicates all specified index templates exist. + +`404`:: +Indicates one or more specified index templates **do not** exist. diff --git a/docs/reference/indices/templates.asciidoc b/docs/reference/indices/templates.asciidoc index 60e65c3139454..6d6591cc4ff3a 100644 --- a/docs/reference/indices/templates.asciidoc +++ b/docs/reference/indices/templates.asciidoc @@ -1,11 +1,12 @@ [[indices-templates]] === Index Templates -Index templates allow you to define templates that will automatically be -applied when new indices are created. The templates include both -<> and <> -and a simple pattern template that controls whether the template should be -applied to the new index. +// tag::index-template-def[] +Index templates define <> and <> +that you can automatically apply when creating new indices. +{es} applies templates to new indices +based on an index pattern that matches the index name. +// end::index-template-def[] NOTE: Templates are only applied at index creation time. Changing a template will have no impact on existing indices. When using the create index API, the @@ -119,26 +120,6 @@ GET /_template -------------------------------------------------- // CONSOLE -[float] -[[indices-templates-exists]] -==== Template exists - -Used to check if the template exists or not. 
For example: - -[source,js] ------------------------------------------------ -HEAD _template/template_1 ------------------------------------------------ -// CONSOLE - -The HTTP status code indicates if the template with the given name -exists or not. Status code `200` means it exists and `404` means -it does not. - -NOTE: Before 7.0.0, the 'mappings' definition used to include a type name. Although mappings -no longer contain a type name by default, you can still use the old format by setting -the parameter include_type_name. For more details, please see <>. - [float] [[multiple-templates]] ==== Multiple Templates Matching diff --git a/docs/reference/indices/update-settings.asciidoc b/docs/reference/indices/update-settings.asciidoc index 9962182edadcb..d959af25b91ed 100644 --- a/docs/reference/indices/update-settings.asciidoc +++ b/docs/reference/indices/update-settings.asciidoc @@ -1,11 +1,10 @@ [[indices-update-settings]] -=== Update Indices Settings +=== Update index settings API +++++ +Update index settings +++++ -Change specific index level settings in real time. - -The REST endpoint is `/_settings` (to update all indices) or -`{index}/_settings` to update one (or more) indices settings. -The body of the request includes the updated settings, for example: +Changes an <> in real time. [source,js] -------------------------------------------------- @@ -19,7 +18,55 @@ PUT /twitter/_settings // CONSOLE // TEST[setup:twitter] -To reset a setting back to the default value, use `null`. For example: + +[[update-index-settings-api-request]] +==== {api-request-title} + +`PUT //_settings` + + +[[update-index-settings-api-path-params]] +==== {api-path-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index] ++ +To update a setting for all indices, +use `_all` or exclude this parameter. + + +[[update-index-settings-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] ++ +Defaults to `open`. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=flat-settings] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] + +`preserve_existing`:: +(Optional, boolean) If `true`, existing index settings remain unchanged. +Defaults to `false`. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] + + +[[sample-api-query-params]] +==== {api-query-parms-title} + +`settings`:: +(Optional, <>) Configuration +options for the index. See <>. + +[[sample-api-example]] +==== {api-examples-title} + +[[reset-index-setting]] +===== Reset an index setting +To revert a setting to the default value, use `null`. For example: [source,js] -------------------------------------------------- @@ -38,9 +85,8 @@ indices can be found in <>. To preserve existing settings from being updated, the `preserve_existing` request parameter can be set to `true`. -[float] [[bulk]] -==== Bulk Indexing Usage +===== Bulk indexing usage For example, the update settings API can be used to dynamically change the index from being more performant for bulk indexing, and then move it @@ -86,16 +132,17 @@ POST /twitter/_forcemerge?max_num_segments=5 // CONSOLE // TEST[continued] -[float] [[update-settings-analysis]] -==== Updating Index Analysis +===== Update index analysis -It is also possible to define new <> for the index. -But it is required to <> the index -first and <> it after the changes are made. 
+You can only define new analyzers on closed indices.
+
+To add an analyzer,
+you must close the index,
+define the analyzer,
+and reopen the index.
+For example,
+the following commands add the `content` analyzer to `myindex`:
[source,js]
--------------------------------------------------
diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc
index f955f25bdc2fb..e0f5254f8f0cd 100644
--- a/docs/reference/ingest/ingest-node.asciidoc
+++ b/docs/reference/ingest/ingest-node.asciidoc
@@ -839,6 +839,7 @@ See {plugins}/ingest.html[Ingest plugins] for information about the available in
include::processors/append.asciidoc[]
include::processors/bytes.asciidoc[]
+include::processors/circle.asciidoc[]
include::processors/convert.asciidoc[]
include::processors/date.asciidoc[]
include::processors/date-index-name.asciidoc[]
diff --git a/docs/reference/ingest/processors/circle.asciidoc b/docs/reference/ingest/processors/circle.asciidoc
new file mode 100644
index 0000000000000..97120fe918154
--- /dev/null
+++ b/docs/reference/ingest/processors/circle.asciidoc
@@ -0,0 +1,165 @@
+[role="xpack"]
+[testenv="basic"]
+[[ingest-circle-processor]]
+=== Circle Processor
+Converts circle definitions of shapes to regular polygons which approximate them.
+
+[[circle-processor-options]]
+.Circle Processor Options
+[options="header"]
+|======
+| Name | Required | Default | Description
+| `field` | yes | - | The field containing the circle to convert, either a circle string in WKT format or a GeoJSON circle object
+| `target_field` | no | `field` | The field to assign the polygon shape to, by default `field` is updated in-place
+| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document
+| `error_distance` | yes | - | The difference between the resulting inscribed distance from center to side and the circle's radius (measured in meters for `geo_shape`, unit-less for `shape`)
+| `shape_type` | yes | - | Which field mapping type is to be used when processing the circle: `geo_shape` or `shape`
+include::common-options.asciidoc[]
+|======
+
+
+image:images/spatial/error_distance.png[]
+
+[source,js]
+--------------------------------------------------
+PUT circles
+{
+  "mappings": {
+    "properties": {
+      "circle": {
+        "type": "geo_shape"
+      }
+    }
+  }
+}
+
+PUT _ingest/pipeline/polygonize_circles
+{
+  "description": "translate circle to polygon",
+  "processors": [
+    {
+      "circle": {
+        "field": "circle",
+        "error_distance": 28.0,
+        "shape_type": "geo_shape"
+      }
+    }
+  ]
+}
+--------------------------------------------------
+// CONSOLE
+
+Using the above pipeline, we can attempt to index a document into the `circles` index.
+The circle can be represented as either a WKT circle or a GeoJSON circle. The resulting
+polygon will be represented and indexed using the same format as the input circle. WKT will
+be translated to a WKT polygon, and GeoJSON circles will be translated to GeoJSON polygons.
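+
+If the original circle value should be kept, the polygon can instead be written to a
+separate field by setting `target_field`. The following variation of the pipeline above
+uses an example target field named `circle_polygon` and leaves the `circle` field unchanged:
+
+[source,js]
+--------------------------------------------------
+PUT _ingest/pipeline/polygonize_circles_keep_original
+{
+  "description": "translate circle to polygon, keeping the original circle field",
+  "processors": [
+    {
+      "circle": {
+        "field": "circle",
+        "target_field": "circle_polygon",
+        "error_distance": 28.0,
+        "shape_type": "geo_shape"
+      }
+    }
+  ]
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]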
+ +==== Example: Circle defined in Well Known Text + +In this example a circle defined in WKT format is indexed + +[source,js] +-------------------------------------------------- +PUT circles/_doc/1?pipeline=polygonize_circles +{ + "circle": "CIRCLE (30 10 40)" +} + +GET circles/_doc/1 +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +The response from the above index request: + +[source,js] +-------------------------------------------------- +{ + "found": true, + "_index": "circles", + "_type": "_doc", + "_id": "1", + "_version": 1, + "_seq_no": 22, + "_primary_term": 1, + "_source": { + "circle": "polygon ((30.000365257263184 10.0, 30.000111397193788 10.00034284530941, 29.999706043744222 10.000213571721195, 29.999706043744222 9.999786428278805, 30.000111397193788 9.99965715469059, 30.000365257263184 10.0))" + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term": 1/"_primary_term" : $body._primary_term/] + +==== Example: Circle defined in GeoJSON + +In this example a circle defined in GeoJSON format is indexed + +[source,js] +-------------------------------------------------- +PUT circles/_doc/2?pipeline=polygonize_circles +{ + "circle": { + "type": "circle", + "radius": "40m", + "coordinates": [30, 10] + } +} + +GET circles/_doc/2 +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +The response from the above index request: + +[source,js] +-------------------------------------------------- +{ + "found": true, + "_index": "circles", + "_type": "_doc", + "_id": "2", + "_version": 1, + "_seq_no": 22, + "_primary_term": 1, + "_source": { + "circle": { + "coordinates": [ + [ + [30.000365257263184, 10.0], + [30.000111397193788, 10.00034284530941], + [29.999706043744222, 10.000213571721195], + [29.999706043744222, 9.999786428278805], + [30.000111397193788, 9.99965715469059], + [30.000365257263184, 10.0] + ] + ], + "type": "polygon" + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term": 1/"_primary_term" : $body._primary_term/] + + +==== Notes on Accuracy + +Accuracy of the polygon that represents the circle is defined as `error_distance`. The smaller this +difference is, the closer to a perfect circle the polygon is. + +Below is a table that aims to help capture how the radius of the circle affects the resulting number of sides +of the polygon given different inputs. + +The minimum number of sides is `4` and the maximum is `1000`. + +[[circle-processor-accuracy]] +.Circle Processor Accuracy +[options="header"] +|====== +| error_distance | radius in meters | number of sides of polygon +| 1.00 | 1.0 | 4 +| 1.00 | 10.0 | 14 +| 1.00 | 100.0 | 45 +| 1.00 | 1000.0 | 141 +| 1.00 | 10000.0 | 445 +| 1.00 | 100000.0 | 1000 +|====== diff --git a/docs/reference/mapping/params/similarity.asciidoc b/docs/reference/mapping/params/similarity.asciidoc index 8085adf9110de..8f1ce02fff7ab 100644 --- a/docs/reference/mapping/params/similarity.asciidoc +++ b/docs/reference/mapping/params/similarity.asciidoc @@ -3,7 +3,7 @@ Elasticsearch allows you to configure a scoring algorithm or _similarity_ per field. The `similarity` setting provides a simple way of choosing a similarity -algorithm other than the default `BM25`, such as `TF/IDF`. +algorithm other than the default `BM25`, such as `boolean`. 
Similarities are mostly useful for <> fields, but can also apply to other field types. @@ -20,11 +20,6 @@ configuration are: See {defguide}/pluggable-similarites.html[Pluggable Similarity Algorithms] for more information. -`classic`:: - The TF/IDF algorithm which used to be the default in Elasticsearch and - Lucene. See {defguide}/practical-scoring-function.html[Lucene’s Practical Scoring Function] - for more information. - `boolean`:: A simple boolean similarity, which is used when full-text ranking is not needed and the score should only be based on whether the query terms match or not. diff --git a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc index 9e31b4b3e25bf..85fb6d23e3242 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc @@ -72,6 +72,11 @@ those same roles. `indices`:: (Required, array) An array of index names. Wildcards are supported. For example: `["it_ops_metrics", "server*"]`. ++ +-- +NOTE: If any indices are in remote clusters then `cluster.remote.connect` must +not be set to `false` on any ML node. +-- `job_id`:: (Required, string) A numerical character string that uniquely identifies the diff --git a/docs/reference/ml/df-analytics/apis/dfanalyticsresources.asciidoc b/docs/reference/ml/df-analytics/apis/dfanalyticsresources.asciidoc index 34535ba274bdf..7ba480eb3d256 100644 --- a/docs/reference/ml/df-analytics/apis/dfanalyticsresources.asciidoc +++ b/docs/reference/ml/df-analytics/apis/dfanalyticsresources.asciidoc @@ -19,6 +19,15 @@ `analyzed_fields` is not set, only the relevant fields will be included. For example all the numeric fields for {oldetection}. + `analyzed_fields.includes`::: + (array) An array of strings that defines the fields that will be included in + the analysis. + + `analyzed_fields.excludes`::: + (array) An array of strings that defines the fields that will be excluded + from the analysis. + + [source,js] -------------------------------------------------- PUT _ml/data_frame/analytics/loganalytics @@ -46,11 +55,15 @@ PUT _ml/data_frame/analytics/loganalytics (Optional, string) A description of the job. `dest`:: - (object) The destination configuration of the analysis. The `index` property - (string) is the name of the index in which to store the results of the - {dfanalytics-job}. The `results_field` (string) property defines the name of - the field in which to store the results of the analysis. The default value is - `ml`. + (object) The destination configuration of the analysis. + + `index`::: + (Required, string) Defines the _destination index_ to store the results of + the {dfanalytics-job}. + + `results_field`::: + (Optional, string) Defines the name of the field in which to store the + results of the analysis. Default to `ml`. `id`:: (string) The unique identifier for the {dfanalytics-job}. This identifier can @@ -67,14 +80,20 @@ PUT _ml/data_frame/analytics/loganalytics that setting. For more information, see <>. `source`:: - (object) The source configuration, consisting of `index` (array) which is an - array of index names on which to perform the analysis. It can be a single - index or index pattern as well as an array of indices or patterns. Optionally, - `source` can have a `query` (object) property. The {es} query domain-specific - language (DSL). This value corresponds to the query object in an {es} search - POST body. 
All the options that are supported by {es} can be used, as this - object is passed verbatim to {es}. By default, this property has the following - value: `{"match_all": {}}`. + (object) The source configuration consisting an `index` and optionally a + `query` object. + + `index`::: + (Required, string or array) Index or indices on which to perform the + analysis. It can be a single index or index pattern as well as an array of + indices or patterns. + + `query`::: + (Optional, object) The {es} query domain-specific language + (<>). This value corresponds to the query object in an {es} + search POST body. All the options that are supported by {es} can be used, + as this object is passed verbatim to {es}. By default, this property has + the following value: `{"match_all": {}}`. [[dfanalytics-types]] ==== Analysis objects diff --git a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc index d695876c9f0f3..b46de02ca0bf5 100644 --- a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc @@ -16,6 +16,7 @@ experimental[] `PUT _ml/data_frame/analytics/` + [[ml-put-dfanalytics-prereq]] ==== {api-prereq-title} @@ -25,6 +26,7 @@ also have `read` and `view_index_metadata` privileges on the source index and more information, see {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. + [[ml-put-dfanalytics-desc]] ==== {api-description-title} @@ -45,6 +47,7 @@ If the destination index already exists, then it will be use as is. This makes it possible to set up the destination index in advance with custom settings and mappings. + [[ml-put-dfanalytics-path-params]] ==== {api-path-parms-title} @@ -54,6 +57,7 @@ and mappings. characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. + [[ml-put-dfanalytics-request-body]] ==== {api-request-body-title} @@ -62,17 +66,32 @@ and mappings. index. For example: `outlier_detection`. See <>. `analyzed_fields`:: - (Optional, object) You can specify both `includes` and/or `excludes` patterns. If - `analyzed_fields` is not set, only the relevant fields will be included. For - example, all the numeric fields for {oldetection}. + (Optional, object) You can specify both `includes` and/or `excludes` patterns. + If `analyzed_fields` is not set, only the relevant fields will be included. + For example, all the numeric fields for {oldetection}. + + `analyzed_fields.includes`::: + (Optional, array) An array of strings that defines the fields that will be + included in the analysis. + + `analyzed_fields.excludes`::: + (Optional, array) An array of strings that defines the fields that will be + excluded from the analysis. `description`:: (Optional, string) A description of the job. `dest`:: (Required, object) The destination configuration, consisting of `index` and - optionally `results_field` (`ml` by default). See - <>. + optionally `results_field` (`ml` by default). + + `index`::: + (Required, string) Defines the _destination index_ to store the results of + the {dfanalytics-job}. + + `results_field`::: + (Optional, string) Defines the name of the field in which to store the + results of the analysis. Default to `ml`. `model_memory_limit`:: (Optional, string) The approximate maximum amount of memory resources that are @@ -84,8 +103,20 @@ and mappings. 
`source`:: (Required, object) The source configuration, consisting of `index` and - optionally a `query`. See - <>. + optionally a `query`. + + `index`::: + (Required, string or array) Index or indices on which to perform the + analysis. It can be a single index or index pattern as well as an array of + indices or patterns. + + `query`::: + (Optional, object) The {es} query domain-specific language + (<>). This value corresponds to the query object in an {es} + search POST body. All the options that are supported by {es} can be used, + as this object is passed verbatim to {es}. By default, this property has + the following value: `{"match_all": {}}`. + [[ml-put-dfanalytics-example]] ==== {api-examples-title} @@ -113,6 +144,7 @@ PUT _ml/data_frame/analytics/loganalytics // CONSOLE // TEST[setup:setup_logdata] + The API returns the following result: [source,js] diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index a996a253c4826..f281c2112fecf 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -1,4 +1,9 @@ +tag::index-alias[] +Comma-separated list or wildcard expression of index alias names +used to limit the request. +end::index-alias[] + tag::allow-no-indices[] `allow_no_indices`:: (Optional, boolean) If `true`, the request returns an error if a wildcard @@ -7,11 +12,34 @@ parameter also applies to <> that point to a missing or closed index. end::allow-no-indices[] +tag::analyzer[] +analyzer`:: +(Optional, string) Analyzer to use for the query string. +end::analyzer[] + +tag::analyze_wildcard[] +`analyze_wildcard`:: +(Optional, boolean) If `true`, wildcard and prefix queries are +analyzed. Defaults to `false`. +end::analyze_wildcard[] + tag::bytes[] `bytes`:: (Optional, <>) Unit used to display byte values. end::bytes[] +tag::default_operator[] +`default_operator`:: +(Optional, string) The default operator for query string query: AND or OR. +Defaults to `OR`. +end::default_operator[] + +tag::df[] +`df`:: +(Optional, string) Field to use as default where no field prefix is +given in the query string. +end::df[] + tag::expand-wildcards[] `expand_wildcards`:: + @@ -33,22 +61,19 @@ Wildcard expressions are not accepted. -- end::expand-wildcards[] -tag::cat-h[] -`h`:: -(Optional, string) Comma-separated list of column names to display. -end::cat-h[] - tag::flat-settings[] `flat_settings`:: (Optional, boolean) If `true`, returns settings in flat format. Defaults to `false`. end::flat-settings[] -tag::help[] -`help`:: -(Optional, boolean) If `true`, the response returns help information. Defaults -to `false`. -end::help[] +tag::index-alias-filter[] +<> +used to limit the index alias. ++ +If specified, +the index alias only applies to documents returned by the filter. +end::index-alias-filter[] tag::http-format[] `format`:: @@ -57,6 +82,34 @@ https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html[HTTP accept header]. Valid values include JSON, YAML, etc. end::http-format[] +tag::from[] +`from`:: +(Optional, integer) Starting document offset. Defaults to `0`. +end::from[] + +tag::cat-h[] +`h`:: +(Optional, string) Comma-separated list of column names to display. +end::cat-h[] + +tag::help[] +`help`:: +(Optional, boolean) If `true`, the response returns help information. Defaults +to `false`. +end::help[] + +tag::if_primary_term[] +`if_primary_term`:: +(Optional, integer) Only perform the operation if the document has +this primary term. See <>. 
+end::if_primary_term[] + +tag::if_seq_no[] +`if_seq_no`:: +(Optional, integer) Only perform the operation if the document has this +sequence number. See <>. +end::if_seq_no[] + tag::include-defaults[] `include_defaults`:: (Optional, string) If `true`, return all default settings in the response. @@ -88,6 +141,19 @@ tag::index[] used to limit the request. end::index[] +tag::index-template[] +``:: +(Required, string) +Comma-separated list or wildcard expression of index template names +used to limit the request. +end::index-template[] + +tag::lenient[] +`lenient`:: +(Optional, boolean) If `true`, format-based query failures (such as +providing text to a numeric field) will be ignored. Defaults to `false`. +end::lenient[] + tag::local[] `local`:: (Optional, boolean) If `true`, the request retrieves information from the local @@ -95,6 +161,12 @@ node only. Defaults to `false`, which means information is retrieved from the master node. end::local[] +tag::max_docs[] +`max_docs`:: +(Optional, integer) Maximum number of documents to process. Defaults to all +documents. +end::max_docs[] + tag::name[] ``:: (Optional, string) Comma-separated list of alias names to return. @@ -106,68 +178,118 @@ tag::node-id[] returned information. end::node-id[] -tag::cat-s[] -`s`:: -(Optional, string) Comma-separated list of column names or column aliases used -to sort the response. -end::cat-s[] - -tag::cat-v[] -`v`:: -(Optional, boolean) If `true`, the response includes column headings. Defaults -to `false`. -end::cat-v[] - -tag::doc-pipeline[] +tag::pipeline[] `pipeline`:: (Optional, string) ID of the pipeline to use to preprocess incoming documents. -end::doc-pipeline[] +end::pipeline[] + +tag::preference[] +`preference`:: + (Optional, string) Specifies the node or shard the operation should be + performed on. Random by default. +end::preference[] -tag::doc-refresh[] +tag::search-q[] +`q`:: +(Optional, string) Query in the Lucene query string syntax. +end::search-q[] + +tag::refresh[] `refresh`:: (Optional, enum) If `true`, {es} refreshes the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. Default: `false`. -end::doc-refresh[] +end::refresh[] -tag::doc-seq-no[] -`if_seq_no`:: -(Optional, integer) Only perform the operation if the document has this -sequence number. See <>. -end::doc-seq-no[] +tag::request_cache[] +`request_cache`:: +(Optional, boolean) Specifies if the request cache should be used for this +request. Defaults to the index-level setting. +end::request_cache[] -tag::doc-primary-term[] -`if_primary_term`:: -(Optional, integer) Only perform the operation if the document has -this primary term. See <>. -end::doc-primary-term[] +tag::requests_per_second[] +`requests_per_second`:: + (Optional, integer) The throttle for this request in sub-requests per second. + -1 means no throttle. Defaults to 0. +end::requests_per_second[] -tag::doc-routing[] +tag::routing[] `routing`:: (Optional, string) Target the specified primary shard. -end::doc-routing[] - -tag::doc-version[] -`version`:: -(Optional, integer) Explicit version number for concurrency control. -The specified version must match the current version of the document for the -request to succeed. -end::doc-version[] +end::routing[] -tag::doc-version-type[] -`version_type`:: -(Optional, enum) Specific version type: `internal`, `external`, -`external_gte`, `force`. 
-end::doc-version-type[] +tag::cat-s[] +`s`:: +(Optional, string) Comma-separated list of column names or column aliases used +to sort the response. +end::cat-s[] -tag::doc-wait-for-active-shards[] -`wait_for_active_shards`:: -(Optional, string) The number of shard copies that must be active before -proceeding with the operation. Set to `all` or any positive integer up -to the total number of shards in the index (`number_of_replicas+1`). -Default: 1, the primary shard. -end::doc-wait-for-active-shards[] +tag::scroll[] +`scroll`:: +(Optional, <>) Specifies how long a consistent view of +the index should be maintained for scrolled search. +end::scroll[] + +tag::scroll_size[] +`scroll_size`:: +(Optional, integer) Size of the scroll request that powers the operation. +Defaults to 100. +end::scroll_size[] + +tag::search_timeout[] +`search_timeout`:: +(Optional, <> Explicit timeout for each search +request. Defaults to no timeout. +end::search_timeout[] + +tag::search_type[] +`search_type`:: +(Optional, string) The type of the search operation. Available options: +* `query_then_fetch` +* `dfs_query_then_fetch` +end::search_type[] + +tag::slices[] +`slices`:: +(Optional, integer) The number of slices this task should be divided into. +Defaults to 1 meaning the task isn't sliced into subtasks. +end::slices[] + +tag::sort[] +`sort`:: +(Optional, string) A comma-separated list of : pairs. +end::sort[] + +tag::source[] +`_source`:: +(Optional, string) True or false to return the `_source` field or not, or a +list of fields to return. +end::source[] + +tag::source_excludes[] +`_source_excludes`:: +(Optional, string) A list of fields to exclude from the returned `_source` +field. +end::source_excludes[] + +tag::source_includes[] +`_source_includes`:: +(Optional, string) A list of fields to extract and return from the `_source` +field. +end::source_includes[] + +tag::stats[] +`stats`:: + (Optional, string) Specific `tag` of the request for logging and statistical + purposes. +end::stats[] + +tag::terminate_after[] +`terminate_after`:: +(Optional, integer) The maximum number of documents to collect for each shard, +upon reaching which the query execution will terminate early. +end::terminate_after[] tag::timeoutparms[] @@ -186,3 +308,53 @@ expires, the request fails and returns an error. Defaults to `30s`. end::master-timeout[] end::timeoutparms[] + +tag::cat-v[] +`v`:: +(Optional, boolean) If `true`, the response includes column headings. Defaults +to `false`. +end::cat-v[] + +tag::version[] +`version`:: +(Optional, boolean) If `true`, returns the document version as part of a hit. +end::version[] + +tag::index-routing[] +`routing`:: +(Optional, string) +Custom <> +used to route operations to a specific shard. +end::index-routing[] + +tag::doc-version[] +`version`:: +(Optional, integer) Explicit version number for concurrency control. +The specified version must match the current version of the document for the +request to succeed. +end::doc-version[] + +tag::version_type[] +`version_type`:: +(Optional, enum) Specific version type: `internal`, `external`, +`external_gte`, `force`. +end::version_type[] + +tag::wait_for_active_shards[] +`wait_for_active_shards`:: ++ +-- +(Optional, string) The number of shard copies that must be active before +proceeding with the operation. Set to `all` or any positive integer up +to the total number of shards in the index (`number_of_replicas+1`). +Default: 1, the primary shard. + +See <>. 
+-- +end::wait_for_active_shards[] + +tag::wait_for_completion[] +`wait_for_completion`:: +(Optional, boolean) Should the request block until the operation is +complete. Defaults to `true`. +end::wait_for_completion[] diff --git a/docs/reference/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc index d91be315e86e3..224e77bd1e0cd 100644 --- a/docs/reference/rest-api/info.asciidoc +++ b/docs/reference/rest-api/info.asciidoc @@ -71,6 +71,10 @@ Example response: "available" : true, "enabled" : true }, + "analytics" : { + "available" : true, + "enabled" : true + }, "flattened" : { "available" : true, "enabled" : true diff --git a/docs/reference/search/uri-request.asciidoc b/docs/reference/search/uri-request.asciidoc index d4f058f3c4d74..86e2ee4551b49 100644 --- a/docs/reference/search/uri-request.asciidoc +++ b/docs/reference/search/uri-request.asciidoc @@ -1,10 +1,7 @@ [[search-uri-request]] === URI Search -A search request can be executed purely using a URI by providing request -parameters. Not all search options are exposed when executing a search -using this mode, but it can be handy for quick "curl tests". Here is an -example: +Specifies search criteria as query parameters in the request URI. [source,js] -------------------------------------------------- @@ -13,7 +10,115 @@ GET twitter/_search?q=user:kimchy // CONSOLE // TEST[setup:twitter] -And here is a sample response: + +[[search-uri-request-api-request]] +==== {api-request-title} + +`GET //_search?q=` + + +[[search-uri-request-api-desc]] +==== {api-description-title} + +You can use query parameters to define your search criteria directly in the +request URI, rather than in the request body. Request URI searches do not +support the full {es} Query DSL, but are handy for testing. + + +[[search-uri-request-api-path-params]] +==== {api-path-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index] + + +[[search-uri-request-api-query-params]] +==== {api-query-parms-title} + +`allow_partial_search_results`:: + (Optional, boolean) Set to `false` to fail the request if only partial results + are available. Defaults to `true`, which returns partial results in the event + of timeouts or partial failures You can override the default behavior for all + requests by setting `search.default_allow_partial_results` to `false` in the + cluster settings. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=analyze_wildcard] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=analyzer] + +`batched_reduce_size`:: + (Optional, integer) The number of shard results that should be reduced at once + on the coordinating node. This value should be used as a protection mechanism + to reduce the memory overhead per search request if the potential number of + shards in the request can be large. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=default_operator] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=df] + +`explain`:: + (Optional, string) For each hit, include an explanation of how the score was + computed. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=from] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=lenient] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=search-q] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=search_type] + +`size`:: + (Optional, integer) The number of hits to return. Defaults to `10`. 
+ +include::{docdir}/rest-api/common-parms.asciidoc[tag=source] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=source_excludes] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=source_includes] + +`stored_fields`:: + (Optional, string) The selective stored fields of the document to return for + each hit, comma delimited. Not specifying any value will cause no fields to + return. + +`sort`:: + (Optional, string) Sorting to perform. Can either be in the form of + `fieldName`, or `fieldName:asc`/`fieldName:desc`. The fieldName can either be + an actual field within the document, or the special `_score` name to indicate + sorting based on scores. There can be several `sort` parameters (order is + important). + +`track_scores`:: + (Optional, boolean) When sorting, set to `true` in order to still track scores + and return them as part of each hit. + +`track_total_hits`:: + (Optional, integer) Defaults to `10,000`. Set to `false` in order to disable + the tracking of the total number of hits that match the query. It also accepts + an integer which in this case represents the number of hits to count + accurately. (See the <> + documentation for more details). + +`timeout`:: + (Optional, <>) A search timeout, bounding the search + request to be executed within the specified time value and bail with the hits + accumulated up to that point when expired. Defaults to no timeout. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=terminate_after] + + +[[search-uri-request-api-example]] +==== {api-examples-title} + +[source,js] +-------------------------------------------------- +GET twitter/_search?q=user:kimchy +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] + + +The API returns the following response: [source,js] -------------------------------------------------- @@ -50,86 +155,3 @@ And here is a sample response: } -------------------------------------------------- // TESTRESPONSE[s/"took": 62/"took": "$body.took"/] - -[float] -==== Parameters - -The parameters allowed in the URI are: - -[cols="<,<",options="header",] -|======================================================================= -|Name |Description -|`q` |The query string (maps to the `query_string` query, see -<> for more details). - -|`df` |The default field to use when no field prefix is defined within the -query. - -|`analyzer` |The analyzer name to be used when analyzing the query string. - -|`analyze_wildcard` |Should wildcard and prefix queries be analyzed or -not. Defaults to `false`. - -|`batched_reduce_size` | The number of shard results that should be reduced -at once on the coordinating node. This value should be used as a protection -mechanism to reduce the memory overhead per search request if the potential -number of shards in the request can be large. - -|`default_operator` |The default operator to be used, can be `AND` or -`OR`. Defaults to `OR`. - -|`lenient` |If set to true will cause format based failures (like -providing text to a numeric field) to be ignored. Defaults to false. - -|`explain` |For each hit, contain an explanation of how scoring of the -hits was computed. - -|`_source`|Set to `false` to disable retrieval of the `_source` field. You can also retrieve -part of the document by using `_source_includes` & `_source_excludes` (see the <> -documentation for more details) - -|`stored_fields` |The selective stored fields of the document to return for each hit, -comma delimited. Not specifying any value will cause no fields to return. 
- -|`sort` |Sorting to perform. Can either be in the form of `fieldName`, or -`fieldName:asc`/`fieldName:desc`. The fieldName can either be an actual -field within the document, or the special `_score` name to indicate -sorting based on scores. There can be several `sort` parameters (order -is important). - -|`track_scores` |When sorting, set to `true` in order to still track -scores and return them as part of each hit. - -|`track_total_hits` |Defaults to `10,000`. Set to `false` in order to disable the tracking -of the total number of hits that match the query. -It also accepts an integer which in this case represents the number of -hits to count accurately. -(See the <> documentation -for more details). - -|`timeout` |A search timeout, bounding the search request to be executed -within the specified time value and bail with the hits accumulated up to -that point when expired. Defaults to no timeout. - -|`terminate_after` |The maximum number of documents to collect for -each shard, upon reaching which the query execution will terminate early. -If set, the response will have a boolean field `terminated_early` to -indicate whether the query execution has actually terminated_early. -Defaults to no terminate_after. - -|`from` |The starting from index of the hits to return. Defaults to `0`. - -|`size` |The number of hits to return. Defaults to `10`. - -|`search_type` |The type of the search operation to perform. Can be -`dfs_query_then_fetch` or `query_then_fetch`. -Defaults to `query_then_fetch`. See -<> for -more details on the different types of search that can be performed. - -|`allow_partial_search_results` |Set to `false` to return an overall failure if the request would produce -partial results. Defaults to true, which will allow partial results in the case of timeouts -or partial failures. This default can be controlled using the cluster-level setting -`search.default_allow_partial_results`. -|======================================================================= diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index a70ed45ad3731..bcde849402086 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -855,6 +855,17 @@ Defaults to `20m`. Specifies the maximum number of user entries that the cache can contain. Defaults to `100000`. +`delegation.enabled`:: +Generally, in order for the clients to be authenticated by the PKI realm they +must connect directly to {es}. That is, they must not pass through proxies +which terminate the TLS connection. In order to allow for a *trusted* and +*smart* proxy, such as Kibana, to sit before {es} and terminate TLS +connections, but still allow clients to be authenticated on {es} by this realm, +you need to toggle this to `true`. Defaults to `false`. If delegation is +enabled, then either `truststore.path` or `certificate_authorities` setting +must be defined. For more details, see <>. + [[ref-saml-settings]] [float] ===== SAML realm settings diff --git a/docs/reference/setup/install/deb.asciidoc b/docs/reference/setup/install/deb.asciidoc index feebc0cb79426..12c240b17db11 100644 --- a/docs/reference/setup/install/deb.asciidoc +++ b/docs/reference/setup/install/deb.asciidoc @@ -238,6 +238,12 @@ locations for a Debian-based system: | /var/lib/elasticsearch | path.data +| jdk + | The bundled Java Development Kit used to run Elasticsearch. Can + be overriden by setting the `JAVA_HOME` environment variable. 
+ | /usr/share/elasticsearch/jdk + d| + | logs | Log files location. | /var/log/elasticsearch diff --git a/docs/reference/setup/install/rpm.asciidoc b/docs/reference/setup/install/rpm.asciidoc index 9612781933860..1917d52c20923 100644 --- a/docs/reference/setup/install/rpm.asciidoc +++ b/docs/reference/setup/install/rpm.asciidoc @@ -225,6 +225,12 @@ locations for an RPM-based system: | /var/lib/elasticsearch | path.data +| jdk + | The bundled Java Development Kit used to run Elasticsearch. Can + be overridden by setting the `JAVA_HOME` environment variable. + | /usr/share/elasticsearch/jdk + d| + | logs | Log files location. | /var/log/elasticsearch diff --git a/docs/reference/sql/endpoints/client-apps/dbvis.asciidoc b/docs/reference/sql/endpoints/client-apps/dbvis.asciidoc index 2dd9f7aaf101d..ba5fea9375dd2 100644 --- a/docs/reference/sql/endpoints/client-apps/dbvis.asciidoc +++ b/docs/reference/sql/endpoints/client-apps/dbvis.asciidoc @@ -12,6 +12,7 @@ IMPORTANT: Elastic does not endorse, promote or provide support for this applica ==== Prerequisites +* DbVisualizer 10.0.21 or higher * {es-sql} <> ==== Add {es} JDBC driver @@ -20,20 +21,23 @@ Add the {es} JDBC driver to DbVisualizer through *Tools* > *Driver Manager*: image:images/sql/client-apps/dbvis-1-driver-manager.png[] -Create a new driver entry through *Driver* > *Create Driver* entry and add the JDBC driver in the files panel -through the buttons on the right. Once specify, the driver class and its version should be automatically picked up - one can force the refresh through the *Find driver in listed locations* button, the second from the bottom on the right hand side: - +Select *Elasticsearch* driver name from the left sidebar and add the JDBC driver in the files panel through the buttons on the right; if there is no such driver name, check the DbVisualizer version or create a new driver entry through *Driver* > *Create Driver*. + image:images/sql/client-apps/dbvis-2-driver.png[] +Once specified, the driver class and its version should be automatically picked up - one can force the refresh through the *Find driver in listed locations* button, the second from the bottom on the right hand side: + +image:images/sql/client-apps/dbvis-3-add-driver.png[] + ==== Create a new connection Once the {es} driver is in place, create a new connection: -image:images/sql/client-apps/dbvis-3-new-conn.png[] +image:images/sql/client-apps/dbvis-4-new-conn.png[] One can use the wizard or add the settings all at once: -image:images/sql/client-apps/dbvis-4-conn-props.png[] +image:images/sql/client-apps/dbvis-5-conn-props.png[] Press *Connect* and the driver version (as that of the cluster) should show up under *Connection Message*. @@ -41,4 +45,4 @@ Press *Connect* and the driver version (as that of the cluster) should show up u The setup is done.
DbVisualizer can be used to run queries against {es} and explore its content: -image:images/sql/client-apps/dbvis-5-data.png[] +image:images/sql/client-apps/dbvis-6-data.png[] diff --git a/docs/reference/sql/endpoints/client-apps/workbench.asciidoc b/docs/reference/sql/endpoints/client-apps/workbench.asciidoc index 2891b542900cf..061a93dcae7f7 100644 --- a/docs/reference/sql/endpoints/client-apps/workbench.asciidoc +++ b/docs/reference/sql/endpoints/client-apps/workbench.asciidoc @@ -12,6 +12,7 @@ IMPORTANT: Elastic does not endorse, promote or provide support for this applica ==== Prerequisites +* SQL Workbench/J build 125 or higher * {es-sql} <> ==== Add {es} JDBC driver @@ -20,15 +21,19 @@ Add the {es} JDBC driver to SQL Workbench/J through *Manage Drivers* either from image:images/sql/client-apps/workbench-1-manage-drivers.png[] -Add a new entry to the list through the blank page button in the upper left corner. Add the JDBC jar, provide a name and click on the magnifier button to have the driver *Classname* picked-up automatically: +Select *Elasticsearch* profile from the left-hand side (if it is missing check the SQL Workbench/J version or add a new entry to the list through the blank page button in the upper left corner): -image:images/sql/client-apps/workbench-2-add-driver.png[] +image:images/sql/client-apps/workbench-2-select-driver.png[] + +Add the JDBC jar (if the driver name hasn't been picked up already, click on the magnifier button): + +image:images/sql/client-apps/workbench-3-add-jar.png[] ==== Create a new connection profile With the driver configured, create a new connection profile through *File* > *Connect Window* (or Alt+C shortcut): -image:images/sql/client-apps/workbench-3-connection.png[] +image:images/sql/client-apps/workbench-4-connection.png[] Select the previously configured driver and set the URL of your cluster using the JDBC syntax. Verify the connection through the *Test* button - a confirmation window should appear that everything is properly configured. @@ -39,4 +44,4 @@ The setup is complete. SQL Workbench/J is ready to talk to {es} through SQL: click on the profile created to execute statements or explore the data: -image:images/sql/client-apps/workbench-4-data.png[] +image:images/sql/client-apps/workbench-5-data.png[] diff --git a/docs/reference/sql/functions/conditional.asciidoc b/docs/reference/sql/functions/conditional.asciidoc index 0206115c182c1..630943f42fe20 100644 --- a/docs/reference/sql/functions/conditional.asciidoc +++ b/docs/reference/sql/functions/conditional.asciidoc @@ -248,7 +248,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[ifNullReturnSecond] ---- [[sql-functions-conditional-iif]] -==== `IFF` +==== `IIF` .Synopsis: [source, sql] diff --git a/docs/reference/upgrade.asciidoc b/docs/reference/upgrade.asciidoc index 3bd35197dcd5f..0509f2297522e 100644 --- a/docs/reference/upgrade.asciidoc +++ b/docs/reference/upgrade.asciidoc @@ -8,7 +8,46 @@ process so upgrading does not interrupt service. Rolling upgrades are supported: * Between minor versions * From 5.6 to 6.8 -* From 6.8 to {version} +* From 6.8 to {prev-major-version} +* From {prev-major-version} to {version} + + +The following table shows the recommended upgrade paths to {version}. + +[cols="<1m,3",options="header",] +|==== +|Upgrade from +|Recommended upgrade path to {version} + +|{prev-major-version} +|<> to {version} + +|7.0–7.3 +a| +. 
https://www.elastic.co/guide/en/elasticsearch/reference/{prev-major-version}/rolling-upgrades.html[Rolling upgrade] to {prev-major-version} +. <> to {version} + +|6.8 +a| +. https://www.elastic.co/guide/en/elasticsearch/reference/{prev-major-version}/rolling-upgrades.html[Rolling upgrade] to {prev-major-version} +. <> to {version} + +|6.0–6.7 +a| + +. https://www.elastic.co/guide/en/elasticsearch/reference/6.8/rolling-upgrades.html[Rolling upgrade] to 6.8 +. https://www.elastic.co/guide/en/elasticsearch/reference/{prev-major-version}/rolling-upgrades.html[Rolling upgrade] to {prev-major-version} +. <> to {version} +|==== + + +[WARNING] +==== +The following upgrade paths are *not* supported: + +* 6.8 to 7.0. +* 6.7 to 7.1.–{prev-major-version}. +==== {es} can read indices created in the previous major version. If you have indices created in 5.x or before, you must reindex or delete them diff --git a/docs/reference/upgrade/set-paths-tip.asciidoc b/docs/reference/upgrade/set-paths-tip.asciidoc index ee1a7ba73ecd5..e7b767fff1333 100644 --- a/docs/reference/upgrade/set-paths-tip.asciidoc +++ b/docs/reference/upgrade/set-paths-tip.asciidoc @@ -2,8 +2,7 @@ ================================================ When you extract the zip or tarball packages, the `elasticsearch-n.n.n` -directory contains the {es} `config`, `data`, `logs` and -`plugins` directories. +directory contains the {es} `config`, `data`, and `logs` directories. We recommend moving these directories out of the {es} directory so that there is no chance of deleting them when you upgrade {es}. diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index d2038e2e2bfde..9c744d1602c65 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -66,7 +66,7 @@ import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.RangeFieldMapper; -import org.elasticsearch.index.mapper.RangeFieldMapper.RangeType; +import org.elasticsearch.index.mapper.RangeType; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.BoostingQueryBuilder; import org.elasticsearch.index.query.ConstantScoreQueryBuilder; diff --git a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java index a27b091cfc037..6e759756a7289 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java @@ -82,7 +82,7 @@ public class URLRepository extends BlobStoreRepository { */ public URLRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, ThreadPool threadPool) { - super(metadata, environment.settings(), namedXContentRegistry, threadPool, BlobPath.cleanPath()); + super(metadata, namedXContentRegistry, threadPool, BlobPath.cleanPath()); if (URL_SETTING.exists(metadata.settings()) == false && REPOSITORIES_URL_SETTING.exists(environment.settings()) == false) { throw new RepositoryException(metadata.name(), "missing url"); diff --git a/modules/transport-netty4/build.gradle 
b/modules/transport-netty4/build.gradle index ff21cfa30d357..62e2d6aa2bf86 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -17,6 +17,8 @@ * under the License. */ +import org.elasticsearch.gradle.test.RestIntegTestTask + /* TODOs: * fix permissions such that only netty4 can open sockets etc? @@ -51,9 +53,6 @@ test { * other if we allow them to set the number of available processors as it's set-once in Netty. */ systemProperty 'es.set.netty.runtime.available.processors', 'false' - - // Disable direct buffer pooling as it is disabled by default in Elasticsearch - systemProperty 'io.netty.allocator.numDirectArenas', '0' } integTestRunner { @@ -62,10 +61,23 @@ integTestRunner { * other if we allow them to set the number of available processors as it's set-once in Netty. */ systemProperty 'es.set.netty.runtime.available.processors', 'false' +} - // Disable direct buffer pooling as it is disabled by default in Elasticsearch - systemProperty 'io.netty.allocator.numDirectArenas', '0' +TaskProvider pooledTest = tasks.register("pooledTest", Test) { + include '**/*Tests.class' + systemProperty 'es.set.netty.runtime.available.processors', 'false' + systemProperty 'io.netty.allocator.type', 'pooled' +} +// TODO: we can't use task avoidance here because RestIntegTestTask does the testcluster creation +RestIntegTestTask pooledIntegTest = tasks.create("pooledIntegTest", RestIntegTestTask) { + runner { + systemProperty 'es.set.netty.runtime.available.processors', 'false' + } +} +testClusters.pooledIntegTest { + systemProperty 'io.netty.allocator.type', 'pooled' } +check.dependsOn(pooledTest, pooledIntegTest) thirdPartyAudit { ignoreMissingClasses ( diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/CopyBytesSocketChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/CopyBytesSocketChannel.java index dd7ba05601041..230611e27f51a 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/CopyBytesSocketChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/CopyBytesSocketChannel.java @@ -40,6 +40,7 @@ import io.netty.channel.socket.nio.NioSocketChannel; import org.elasticsearch.common.SuppressForbidden; +import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.SocketChannel; @@ -74,7 +75,6 @@ public CopyBytesSocketChannel() { @Override protected void doWrite(ChannelOutboundBuffer in) throws Exception { - SocketChannel ch = javaChannel(); int writeSpinCount = config().getWriteSpinCount(); do { if (in.isEmpty()) { @@ -99,7 +99,7 @@ protected void doWrite(ChannelOutboundBuffer in) throws Exception { ioBuffer.flip(); int attemptedBytes = ioBuffer.remaining(); - final int localWrittenBytes = ch.write(ioBuffer); + final int localWrittenBytes = writeToSocketChannel(javaChannel(), ioBuffer); if (localWrittenBytes <= 0) { incompleteWrite(true); return; @@ -119,7 +119,7 @@ protected int doReadBytes(ByteBuf byteBuf) throws Exception { final RecvByteBufAllocator.Handle allocHandle = unsafe().recvBufAllocHandle(); allocHandle.attemptedBytesRead(byteBuf.writableBytes()); ByteBuffer ioBuffer = getIoBuffer(); - int bytesRead = javaChannel().read(ioBuffer); + int bytesRead = readFromSocketChannel(javaChannel(), ioBuffer); ioBuffer.flip(); if (bytesRead > 0) { byteBuf.writeBytes(ioBuffer); @@ -127,6 +127,16 @@ protected int doReadBytes(ByteBuf byteBuf) throws Exception { return bytesRead; } + // Protected so that tests can verify behavior and 
simulate partial writes + protected int writeToSocketChannel(SocketChannel socketChannel, ByteBuffer ioBuffer) throws IOException { + return socketChannel.write(ioBuffer); + } + + // Protected so that tests can verify behavior + protected int readFromSocketChannel(SocketChannel socketChannel, ByteBuffer ioBuffer) throws IOException { + return socketChannel.read(ioBuffer); + } + private static ByteBuffer getIoBuffer() { ByteBuffer ioBuffer = CopyBytesSocketChannel.ioBuffer.get(); ioBuffer.clear(); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/CopyBytesSocketChannelTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/CopyBytesSocketChannelTests.java new file mode 100644 index 0000000000000..e94ae94d32dc8 --- /dev/null +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/CopyBytesSocketChannelTests.java @@ -0,0 +1,185 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.transport; + +import io.netty.bootstrap.Bootstrap; +import io.netty.bootstrap.ServerBootstrap; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.buffer.UnpooledByteBufAllocator; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.nio.NioEventLoopGroup; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.nio.channels.SocketChannel; +import java.nio.charset.StandardCharsets; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +public class CopyBytesSocketChannelTests extends ESTestCase { + + private final UnpooledByteBufAllocator alloc = new UnpooledByteBufAllocator(false); + private final AtomicReference accepted = new AtomicReference<>(); + private final AtomicInteger serverBytesReceived = new AtomicInteger(); + private final AtomicInteger clientBytesReceived = new AtomicInteger(); + private final ConcurrentLinkedQueue serverReceived = new ConcurrentLinkedQueue<>(); + private final ConcurrentLinkedQueue clientReceived = new ConcurrentLinkedQueue<>(); + private NioEventLoopGroup eventLoopGroup; + private InetSocketAddress serverAddress; + private Channel serverChannel; + + @Override + @SuppressForbidden(reason = "calls getLocalHost") + public void setUp() throws Exception { + super.setUp(); + 
eventLoopGroup = new NioEventLoopGroup(1); + ServerBootstrap serverBootstrap = new ServerBootstrap(); + serverBootstrap.channel(CopyBytesServerSocketChannel.class); + serverBootstrap.group(eventLoopGroup); + serverBootstrap.option(ChannelOption.ALLOCATOR, alloc); + serverBootstrap.childOption(ChannelOption.ALLOCATOR, alloc); + serverBootstrap.childHandler(new ChannelInitializer<>() { + @Override + protected void initChannel(Channel ch) { + accepted.set((CopyBytesSocketChannel) ch); + ch.pipeline().addLast(new SimpleChannelInboundHandler<>() { + @Override + protected void channelRead0(ChannelHandlerContext ctx, Object msg) { + ByteBuf buffer = (ByteBuf) msg; + serverBytesReceived.addAndGet(buffer.readableBytes()); + serverReceived.add(buffer.retain()); + } + }); + } + }); + + ChannelFuture bindFuture = serverBootstrap.bind(new InetSocketAddress(InetAddress.getLocalHost(), 0)); + assertTrue(bindFuture.await(10, TimeUnit.SECONDS)); + serverAddress = (InetSocketAddress) bindFuture.channel().localAddress(); + bindFuture.isSuccess(); + serverChannel = bindFuture.channel(); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + try { + assertTrue(serverChannel.close().await(10, TimeUnit.SECONDS)); + } finally { + eventLoopGroup.shutdownGracefully().await(10, TimeUnit.SECONDS); + } + } + + public void testSendAndReceive() throws Exception { + final Bootstrap bootstrap = new Bootstrap(); + bootstrap.group(eventLoopGroup); + bootstrap.channel(VerifyingCopyChannel.class); + bootstrap.option(ChannelOption.ALLOCATOR, alloc); + bootstrap.handler(new ChannelInitializer<>() { + @Override + protected void initChannel(Channel ch) { + ch.pipeline().addLast(new SimpleChannelInboundHandler<>() { + @Override + protected void channelRead0(ChannelHandlerContext ctx, Object msg) { + ByteBuf buffer = (ByteBuf) msg; + clientBytesReceived.addAndGet(buffer.readableBytes()); + clientReceived.add(buffer.retain()); + } + }); + } + }); + + ChannelFuture connectFuture = bootstrap.connect(serverAddress); + connectFuture.await(10, TimeUnit.SECONDS); + assertTrue(connectFuture.isSuccess()); + CopyBytesSocketChannel copyChannel = (CopyBytesSocketChannel) connectFuture.channel(); + ByteBuf clientData = generateData(); + ByteBuf serverData = generateData(); + + try { + assertBusy(() -> assertNotNull(accepted.get())); + int clientBytesToWrite = clientData.readableBytes(); + ChannelFuture clientWriteFuture = copyChannel.writeAndFlush(clientData.retainedSlice()); + clientWriteFuture.await(10, TimeUnit.SECONDS); + assertBusy(() -> assertEquals(clientBytesToWrite, serverBytesReceived.get())); + + int serverBytesToWrite = serverData.readableBytes(); + ChannelFuture serverWriteFuture = accepted.get().writeAndFlush(serverData.retainedSlice()); + assertTrue(serverWriteFuture.await(10, TimeUnit.SECONDS)); + assertBusy(() -> assertEquals(serverBytesToWrite, clientBytesReceived.get())); + + ByteBuf compositeServerReceived = Unpooled.wrappedBuffer(serverReceived.toArray(new ByteBuf[0])); + assertEquals(clientData, compositeServerReceived); + ByteBuf compositeClientReceived = Unpooled.wrappedBuffer(clientReceived.toArray(new ByteBuf[0])); + assertEquals(serverData, compositeClientReceived); + } finally { + clientData.release(); + serverData.release(); + serverReceived.forEach(ByteBuf::release); + clientReceived.forEach(ByteBuf::release); + assertTrue(copyChannel.close().await(10, TimeUnit.SECONDS)); + } + } + + private ByteBuf generateData() { + return Unpooled.wrappedBuffer(randomAlphaOfLength(randomIntBetween(1 
<< 22, 1 << 23)).getBytes(StandardCharsets.UTF_8)); + } + + public static class VerifyingCopyChannel extends CopyBytesSocketChannel { + + public VerifyingCopyChannel() { + super(); + } + + @Override + protected int writeToSocketChannel(SocketChannel socketChannel, ByteBuffer ioBuffer) throws IOException { + assertTrue("IO Buffer must be a direct byte buffer", ioBuffer.isDirect()); + int remaining = ioBuffer.remaining(); + int originalLimit = ioBuffer.limit(); + // If greater than a KB, possibly invoke a partial write. + if (remaining > 1024) { + if (randomBoolean()) { + int bytes = randomIntBetween(remaining / 2, remaining); + ioBuffer.limit(ioBuffer.position() + bytes); + } + } + int written = socketChannel.write(ioBuffer); + ioBuffer.limit(originalLimit); + return written; + } + + @Override + protected int readFromSocketChannel(SocketChannel socketChannel, ByteBuffer ioBuffer) throws IOException { + assertTrue("IO Buffer must be a direct byte buffer", ioBuffer.isDirect()); + return socketChannel.read(ioBuffer); + } + } +} diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 47ee57d3a3e78..d21553c36973a 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -23,7 +23,7 @@ esplugin { } dependencies { - compile 'com.microsoft.azure:azure-storage:8.0.0' + compile 'com.microsoft.azure:azure-storage:8.4.0' compile 'com.microsoft.azure:azure-keyvault-core:1.0.0' compile 'com.google.guava:guava:20.0' compile 'org.apache.commons:commons-lang3:3.4' diff --git a/plugins/repository-azure/licenses/azure-storage-8.0.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-8.0.0.jar.sha1 deleted file mode 100644 index 4e333ad824184..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-8.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6759c16ade4e2a05bc1dfbaf55161b9ed0e78b9 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-8.4.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-8.4.0.jar.sha1 new file mode 100644 index 0000000000000..db3b2baba0644 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-8.4.0.jar.sha1 @@ -0,0 +1 @@ +002c6b7827f06869b8d04880bf913ce4efcc9ad4 \ No newline at end of file diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 08b3bb3b45897..b5c6ed70ad0d2 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -31,7 +31,6 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.env.Environment; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.threadpool.ThreadPool; @@ -76,9 +75,12 @@ public static final class Repository { private final AzureStorageService storageService; private final boolean readonly; - public AzureRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, - AzureStorageService storageService, ThreadPool threadPool) { - super(metadata, environment.settings(), namedXContentRegistry, threadPool, buildBasePath(metadata)); + public AzureRepository( + 
final RepositoryMetaData metadata, + final NamedXContentRegistry namedXContentRegistry, + final AzureStorageService storageService, + final ThreadPool threadPool) { + super(metadata, namedXContentRegistry, threadPool, buildBasePath(metadata)); this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings()); this.storageService = storageService; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java index 809ba9d515834..a9b236a48a02e 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java @@ -57,7 +57,7 @@ public AzureRepositoryPlugin(Settings settings) { public Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry, ThreadPool threadPool) { return Collections.singletonMap(AzureRepository.TYPE, - (metadata) -> new AzureRepository(metadata, env, namedXContentRegistry, azureStoreService, threadPool)); + (metadata) -> new AzureRepository(metadata, namedXContentRegistry, azureStoreService, threadPool)); } @Override diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java index 71f16b1413a01..341a1d1436deb 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; -import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -43,7 +42,7 @@ private AzureRepository azureRepository(Settings settings) { .put(settings) .build(); final AzureRepository azureRepository = new AzureRepository(new RepositoryMetaData("foo", "azure", internalSettings), - TestEnvironment.newEnvironment(internalSettings), NamedXContentRegistry.EMPTY, mock(AzureStorageService.class), + NamedXContentRegistry.EMPTY, mock(AzureStorageService.class), mock(ThreadPool.class)); assertThat(azureRepository.getBlobStore(), is(nullValue())); return azureRepository; diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java index 8e46b305a3350..70c4dcf3a9889 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java @@ -54,7 +54,7 @@ protected GoogleCloudStorageService createStorageService() { public Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry, ThreadPool threadPool) { return Collections.singletonMap(GoogleCloudStorageRepository.TYPE, - metadata -> new GoogleCloudStorageRepository(metadata, env, namedXContentRegistry, this.storageService, threadPool)); + metadata -> new GoogleCloudStorageRepository(metadata, 
namedXContentRegistry, this.storageService, threadPool)); } @Override diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index 6382a537c4682..4b17fd6bef3ea 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.env.Environment; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.threadpool.ThreadPool; @@ -61,10 +60,12 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { private final String bucket; private final String clientName; - GoogleCloudStorageRepository(RepositoryMetaData metadata, Environment environment, - NamedXContentRegistry namedXContentRegistry, - GoogleCloudStorageService storageService, ThreadPool threadPool) { - super(metadata, environment.settings(), namedXContentRegistry, threadPool, buildBasePath(metadata)); + GoogleCloudStorageRepository( + final RepositoryMetaData metadata, + final NamedXContentRegistry namedXContentRegistry, + final GoogleCloudStorageService storageService, + final ThreadPool threadPool) { + super(metadata, namedXContentRegistry, threadPool, buildBasePath(metadata)); this.storageService = storageService; this.chunkSize = getSetting(CHUNK_SIZE, metadata); diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index 0e3ecde69c4f0..fa9631d1a0010 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; import org.junit.After; @@ -34,9 +33,6 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.instanceOf; - public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCase { private static final String BUCKET = "gcs-repository-test"; @@ -46,25 +42,22 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepos private static final ConcurrentMap blobs = new ConcurrentHashMap<>(); @Override - protected Collection> nodePlugins() { - return Collections.singletonList(MockGoogleCloudStoragePlugin.class); + protected String repositoryType() { + return GoogleCloudStorageRepository.TYPE; } @Override - protected void createTestRepository(String name, boolean verify) { - 
assertAcked(client().admin().cluster().preparePutRepository(name) - .setType(GoogleCloudStorageRepository.TYPE) - .setVerify(verify) - .setSettings(Settings.builder() - .put("bucket", BUCKET) - .put("base_path", GoogleCloudStorageBlobStoreRepositoryTests.class.getSimpleName()) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); + protected Settings repositorySettings() { + return Settings.builder() + .put(super.repositorySettings()) + .put("bucket", BUCKET) + .put("base_path", GoogleCloudStorageBlobStoreRepositoryTests.class.getSimpleName()) + .build(); } @Override - protected void afterCreationCheck(Repository repository) { - assertThat(repository, instanceOf(GoogleCloudStorageRepository.class)); + protected Collection> nodePlugins() { + return Collections.singletonList(MockGoogleCloudStoragePlugin.class); } @After diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java index b51f843162a74..72430bcd36631 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java @@ -68,7 +68,7 @@ public final class HdfsRepository extends BlobStoreRepository { public HdfsRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, ThreadPool threadPool) { - super(metadata, environment.settings(), namedXContentRegistry, threadPool, BlobPath.cleanPath()); + super(metadata, namedXContentRegistry, threadPool, BlobPath.cleanPath()); this.environment = environment; this.chunkSize = metadata.settings().getAsBytesSize("chunk_size", null); diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 040b839cc474c..dad3aecbc10e6 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -65,17 +65,8 @@ bundlePlugin { } } -task testRepositoryCreds(type: Test) { - include '**/RepositoryCredentialsTests.class' - include '**/S3BlobStoreRepositoryTests.class' - systemProperty 'es.allow_insecure_settings', 'true' -} -check.dependsOn(testRepositoryCreds) - test { - // these are tested explicitly in separate test tasks - exclude '**/*CredentialsTests.class' - exclude '**/S3BlobStoreRepositoryTests.class' + // this is tested explicitly in separate test tasks exclude '**/S3RepositoryThirdPartyTests.class' } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java index fee00786a2ab3..843208078b0e1 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java @@ -182,20 +182,14 @@ S3ClientSettings refine(RepositoryMetaData metadata) { final boolean usePathStyleAccess = getRepoSettingOrDefault(USE_PATH_STYLE_ACCESS, normalizedSettings, pathStyleAccess); final boolean newDisableChunkedEncoding = getRepoSettingOrDefault( DISABLE_CHUNKED_ENCODING, normalizedSettings, disableChunkedEncoding); - final S3BasicCredentials newCredentials; - if (checkDeprecatedCredentials(repoSettings)) { - newCredentials = loadDeprecatedCredentials(repoSettings); - } else { - newCredentials = credentials; - } if 
(Objects.equals(endpoint, newEndpoint) && protocol == newProtocol && Objects.equals(proxyHost, newProxyHost) && proxyPort == newProxyPort && newReadTimeoutMillis == readTimeoutMillis && maxRetries == newMaxRetries - && newThrottleRetries == throttleRetries && Objects.equals(credentials, newCredentials) + && newThrottleRetries == throttleRetries && newDisableChunkedEncoding == disableChunkedEncoding) { return this; } return new S3ClientSettings( - newCredentials, + credentials, newEndpoint, newProtocol, newProxyHost, @@ -229,29 +223,6 @@ static Map load(Settings settings) { return Collections.unmodifiableMap(clients); } - static boolean checkDeprecatedCredentials(Settings repositorySettings) { - if (S3Repository.ACCESS_KEY_SETTING.exists(repositorySettings)) { - if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings) == false) { - throw new IllegalArgumentException("Repository setting [" + S3Repository.ACCESS_KEY_SETTING.getKey() - + " must be accompanied by setting [" + S3Repository.SECRET_KEY_SETTING.getKey() + "]"); - } - return true; - } else if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings)) { - throw new IllegalArgumentException("Repository setting [" + S3Repository.SECRET_KEY_SETTING.getKey() - + " must be accompanied by setting [" + S3Repository.ACCESS_KEY_SETTING.getKey() + "]"); - } - return false; - } - - // backcompat for reading keys out of repository settings (clusterState) - private static S3BasicCredentials loadDeprecatedCredentials(Settings repositorySettings) { - assert checkDeprecatedCredentials(repositorySettings); - try (SecureString key = S3Repository.ACCESS_KEY_SETTING.get(repositorySettings); - SecureString secret = S3Repository.SECRET_KEY_SETTING.get(repositorySettings)) { - return new S3BasicCredentials(key.toString(), secret.toString()); - } - } - private static S3BasicCredentials loadCredentials(Settings settings, String clientName) { try (SecureString accessKey = getConfigValue(settings, clientName, ACCESS_KEY_SETTING); SecureString secretKey = getConfigValue(settings, clientName, SECRET_KEY_SETTING); diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 50b6e3265f9d3..a164900b635e7 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -25,11 +25,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.settings.SecureSetting; -import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -55,16 +51,9 @@ */ class S3Repository extends BlobStoreRepository { private static final Logger logger = LogManager.getLogger(S3Repository.class); - private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); static final String TYPE = "s3"; - /** The access key to authenticate with s3. 
This setting is insecure because cluster settings are stored in cluster state */ - static final Setting ACCESS_KEY_SETTING = SecureSetting.insecureString("access_key"); - - /** The secret key to authenticate with s3. This setting is insecure because cluster settings are stored in cluster state */ - static final Setting SECRET_KEY_SETTING = SecureSetting.insecureString("secret_key"); - /** * Default is to use 100MB (S3 defaults) for heaps above 2GB and 5% of * the available memory for smaller heaps. @@ -159,11 +148,12 @@ class S3Repository extends BlobStoreRepository { /** * Constructs an s3 backed repository */ - S3Repository(final RepositoryMetaData metadata, - final Settings settings, - final NamedXContentRegistry namedXContentRegistry, - final S3Service service, final ThreadPool threadPool) { - super(metadata, settings, namedXContentRegistry, threadPool, buildBasePath(metadata)); + S3Repository( + final RepositoryMetaData metadata, + final NamedXContentRegistry namedXContentRegistry, + final S3Service service, + final ThreadPool threadPool) { + super(metadata, namedXContentRegistry, threadPool, buildBasePath(metadata)); this.service = service; // Parse and validate the user's S3 Storage Class setting @@ -186,12 +176,6 @@ class S3Repository extends BlobStoreRepository { this.storageClass = STORAGE_CLASS_SETTING.get(metadata.settings()); this.cannedACL = CANNED_ACL_SETTING.get(metadata.settings()); - if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) { - // provided repository settings - deprecationLogger.deprecated("Using s3 access/secret key from repository settings. Instead " - + "store these in named clients and the elasticsearch keystore for secure settings."); - } - logger.debug( "using bucket [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], cannedACL [{}], storageClass [{}]", bucket, diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index 118197902f600..461eb9e9592cf 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -76,17 +76,17 @@ public S3RepositoryPlugin(final Settings settings) { } // proxy method for testing - protected S3Repository createRepository(final RepositoryMetaData metadata, - final Settings settings, - final NamedXContentRegistry registry, final ThreadPool threadPool) { - return new S3Repository(metadata, settings, registry, service, threadPool); + protected S3Repository createRepository( + final RepositoryMetaData metadata, + final NamedXContentRegistry registry, + final ThreadPool threadPool) { + return new S3Repository(metadata, registry, service, threadPool); } @Override public Map getRepositories(final Environment env, final NamedXContentRegistry registry, final ThreadPool threadPool) { - return Collections.singletonMap(S3Repository.TYPE, - metadata -> createRepository(metadata, env.settings(), registry, threadPool)); + return Collections.singletonMap(S3Repository.TYPE, metadata -> createRepository(metadata, registry, threadPool)); } @Override @@ -105,9 +105,7 @@ public List> getSettings() { S3ClientSettings.READ_TIMEOUT_SETTING, S3ClientSettings.MAX_RETRIES_SETTING, S3ClientSettings.USE_THROTTLE_RETRIES_SETTING, - S3ClientSettings.USE_PATH_STYLE_ACCESS, - S3Repository.ACCESS_KEY_SETTING, - 
S3Repository.SECRET_KEY_SETTING); + S3ClientSettings.USE_PATH_STYLE_ACCESS); } @Override diff --git a/plugins/repository-s3/src/main/plugin-metadata/plugin-security.policy b/plugins/repository-s3/src/main/plugin-metadata/plugin-security.policy index 5fd69b4c2fc3f..206335431f897 100644 --- a/plugins/repository-s3/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/repository-s3/src/main/plugin-metadata/plugin-security.policy @@ -18,6 +18,7 @@ */ grant { + // needed because of problems in ClientConfiguration // TODO: get these fixed in aws sdk permission java.lang.RuntimePermission "accessDeclaredMembers"; @@ -38,6 +39,5 @@ grant { // s3 client opens socket connections for to access repository permission java.net.SocketPermission "*", "connect"; - // only for tests : org.elasticsearch.repositories.s3.S3RepositoryPlugin - permission java.util.PropertyPermission "es.allow_insecure_settings", "read,write"; + }; diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index 89cc35ccf0cc3..f7d5ba021e25c 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -25,201 +25,155 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.metadata.RepositoryMetaData; -import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; -import java.io.IOException; -import java.security.AccessController; -import java.security.PrivilegedAction; +import java.util.Collection; +import java.util.List; +import static org.elasticsearch.repositories.s3.S3ClientSettings.ACCESS_KEY_SETTING; +import static org.elasticsearch.repositories.s3.S3ClientSettings.SECRET_KEY_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; -import static org.mockito.Mockito.mock; +import static org.hamcrest.Matchers.notNullValue; -@SuppressForbidden(reason = "test fixture requires System.setProperty") -public class RepositoryCredentialsTests extends ESTestCase { +public class RepositoryCredentialsTests extends ESSingleNodeTestCase { - static { - AccessController.doPrivileged((PrivilegedAction) () -> { - // required for client settings overwriting - System.setProperty("es.allow_insecure_settings", "true"); - return null; - }); + @Override + protected Collection> getPlugins() { + return List.of(ProxyS3RepositoryPlugin.class); } - static final class ProxyS3RepositoryPlugin extends S3RepositoryPlugin { + @Override + protected boolean resetNodeAfterTest() { + return true; + } - static final class ClientAndCredentials extends AmazonS3Wrapper { - final AWSCredentialsProvider credentials; + @Override + protected Settings nodeSettings() { + final MockSecureSettings 
secureSettings = new MockSecureSettings(); + secureSettings.setString(ACCESS_KEY_SETTING.getConcreteSettingForNamespace("default").getKey(), "secure_default_key"); + secureSettings.setString(SECRET_KEY_SETTING.getConcreteSettingForNamespace("default").getKey(), "secure_default_secret"); + secureSettings.setString(ACCESS_KEY_SETTING.getConcreteSettingForNamespace("other").getKey(), "secure_other_key"); + secureSettings.setString(SECRET_KEY_SETTING.getConcreteSettingForNamespace("other").getKey(), "secure_other_secret"); + + return Settings.builder() + .setSecureSettings(secureSettings) + .put(super.nodeSettings()) + .build(); + } - ClientAndCredentials(AmazonS3 delegate, AWSCredentialsProvider credentials) { - super(delegate); - this.credentials = credentials; - } + public void testReinitSecureCredentials() { + final String clientName = randomFrom("default", "other"); - } + final Settings.Builder repositorySettings = Settings.builder(); + repositorySettings.put(S3Repository.CLIENT_NAME.getKey(), clientName); - static final class ProxyS3Service extends S3Service { + final String repositoryName = "repo-reinit-creds"; + createRepository(repositoryName, repositorySettings.build()); - private static final Logger logger = LogManager.getLogger(ProxyS3Service.class); + final RepositoriesService repositories = getInstanceFromNode(RepositoriesService.class); + assertThat(repositories.repository(repositoryName), notNullValue()); + assertThat(repositories.repository(repositoryName), instanceOf(S3Repository.class)); - @Override - AmazonS3 buildClient(final S3ClientSettings clientSettings) { - final AmazonS3 client = super.buildClient(clientSettings); - return new ClientAndCredentials(client, buildCredentials(logger, clientSettings)); + final S3Repository repository = (S3Repository) repositories.repository(repositoryName); + try (AmazonS3Reference clientReference = ((S3BlobStore) repository.blobStore()).clientReference()) { + final AmazonS3 client = clientReference.client(); + assertThat(client, instanceOf(ProxyS3RepositoryPlugin.ClientAndCredentials.class)); + + final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) client).credentials.getCredentials(); + if ("other".equals(clientName)) { + assertThat(credentials.getAWSAccessKeyId(), is("secure_other_key")); + assertThat(credentials.getAWSSecretKey(), is("secure_other_secret")); + } else { + assertThat(credentials.getAWSAccessKeyId(), is("secure_default_key")); + assertThat(credentials.getAWSSecretKey(), is("secure_default_secret")); + } + + // new settings + final MockSecureSettings newSecureSettings = new MockSecureSettings(); + newSecureSettings.setString("s3.client." + clientName + ".access_key", "new_secret_aws_key"); + newSecureSettings.setString("s3.client." 
+ clientName + ".secret_key", "new_secret_aws_secret"); + final Settings newSettings = Settings.builder().setSecureSettings(newSecureSettings).build(); + // reload S3 plugin settings + final PluginsService plugins = getInstanceFromNode(PluginsService.class); + final ProxyS3RepositoryPlugin plugin = plugins.filterPlugins(ProxyS3RepositoryPlugin.class).get(0); + plugin.reload(newSettings); + + // check the not-yet-closed client reference still has the same credentials + if ("other".equals(clientName)) { + assertThat(credentials.getAWSAccessKeyId(), is("secure_other_key")); + assertThat(credentials.getAWSSecretKey(), is("secure_other_secret")); + } else { + assertThat(credentials.getAWSAccessKeyId(), is("secure_default_key")); + assertThat(credentials.getAWSSecretKey(), is("secure_default_secret")); } + } + + // check credentials have been updated + try (AmazonS3Reference clientReference = ((S3BlobStore) repository.blobStore()).clientReference()) { + final AmazonS3 client = clientReference.client(); + assertThat(client, instanceOf(ProxyS3RepositoryPlugin.ClientAndCredentials.class)); + final AWSCredentials newCredentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) client).credentials.getCredentials(); + assertThat(newCredentials.getAWSAccessKeyId(), is("new_secret_aws_key")); + assertThat(newCredentials.getAWSSecretKey(), is("new_secret_aws_secret")); } + } + + private void createRepository(final String name, final Settings repositorySettings) { + assertAcked(client().admin().cluster().preparePutRepository(name) + .setType(S3Repository.TYPE) + .setVerify(false) + .setSettings(repositorySettings)); + } - ProxyS3RepositoryPlugin(Settings settings) { + /** + * A S3 repository plugin that keeps track of the credentials used to build an AmazonS3 client + */ + public static final class ProxyS3RepositoryPlugin extends S3RepositoryPlugin { + + public ProxyS3RepositoryPlugin(Settings settings) { super(settings, new ProxyS3Service()); } @Override - protected S3Repository createRepository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry registry, - ThreadPool threadPool) { - return new S3Repository(metadata, settings, registry, service, threadPool){ + protected S3Repository createRepository(RepositoryMetaData metadata, + NamedXContentRegistry registry, ThreadPool threadPool) { + return new S3Repository(metadata, registry, service, threadPool) { @Override protected void assertSnapshotOrGenericThread() { // eliminate thread name check as we create repo manually on test/main threads } }; } - } - public void testRepositoryCredentialsOverrideSecureCredentials() throws IOException { - final int clientsCount = randomIntBetween(0, 4); - final String[] clientNames = new String[clientsCount + 1]; - clientNames[0] = "default"; - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("s3.client.default.access_key", "secure_aws_key"); - secureSettings.setString("s3.client.default.secret_key", "secure_aws_secret"); - for (int i = 0; i < clientsCount; i++) { - final String clientName = "client_" + i; - secureSettings.setString("s3.client." + clientName + ".access_key", "secure_aws_key_" + i); - secureSettings.setString("s3.client." 
+ clientName + ".secret_key", "secure_aws_secret_" + i); - clientNames[i + 1] = clientName; - } - final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); - // repository settings for credentials override node secure settings - final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder() - .put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key") - .put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret").build()); - try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings); - S3Repository s3repo = createAndStartRepository(metadata, s3Plugin, mock(ThreadPool.class)); - AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { - final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials(); - assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); - assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); - } - assertWarnings( - "[secret_key] setting was deprecated in Elasticsearch and will be removed in a future release!" - + " See the breaking changes documentation for the next major version.", - "Using s3 access/secret key from repository settings. Instead store these in named clients and" - + " the elasticsearch keystore for secure settings.", - "[access_key] setting was deprecated in Elasticsearch and will be removed in a future release!" - + " See the breaking changes documentation for the next major version."); - } + public static final class ClientAndCredentials extends AmazonS3Wrapper { + final AWSCredentialsProvider credentials; - public void testRepositoryCredentialsOnly() throws IOException { - // repository settings for credentials override node secure settings - final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", - Settings.builder() - .put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key") - .put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret") - .build()); - try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(Settings.EMPTY); - S3Repository s3repo = createAndStartRepository(metadata, s3Plugin, mock(ThreadPool.class)); - AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { - final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials(); - assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); - assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); + ClientAndCredentials(AmazonS3 delegate, AWSCredentialsProvider credentials) { + super(delegate); + this.credentials = credentials; + } } - assertWarnings( - "[secret_key] setting was deprecated in Elasticsearch and will be removed in a future release!" - + " See the breaking changes documentation for the next major version.", - "Using s3 access/secret key from repository settings. Instead store these in named clients and" - + " the elasticsearch keystore for secure settings.", - "[access_key] setting was deprecated in Elasticsearch and will be removed in a future release!" 
- + " See the breaking changes documentation for the next major version."); - } - private S3Repository createAndStartRepository(RepositoryMetaData metadata, S3RepositoryPlugin s3Plugin, ThreadPool threadPool) { - final S3Repository repository = s3Plugin.createRepository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, threadPool); - repository.start(); - return repository; - } + public static final class ProxyS3Service extends S3Service { - public void testReinitSecureCredentials() throws IOException { - final String clientName = randomFrom("default", "some_client"); - // initial client node settings - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("s3.client." + clientName + ".access_key", "secure_aws_key"); - secureSettings.setString("s3.client." + clientName + ".secret_key", "secure_aws_secret"); - final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); - // repository settings - final Settings.Builder builder = Settings.builder(); - final boolean repositorySettings = randomBoolean(); - if (repositorySettings) { - builder.put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key"); - builder.put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret"); - } else { - builder.put(S3Repository.CLIENT_NAME.getKey(), clientName); - } - final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", builder.build()); - try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings); - S3Repository s3repo = createAndStartRepository(metadata, s3Plugin, mock(ThreadPool.class))) { - try (AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { - final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials - .getCredentials(); - if (repositorySettings) { - assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); - assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); - } else { - assertThat(credentials.getAWSAccessKeyId(), is("secure_aws_key")); - assertThat(credentials.getAWSSecretKey(), is("secure_aws_secret")); - } - // new settings - final MockSecureSettings newSecureSettings = new MockSecureSettings(); - newSecureSettings.setString("s3.client." + clientName + ".access_key", "new_secret_aws_key"); - newSecureSettings.setString("s3.client." 
+ clientName + ".secret_key", "new_secret_aws_secret"); - final Settings newSettings = Settings.builder().setSecureSettings(newSecureSettings).build(); - // reload S3 plugin settings - s3Plugin.reload(newSettings); - // check the not-yet-closed client reference still has the same credentials - if (repositorySettings) { - assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); - assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); - } else { - assertThat(credentials.getAWSAccessKeyId(), is("secure_aws_key")); - assertThat(credentials.getAWSSecretKey(), is("secure_aws_secret")); - } - } - // check credentials have been updated - try (AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { - final AWSCredentials newCredentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials - .getCredentials(); - if (repositorySettings) { - assertThat(newCredentials.getAWSAccessKeyId(), is("insecure_aws_key")); - assertThat(newCredentials.getAWSSecretKey(), is("insecure_aws_secret")); - } else { - assertThat(newCredentials.getAWSAccessKeyId(), is("new_secret_aws_key")); - assertThat(newCredentials.getAWSSecretKey(), is("new_secret_aws_secret")); - } + private static final Logger logger = LogManager.getLogger(ProxyS3Service.class); + + @Override + AmazonS3 buildClient(final S3ClientSettings clientSettings) { + final AmazonS3 client = super.buildClient(clientSettings); + return new ClientAndCredentials(client, buildCredentials(logger, clientSettings)); } - } - if (repositorySettings) { - assertWarnings( - "[secret_key] setting was deprecated in Elasticsearch and will be removed in a future release!" - + " See the breaking changes documentation for the next major version.", - "Using s3 access/secret key from repository settings. Instead store these in named clients and" - + " the elasticsearch keystore for secure settings.", - "[access_key] setting was deprecated in Elasticsearch and will be removed in a future release!" 
- + " See the breaking changes documentation for the next major version."); + } } - } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java index 89ea3e55bfb69..da2fc588e4d56 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; +import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -46,7 +47,6 @@ import java.io.IOException; import java.io.InputStream; -import java.net.Inet6Address; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.SocketTimeoutException; @@ -99,13 +99,10 @@ private BlobContainer createBlobContainer(final @Nullable Integer maxRetries, final Settings.Builder clientSettings = Settings.builder(); final String clientName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); - final String endpoint; - if (httpServer.getAddress().getAddress() instanceof Inet6Address) { - endpoint = "http://[" + httpServer.getAddress().getHostString() + "]:" + httpServer.getAddress().getPort(); - } else { - endpoint = "http://" + httpServer.getAddress().getHostString() + ":" + httpServer.getAddress().getPort(); - } + final InetSocketAddress address = httpServer.getAddress(); + final String endpoint = "http://" + InetAddresses.toUriString(address.getAddress()) + ":" + address.getPort(); clientSettings.put(ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint); + if (maxRetries != null) { clientSettings.put(MAX_RETRIES_SETTING.getConcreteSettingForNamespace(clientName).getKey(), maxRetries); } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 77f5bfcf72d9b..3e764b69a6022 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -18,89 +18,92 @@ */ package org.elasticsearch.repositories.s3; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.CannedAccessControlList; -import com.amazonaws.services.s3.model.StorageClass; -import org.elasticsearch.client.node.NodeClient; +import com.amazonaws.http.AmazonHttpClient; +import com.amazonaws.services.s3.Headers; +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.apache.http.HttpStatus; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.MockSecureSettings; +import 
org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.env.Environment; +import org.elasticsearch.mocksocket.MockHttpServer; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; -import org.elasticsearch.rest.AbstractRestChannel; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.admin.cluster.RestGetRepositoriesAction; -import org.elasticsearch.test.rest.FakeRestRequest; -import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.RestUtils; import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; import org.junit.BeforeClass; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.Locale; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.atomic.AtomicInteger; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.not; -import static org.mockito.Mockito.mock; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.hamcrest.Matchers.nullValue; +@SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint") public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCase { - private static final ConcurrentMap blobs = new ConcurrentHashMap<>(); - private static String bucket; - private static ByteSizeValue bufferSize; - private static boolean serverSideEncryption; - private static String cannedACL; - private static String storageClass; + private static HttpServer httpServer; @BeforeClass - public static void setUpRepositorySettings() { - bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - bufferSize = new ByteSizeValue(randomIntBetween(5, 50), ByteSizeUnit.MB); - serverSideEncryption = randomBoolean(); - if (randomBoolean()) { - cannedACL = randomFrom(CannedAccessControlList.values()).toString(); - } + public static void startHttpServer() throws Exception { + httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + httpServer.start(); + } + + @Before + public void setUpHttpServer() { + HttpHandler handler = new InternalHttpHandler(); if (randomBoolean()) { - storageClass = randomValueOtherThan(StorageClass.Glacier, () -> randomFrom(StorageClass.values())).toString(); + handler = new ErroneousHttpHandler(handler, randomIntBetween(2, 3)); } + httpServer.createContext("/bucket", handler); + } + + 
@AfterClass + public static void stopHttpServer() { + httpServer.stop(0); + httpServer = null; } @After - public void wipeRepository() { - blobs.clear(); + public void tearDownHttpServer() { + httpServer.removeContext("/bucket"); } @Override - protected void createTestRepository(final String name, boolean verify) { - assertAcked(client().admin().cluster().preparePutRepository(name) - .setType(S3Repository.TYPE) - .setVerify(verify) - .setSettings(Settings.builder() - .put(S3Repository.BUCKET_SETTING.getKey(), bucket) - .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), bufferSize) - .put(S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), serverSideEncryption) - .put(S3Repository.CANNED_ACL_SETTING.getKey(), cannedACL) - .put(S3Repository.STORAGE_CLASS_SETTING.getKey(), storageClass) - .put(S3Repository.ACCESS_KEY_SETTING.getKey(), "not_used_but_this_is_a_secret") - .put(S3Repository.SECRET_KEY_SETTING.getKey(), "not_used_but_this_is_a_secret"))); + protected String repositoryType() { + return S3Repository.TYPE; } @Override - protected void afterCreationCheck(Repository repository) { - assertThat(repository, instanceOf(S3Repository.class)); + protected Settings repositorySettings() { + return Settings.builder() + .put(S3Repository.BUCKET_SETTING.getKey(), "bucket") + .put(S3Repository.CLIENT_NAME.getKey(), "test") + .build(); } @Override @@ -108,6 +111,26 @@ protected Collection> nodePlugins() { return Collections.singletonList(TestS3RepositoryPlugin.class); } + @Override + protected Settings nodeSettings(int nodeOrdinal) { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(S3ClientSettings.ACCESS_KEY_SETTING.getConcreteSettingForNamespace("test").getKey(), "access"); + secureSettings.setString(S3ClientSettings.SECRET_KEY_SETTING.getConcreteSettingForNamespace("test").getKey(), "secret"); + + final InetSocketAddress address = httpServer.getAddress(); + final String endpoint = "http://" + InetAddresses.toUriString(address.getAddress()) + ":" + address.getPort(); + + return Settings.builder() + .put(Settings.builder() + .put(S3ClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace("test").getKey(), endpoint) + // Disable chunked encoding as it simplifies a lot the request parsing on the httpServer side + .put(S3ClientSettings.DISABLE_CHUNKED_ENCODING.getConcreteSettingForNamespace("test").getKey(), true) + .build()) + .put(super.nodeSettings(nodeOrdinal)) + .setSecureSettings(secureSettings) + .build(); + } + public static class TestS3RepositoryPlugin extends S3RepositoryPlugin { public TestS3RepositoryPlugin(final Settings settings) { @@ -115,43 +138,150 @@ public TestS3RepositoryPlugin(final Settings settings) { } @Override - public Map getRepositories(final Environment env, final NamedXContentRegistry registry, - final ThreadPool threadPool) { - return Collections.singletonMap(S3Repository.TYPE, - metadata -> new S3Repository(metadata, env.settings(), registry, new S3Service() { - @Override - AmazonS3 buildClient(S3ClientSettings clientSettings) { - return new MockAmazonS3(blobs, bucket, serverSideEncryption, cannedACL, storageClass); - } - }, threadPool)); + public List> getSettings() { + final List> settings = new ArrayList<>(super.getSettings()); + settings.add(S3ClientSettings.DISABLE_CHUNKED_ENCODING); + return settings; } } - public void testInsecureRepositoryCredentials() throws Exception { - final String repositoryName = "testInsecureRepositoryCredentials"; - createAndCheckTestRepository(repositoryName); - final NodeClient 
nodeClient = internalCluster().getInstance(NodeClient.class);
- final RestGetRepositoriesAction getRepoAction = new RestGetRepositoriesAction(mock(RestController.class),
- internalCluster().getInstance(SettingsFilter.class));
- final RestRequest getRepoRequest = new FakeRestRequest();
- getRepoRequest.params().put("repository", repositoryName);
- final CountDownLatch getRepoLatch = new CountDownLatch(1);
- final AtomicReference<AssertionError> getRepoError = new AtomicReference<>();
- getRepoAction.handleRequest(getRepoRequest, new AbstractRestChannel(getRepoRequest, true) {
- @Override
- public void sendResponse(RestResponse response) {
- try {
- assertThat(response.content().utf8ToString(), not(containsString("not_used_but_this_is_a_secret")));
- } catch (final AssertionError ex) {
- getRepoError.set(ex);
+ /**
+ * Minimal HTTP handler that acts as a S3 compliant server
+ */
+ @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint")
+ private static class InternalHttpHandler implements HttpHandler {
+
+ private final ConcurrentMap<String, BytesReference> blobs = new ConcurrentHashMap<>();
+
+ @Override
+ public void handle(final HttpExchange exchange) throws IOException {
+ final String request = exchange.getRequestMethod() + " " + exchange.getRequestURI().toString();
+ try {
+ if (Regex.simpleMatch("PUT /bucket/*", request)) {
+ blobs.put(exchange.getRequestURI().toString(), Streams.readFully(exchange.getRequestBody()));
+ exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1);
+
+ } else if (Regex.simpleMatch("GET /bucket/?prefix=*", request)) {
+ final Map<String, String> params = new HashMap<>();
+ RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params);
+ assertThat("Test must be adapted for GET Bucket (List Objects) Version 2", params.get("list-type"), nullValue());
+
+ final StringBuilder list = new StringBuilder();
+ list.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
+ list.append("<ListBucketResult>");
+ final String prefix = params.get("prefix");
+ if (prefix != null) {
+ list.append("<Prefix>").append(prefix).append("</Prefix>");
+ }
+ for (Map.Entry<String, BytesReference> blob : blobs.entrySet()) {
+ if (prefix == null || blob.getKey().startsWith("/bucket/" + prefix)) {
+ list.append("<Contents>");
+ list.append("<Key>").append(blob.getKey().replace("/bucket/", "")).append("</Key>");
+ list.append("<Size>").append(blob.getValue().length()).append("</Size>");
+ list.append("</Contents>");
+ }
+ }
+ list.append("</ListBucketResult>");
+
+ byte[] response = list.toString().getBytes(StandardCharsets.UTF_8);
+ exchange.getResponseHeaders().add("Content-Type", "application/xml");
+ exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length);
+ exchange.getResponseBody().write(response);
+
+ } else if (Regex.simpleMatch("GET /bucket/*", request)) {
+ final BytesReference blob = blobs.get(exchange.getRequestURI().toString());
+ if (blob != null) {
+ exchange.getResponseHeaders().add("Content-Type", "application/octet-stream");
+ exchange.sendResponseHeaders(RestStatus.OK.getStatus(), blob.length());
+ blob.writeTo(exchange.getResponseBody());
+ } else {
+ exchange.sendResponseHeaders(RestStatus.NOT_FOUND.getStatus(), -1);
+ }
+
+ } else if (Regex.simpleMatch("DELETE /bucket/*", request)) {
+ int deletions = 0;
+ for (Iterator<Map.Entry<String, BytesReference>> iterator = blobs.entrySet().iterator(); iterator.hasNext(); ) {
+ Map.Entry<String, BytesReference> blob = iterator.next();
+ if (blob.getKey().startsWith(exchange.getRequestURI().toString())) {
+ iterator.remove();
+ deletions++;
+ }
+ }
+ exchange.sendResponseHeaders((deletions > 0 ? RestStatus.OK : RestStatus.NO_CONTENT).getStatus(), -1);
+
+ } else if (Regex.simpleMatch("POST /bucket/?delete", request)) {
+ final String requestBody = Streams.copyToString(new InputStreamReader(exchange.getRequestBody(), UTF_8));
+
+ final StringBuilder deletes = new StringBuilder();
+ deletes.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
+ deletes.append("<DeleteResult>");
+ for (Iterator<Map.Entry<String, BytesReference>> iterator = blobs.entrySet().iterator(); iterator.hasNext(); ) {
+ Map.Entry<String, BytesReference> blob = iterator.next();
+ String key = blob.getKey().replace("/bucket/", "");
+ if (requestBody.contains("<Key>" + key + "</Key>")) {
+ deletes.append("<Deleted><Key>").append(key).append("</Key></Deleted>");
+ iterator.remove();
+ }
+ }
+ deletes.append("</DeleteResult>");
+
+ byte[] response = deletes.toString().getBytes(StandardCharsets.UTF_8);
+ exchange.getResponseHeaders().add("Content-Type", "application/xml");
+ exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length);
+ exchange.getResponseBody().write(response);
+
+ } else {
+ exchange.sendResponseHeaders(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), -1);
}
- getRepoLatch.countDown();
+ } finally {
+ exchange.close();
}
- }, nodeClient);
- getRepoLatch.await();
- if (getRepoError.get() != null) {
- throw getRepoError.get();
}
}
+ /**
+ * HTTP handler that injects random S3 service errors
+ *
+ * Note: it is not a good idea to allow this handler to simulate too many errors as it would
+ * slow down the test suite and/or could trigger SDK client request throttling (and request
+ * would fail before reaching the max retry attempts - this can be mitigated by disabling
+ * {@link S3ClientSettings#USE_THROTTLE_RETRIES_SETTING})
+ */
+ @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint")
+ private static class ErroneousHttpHandler implements HttpHandler {
+
+ // first key is the remote address, second key is the HTTP request unique id provided by the AWS SDK client,
+ // value is the number of times the request has been seen
+ private final Map<String, AtomicInteger> requests;
+ private final HttpHandler delegate;
+ private final int maxErrorsPerRequest;
+
+ private ErroneousHttpHandler(final HttpHandler delegate, final int maxErrorsPerRequest) {
+ this.requests = new ConcurrentHashMap<>();
+ this.delegate = delegate;
+ this.maxErrorsPerRequest = maxErrorsPerRequest;
+ assert maxErrorsPerRequest > 1;
+ }
+
+ @Override
+ public void handle(final HttpExchange exchange) throws IOException {
+ final String requestId = exchange.getRequestHeaders().getFirst(AmazonHttpClient.HEADER_SDK_TRANSACTION_ID);
+ assert Strings.hasText(requestId);
+
+ final int count = requests.computeIfAbsent(requestId, req -> new AtomicInteger(0)).incrementAndGet();
+ if (count >= maxErrorsPerRequest || randomBoolean()) {
+ requests.remove(requestId);
+ delegate.handle(exchange);
+ } else {
+ handleAsError(exchange, requestId);
+ }
+ }
+
+ private void handleAsError(final HttpExchange exchange, final String requestId) throws IOException {
+ Streams.readFully(exchange.getRequestBody());
+ exchange.getResponseHeaders().add(Headers.REQUEST_ID, requestId);
+ exchange.sendResponseHeaders(HttpStatus.SC_INTERNAL_SERVER_ERROR, -1);
+ exchange.close();
+ }
+ }
}
diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java
index af04c420408ad..45177dbebbf48 100644
--- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java
+++ 
b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -120,7 +120,7 @@ public void testDefaultBufferSize() { } private S3Repository createS3Repo(RepositoryMetaData metadata) { - return new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service(), mock(ThreadPool.class)) { + return new S3Repository(metadata, NamedXContentRegistry.EMPTY, new DummyS3Service(), mock(ThreadPool.class)) { @Override protected void assertSnapshotOrGenericThread() { // eliminate thread name check as we create repo manually on test/main threads diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/SearchRestCancellationIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/SearchRestCancellationIT.java index 73ba8cdf8b331..698f42c43ca03 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/SearchRestCancellationIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/SearchRestCancellationIT.java @@ -154,7 +154,7 @@ private static Map readNodesInfo() { return nodeIdToName; } - private static void ensureSearchTaskIsCancelled(Function nodeIdToName) { + private static void ensureSearchTaskIsCancelled(Function nodeIdToName) throws Exception { SetOnce searchTask = new SetOnce<>(); ListTasksResponse listTasksResponse = client().admin().cluster().prepareListTasks().get(); for (TaskInfo task : listTasksResponse.getTasks()) { @@ -165,10 +165,12 @@ private static void ensureSearchTaskIsCancelled(Function nodeIdT assertNotNull(searchTask.get()); TaskId taskId = searchTask.get().getTaskId(); String nodeName = nodeIdToName.apply(taskId.getNodeId()); - TaskManager taskManager = internalCluster().getInstance(TransportService.class, nodeName).getTaskManager(); - Task task = taskManager.getTask(taskId.getId()); - assertThat(task, instanceOf(CancellableTask.class)); - assertTrue(((CancellableTask)task).isCancelled()); + assertBusy(() -> { + TaskManager taskManager = internalCluster().getInstance(TransportService.class, nodeName).getTaskManager(); + Task task = taskManager.getTask(taskId.getId()); + assertThat(task, instanceOf(CancellableTask.class)); + assertTrue(((CancellableTask)task).isCancelled()); + }); } private static void indexTestData() { diff --git a/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java b/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java index 9fa9bcacc0ac6..d12ecacbdc7d3 100644 --- a/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java @@ -32,6 +32,7 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.mapper.RangeType; import java.io.IOException; import java.util.Objects; @@ -40,13 +41,13 @@ public final class BinaryDocValuesRangeQuery extends Query { private final String fieldName; private final QueryType queryType; - private final LengthType lengthType; + private final RangeType.LengthType lengthType; private final BytesRef from; private final BytesRef to; private final Object originalFrom; private final Object originalTo; - public BinaryDocValuesRangeQuery(String fieldName, QueryType queryType, LengthType lengthType, + public BinaryDocValuesRangeQuery(String fieldName, QueryType queryType, RangeType.LengthType lengthType, BytesRef from, BytesRef to, Object originalFrom, Object originalTo) { this.fieldName 
= fieldName; @@ -178,42 +179,4 @@ boolean matches(BytesRef from, BytesRef to, BytesRef otherFrom, BytesRef otherTo } - public enum LengthType { - FIXED_4 { - @Override - int readLength(byte[] bytes, int offset) { - return 4; - } - }, - FIXED_8 { - @Override - int readLength(byte[] bytes, int offset) { - return 8; - } - }, - FIXED_16 { - @Override - int readLength(byte[] bytes, int offset) { - return 16; - } - }, - VARIABLE { - @Override - int readLength(byte[] bytes, int offset) { - // the first bit encodes the sign and the next 4 bits encode the number - // of additional bytes - int token = Byte.toUnsignedInt(bytes[offset]); - int length = (token >>> 3) & 0x0f; - if ((token & 0x80) == 0) { - length = 0x0f - length; - } - return 1 + length; - } - }; - - /** - * Return the length of the value that starts at {@code offset} in {@code bytes}. - */ - abstract int readLength(byte[] bytes, int offset); - } } diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 51662414e0d07..92bfa32807f3e 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -961,8 +961,8 @@ private enum ElasticsearchExceptionHandle { RESOURCE_ALREADY_EXISTS_EXCEPTION(ResourceAlreadyExistsException.class, ResourceAlreadyExistsException::new, 123, UNKNOWN_VERSION_ADDED), // 124 used to be Script.ScriptParseException - HTTP_ON_TRANSPORT_EXCEPTION(TcpTransport.HttpOnTransportException.class, - TcpTransport.HttpOnTransportException::new, 125, UNKNOWN_VERSION_ADDED), + HTTP_REQUEST_ON_TRANSPORT_EXCEPTION(TcpTransport.HttpRequestOnTransportException.class, + TcpTransport.HttpRequestOnTransportException::new, 125, UNKNOWN_VERSION_ADDED), MAPPER_PARSING_EXCEPTION(org.elasticsearch.index.mapper.MapperParsingException.class, org.elasticsearch.index.mapper.MapperParsingException::new, 126, UNKNOWN_VERSION_ADDED), SEARCH_CONTEXT_EXCEPTION(org.elasticsearch.search.SearchContextException.class, diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index dae40f04b5c93..c7f99d63fa8d3 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -67,6 +67,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_3_1 = new Version(7030199, org.apache.lucene.util.Version.LUCENE_8_1_0); public static final Version V_7_3_2 = new Version(7030299, org.apache.lucene.util.Version.LUCENE_8_1_0); public static final Version V_7_4_0 = new Version(7040099, org.apache.lucene.util.Version.LUCENE_8_2_0); + public static final Version V_7_5_0 = new Version(7050099, org.apache.lucene.util.Version.LUCENE_8_2_0); public static final Version V_8_0_0 = new Version(8000099, org.apache.lucene.util.Version.LUCENE_8_2_0); public static final Version CURRENT = V_8_0_0; diff --git a/server/src/main/java/org/elasticsearch/action/ActionListener.java b/server/src/main/java/org/elasticsearch/action/ActionListener.java index c21aa3b9d4b8f..957f46e6116dc 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/ActionListener.java @@ -22,6 +22,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.CheckedRunnable; 
import org.elasticsearch.common.CheckedSupplier; import java.util.ArrayList; @@ -226,6 +227,37 @@ public void onFailure(Exception e) { }; } + /** + * Wraps a given listener and returns a new listener which executes the provided {@code runBefore} + * callback before the listener is notified via either {@code #onResponse} or {@code #onFailure}. + * If the callback throws an exception then it will be passed to the listener's {@code #onFailure} and its {@code #onResponse} will + * not be executed. + */ + static ActionListener runBefore(ActionListener delegate, CheckedRunnable runBefore) { + return new ActionListener<>() { + @Override + public void onResponse(Response response) { + try { + runBefore.run(); + } catch (Exception ex) { + delegate.onFailure(ex); + return; + } + delegate.onResponse(response); + } + + @Override + public void onFailure(Exception e) { + try { + runBefore.run(); + } catch (Exception ex) { + e.addSuppressed(ex); + } + delegate.onFailure(e); + } + }; + } + /** * Wraps a given listener and returns a new listener which makes sure {@link #onResponse(Object)} * and {@link #onFailure(Exception)} of the provided listener will be called at most once. diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 8475272a5e2cd..bce6d02a8c7fc 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -140,7 +140,7 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha } else { Boolean allowPartialResults = request.allowPartialSearchResults(); assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults"; - if (allowPartialResults == false && shardFailures.get() != null) { + if (allowPartialResults == false && successfulOps.get() != getNumShards()) { // check if there are actual failures in the atomic array since // successful retries can reset the failures to null ShardOperationFailedException[] shardSearchFailures = buildShardFailures(); @@ -154,6 +154,15 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha } onPhaseFailure(currentPhase, "Partial shards failure", null); return; + } else { + int discrepancy = getNumShards() - successfulOps.get(); + assert discrepancy > 0 : "discrepancy: " + discrepancy; + if (logger.isDebugEnabled()) { + logger.debug("Partial shards failure (unavailable: {}, successful: {}, skipped: {}, num-shards: {}, phase: {})", + discrepancy, successfulOps.get(), skippedOps.get(), getNumShards(), currentPhase.getName()); + } + onPhaseFailure(currentPhase, "Partial shards failure (" + discrepancy + " shards unavailable)", null); + return; } } if (logger.isTraceEnabled()) { diff --git a/server/src/main/java/org/elasticsearch/bootstrap/FilePermissionUtils.java b/server/src/main/java/org/elasticsearch/bootstrap/FilePermissionUtils.java index 5355ffb455e59..12f858cd08c77 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/FilePermissionUtils.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/FilePermissionUtils.java @@ -32,8 +32,6 @@ public class FilePermissionUtils { /** no instantiation */ private FilePermissionUtils() {} - private static final boolean VERSION_IS_AT_LEAST_JAVA_9 = JavaVersion.current().compareTo(JavaVersion.parse("9")) >= 0; - /** * Add access to single file path * @param policy 
current policy to add permissions to @@ -43,10 +41,12 @@ private FilePermissionUtils() {} @SuppressForbidden(reason = "only place where creating Java-9 compatible FilePermission objects is possible") public static void addSingleFilePath(Permissions policy, Path path, String permissions) throws IOException { policy.add(new FilePermission(path.toString(), permissions)); - if (VERSION_IS_AT_LEAST_JAVA_9 && Files.exists(path)) { - // Java 9 FilePermission model requires this due to the removal of pathname canonicalization, - // see also https://github.com/elastic/elasticsearch/issues/21534 - Path realPath = path.toRealPath(); + if (Files.exists(path)) { + /* + * The file permission model since JDK 9 requires this due to the removal of pathname canonicalization. See also + * https://github.com/elastic/elasticsearch/issues/21534. + */ + final Path realPath = path.toRealPath(); if (path.toString().equals(realPath.toString()) == false) { policy.add(new FilePermission(realPath.toString(), permissions)); } @@ -73,14 +73,15 @@ public static void addDirectoryPath(Permissions policy, String configurationName // add each path twice: once for itself, again for files underneath it policy.add(new FilePermission(path.toString(), permissions)); policy.add(new FilePermission(path.toString() + path.getFileSystem().getSeparator() + "-", permissions)); - if (VERSION_IS_AT_LEAST_JAVA_9) { - // Java 9 FilePermission model requires this due to the removal of pathname canonicalization, - // see also https://github.com/elastic/elasticsearch/issues/21534 - Path realPath = path.toRealPath(); - if (path.toString().equals(realPath.toString()) == false) { - policy.add(new FilePermission(realPath.toString(), permissions)); - policy.add(new FilePermission(realPath.toString() + realPath.getFileSystem().getSeparator() + "-", permissions)); - } + /* + * The file permission model since JDK 9 requires this due to the removal of pathname canonicalization. See also + * https://github.com/elastic/elasticsearch/issues/21534. 
+ */ + final Path realPath = path.toRealPath(); + if (path.toString().equals(realPath.toString()) == false) { + policy.add(new FilePermission(realPath.toString(), permissions)); + policy.add(new FilePermission(realPath.toString() + realPath.getFileSystem().getSeparator() + "-", permissions)); } } + } diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index 1a5d0e64fcd2c..d07897199ae8e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -47,9 +47,8 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ReceiveTimeoutTransportException; -import java.util.ArrayList; -import java.util.Collections; import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; @@ -88,7 +87,7 @@ public class InternalClusterInfoService implements ClusterInfoService, LocalNode private final ClusterService clusterService; private final ThreadPool threadPool; private final NodeClient client; - private final List> listeners = Collections.synchronizedList(new ArrayList<>(1)); + private final List> listeners = new CopyOnWriteArrayList<>(); public InternalClusterInfoService(Settings settings, ClusterService clusterService, ThreadPool threadPool, NodeClient client) { this.leastAvailableSpaceUsages = ImmutableOpenMap.of(); @@ -275,6 +274,11 @@ private void maybeRefresh() { } } + // allow tests to adjust the node stats on receipt + List adjustNodesStats(List nodeStats) { + return nodeStats; + } + /** * Refreshes the ClusterInfo in a blocking fashion */ @@ -284,12 +288,13 @@ public final ClusterInfo refresh() { } final CountDownLatch nodeLatch = updateNodeStats(new ActionListener() { @Override - public void onResponse(NodesStatsResponse nodeStatses) { - ImmutableOpenMap.Builder newLeastAvaiableUsages = ImmutableOpenMap.builder(); - ImmutableOpenMap.Builder newMostAvaiableUsages = ImmutableOpenMap.builder(); - fillDiskUsagePerNode(logger, nodeStatses.getNodes(), newLeastAvaiableUsages, newMostAvaiableUsages); - leastAvailableSpaceUsages = newLeastAvaiableUsages.build(); - mostAvailableSpaceUsages = newMostAvaiableUsages.build(); + public void onResponse(NodesStatsResponse nodesStatsResponse) { + ImmutableOpenMap.Builder leastAvailableUsagesBuilder = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder mostAvailableUsagesBuilder = ImmutableOpenMap.builder(); + fillDiskUsagePerNode(logger, adjustNodesStats(nodesStatsResponse.getNodes()), + leastAvailableUsagesBuilder, mostAvailableUsagesBuilder); + leastAvailableSpaceUsages = leastAvailableUsagesBuilder.build(); + mostAvailableSpaceUsages = mostAvailableUsagesBuilder.build(); } @Override @@ -402,7 +407,7 @@ static void fillDiskUsagePerNode(Logger logger, List nodeStatsArray, if (leastAvailablePath == null) { assert mostAvailablePath == null; mostAvailablePath = leastAvailablePath = info; - } else if (leastAvailablePath.getAvailable().getBytes() > info.getAvailable().getBytes()){ + } else if (leastAvailablePath.getAvailable().getBytes() > info.getAvailable().getBytes()) { leastAvailablePath = info; } else if (mostAvailablePath.getAvailable().getBytes() < info.getAvailable().getBytes()) { mostAvailablePath = info; diff --git 
a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java index 0dad13d87a15b..fcd907a60fd0c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -221,6 +221,8 @@ public void onNewInfo(ClusterInfo info) { .collect(Collectors.toSet()); if (indicesToAutoRelease.isEmpty() == false) { + logger.info("releasing read-only block on indices " + indicesToAutoRelease + + " since they are now allocated to nodes with sufficient disk space"); updateIndicesReadOnly(indicesToAutoRelease, listener, false); } else { listener.onResponse(null); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 0838999c4f367..6c99cfa8ee056 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -90,16 +90,36 @@ static long sizeOfRelocatingShards(RoutingNode node, RoutingAllocation allocatio boolean subtractShardsMovingAway, String dataPath) { ClusterInfo clusterInfo = allocation.clusterInfo(); long totalSize = 0; - for (ShardRouting routing : node.shardsWithState(ShardRoutingState.RELOCATING, ShardRoutingState.INITIALIZING)) { - String actualPath = clusterInfo.getDataPath(routing); - if (dataPath.equals(actualPath)) { - if (routing.initializing() && routing.relocatingNodeId() != null) { - totalSize += getExpectedShardSize(routing, allocation, 0); - } else if (subtractShardsMovingAway && routing.relocating()) { + + for (ShardRouting routing : node.shardsWithState(ShardRoutingState.INITIALIZING)) { + if (routing.relocatingNodeId() == null) { + // in practice the only initializing-but-not-relocating shards with a nonzero expected shard size will be ones created + // by a resize (shrink/split/clone) operation which we expect to happen using hard links, so they shouldn't be taking + // any additional space and can be ignored here + continue; + } + + final String actualPath = clusterInfo.getDataPath(routing); + // if we don't yet know the actual path of the incoming shard then conservatively assume it's going to the path with the least + // free space + if (actualPath == null || actualPath.equals(dataPath)) { + totalSize += getExpectedShardSize(routing, allocation, 0); + } + } + + if (subtractShardsMovingAway) { + for (ShardRouting routing : node.shardsWithState(ShardRoutingState.RELOCATING)) { + String actualPath = clusterInfo.getDataPath(routing); + if (actualPath == null) { + // we might know the path of this shard from before when it was relocating + actualPath = clusterInfo.getDataPath(routing.cancelRelocation()); + } + if (dataPath.equals(actualPath)) { totalSize -= getExpectedShardSize(routing, allocation, 0); } } } + return totalSize; } diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 907277b53dde9..ff8404f4cfb76 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ 
b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -137,6 +137,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING, EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING, + IndexSettings.INDEX_FLUSH_AFTER_MERGE_THRESHOLD_SIZE_SETTING, IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING, IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING, IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING, diff --git a/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java b/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java index e022e4e3760a5..9f902febc7730 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java @@ -19,14 +19,13 @@ package org.elasticsearch.common.settings; +import org.elasticsearch.common.util.ArrayUtils; + import java.io.InputStream; import java.security.GeneralSecurityException; import java.util.EnumSet; import java.util.Set; -import org.elasticsearch.common.Booleans; -import org.elasticsearch.common.util.ArrayUtils; - /** * A secure setting. * @@ -34,9 +33,6 @@ */ public abstract class SecureSetting extends Setting { - /** Determines whether legacy settings with sensitive values should be allowed. */ - private static final boolean ALLOW_INSECURE_SETTINGS = Booleans.parseBoolean(System.getProperty("es.allow_insecure_settings", "false")); - private static final Set ALLOWED_PROPERTIES = EnumSet.of(Property.Deprecated, Property.Consistent); private static final Property[] FIXED_PROPERTIES = { @@ -139,14 +135,6 @@ public static Setting secureString(String name, Setting insecureString(String name) { - return new InsecureStringSetting(name); - } - /** * A setting which contains a file. Reading the setting opens an input stream to the file. 
* @@ -179,24 +167,6 @@ SecureString getFallback(Settings settings) { } } - private static class InsecureStringSetting extends Setting { - private final String name; - - private InsecureStringSetting(String name) { - super(name, "", SecureString::new, Property.Deprecated, Property.Filtered, Property.NodeScope); - this.name = name; - } - - @Override - public SecureString get(Settings settings) { - if (ALLOW_INSECURE_SETTINGS == false && exists(settings)) { - throw new IllegalArgumentException("Setting [" + name + "] is insecure, " + - "but property [allow_insecure_settings] is not set"); - } - return super.get(settings); - } - } - private static class SecureFileSetting extends SecureSetting { private final Setting fallback; diff --git a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index 094ce5349b862..adef9fec912f8 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -63,11 +63,27 @@ public SettingsModule( List> additionalSettings, List settingsFilter, Set> settingUpgraders) { + this( + settings, + additionalSettings, + settingsFilter, + settingUpgraders, + ClusterSettings.BUILT_IN_CLUSTER_SETTINGS, + IndexScopedSettings.BUILT_IN_INDEX_SETTINGS); + } + + SettingsModule( + final Settings settings, + final List> additionalSettings, + final List settingsFilter, + final Set> settingUpgraders, + final Set> registeredClusterSettings, + final Set> registeredIndexSettings) { this.settings = settings; - for (Setting setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) { + for (Setting setting : registeredClusterSettings) { registerSetting(setting); } - for (Setting setting : IndexScopedSettings.BUILT_IN_INDEX_SETTINGS) { + for (Setting setting : registeredIndexSettings) { registerSetting(setting); } @@ -143,7 +159,7 @@ public SettingsModule( // by now we are fully configured, lets check node level settings for unregistered index settings clusterSettings.validate(settings, true); this.settingsFilter = new SettingsFilter(settingsFilterPattern); - } + } @Override public void configure(Binder binder) { @@ -159,6 +175,9 @@ public void configure(Binder binder) { * the setting during startup. */ private void registerSetting(Setting setting) { + if (setting.getKey().contains(".") == false) { + throw new IllegalArgumentException("setting [" + setting.getKey() + "] is not in any namespace, its name must contain a dot"); + } if (setting.isFiltered()) { if (settingsFilterPattern.contains(setting.getKey()) == false) { registerSettingsFilter(setting.getKey()); diff --git a/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java index 5191a8d85cef6..098a01410897c 100644 --- a/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java +++ b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java @@ -168,12 +168,13 @@ private void addCookies(HttpResponse response) { // Determine if the request connection should be closed on completion. 
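One consequence of the SettingsModule hunk above: every registered setting key must now contain a dot, i.e. live in some namespace. A small sketch of that check, assuming a made-up plugin setting name; only the namespaced key is accepted.

import java.util.Collections;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;

public class SettingNamespaceSketch {
    public static void main(String[] args) {
        // a namespaced key ("my_plugin.max_widgets" is hypothetical) registers fine
        Setting<Integer> namespaced = Setting.intSetting("my_plugin.max_widgets", 10, Setting.Property.NodeScope);
        new SettingsModule(Settings.EMPTY, Collections.singletonList(namespaced),
            Collections.emptyList(), Collections.emptySet());

        // a bare key is now rejected during registration
        Setting<Integer> bare = Setting.intSetting("max_widgets", 10, Setting.Property.NodeScope);
        try {
            new SettingsModule(Settings.EMPTY, Collections.singletonList(bare),
                Collections.emptyList(), Collections.emptySet());
        } catch (IllegalArgumentException e) {
            // "setting [max_widgets] is not in any namespace, its name must contain a dot"
            System.out.println(e.getMessage());
        }
    }
}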
private boolean isCloseConnection() { - final boolean http10 = isHttp10(); - return CLOSE.equalsIgnoreCase(request.header(CONNECTION)) || (http10 && !KEEP_ALIVE.equalsIgnoreCase(request.header(CONNECTION))); - } - - // Determine if the request protocol version is HTTP 1.0 - private boolean isHttp10() { - return request.getHttpRequest().protocolVersion() == HttpRequest.HttpVersion.HTTP_1_0; + try { + final boolean http10 = request.getHttpRequest().protocolVersion() == HttpRequest.HttpVersion.HTTP_1_0; + return CLOSE.equalsIgnoreCase(request.header(CONNECTION)) + || (http10 && !KEEP_ALIVE.equalsIgnoreCase(request.header(CONNECTION))); + } catch (Exception e) { + // In case we fail to parse the http protocol version out of the request we always close the connection + return true; + } } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index e610691b8512d..495bb71ab2104 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -195,6 +195,14 @@ public final class IndexSettings { new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), Property.Dynamic, Property.IndexScope); + /** + * The minimum size of a merge that triggers a flush in order to free resources + */ + public static final Setting INDEX_FLUSH_AFTER_MERGE_THRESHOLD_SIZE_SETTING = + Setting.byteSizeSetting("index.flush_after_merge", new ByteSizeValue(512, ByteSizeUnit.MB), + new ByteSizeValue(0, ByteSizeUnit.BYTES), // always flush after merge + new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), // never flush after merge + Property.Dynamic, Property.IndexScope); /** * The maximum size of a translog generation. This is independent of the maximum size of * translog operations that have not been flushed. 
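For reference, the new index.flush_after_merge threshold defined above is dynamic and index-scoped, defaults to 512mb, and a value of 0 means flush after every merge. A minimal sketch of reading it through the Setting API; the 1gb override is illustrative only.

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.index.IndexSettings;

public class FlushAfterMergeSketch {
    public static void main(String[] args) {
        // default when the index does not override the setting: 512mb
        ByteSizeValue defaultThreshold =
            IndexSettings.INDEX_FLUSH_AFTER_MERGE_THRESHOLD_SIZE_SETTING.get(Settings.EMPTY);
        // an index-level override, e.g. as it would arrive via the update-settings API
        ByteSizeValue overridden = IndexSettings.INDEX_FLUSH_AFTER_MERGE_THRESHOLD_SIZE_SETTING
            .get(Settings.builder().put("index.flush_after_merge", "1gb").build());
        System.out.println(defaultThreshold + " -> " + overridden);
    }
}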
@@ -338,6 +346,7 @@ public final class IndexSettings { private volatile TimeValue translogRetentionAge; private volatile ByteSizeValue translogRetentionSize; private volatile ByteSizeValue generationThresholdSize; + private volatile ByteSizeValue flushAfterMergeThresholdSize; private final MergeSchedulerConfig mergeSchedulerConfig; private final MergePolicyConfig mergePolicyConfig; private final IndexSortConfig indexSortConfig; @@ -470,6 +479,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti refreshInterval = scopedSettings.get(INDEX_REFRESH_INTERVAL_SETTING); flushThresholdSize = scopedSettings.get(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING); generationThresholdSize = scopedSettings.get(INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING); + flushAfterMergeThresholdSize = scopedSettings.get(INDEX_FLUSH_AFTER_MERGE_THRESHOLD_SIZE_SETTING); mergeSchedulerConfig = new MergeSchedulerConfig(this); gcDeletesInMillis = scopedSettings.get(INDEX_GC_DELETES_SETTING).getMillis(); softDeleteEnabled = scopedSettings.get(INDEX_SOFT_DELETES_SETTING); @@ -530,6 +540,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(INDEX_WARMER_ENABLED_SETTING, this::setEnableWarmer); scopedSettings.addSettingsUpdateConsumer(INDEX_GC_DELETES_SETTING, this::setGCDeletes); scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING, this::setTranslogFlushThresholdSize); + scopedSettings.addSettingsUpdateConsumer(INDEX_FLUSH_AFTER_MERGE_THRESHOLD_SIZE_SETTING, this::setFlushAfterMergeThresholdSize); scopedSettings.addSettingsUpdateConsumer( INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING, this::setGenerationThresholdSize); @@ -555,6 +566,10 @@ private void setTranslogFlushThresholdSize(ByteSizeValue byteSizeValue) { this.flushThresholdSize = byteSizeValue; } + private void setFlushAfterMergeThresholdSize(ByteSizeValue byteSizeValue) { + this.flushAfterMergeThresholdSize = byteSizeValue; + } + private void setTranslogRetentionSize(ByteSizeValue byteSizeValue) { if (softDeleteEnabled && byteSizeValue.getBytes() >= 0) { // ignore the translog retention settings if soft-deletes enabled @@ -744,6 +759,11 @@ public TimeValue getRefreshInterval() { */ public ByteSizeValue getFlushThresholdSize() { return flushThresholdSize; } + /** + * Returns the merge threshold size when to forcefully flush the index and free resources. 
+ */ + public ByteSizeValue getFlushAfterMergeThresholdSize() { return flushAfterMergeThresholdSize; } + /** * Returns the transaction log retention size which controls how much of the translog is kept around to allow for ops based recoveries */ diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 1222b5c520301..8fe9547cd7335 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -102,6 +102,7 @@ import java.util.Arrays; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -173,6 +174,7 @@ public class InternalEngine extends Engine { private final AtomicBoolean trackTranslogLocation = new AtomicBoolean(false); private final KeyedLock noOpKeyedLock = new KeyedLock<>(); + private final AtomicBoolean shouldPeriodicallyFlushAfterBigMerge = new AtomicBoolean(false); @Nullable private final String historyUUID; @@ -1406,9 +1408,19 @@ private DeleteResult deleteInLucene(Delete delete, DeletionStrategy plan) throws } return new DeleteResult( plan.versionOfDeletion, delete.primaryTerm(), delete.seqNo(), plan.currentlyDeleted == false); - } catch (Exception ex) { + } catch (final Exception ex) { + /* + * Document level failures when deleting are unexpected, we likely hit something fatal such as the Lucene index being corrupt, + * or the Lucene document limit. We have already issued a sequence number here so this is fatal, fail the engine. + */ if (ex instanceof AlreadyClosedException == false && indexWriter.getTragicException() == null) { - throw new AssertionError("delete operation should never fail at document level", ex); + final String reason = String.format( + Locale.ROOT, + "delete id[%s] origin [%s] seq#[%d] failed at the document level", + delete.id(), + delete.origin(), + delete.seqNo()); + failEngine(reason, ex); } throw ex; } @@ -1510,9 +1522,14 @@ private NoOpResult innerNoOp(final NoOp noOp) throws IOException { : "Noop tombstone document but _tombstone field is not set [" + doc + " ]"; doc.add(softDeletesField); indexWriter.addDocument(doc); - } catch (Exception ex) { + } catch (final Exception ex) { + /* + * Document level failures when adding a no-op are unexpected, we likely hit something fatal such as the Lucene + * index being corrupt, or the Lucene document limit. We have already issued a sequence number here so this is + * fatal, fail the engine. 
+ */ if (ex instanceof AlreadyClosedException == false && indexWriter.getTragicException() == null) { - throw new AssertionError("noop operation should never fail at document level", ex); + failEngine("no-op origin[" + noOp.origin() + "] seq#[" + noOp.seqNo() + "] failed at document level", ex); } throw ex; } @@ -1678,6 +1695,9 @@ final boolean tryRenewSyncCommit() { @Override public boolean shouldPeriodicallyFlush() { ensureOpen(); + if (shouldPeriodicallyFlushAfterBigMerge.get()) { + return true; + } final long translogGenerationOfLastCommit = Long.parseLong(lastCommittedSegmentInfos.userData.get(Translog.TRANSLOG_GENERATION_KEY)); final long flushThreshold = config().getIndexSettings().getFlushThresholdSize().getBytes(); @@ -2327,7 +2347,7 @@ public void onFailure(Exception e) { } @Override - protected void doRun() throws Exception { + protected void doRun() { // if we have no pending merges and we are supposed to flush once merges have finished // we try to renew a sync commit which is the case when we are having a big merge after we // are inactive. If that didn't work we go and do a real flush which is ok since it only doesn't work @@ -2339,7 +2359,11 @@ protected void doRun() throws Exception { } } }); - + } else if (merge.getTotalBytesSize() >= engineConfig.getIndexSettings().getFlushAfterMergeThresholdSize().getBytes()) { + // we hit a significant merge which would allow us to free up memory if we'd commit it hence on the next change + // we should execute a flush on the next operation if that's a flush after inactive or indexing a document. + // we could fork a thread and do it right away but we try to minimize forking and piggyback on outside events. + shouldPeriodicallyFlushAfterBigMerge.set(true); } } @@ -2407,7 +2431,7 @@ protected void commitIndexWriter(final IndexWriter writer, final Translog transl logger.trace("committing writer with commit data [{}]", commitData); return commitData.entrySet().iterator(); }); - + shouldPeriodicallyFlushAfterBigMerge.set(false); writer.commit(); } catch (final Exception ex) { try { @@ -2844,4 +2868,5 @@ private void restoreVersionMapAndCheckpointTracker(DirectoryReader directoryRead // remove live entries in the version map refresh("restore_version_map_and_checkpoint_tracker", SearcherScope.INTERNAL, true); } + } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java index 5732a872c8f58..529bdb84b12ac 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.RangeType; import org.elasticsearch.indices.breaker.CircuitBreakerService; import java.util.Set; @@ -71,6 +72,7 @@ public static class Builder implements IndexFieldData.Builder { private NumericType numericType; private Function> scriptFunction = AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION; + private RangeType rangeType; public Builder numericType(NumericType type) { this.numericType = type; @@ -82,12 +84,17 @@ public Builder scriptFunction(Function> s return this; } + public Builder setRangeType(RangeType rangeType) { + this.rangeType = rangeType; + return this; + } + @Override 
public IndexFieldData build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache, CircuitBreakerService breakerService, MapperService mapperService) { // Ignore Circuit Breaker final String fieldName = fieldType.name(); - if (BINARY_INDEX_FIELD_NAMES.contains(fieldName)) { + if (BINARY_INDEX_FIELD_NAMES.contains(fieldName) || rangeType != null) { assert numericType == null; return new BinaryDVIndexFieldData(indexSettings.getIndex(), fieldName); } else if (numericType != null) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java b/server/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java index b58c6deba8cd3..42157688dd7ba 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java @@ -19,12 +19,17 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.document.InetAddressPoint; +import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.common.TriFunction; import java.io.IOException; +import java.net.InetAddress; import java.util.ArrayList; +import java.util.Arrays; import java.util.Comparator; import java.util.List; import java.util.Set; @@ -33,6 +38,32 @@ enum BinaryRangeUtil { ; + static BytesRef encodeIPRanges(Set ranges) throws IOException { + final byte[] encoded = new byte[5 + (16 * 2) * ranges.size()]; + ByteArrayDataOutput out = new ByteArrayDataOutput(encoded); + out.writeVInt(ranges.size()); + for (RangeFieldMapper.Range range : ranges) { + InetAddress fromValue = (InetAddress) range.from; + byte[] encodedFromValue = InetAddressPoint.encode(fromValue); + out.writeBytes(encodedFromValue, 0, encodedFromValue.length); + + InetAddress toValue = (InetAddress) range.to; + byte[] encodedToValue = InetAddressPoint.encode(toValue); + out.writeBytes(encodedToValue, 0, encodedToValue.length); + } + return new BytesRef(encoded, 0, out.getPosition()); + } + + static List decodeIPRanges(BytesRef encodedRanges) { + return decodeRanges(encodedRanges, RangeType.IP, BinaryRangeUtil::decodeIP); + } + + private static InetAddress decodeIP(byte[] bytes, int offset, int length) { + // offset + length because copyOfRange wants a from and a to, not an offset & length + byte[] slice = Arrays.copyOfRange(bytes, offset, offset + length); + return InetAddressPoint.decode(slice); + } + static BytesRef encodeLongRanges(Set ranges) throws IOException { List sortedRanges = new ArrayList<>(ranges); Comparator fromComparator = Comparator.comparingLong(range -> ((Number) range.from).longValue()); @@ -51,6 +82,11 @@ static BytesRef encodeLongRanges(Set ranges) throws IOEx return new BytesRef(encoded, 0, out.getPosition()); } + static List decodeLongRanges(BytesRef encodedRanges) { + return decodeRanges(encodedRanges, RangeType.LONG, + BinaryRangeUtil::decodeLong); + } + static BytesRef encodeDoubleRanges(Set ranges) throws IOException { List sortedRanges = new ArrayList<>(ranges); Comparator fromComparator = Comparator.comparingDouble(range -> ((Number) range.from).doubleValue()); @@ -69,6 +105,43 @@ static BytesRef encodeDoubleRanges(Set ranges) throws IO return new BytesRef(encoded, 0, out.getPosition()); } + static List decodeDoubleRanges(BytesRef encodedRanges) { + return decodeRanges(encodedRanges, RangeType.DOUBLE, + BinaryRangeUtil::decodeDouble); + } + + static 
List decodeFloatRanges(BytesRef encodedRanges) { + return decodeRanges(encodedRanges, RangeType.FLOAT, + BinaryRangeUtil::decodeFloat); + } + + static List decodeRanges(BytesRef encodedRanges, RangeType rangeType, + TriFunction decodeBytes) { + + RangeType.LengthType lengthType = rangeType.lengthType; + ByteArrayDataInput in = new ByteArrayDataInput(); + in.reset(encodedRanges.bytes, encodedRanges.offset, encodedRanges.length); + int numRanges = in.readVInt(); + + List ranges = new ArrayList<>(numRanges); + + final byte[] bytes = encodedRanges.bytes; + int offset = in.getPosition(); + for (int i = 0; i < numRanges; i++) { + int length = lengthType.readLength(bytes, offset); + Object from = decodeBytes.apply(bytes, offset, length); + offset += length; + + length = lengthType.readLength(bytes, offset); + Object to = decodeBytes.apply(bytes, offset, length); + offset += length; + // TODO: Support for exclusive ranges, pending resolution of #40601 + RangeFieldMapper.Range decodedRange = new RangeFieldMapper.Range(rangeType, from, to, true, true); + ranges.add(decodedRange); + } + return ranges; + } + static BytesRef encodeFloatRanges(Set ranges) throws IOException { List sortedRanges = new ArrayList<>(ranges); Comparator fromComparator = Comparator.comparingDouble(range -> ((Number) range.from).floatValue()); @@ -93,12 +166,20 @@ static byte[] encodeDouble(double number) { return encoded; } + static double decodeDouble(byte[] bytes, int offset, int length){ + return NumericUtils.sortableLongToDouble(NumericUtils.sortableBytesToLong(bytes, offset)); + } + static byte[] encodeFloat(float number) { byte[] encoded = new byte[4]; NumericUtils.intToSortableBytes(NumericUtils.floatToSortableInt(number), encoded, 0); return encoded; } + static float decodeFloat(byte[] bytes, int offset, int length) { + return NumericUtils.sortableIntToFloat(NumericUtils.sortableBytesToInt(bytes, offset)); + } + /** * Encodes the specified number of type long in a variable-length byte format. * The byte format preserves ordering, which means the returned byte array can be used for comparing as is. 
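The generic decodeRanges above walks the encoded blob with LengthType#readLength to find each endpoint. For the VARIABLE case used by long ranges, the first byte is a token whose high bit carries the sign and whose next four bits carry the count of additional bytes, complemented for negative values. A hand-checked sketch of that token parse, mirroring the logic shown earlier and not part of the change itself:

public class VariableLengthTokenSketch {
    // mirrors LengthType.VARIABLE#readLength: total length = 1 token byte + additional bytes
    static int readLength(byte[] bytes, int offset) {
        int token = Byte.toUnsignedInt(bytes[offset]);
        int additional = (token >>> 3) & 0x0f;
        if ((token & 0x80) == 0) {
            // high bit clear -> negative value, the stored count is complemented
            additional = 0x0f - additional;
        }
        return 1 + additional;
    }

    public static void main(String[] args) {
        // 0x88 = 1000_1000: non-negative, one additional byte -> length 2
        System.out.println(readLength(new byte[] {(byte) 0x88, 0x00}, 0));
        // 0x70 = 0111_0000: negative, stored count 14 -> 15 - 14 = 1 additional byte -> length 2
        System.out.println(readLength(new byte[] {(byte) 0x70, 0x00}, 0));
    }
}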
@@ -114,6 +195,23 @@ static byte[] encodeLong(long number) { return encode(number, sign); } + static long decodeLong(byte[] bytes, int offset, int length) { + boolean isNegative = (bytes[offset] & 128) == 0; + // Start by masking off the last three bits of the first byte - that's the start of our number + long decoded; + if (isNegative) { + decoded = -8 | bytes[offset]; + } else { + decoded = bytes[offset] & 7; + } + for (int i = 1; i < length; i++) { + decoded <<= 8; + decoded += Byte.toUnsignedInt(bytes[offset + i]); + } + + return decoded; + } + private static byte[] encode(long l, int sign) { assert l >= 0; @@ -158,4 +256,5 @@ private static byte[] encode(long l, int sign) { } return encoded; } + } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index c29fd4c112c19..57bc296b081fe 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -328,7 +328,7 @@ protected DateMathParser dateMathParser() { return dateMathParser; } - long parse(String value) { + public long parse(String value) { return resolution.convert(DateFormatters.from(dateTimeFormatter().parse(value)).toInstant()); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java index dfa3703050651..9256319df3744 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java @@ -19,30 +19,16 @@ package org.elasticsearch.index.mapper; -import org.apache.lucene.document.DoubleRange; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.FloatRange; -import org.apache.lucene.document.InetAddressPoint; -import org.apache.lucene.document.InetAddressRange; -import org.apache.lucene.document.IntRange; -import org.apache.lucene.document.LongRange; -import org.apache.lucene.document.StoredField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.BinaryDocValuesRangeQuery; -import org.apache.lucene.queries.BinaryDocValuesRangeQuery.QueryType; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DocValuesFieldExistsQuery; -import org.apache.lucene.search.IndexOrDocValuesQuery; -import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; -import org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Explicit; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.geo.ShapeRelation; @@ -56,16 +42,14 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; import 
java.net.InetAddress; import java.net.UnknownHostException; import java.time.ZoneId; -import java.time.ZoneOffset; -import java.util.ArrayList; -import java.util.Arrays; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -73,7 +57,6 @@ import java.util.Map; import java.util.Objects; import java.util.Set; -import java.util.function.BiFunction; import static org.elasticsearch.index.query.RangeQueryBuilder.GTE_FIELD; import static org.elasticsearch.index.query.RangeQueryBuilder.GT_FIELD; @@ -230,6 +213,8 @@ public static final class RangeFieldType extends MappedFieldType { } } + public RangeType rangeType() { return rangeType; } + @Override public MappedFieldType clone() { return new RangeFieldType(this); @@ -250,6 +235,12 @@ public int hashCode() { return Objects.hash(super.hashCode(), rangeType, dateTimeFormatter); } + @Override + public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { + failIfNoDocValues(); + return new DocValuesIndexFieldData.Builder().setRangeType(rangeType); + } + @Override public String typeName() { return rangeType.name; @@ -439,557 +430,6 @@ private static Range parseIpRangeFromCidr(final XContentParser parser) throws IO } } - /** Enum defining the type of range */ - public enum RangeType { - IP("ip_range") { - @Override - public Field getRangeField(String name, Range r) { - return new InetAddressRange(name, (InetAddress)r.from, (InetAddress)r.to); - } - @Override - public InetAddress parseFrom(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) - throws IOException { - InetAddress address = InetAddresses.forString(parser.text()); - return included ? address : nextUp(address); - } - @Override - public InetAddress parseTo(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) - throws IOException { - InetAddress address = InetAddresses.forString(parser.text()); - return included ? 
address : nextDown(address); - } - @Override - public InetAddress parse(Object value, boolean coerce) { - if (value instanceof InetAddress) { - return (InetAddress) value; - } else { - if (value instanceof BytesRef) { - value = ((BytesRef) value).utf8ToString(); - } - return InetAddresses.forString(value.toString()); - } - } - @Override - public InetAddress minValue() { - return InetAddressPoint.MIN_VALUE; - } - @Override - public InetAddress maxValue() { - return InetAddressPoint.MAX_VALUE; - } - @Override - public InetAddress nextUp(Object value) { - return InetAddressPoint.nextUp((InetAddress)value); - } - @Override - public InetAddress nextDown(Object value) { - return InetAddressPoint.nextDown((InetAddress)value); - } - - @Override - public BytesRef encodeRanges(Set ranges) throws IOException { - final byte[] encoded = new byte[5 + (16 * 2) * ranges.size()]; - ByteArrayDataOutput out = new ByteArrayDataOutput(encoded); - out.writeVInt(ranges.size()); - for (Range range : ranges) { - InetAddress fromValue = (InetAddress) range.from; - byte[] encodedFromValue = InetAddressPoint.encode(fromValue); - out.writeBytes(encodedFromValue, 0, encodedFromValue.length); - - InetAddress toValue = (InetAddress) range.to; - byte[] encodedToValue = InetAddressPoint.encode(toValue); - out.writeBytes(encodedToValue, 0, encodedToValue.length); - } - return new BytesRef(encoded, 0, out.getPosition()); - } - - @Override - public Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, boolean includeFrom, boolean includeTo) { - if (includeFrom == false) { - from = nextUp(from); - } - - if (includeTo == false) { - to = nextDown(to); - } - - byte[] encodedFrom = InetAddressPoint.encode((InetAddress) from); - byte[] encodedTo = InetAddressPoint.encode((InetAddress) to); - return new BinaryDocValuesRangeQuery(field, queryType, BinaryDocValuesRangeQuery.LengthType.FIXED_16, - new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to); - } - - @Override - public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { - return createQuery(field, from, to, includeFrom, includeTo, - (f, t) -> InetAddressRange.newWithinQuery(field, f, t)); - } - @Override - public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { - return createQuery(field, from, to, includeFrom, includeTo, - (f, t) -> InetAddressRange.newContainsQuery(field, f, t )); - } - @Override - public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { - return createQuery(field, from, to, includeFrom, includeTo, - (f, t) -> InetAddressRange.newIntersectsQuery(field, f ,t )); - } - - private Query createQuery(String field, Object lower, Object upper, boolean includeLower, boolean includeUpper, - BiFunction querySupplier) { - byte[] lowerBytes = InetAddressPoint.encode((InetAddress) lower); - byte[] upperBytes = InetAddressPoint.encode((InetAddress) upper); - if (Arrays.compareUnsigned(lowerBytes, 0, lowerBytes.length, upperBytes, 0, upperBytes.length) > 0) { - throw new IllegalArgumentException( - "Range query `from` value (" + lower + ") is greater than `to` value (" + upper + ")"); - } - InetAddress correctedFrom = includeLower ? (InetAddress) lower : nextUp(lower); - InetAddress correctedTo = includeUpper ? 
(InetAddress) upper : nextDown(upper);; - lowerBytes = InetAddressPoint.encode(correctedFrom); - upperBytes = InetAddressPoint.encode(correctedTo); - if (Arrays.compareUnsigned(lowerBytes, 0, lowerBytes.length, upperBytes, 0, upperBytes.length) > 0) { - return new MatchNoDocsQuery("float range didn't intersect anything"); - } else { - return querySupplier.apply(correctedFrom, correctedTo); - } - } - }, - DATE("date_range", NumberType.LONG) { - @Override - public Field getRangeField(String name, Range r) { - return new LongRange(name, new long[] {((Number)r.from).longValue()}, new long[] {((Number)r.to).longValue()}); - } - private Number parse(DateMathParser dateMathParser, String dateStr) { - return dateMathParser.parse(dateStr, () -> {throw new IllegalArgumentException("now is not used at indexing time");}) - .toEpochMilli(); - } - @Override - public Number parseFrom(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) - throws IOException { - Number value = parse(fieldType.dateMathParser, parser.text()); - return included ? value : nextUp(value); - } - @Override - public Number parseTo(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) - throws IOException{ - Number value = parse(fieldType.dateMathParser, parser.text()); - return included ? value : nextDown(value); - } - @Override - public Long minValue() { - return Long.MIN_VALUE; - } - @Override - public Long maxValue() { - return Long.MAX_VALUE; - } - @Override - public Long nextUp(Object value) { - return (long) LONG.nextUp(value); - } - @Override - public Long nextDown(Object value) { - return (long) LONG.nextDown(value); - } - - @Override - public BytesRef encodeRanges(Set ranges) throws IOException { - return LONG.encodeRanges(ranges); - } - - @Override - public Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, boolean includeFrom, boolean includeTo) { - return LONG.dvRangeQuery(field, queryType, from, to, includeFrom, includeTo); - } - - @Override - public Query rangeQuery(String field, boolean hasDocValues, Object lowerTerm, Object upperTerm, boolean includeLower, - boolean includeUpper, ShapeRelation relation, @Nullable ZoneId timeZone, - @Nullable DateMathParser parser, QueryShardContext context) { - ZoneId zone = (timeZone == null) ? ZoneOffset.UTC : timeZone; - - DateMathParser dateMathParser = (parser == null) ? - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser() : parser; - Long low = lowerTerm == null ? Long.MIN_VALUE : - dateMathParser.parse(lowerTerm instanceof BytesRef ? ((BytesRef) lowerTerm).utf8ToString() : lowerTerm.toString(), - context::nowInMillis, false, zone).toEpochMilli(); - Long high = upperTerm == null ? Long.MAX_VALUE : - dateMathParser.parse(upperTerm instanceof BytesRef ? 
((BytesRef) upperTerm).utf8ToString() : upperTerm.toString(), - context::nowInMillis, false, zone).toEpochMilli(); - - return super.rangeQuery(field, hasDocValues, low, high, includeLower, includeUpper, relation, zone, - dateMathParser, context); - } - @Override - public Query withinQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) { - return LONG.withinQuery(field, from, to, includeLower, includeUpper); - } - @Override - public Query containsQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) { - return LONG.containsQuery(field, from, to, includeLower, includeUpper); - } - @Override - public Query intersectsQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) { - return LONG.intersectsQuery(field, from, to, includeLower, includeUpper); - } - }, - // todo support half_float - FLOAT("float_range", NumberType.FLOAT) { - @Override - public Float minValue() { - return Float.NEGATIVE_INFINITY; - } - @Override - public Float maxValue() { - return Float.POSITIVE_INFINITY; - } - @Override - public Float nextUp(Object value) { - return Math.nextUp(((Number)value).floatValue()); - } - @Override - public Float nextDown(Object value) { - return Math.nextDown(((Number)value).floatValue()); - } - - @Override - public BytesRef encodeRanges(Set ranges) throws IOException { - return BinaryRangeUtil.encodeFloatRanges(ranges); - } - - @Override - public Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, boolean includeFrom, boolean includeTo) { - if (includeFrom == false) { - from = nextUp(from); - } - - if (includeTo == false) { - to = nextDown(to); - } - - byte[] encodedFrom = BinaryRangeUtil.encodeFloat((Float) from); - byte[] encodedTo = BinaryRangeUtil.encodeFloat((Float) to); - return new BinaryDocValuesRangeQuery(field, queryType, BinaryDocValuesRangeQuery.LengthType.FIXED_4, - new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to); - } - - @Override - public Field getRangeField(String name, Range r) { - return new FloatRange(name, new float[] {((Number)r.from).floatValue()}, new float[] {((Number)r.to).floatValue()}); - } - @Override - public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { - return createQuery(field, (Float) from, (Float) to, includeFrom, includeTo, - (f, t) -> FloatRange.newWithinQuery(field, new float[] { f }, new float[] { t }), RangeType.FLOAT); - } - @Override - public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { - return createQuery(field, (Float) from, (Float) to, includeFrom, includeTo, - (f, t) -> FloatRange.newContainsQuery(field, new float[] { f }, new float[] { t }), RangeType.FLOAT); - } - @Override - public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { - return createQuery(field, (Float) from, (Float) to, includeFrom, includeTo, - (f, t) -> FloatRange.newIntersectsQuery(field, new float[] { f }, new float[] { t }), RangeType.FLOAT); - } - }, - DOUBLE("double_range", NumberType.DOUBLE) { - @Override - public Double minValue() { - return Double.NEGATIVE_INFINITY; - } - @Override - public Double maxValue() { - return Double.POSITIVE_INFINITY; - } - @Override - public Double nextUp(Object value) { - return Math.nextUp(((Number)value).doubleValue()); - } - @Override - public Double nextDown(Object value) { - return Math.nextDown(((Number)value).doubleValue()); - } - - 
@Override - public BytesRef encodeRanges(Set ranges) throws IOException { - return BinaryRangeUtil.encodeDoubleRanges(ranges); - } - - @Override - public Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, boolean includeFrom, boolean includeTo) { - if (includeFrom == false) { - from = nextUp(from); - } - - if (includeTo == false) { - to = nextDown(to); - } - - byte[] encodedFrom = BinaryRangeUtil.encodeDouble((Double) from); - byte[] encodedTo = BinaryRangeUtil.encodeDouble((Double) to); - return new BinaryDocValuesRangeQuery(field, queryType, BinaryDocValuesRangeQuery.LengthType.FIXED_8, - new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to); - } - - @Override - public Field getRangeField(String name, Range r) { - return new DoubleRange(name, new double[] {((Number)r.from).doubleValue()}, new double[] {((Number)r.to).doubleValue()}); - } - @Override - public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { - return createQuery(field, (Double) from, (Double) to, includeFrom, includeTo, - (f, t) -> DoubleRange.newWithinQuery(field, new double[] { f }, new double[] { t }), RangeType.DOUBLE); - } - @Override - public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { - return createQuery(field, (Double) from, (Double) to, includeFrom, includeTo, - (f, t) -> DoubleRange.newContainsQuery(field, new double[] { f }, new double[] { t }), RangeType.DOUBLE); - } - @Override - public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { - return createQuery(field, (Double) from, (Double) to, includeFrom, includeTo, - (f, t) -> DoubleRange.newIntersectsQuery(field, new double[] { f }, new double[] { t }), RangeType.DOUBLE); - } - - }, - // todo add BYTE support - // todo add SHORT support - INTEGER("integer_range", NumberType.INTEGER) { - @Override - public Integer minValue() { - return Integer.MIN_VALUE; - } - @Override - public Integer maxValue() { - return Integer.MAX_VALUE; - } - @Override - public Integer nextUp(Object value) { - return ((Number)value).intValue() + 1; - } - @Override - public Integer nextDown(Object value) { - return ((Number)value).intValue() - 1; - } - - @Override - public BytesRef encodeRanges(Set ranges) throws IOException { - return LONG.encodeRanges(ranges); - } - - @Override - public Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, boolean includeFrom, boolean includeTo) { - return LONG.dvRangeQuery(field, queryType, from, to, includeFrom, includeTo); - } - - @Override - public Field getRangeField(String name, Range r) { - return new IntRange(name, new int[] {((Number)r.from).intValue()}, new int[] {((Number)r.to).intValue()}); - } - @Override - public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { - return createQuery(field, (Integer) from, (Integer) to, includeFrom, includeTo, - (f, t) -> IntRange.newWithinQuery(field, new int[] { f }, new int[] { t }), RangeType.INTEGER); - } - @Override - public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { - return createQuery(field, (Integer) from, (Integer) to, includeFrom, includeTo, - (f, t) -> IntRange.newContainsQuery(field, new int[] { f }, new int[] { t }), RangeType.INTEGER); - } - @Override - public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { - return 
createQuery(field, (Integer) from, (Integer) to, includeFrom, includeTo, - (f, t) -> IntRange.newIntersectsQuery(field, new int[] { f }, new int[] { t }), RangeType.INTEGER); - } - }, - LONG("long_range", NumberType.LONG) { - @Override - public Long minValue() { - return Long.MIN_VALUE; - } - @Override - public Long maxValue() { - return Long.MAX_VALUE; - } - @Override - public Long nextUp(Object value) { - return ((Number)value).longValue() + 1; - } - @Override - public Long nextDown(Object value) { - return ((Number)value).longValue() - 1; - } - - @Override - public BytesRef encodeRanges(Set ranges) throws IOException { - return BinaryRangeUtil.encodeLongRanges(ranges); - } - - @Override - public Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, boolean includeFrom, boolean includeTo) { - if (includeFrom == false) { - from = nextUp(from); - } - - if (includeTo == false) { - to = nextDown(to); - } - - byte[] encodedFrom = BinaryRangeUtil.encodeLong(((Number) from).longValue()); - byte[] encodedTo = BinaryRangeUtil.encodeLong(((Number) to).longValue()); - return new BinaryDocValuesRangeQuery(field, queryType, BinaryDocValuesRangeQuery.LengthType.VARIABLE, - new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to); - } - - @Override - public Field getRangeField(String name, Range r) { - return new LongRange(name, new long[] {((Number)r.from).longValue()}, - new long[] {((Number)r.to).longValue()}); - } - @Override - public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { - return createQuery(field, (Long) from, (Long) to, includeFrom, includeTo, - (f, t) -> LongRange.newWithinQuery(field, new long[] { f }, new long[] { t }), RangeType.LONG); - } - @Override - public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { - return createQuery(field, (Long) from, (Long) to, includeFrom, includeTo, - (f, t) -> LongRange.newContainsQuery(field, new long[] { f }, new long[] { t }), RangeType.LONG); - } - @Override - public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { - return createQuery(field, (Long) from, (Long) to, includeFrom, includeTo, - (f, t) -> LongRange.newIntersectsQuery(field, new long[] { f }, new long[] { t }), RangeType.LONG); - } - }; - - RangeType(String name) { - this.name = name; - this.numberType = null; - } - - RangeType(String name, NumberType type) { - this.name = name; - this.numberType = type; - } - - /** Get the associated type name. */ - public final String typeName() { - return name; - } - - /** - * Internal helper to create the actual {@link Query} using the provided supplier function. Before creating the query we check if - * the intervals min > max, in which case an {@link IllegalArgumentException} is raised. The method adapts the interval bounds - * based on whether the edges should be included or excluded. In case where after this correction the interval would be empty - * because min > max, we simply return a {@link MatchNoDocsQuery}. - * This helper handles all {@link Number} cases and dates, the IP range type uses its own logic. 
- */ - private static > Query createQuery(String field, T from, T to, boolean includeFrom, boolean includeTo, - BiFunction querySupplier, RangeType rangeType) { - if (from.compareTo(to) > 0) { - // wrong argument order, this is an error the user should fix - throw new IllegalArgumentException("Range query `from` value (" + from + ") is greater than `to` value (" + to + ")"); - } - - @SuppressWarnings("unchecked") - T correctedFrom = includeFrom ? from : (T) rangeType.nextUp(from); - @SuppressWarnings("unchecked") - T correctedTo = includeTo ? to : (T) rangeType.nextDown(to); - if (correctedFrom.compareTo(correctedTo) > 0) { - return new MatchNoDocsQuery("range didn't intersect anything"); - } else { - return querySupplier.apply(correctedFrom, correctedTo); - } - } - - public abstract Field getRangeField(String name, Range range); - public List createFields(ParseContext context, String name, Range range, boolean indexed, - boolean docValued, boolean stored) { - assert range != null : "range cannot be null when creating fields"; - List fields = new ArrayList<>(); - if (indexed) { - fields.add(getRangeField(name, range)); - } - if (docValued) { - BinaryRangesDocValuesField field = (BinaryRangesDocValuesField) context.doc().getByKey(name); - if (field == null) { - field = new BinaryRangesDocValuesField(name, range, this); - context.doc().addWithKey(name, field); - } else { - field.add(range); - } - } - if (stored) { - fields.add(new StoredField(name, range.toString())); - } - return fields; - } - /** parses from value. rounds according to included flag */ - public Object parseFrom(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) throws IOException { - Number value = numberType.parse(parser, coerce); - return included ? value : (Number)nextUp(value); - } - /** parses to value. rounds according to included flag */ - public Object parseTo(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) throws IOException { - Number value = numberType.parse(parser, coerce); - return included ? value : (Number)nextDown(value); - } - - public abstract Object minValue(); - public abstract Object maxValue(); - public abstract Object nextUp(Object value); - public abstract Object nextDown(Object value); - public abstract Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo); - public abstract Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo); - public abstract Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo); - public Object parse(Object value, boolean coerce) { - return numberType.parse(value, coerce); - } - public Query rangeQuery(String field, boolean hasDocValues, Object from, Object to, boolean includeFrom, boolean includeTo, - ShapeRelation relation, @Nullable ZoneId timeZone, @Nullable DateMathParser dateMathParser, - QueryShardContext context) { - Object lower = from == null ? minValue() : parse(from, false); - Object upper = to == null ? 
maxValue() : parse(to, false); - Query indexQuery; - if (relation == ShapeRelation.WITHIN) { - indexQuery = withinQuery(field, lower, upper, includeFrom, includeTo); - } else if (relation == ShapeRelation.CONTAINS) { - indexQuery = containsQuery(field, lower, upper, includeFrom, includeTo); - } else { - indexQuery = intersectsQuery(field, lower, upper, includeFrom, includeTo); - } - if (hasDocValues) { - final QueryType queryType; - if (relation == ShapeRelation.WITHIN) { - queryType = QueryType.WITHIN; - } else if (relation == ShapeRelation.CONTAINS) { - queryType = QueryType.CONTAINS; - } else { - queryType = QueryType.INTERSECTS; - } - Query dvQuery = dvRangeQuery(field, queryType, lower, upper, includeFrom, includeTo); - return new IndexOrDocValuesQuery(indexQuery, dvQuery); - } else { - return indexQuery; - } - } - - // No need to take into account Range#includeFrom or Range#includeTo, because from and to have already been - // rounded up via parseFrom and parseTo methods. - public abstract BytesRef encodeRanges(Set ranges) throws IOException; - - public abstract Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, - boolean includeFrom, boolean includeTo); - - public final String name; - private final NumberType numberType; - - - - } - /** Class defining a range */ public static class Range { RangeType type; @@ -1006,6 +446,27 @@ public Range(RangeType type, Object from, Object to, boolean includeFrom, boolea this.includeTo = includeTo; } + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Range range = (Range) o; + return includeFrom == range.includeFrom && + includeTo == range.includeTo && + type == range.type && + from.equals(range.from) && + to.equals(range.to); + } + + @Override + public int hashCode() { + return Objects.hash(type, from, to, includeFrom, includeTo); + } + @Override public String toString() { StringBuilder sb = new StringBuilder(); @@ -1018,6 +479,14 @@ public String toString() { sb.append(includeTo ? ']' : ')'); return sb.toString(); } + + public Object getFrom() { + return from; + } + + public Object getTo() { + return to; + } } static class BinaryRangesDocValuesField extends CustomDocValuesField { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RangeType.java b/server/src/main/java/org/elasticsearch/index/mapper/RangeType.java new file mode 100644 index 0000000000000..256325eba5974 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/RangeType.java @@ -0,0 +1,715 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANYDa + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.document.DoubleRange; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FloatRange; +import org.apache.lucene.document.InetAddressPoint; +import org.apache.lucene.document.InetAddressRange; +import org.apache.lucene.document.IntRange; +import org.apache.lucene.document.LongRange; +import org.apache.lucene.document.StoredField; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.queries.BinaryDocValuesRangeQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.geo.ShapeRelation; +import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.time.DateMathParser; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; + +import java.io.IOException; +import java.net.InetAddress; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Set; +import java.util.function.BiFunction; + +/** Enum defining the type of range */ +public enum RangeType { + IP("ip_range", LengthType.FIXED_16) { + @Override + public Field getRangeField(String name, RangeFieldMapper.Range r) { + return new InetAddressRange(name, (InetAddress)r.from, (InetAddress)r.to); + } + @Override + public InetAddress parseFrom(RangeFieldMapper.RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) + throws IOException { + InetAddress address = InetAddresses.forString(parser.text()); + return included ? address : nextUp(address); + } + @Override + public InetAddress parseTo(RangeFieldMapper.RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) + throws IOException { + InetAddress address = InetAddresses.forString(parser.text()); + return included ? address : nextDown(address); + } + @Override + public InetAddress parse(Object value, boolean coerce) { + if (value instanceof InetAddress) { + return (InetAddress) value; + } else { + if (value instanceof BytesRef) { + value = ((BytesRef) value).utf8ToString(); + } + return InetAddresses.forString(value.toString()); + } + } + @Override + public InetAddress minValue() { + return InetAddressPoint.MIN_VALUE; + } + @Override + public InetAddress maxValue() { + return InetAddressPoint.MAX_VALUE; + } + @Override + public InetAddress nextUp(Object value) { + return InetAddressPoint.nextUp((InetAddress)value); + } + @Override + public InetAddress nextDown(Object value) { + return InetAddressPoint.nextDown((InetAddress)value); + } + + @Override + public BytesRef encodeRanges(Set ranges) throws IOException { + return BinaryRangeUtil.encodeIPRanges(ranges); + } + + @Override + public List decodeRanges(BytesRef bytes) { + // TODO: Implement this. 
+ throw new UnsupportedOperationException(); + } + + @Override + public Double doubleValue (Object endpointValue) { + throw new UnsupportedOperationException("IP ranges cannot be safely converted to doubles"); + } + + @Override + public Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to, boolean includeFrom, + boolean includeTo) { + if (includeFrom == false) { + from = nextUp(from); + } + + if (includeTo == false) { + to = nextDown(to); + } + + byte[] encodedFrom = InetAddressPoint.encode((InetAddress) from); + byte[] encodedTo = InetAddressPoint.encode((InetAddress) to); + return new BinaryDocValuesRangeQuery(field, queryType, LengthType.FIXED_16, + new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to); + } + + @Override + public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { + return createQuery(field, from, to, includeFrom, includeTo, + (f, t) -> InetAddressRange.newWithinQuery(field, f, t)); + } + @Override + public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { + return createQuery(field, from, to, includeFrom, includeTo, + (f, t) -> InetAddressRange.newContainsQuery(field, f, t )); + } + @Override + public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { + return createQuery(field, from, to, includeFrom, includeTo, + (f, t) -> InetAddressRange.newIntersectsQuery(field, f ,t )); + } + + private Query createQuery(String field, Object lower, Object upper, boolean includeLower, boolean includeUpper, + BiFunction querySupplier) { + byte[] lowerBytes = InetAddressPoint.encode((InetAddress) lower); + byte[] upperBytes = InetAddressPoint.encode((InetAddress) upper); + if (Arrays.compareUnsigned(lowerBytes, 0, lowerBytes.length, upperBytes, 0, upperBytes.length) > 0) { + throw new IllegalArgumentException( + "Range query `from` value (" + lower + ") is greater than `to` value (" + upper + ")"); + } + InetAddress correctedFrom = includeLower ? (InetAddress) lower : nextUp(lower); + InetAddress correctedTo = includeUpper ? (InetAddress) upper : nextDown(upper);; + lowerBytes = InetAddressPoint.encode(correctedFrom); + upperBytes = InetAddressPoint.encode(correctedTo); + if (Arrays.compareUnsigned(lowerBytes, 0, lowerBytes.length, upperBytes, 0, upperBytes.length) > 0) { + return new MatchNoDocsQuery("float range didn't intersect anything"); + } else { + return querySupplier.apply(correctedFrom, correctedTo); + } + } + }, + DATE("date_range", LengthType.VARIABLE, NumberFieldMapper.NumberType.LONG) { + @Override + public Field getRangeField(String name, RangeFieldMapper.Range r) { + return new LongRange(name, new long[] {((Number)r.from).longValue()}, new long[] {((Number)r.to).longValue()}); + } + private Number parse(DateMathParser dateMathParser, String dateStr) { + return dateMathParser.parse(dateStr, () -> {throw new IllegalArgumentException("now is not used at indexing time");}) + .toEpochMilli(); + } + @Override + public Number parseFrom(RangeFieldMapper.RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) + throws IOException { + Number value = parse(fieldType.dateMathParser, parser.text()); + return included ? 
value : nextUp(value); + } + @Override + public Number parseTo(RangeFieldMapper.RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) + throws IOException{ + Number value = parse(fieldType.dateMathParser, parser.text()); + return included ? value : nextDown(value); + } + @Override + public Long minValue() { + return Long.MIN_VALUE; + } + @Override + public Long maxValue() { + return Long.MAX_VALUE; + } + @Override + public Long nextUp(Object value) { + return (long) LONG.nextUp(value); + } + @Override + public Long nextDown(Object value) { + return (long) LONG.nextDown(value); + } + + @Override + public BytesRef encodeRanges(Set ranges) throws IOException { + return LONG.encodeRanges(ranges); + } + + @Override + public List decodeRanges(BytesRef bytes) { + return LONG.decodeRanges(bytes); + } + + @Override + public Double doubleValue (Object endpointValue) { + return LONG.doubleValue(endpointValue); + } + + @Override + public Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to, boolean includeFrom, + boolean includeTo) { + return LONG.dvRangeQuery(field, queryType, from, to, includeFrom, includeTo); + } + + @Override + public Query rangeQuery(String field, boolean hasDocValues, Object lowerTerm, Object upperTerm, boolean includeLower, + boolean includeUpper, ShapeRelation relation, @Nullable ZoneId timeZone, + @Nullable DateMathParser parser, QueryShardContext context) { + ZoneId zone = (timeZone == null) ? ZoneOffset.UTC : timeZone; + + DateMathParser dateMathParser = (parser == null) ? + DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser() : parser; + Long low = lowerTerm == null ? Long.MIN_VALUE : + dateMathParser.parse(lowerTerm instanceof BytesRef ? ((BytesRef) lowerTerm).utf8ToString() : lowerTerm.toString(), + context::nowInMillis, false, zone).toEpochMilli(); + Long high = upperTerm == null ? Long.MAX_VALUE : + dateMathParser.parse(upperTerm instanceof BytesRef ? 
((BytesRef) upperTerm).utf8ToString() : upperTerm.toString(), + context::nowInMillis, false, zone).toEpochMilli(); + + return super.rangeQuery(field, hasDocValues, low, high, includeLower, includeUpper, relation, zone, + dateMathParser, context); + } + @Override + public Query withinQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) { + return LONG.withinQuery(field, from, to, includeLower, includeUpper); + } + @Override + public Query containsQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) { + return LONG.containsQuery(field, from, to, includeLower, includeUpper); + } + @Override + public Query intersectsQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) { + return LONG.intersectsQuery(field, from, to, includeLower, includeUpper); + } + }, + // todo support half_float + FLOAT("float_range", LengthType.FIXED_4, NumberFieldMapper.NumberType.FLOAT) { + @Override + public Float minValue() { + return Float.NEGATIVE_INFINITY; + } + @Override + public Float maxValue() { + return Float.POSITIVE_INFINITY; + } + @Override + public Float nextUp(Object value) { + return Math.nextUp(((Number)value).floatValue()); + } + @Override + public Float nextDown(Object value) { + return Math.nextDown(((Number)value).floatValue()); + } + + @Override + public BytesRef encodeRanges(Set ranges) throws IOException { + return BinaryRangeUtil.encodeFloatRanges(ranges); + } + + @Override + public List decodeRanges(BytesRef bytes) { + return BinaryRangeUtil.decodeFloatRanges(bytes); + } + + @Override + public Double doubleValue(Object endpointValue) { + assert endpointValue instanceof Float; + return ((Float) endpointValue).doubleValue(); + } + + @Override + public Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to, boolean includeFrom, + boolean includeTo) { + if (includeFrom == false) { + from = nextUp(from); + } + + if (includeTo == false) { + to = nextDown(to); + } + + byte[] encodedFrom = BinaryRangeUtil.encodeFloat((Float) from); + byte[] encodedTo = BinaryRangeUtil.encodeFloat((Float) to); + return new BinaryDocValuesRangeQuery(field, queryType, LengthType.FIXED_4, + new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to); + } + + @Override + public Field getRangeField(String name, RangeFieldMapper.Range r) { + return new FloatRange(name, new float[] {((Number)r.from).floatValue()}, new float[] {((Number)r.to).floatValue()}); + } + @Override + public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { + return createQuery(field, (Float) from, (Float) to, includeFrom, includeTo, + (f, t) -> FloatRange.newWithinQuery(field, new float[] { f }, new float[] { t }), RangeType.FLOAT); + } + @Override + public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { + return createQuery(field, (Float) from, (Float) to, includeFrom, includeTo, + (f, t) -> FloatRange.newContainsQuery(field, new float[] { f }, new float[] { t }), RangeType.FLOAT); + } + @Override + public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { + return createQuery(field, (Float) from, (Float) to, includeFrom, includeTo, + (f, t) -> FloatRange.newIntersectsQuery(field, new float[] { f }, new float[] { t }), RangeType.FLOAT); + } + }, + DOUBLE("double_range", LengthType.FIXED_8, NumberFieldMapper.NumberType.DOUBLE) { + @Override + public 
Double minValue() { + return Double.NEGATIVE_INFINITY; + } + @Override + public Double maxValue() { + return Double.POSITIVE_INFINITY; + } + @Override + public Double nextUp(Object value) { + return Math.nextUp(((Number)value).doubleValue()); + } + @Override + public Double nextDown(Object value) { + return Math.nextDown(((Number)value).doubleValue()); + } + + @Override + public BytesRef encodeRanges(Set ranges) throws IOException { + return BinaryRangeUtil.encodeDoubleRanges(ranges); + } + + @Override + public List decodeRanges(BytesRef bytes) { + return BinaryRangeUtil.decodeDoubleRanges(bytes); + } + + @Override + public Double doubleValue(Object endpointValue) { + assert endpointValue instanceof Double; + return (Double) endpointValue; + } + + @Override + public Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to, boolean includeFrom, + boolean includeTo) { + if (includeFrom == false) { + from = nextUp(from); + } + + if (includeTo == false) { + to = nextDown(to); + } + + byte[] encodedFrom = BinaryRangeUtil.encodeDouble((Double) from); + byte[] encodedTo = BinaryRangeUtil.encodeDouble((Double) to); + return new BinaryDocValuesRangeQuery(field, queryType, LengthType.FIXED_8, + new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to); + } + + @Override + public Field getRangeField(String name, RangeFieldMapper.Range r) { + return new DoubleRange(name, new double[] {((Number)r.from).doubleValue()}, new double[] {((Number)r.to).doubleValue()}); + } + @Override + public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { + return createQuery(field, (Double) from, (Double) to, includeFrom, includeTo, + (f, t) -> DoubleRange.newWithinQuery(field, new double[] { f }, new double[] { t }), RangeType.DOUBLE); + } + @Override + public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { + return createQuery(field, (Double) from, (Double) to, includeFrom, includeTo, + (f, t) -> DoubleRange.newContainsQuery(field, new double[] { f }, new double[] { t }), RangeType.DOUBLE); + } + @Override + public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { + return createQuery(field, (Double) from, (Double) to, includeFrom, includeTo, + (f, t) -> DoubleRange.newIntersectsQuery(field, new double[] { f }, new double[] { t }), RangeType.DOUBLE); + } + + }, + // todo add BYTE support + // todo add SHORT support + INTEGER("integer_range", LengthType.VARIABLE, NumberFieldMapper.NumberType.INTEGER) { + @Override + public Integer minValue() { + return Integer.MIN_VALUE; + } + @Override + public Integer maxValue() { + return Integer.MAX_VALUE; + } + @Override + public Integer nextUp(Object value) { + return ((Number)value).intValue() + 1; + } + @Override + public Integer nextDown(Object value) { + return ((Number)value).intValue() - 1; + } + + @Override + public BytesRef encodeRanges(Set ranges) throws IOException { + return LONG.encodeRanges(ranges); + } + + @Override + public List decodeRanges(BytesRef bytes) { + return LONG.decodeRanges(bytes); + } + + @Override + public Double doubleValue(Object endpointValue) { + return LONG.doubleValue(endpointValue); + } + + @Override + public Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to, boolean includeFrom, + boolean includeTo) { + return LONG.dvRangeQuery(field, queryType, from, to, includeFrom, includeTo); + } + + 
@Override + public Field getRangeField(String name, RangeFieldMapper.Range r) { + return new IntRange(name, new int[] {((Number)r.from).intValue()}, new int[] {((Number)r.to).intValue()}); + } + @Override + public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { + return createQuery(field, (Integer) from, (Integer) to, includeFrom, includeTo, + (f, t) -> IntRange.newWithinQuery(field, new int[] { f }, new int[] { t }), RangeType.INTEGER); + } + @Override + public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { + return createQuery(field, (Integer) from, (Integer) to, includeFrom, includeTo, + (f, t) -> IntRange.newContainsQuery(field, new int[] { f }, new int[] { t }), RangeType.INTEGER); + } + @Override + public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { + return createQuery(field, (Integer) from, (Integer) to, includeFrom, includeTo, + (f, t) -> IntRange.newIntersectsQuery(field, new int[] { f }, new int[] { t }), RangeType.INTEGER); + } + }, + LONG("long_range", LengthType.VARIABLE, NumberFieldMapper.NumberType.LONG) { + @Override + public Long minValue() { + return Long.MIN_VALUE; + } + @Override + public Long maxValue() { + return Long.MAX_VALUE; + } + @Override + public Long nextUp(Object value) { + return ((Number)value).longValue() + 1; + } + @Override + public Long nextDown(Object value) { + return ((Number)value).longValue() - 1; + } + + @Override + public BytesRef encodeRanges(Set ranges) throws IOException { + return BinaryRangeUtil.encodeLongRanges(ranges); + } + + @Override + public List decodeRanges(BytesRef bytes) { + return BinaryRangeUtil.decodeLongRanges(bytes); + } + + @Override + public Double doubleValue(Object endpointValue) { + assert endpointValue instanceof Long; + return ((Long) endpointValue).doubleValue(); + } + + @Override + public Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to, boolean includeFrom, + boolean includeTo) { + if (includeFrom == false) { + from = nextUp(from); + } + + if (includeTo == false) { + to = nextDown(to); + } + + byte[] encodedFrom = BinaryRangeUtil.encodeLong(((Number) from).longValue()); + byte[] encodedTo = BinaryRangeUtil.encodeLong(((Number) to).longValue()); + return new BinaryDocValuesRangeQuery(field, queryType, LengthType.VARIABLE, + new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to); + } + + @Override + public Field getRangeField(String name, RangeFieldMapper.Range r) { + return new LongRange(name, new long[] {((Number)r.from).longValue()}, + new long[] {((Number)r.to).longValue()}); + } + @Override + public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { + return createQuery(field, (Long) from, (Long) to, includeFrom, includeTo, + (f, t) -> LongRange.newWithinQuery(field, new long[] { f }, new long[] { t }), RangeType.LONG); + } + @Override + public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { + return createQuery(field, (Long) from, (Long) to, includeFrom, includeTo, + (f, t) -> LongRange.newContainsQuery(field, new long[] { f }, new long[] { t }), RangeType.LONG); + } + @Override + public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { + return createQuery(field, (Long) from, (Long) to, includeFrom, includeTo, + (f, t) -> 
LongRange.newIntersectsQuery(field, new long[] { f }, new long[] { t }), RangeType.LONG); + } + }; + + RangeType(String name, LengthType lengthType) { + this.name = name; + this.numberType = null; + this.lengthType = lengthType; + } + + RangeType(String name, LengthType lengthType, NumberFieldMapper.NumberType type) { + this.name = name; + this.numberType = type; + this.lengthType = lengthType; + } + + /** Get the associated type name. */ + public final String typeName() { + return name; + } + + /** + * Internal helper to create the actual {@link Query} using the provided supplier function. Before creating the query we check if + * the intervals min > max, in which case an {@link IllegalArgumentException} is raised. The method adapts the interval bounds + * based on whether the edges should be included or excluded. In case where after this correction the interval would be empty + * because min > max, we simply return a {@link MatchNoDocsQuery}. + * This helper handles all {@link Number} cases and dates, the IP range type uses its own logic. + */ + private static > Query createQuery(String field, T from, T to, boolean includeFrom, boolean includeTo, + BiFunction querySupplier, RangeType rangeType) { + if (from.compareTo(to) > 0) { + // wrong argument order, this is an error the user should fix + throw new IllegalArgumentException("Range query `from` value (" + from + ") is greater than `to` value (" + to + ")"); + } + + @SuppressWarnings("unchecked") + T correctedFrom = includeFrom ? from : (T) rangeType.nextUp(from); + @SuppressWarnings("unchecked") + T correctedTo = includeTo ? to : (T) rangeType.nextDown(to); + if (correctedFrom.compareTo(correctedTo) > 0) { + return new MatchNoDocsQuery("range didn't intersect anything"); + } else { + return querySupplier.apply(correctedFrom, correctedTo); + } + } + + public abstract Field getRangeField(String name, RangeFieldMapper.Range range); + public List createFields(ParseContext context, String name, RangeFieldMapper.Range range, boolean indexed, + boolean docValued, boolean stored) { + assert range != null : "range cannot be null when creating fields"; + List fields = new ArrayList<>(); + if (indexed) { + fields.add(getRangeField(name, range)); + } + if (docValued) { + RangeFieldMapper.BinaryRangesDocValuesField field = (RangeFieldMapper.BinaryRangesDocValuesField) context.doc().getByKey(name); + if (field == null) { + field = new RangeFieldMapper.BinaryRangesDocValuesField(name, range, this); + context.doc().addWithKey(name, field); + } else { + field.add(range); + } + } + if (stored) { + fields.add(new StoredField(name, range.toString())); + } + return fields; + } + /** parses from value. rounds according to included flag */ + public Object parseFrom(RangeFieldMapper.RangeFieldType fieldType, XContentParser parser, boolean coerce, + boolean included) throws IOException { + Number value = numberType.parse(parser, coerce); + return included ? value : (Number)nextUp(value); + } + /** parses to value. rounds according to included flag */ + public Object parseTo(RangeFieldMapper.RangeFieldType fieldType, XContentParser parser, boolean coerce, + boolean included) throws IOException { + Number value = numberType.parse(parser, coerce); + return included ? 
value : (Number)nextDown(value); + } + + public abstract Object minValue(); + public abstract Object maxValue(); + public abstract Object nextUp(Object value); + public abstract Object nextDown(Object value); + public abstract Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo); + public abstract Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo); + public abstract Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo); + public Object parse(Object value, boolean coerce) { + return numberType.parse(value, coerce); + } + public Query rangeQuery(String field, boolean hasDocValues, Object from, Object to, boolean includeFrom, boolean includeTo, + ShapeRelation relation, @Nullable ZoneId timeZone, @Nullable DateMathParser dateMathParser, + QueryShardContext context) { + Object lower = from == null ? minValue() : parse(from, false); + Object upper = to == null ? maxValue() : parse(to, false); + Query indexQuery; + if (relation == ShapeRelation.WITHIN) { + indexQuery = withinQuery(field, lower, upper, includeFrom, includeTo); + } else if (relation == ShapeRelation.CONTAINS) { + indexQuery = containsQuery(field, lower, upper, includeFrom, includeTo); + } else { + indexQuery = intersectsQuery(field, lower, upper, includeFrom, includeTo); + } + if (hasDocValues) { + final BinaryDocValuesRangeQuery.QueryType queryType; + if (relation == ShapeRelation.WITHIN) { + queryType = BinaryDocValuesRangeQuery.QueryType.WITHIN; + } else if (relation == ShapeRelation.CONTAINS) { + queryType = BinaryDocValuesRangeQuery.QueryType.CONTAINS; + } else { + queryType = BinaryDocValuesRangeQuery.QueryType.INTERSECTS; + } + Query dvQuery = dvRangeQuery(field, queryType, lower, upper, includeFrom, includeTo); + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } else { + return indexQuery; + } + } + + // No need to take into account Range#includeFrom or Range#includeTo, because from and to have already been + // rounded up via parseFrom and parseTo methods. + public abstract BytesRef encodeRanges(Set ranges) throws IOException; + public abstract List decodeRanges(BytesRef bytes); + + /** + * Given the Range.to or Range.from Object value from a Range instance, converts that value into a Double. Before converting, it + * asserts that the object is of the expected type. 
Operation is not supported on IP ranges (because of loss of precision) + * + * @param endpointValue Object value for Range.to or Range.from + * @return endpointValue as a Double + */ + public abstract Double doubleValue(Object endpointValue); + + public boolean isNumeric() { + return numberType != null; + } + + public abstract Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to, + boolean includeFrom, boolean includeTo); + + public final String name; + private final NumberFieldMapper.NumberType numberType; + public final LengthType lengthType; + + public enum LengthType { + FIXED_4 { + @Override + public int readLength(byte[] bytes, int offset) { + return 4; + } + }, + FIXED_8 { + @Override + public int readLength(byte[] bytes, int offset) { + return 8; + } + }, + FIXED_16 { + @Override + public int readLength(byte[] bytes, int offset) { + return 16; + } + }, + VARIABLE { + @Override + public int readLength(byte[] bytes, int offset) { + // the first bit encodes the sign and the next 4 bits encode the number + // of additional bytes + int token = Byte.toUnsignedInt(bytes[offset]); + int length = (token >>> 3) & 0x0f; + if ((token & 0x80) == 0) { + length = 0x0f - length; + } + return 1 + length; + } + }; + + /** + * Return the length of the value that starts at {@code offset} in {@code bytes}. + */ + public abstract int readLength(byte[] bytes, int offset); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/merge/OnGoingMerge.java b/server/src/main/java/org/elasticsearch/index/merge/OnGoingMerge.java index e39158720c375..5c09b66077733 100644 --- a/server/src/main/java/org/elasticsearch/index/merge/OnGoingMerge.java +++ b/server/src/main/java/org/elasticsearch/index/merge/OnGoingMerge.java @@ -30,11 +30,12 @@ public class OnGoingMerge { private final String id; - private final List mergedSegments; + private final MergePolicy.OneMerge oneMerge; public OnGoingMerge(MergePolicy.OneMerge merge) { this.id = Integer.toString(System.identityHashCode(merge)); - this.mergedSegments = merge.segments; + this.oneMerge = merge; + } /** @@ -44,10 +45,20 @@ public String getId() { return id; } + + /** + * Returns the total size in bytes of this merge. Note that this does not + * indicate the size of the merged segment, but the + * input total size. + */ + public long getTotalBytesSize() { + return oneMerge.totalBytesSize(); + } + /** * The list of segments that are being merged. 
*/ public List getMergedSegments() { - return mergedSegments; + return oneMerge.segments; } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index d4fe2e8f9b7d2..784bfd273c227 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -213,8 +213,11 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl protected volatile ShardRouting shardRouting; protected volatile IndexShardState state; + // ensure happens-before relation between addRefreshListener() and postRecovery() + private final Object postRecoveryMutex = new Object(); private volatile long pendingPrimaryTerm; // see JavaDocs for getPendingPrimaryTerm - protected final AtomicReference currentEngineReference = new AtomicReference<>(); + private final Object engineMutex = new Object(); // lock ordering: engineMutex -> mutex + private final AtomicReference currentEngineReference = new AtomicReference<>(); final EngineFactory engineFactory; private final IndexingOperationListener indexingOperationListeners; @@ -1192,20 +1195,23 @@ public Engine.IndexCommitRef acquireSafeIndexCommit() throws EngineException { * @throws java.nio.file.NoSuchFileException if one or more files referenced by a commit are not present. */ public Store.MetadataSnapshot snapshotStoreMetadata() throws IOException { + assert Thread.holdsLock(mutex) == false : "snapshotting store metadata under mutex"; Engine.IndexCommitRef indexCommit = null; store.incRef(); try { - Engine engine; - synchronized (mutex) { + synchronized (engineMutex) { // if the engine is not running, we can access the store directly, but we need to make sure no one starts - // the engine on us. If the engine is running, we can get a snapshot via the deletion policy which is initialized. - // That can be done out of mutex, since the engine can be closed half way. - engine = getEngineOrNull(); - if (engine == null) { + // the engine on us. If the engine is running, we can get a snapshot via the deletion policy of the engine. + synchronized (mutex) { + final Engine engine = getEngineOrNull(); + if (engine != null) { + indexCommit = engine.acquireLastIndexCommit(false); + } + } + if (indexCommit == null) { return store.getMetadata(null, true); } } - indexCommit = engine.acquireLastIndexCommit(false); return store.getMetadata(indexCommit.getIndexCommit()); } finally { store.decRef(); @@ -1334,23 +1340,24 @@ public void close(String reason, boolean flushEngine) throws IOException { } } - public IndexShard postRecovery(String reason) - throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException { - synchronized (mutex) { - if (state == IndexShardState.CLOSED) { - throw new IndexShardClosedException(shardId); - } - if (state == IndexShardState.STARTED) { - throw new IndexShardStartedException(shardId); - } + public void postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException { + synchronized (postRecoveryMutex) { // we need to refresh again to expose all operations that were index until now. Otherwise // we may not expose operations that were indexed with a refresh listener that was immediately - // responded to in addRefreshListener. + // responded to in addRefreshListener. 
The refresh must happen under the same mutex used in addRefreshListener + // and before moving this shard to POST_RECOVERY state (i.e., allow to read from this shard). getEngine().refresh("post_recovery"); - recoveryState.setStage(RecoveryState.Stage.DONE); - changeState(IndexShardState.POST_RECOVERY, reason); + synchronized (mutex) { + if (state == IndexShardState.CLOSED) { + throw new IndexShardClosedException(shardId); + } + if (state == IndexShardState.STARTED) { + throw new IndexShardStartedException(shardId); + } + recoveryState.setStage(RecoveryState.Stage.DONE); + changeState(IndexShardState.POST_RECOVERY, reason); + } } - return this; } /** @@ -1583,6 +1590,7 @@ public void openEngineAndSkipTranslogRecovery() throws IOException { } private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) throws IOException { + assert Thread.holdsLock(mutex) == false : "opening engine under mutex"; if (state != IndexShardState.RECOVERING) { throw new IndexShardNotRecoveringException(shardId, state); } @@ -1595,16 +1603,24 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) t assert recoveryState.getRecoverySource().expectEmptyRetentionLeases() == false || getRetentionLeases().leases().isEmpty() : "expected empty set of retention leases with recovery source [" + recoveryState.getRecoverySource() + "] but got " + getRetentionLeases(); - synchronized (mutex) { - verifyNotClosed(); - assert currentEngineReference.get() == null : "engine is running"; + synchronized (engineMutex) { // we must create a new engine under mutex (see IndexShard#snapshotStoreMetadata). final Engine newEngine = engineFactory.newReadWriteEngine(config); - onNewEngine(newEngine); - currentEngineReference.set(newEngine); - // We set active because we are now writing operations to the engine; this way, - // if we go idle after some time and become inactive, we still give sync'd flush a chance to run. - active.set(true); + synchronized (mutex) { + try { + verifyNotClosed(); + assert currentEngineReference.get() == null : "engine is running"; + onNewEngine(newEngine); + currentEngineReference.set(newEngine); + // We set active because we are now writing operations to the engine; this way, + // if we go idle after some time and become inactive, we still give sync'd flush a chance to run. + active.set(true); + } finally { + if (currentEngineReference.get() != newEngine) { + newEngine.close(); + } + } + } } // time elapses after the engine is created above (pulling the config settings) until we set the engine reference, during // which settings changes could possibly have happened, so here we forcefully push any config changes to the new engine. 
@@ -1627,6 +1643,7 @@ private boolean assertSequenceNumbersInCommit() throws IOException { } private void onNewEngine(Engine newEngine) { + assert Thread.holdsLock(engineMutex); refreshListeners.setCurrentRefreshLocationSupplier(newEngine::getTranslogLastWriteLocation); } @@ -2675,7 +2692,13 @@ private DocumentMapperForType docMapper(String type) { } private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) { - Sort indexSort = indexSortSupplier.get(); + final Sort indexSort = indexSortSupplier.get(); + final Engine.Warmer warmer = reader -> { + assert Thread.holdsLock(mutex) == false : "warming engine under mutex"; + if (this.warmer != null) { + this.warmer.warm(reader); + } + }; return new EngineConfig(shardId, shardRouting.allocationId().getId(), threadPool, indexSettings, warmer, store, indexSettings.getMergePolicy(), mapperService != null ? mapperService.indexAnalyzer() : null, @@ -3237,10 +3260,10 @@ public void addRefreshListener(Translog.Location location, Consumer lis if (isReadAllowed()) { readAllowed = true; } else { - // check again under mutex. this is important to create a happens before relationship + // check again under postRecoveryMutex. this is important to create a happens before relationship // between the switch to POST_RECOVERY + associated refresh. Otherwise we may respond // to a listener before a refresh actually happened that contained that operation. - synchronized (mutex) { + synchronized (postRecoveryMutex) { readAllowed = isReadAllowed(); } } @@ -3305,6 +3328,7 @@ public ParsedDocument newNoopTombstoneDoc(String reason) { * Rollback the current engine to the safe commit, then replay local translog up to the global checkpoint. */ void resetEngineToGlobalCheckpoint() throws IOException { + assert Thread.holdsLock(engineMutex) == false : "resetting engine under mutex"; assert getActiveOperationsCount() == OPERATIONS_BLOCKED : "resetting engine without blocking operations; active operations are [" + getActiveOperations() + ']'; sync(); // persist the global checkpoint to disk @@ -3316,15 +3340,17 @@ assert getActiveOperationsCount() == OPERATIONS_BLOCKED SetOnce newEngineReference = new SetOnce<>(); final long globalCheckpoint = getLastKnownGlobalCheckpoint(); assert globalCheckpoint == getLastSyncedGlobalCheckpoint(); - synchronized (mutex) { - verifyNotClosed(); - // we must create both new read-only engine and new read-write engine under mutex to ensure snapshotStoreMetadata, + synchronized (engineMutex) { + // we must create both new read-only engine and new read-write engine under engineMutex to ensure snapshotStoreMetadata, // acquireXXXCommit and close works. 
final Engine readOnlyEngine = new ReadOnlyEngine(newEngineConfig(replicationTracker), seqNoStats, translogStats, false, Function.identity()) { @Override public IndexCommitRef acquireLastIndexCommit(boolean flushFirst) { - synchronized (mutex) { + synchronized (engineMutex) { + if (newEngineReference.get() == null) { + throw new AlreadyClosedException("engine was closed"); + } // ignore flushFirst since we flushed above and we do not want to interfere with ongoing translog replay return newEngineReference.get().acquireLastIndexCommit(false); } @@ -3332,7 +3358,10 @@ public IndexCommitRef acquireLastIndexCommit(boolean flushFirst) { @Override public IndexCommitRef acquireSafeIndexCommit() { - synchronized (mutex) { + synchronized (engineMutex) { + if (newEngineReference.get() == null) { + throw new AlreadyClosedException("engine was closed"); + } return newEngineReference.get().acquireSafeIndexCommit(); } } @@ -3349,9 +3378,28 @@ public void close() throws IOException { IOUtils.close(super::close, newEngine); } }; - IOUtils.close(currentEngineReference.getAndSet(readOnlyEngine)); - newEngineReference.set(engineFactory.newReadWriteEngine(newEngineConfig(replicationTracker))); - onNewEngine(newEngineReference.get()); + synchronized (mutex) { + try { + verifyNotClosed(); + IOUtils.close(currentEngineReference.getAndSet(readOnlyEngine)); + } finally { + if (currentEngineReference.get() != readOnlyEngine) { + readOnlyEngine.close(); + } + } + } + final Engine newReadWriteEngine = engineFactory.newReadWriteEngine(newEngineConfig(replicationTracker)); + synchronized (mutex) { + try { + verifyNotClosed(); + newEngineReference.set(newReadWriteEngine); + onNewEngine(newReadWriteEngine); + } finally { + if (newEngineReference.get() != newReadWriteEngine) { + newReadWriteEngine.close(); // shard was closed + } + } + } } final Engine.TranslogRecoveryRunner translogRunner = (engine, snapshot) -> runTranslogRecovery( engine, snapshot, Engine.Operation.Origin.LOCAL_RESET, () -> { diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java index 135a3afee1ea5..99b1d34e9257d 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java @@ -29,7 +29,6 @@ import org.apache.lucene.search.similarities.BasicModelIn; import org.apache.lucene.search.similarities.BasicModelIne; import org.apache.lucene.search.similarities.BooleanSimilarity; -import org.apache.lucene.search.similarities.ClassicSimilarity; import org.apache.lucene.search.similarities.DFISimilarity; import org.apache.lucene.search.similarities.DFRSimilarity; import org.apache.lucene.search.similarities.Distribution; @@ -259,16 +258,6 @@ public static BooleanSimilarity createBooleanSimilarity(Settings settings, Versi return new BooleanSimilarity(); } - public static ClassicSimilarity createClassicSimilarity(Settings settings, Version indexCreatedVersion) { - assertSettingsIsSubsetOf("classic", indexCreatedVersion, settings, DISCOUNT_OVERLAPS); - - boolean discountOverlaps = settings.getAsBoolean(DISCOUNT_OVERLAPS, true); - - ClassicSimilarity similarity = new ClassicSimilarity(); - similarity.setDiscountOverlaps(discountOverlaps); - return similarity; - } - public static DFRSimilarity createDfrSimilarity(Settings settings, Version indexCreatedVersion) { assertSettingsIsSubsetOf("DFR", indexCreatedVersion, settings, 
"basic_model", "after_effect", "normalization", diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java index 3fe20a1f3b26e..214a8492a8984 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java @@ -26,7 +26,6 @@ import org.apache.lucene.search.Explanation; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.similarities.BooleanSimilarity; -import org.apache.lucene.search.similarities.ClassicSimilarity; import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper; import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.search.similarities.Similarity.SimScorer; @@ -53,27 +52,10 @@ public final class SimilarityService extends AbstractIndexComponent { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(SimilarityService.class)); public static final String DEFAULT_SIMILARITY = "BM25"; - private static final String CLASSIC_SIMILARITY = "classic"; private static final Map>> DEFAULTS; public static final Map> BUILT_IN; static { Map>> defaults = new HashMap<>(); - defaults.put(CLASSIC_SIMILARITY, version -> { - if (version.onOrAfter(Version.V_7_0_0)) { - return () -> { - throw new IllegalArgumentException("The [classic] similarity may not be used anymore. Please use the [BM25] " - + "similarity or build a custom [scripted] similarity instead."); - }; - } else { - final ClassicSimilarity similarity = SimilarityProviders.createClassicSimilarity(Settings.EMPTY, version); - return () -> { - deprecationLogger.deprecated("The [classic] similarity is now deprecated in favour of BM25, which is generally " - + "accepted as a better alternative. Use the [BM25] similarity or build a custom [scripted] similarity " - + "instead."); - return similarity; - }; - } - }); defaults.put("BM25", version -> { final LegacyBM25Similarity similarity = SimilarityProviders.createBM25Similarity(Settings.EMPTY, version); return () -> similarity; @@ -84,18 +66,6 @@ public final class SimilarityService extends AbstractIndexComponent { }); Map> builtIn = new HashMap<>(); - builtIn.put(CLASSIC_SIMILARITY, - (settings, version, script) -> { - if (version.onOrAfter(Version.V_7_0_0)) { - throw new IllegalArgumentException("The [classic] similarity may not be used anymore. Please use the [BM25] " - + "similarity or build a custom [scripted] similarity instead."); - } else { - deprecationLogger.deprecated("The [classic] similarity is now deprecated in favour of BM25, which is generally " - + "accepted as a better alternative. 
Use the [BM25] similarity or build a custom [scripted] similarity " - + "instead."); - return SimilarityProviders.createClassicSimilarity(settings, version); - } - }); builtIn.put("BM25", (settings, version, scriptService) -> SimilarityProviders.createBM25Similarity(settings, version)); builtIn.put("boolean", diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java index d7a969db174d3..dae7e1ed4e9b4 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -49,6 +49,7 @@ import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; @@ -109,7 +110,7 @@ public static Map getMappers(List mappe for (NumberFieldMapper.NumberType type : NumberFieldMapper.NumberType.values()) { mappers.put(type.typeName(), new NumberFieldMapper.TypeParser(type)); } - for (RangeFieldMapper.RangeType type : RangeFieldMapper.RangeType.values()) { + for (RangeType type : RangeType.values()) { mappers.put(type.typeName(), new RangeFieldMapper.TypeParser(type)); } mappers.put(BooleanFieldMapper.CONTENT_TYPE, new BooleanFieldMapper.TypeParser()); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 52a40df8a1806..05384655d8646 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -729,6 +729,11 @@ public synchronized void beforeIndexShardClosed(ShardId shardId, @Nullable Index recoveryStats.addTotals(indexShard.recoveryStats()); } } + + @Override + public void afterIndexShardClosed(ShardId shardId, IndexShard indexShard, Settings indexSettings) { + + } } /** diff --git a/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java b/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java index 29ae578a64371..c725157d8de0c 100644 --- a/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java +++ b/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java @@ -189,6 +189,26 @@ public static Integer readIntProperty(String processorType, String processorTag, } } + /** + * Returns and removes the specified property from the specified configuration map. + * + * If the property value isn't of type int a {@link ElasticsearchParseException} is thrown. + * If the property is missing an {@link ElasticsearchParseException} is thrown + */ + public static Double readDoubleProperty(String processorType, String processorTag, Map configuration, + String propertyName) { + Object value = configuration.remove(propertyName); + if (value == null) { + throw newConfigurationException(processorType, processorTag, propertyName, "required property is missing"); + } + try { + return Double.parseDouble(value.toString()); + } catch (Exception e) { + throw newConfigurationException(processorType, processorTag, propertyName, + "property cannot be converted to a double [" + value.toString() + "]"); + } + } + /** * Returns and removes the specified property of type list from the specified configuration map. 
* diff --git a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java index 8c9eff0698835..6d9cba05748ea 100644 --- a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java @@ -81,9 +81,9 @@ public void initializeSnapshot(SnapshotId snapshotId, List indices, Met @Override public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long startTime, String failure, int totalShards, List shardFailures, long repositoryStateId, boolean includeGlobalState, - Map userMetadata) { + MetaData metaData, Map userMetadata) { return in.finalizeSnapshot(snapshotId, indices, startTime, failure, totalShards, shardFailures, repositoryStateId, - includeGlobalState, userMetadata); + includeGlobalState, metaData, userMetadata); } @Override @@ -121,13 +121,11 @@ public boolean isReadOnly() { return in.isReadOnly(); } - @Override public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, - IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { - in.snapshotShard(store, mapperService, snapshotId, indexId, snapshotIndexCommit, snapshotStatus); + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus, ActionListener listener) { + in.snapshotShard(store, mapperService, snapshotId, indexId, snapshotIndexCommit, snapshotStatus, listener); } - @Override public void restoreShard(Store store, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java index 788459b16c540..a02975e120b37 100644 --- a/server/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java @@ -48,9 +48,7 @@ *

* To perform a snapshot:
 * <ul>
- * <li>Master calls {@link #initializeSnapshot(SnapshotId, List, org.elasticsearch.cluster.metadata.MetaData)}
- * with list of indices that will be included into the snapshot</li>
- * <li>Data nodes call {@link Repository#snapshotShard(Store, MapperService, SnapshotId, IndexId, IndexCommit, IndexShardSnapshotStatus)}
+ * <li>Data nodes call {@link Repository#snapshotShard}
 * for each shard</li>
 * <li>When all shard calls return master calls {@link #finalizeSnapshot} with possible list of failures</li>
 * </ul>
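The signature change in the following hunk turns `Repository#snapshotShard` into an asynchronous operation that reports completion through an `ActionListener` instead of returning only once the shard snapshot is finished. As a rough sketch of that calling convention, using simplified stand-in types rather than the real `org.elasticsearch.action.ActionListener` and repository classes:

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/** Simplified stand-in for org.elasticsearch.action.ActionListener. */
interface ActionListener<T> {
    void onResponse(T response);
    void onFailure(Exception e);
}

/** Illustrative repository whose shard snapshot completes a listener instead of blocking. */
class SketchRepository {
    private final ExecutorService snapshotPool = Executors.newSingleThreadExecutor();

    void snapshotShard(String shardId, ActionListener<Void> listener) {
        snapshotPool.execute(() -> {
            try {
                uploadSegments(shardId);   // long-running work happens off the calling thread
                listener.onResponse(null); // success: exactly one callback fires
            } catch (Exception e) {
                listener.onFailure(e);     // failure: the caller sees the exception asynchronously
            }
        });
    }

    private void uploadSegments(String shardId) {
        // placeholder for writing segment files to the blob store
    }

    void close() {
        snapshotPool.shutdown();
    }

    public static void main(String[] args) throws Exception {
        SketchRepository repo = new SketchRepository();
        CompletableFuture<Void> done = new CompletableFuture<>();
        repo.snapshotShard("[index][0]", new ActionListener<Void>() {
            @Override public void onResponse(Void response) { done.complete(response); }
            @Override public void onFailure(Exception e) { done.completeExceptionally(e); }
        });
        done.get(); // a caller that still needs to block can bridge through a future
        repo.close();
    }
}
```

In `BlobStoreRepository` this callback style is layered with `StepListener`s (`snapshotDoneListener`, `allFilesUploadedListener`) so the file uploads and the final shard-snapshot write can be chained onto the listener rather than onto the calling thread.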
@@ -116,7 +114,11 @@ default Repository create(RepositoryMetaData metaData, Function indices, MetaData metaData); /** @@ -136,7 +138,7 @@ default Repository create(RepositoryMetaData metaData, Function indices, long startTime, String failure, int totalShards, List shardFailures, long repositoryStateId, boolean includeGlobalState, - Map userMetadata); + MetaData clusterMetaData, Map userMetadata); /** * Deletes snapshot @@ -204,9 +206,10 @@ SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long * @param indexId id for the index being snapshotted * @param snapshotIndexCommit commit point * @param snapshotStatus snapshot status + * @param listener listener invoked on completion */ void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, - IndexShardSnapshotStatus snapshotStatus); + IndexShardSnapshotStatus snapshotStatus, ActionListener listener); /** * Restores snapshot of the shard. diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 681f5734334ad..9d32cc61d421b 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -32,6 +32,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.StepListener; import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -86,7 +87,6 @@ import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.RepositoryVerificationException; -import org.elasticsearch.snapshots.InvalidSnapshotNameException; import org.elasticsearch.snapshots.SnapshotCreationException; import org.elasticsearch.snapshots.SnapshotException; import org.elasticsearch.snapshots.SnapshotId; @@ -109,6 +109,7 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.Executor; import java.util.stream.Collectors; import static org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName; @@ -141,7 +142,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private static final String TESTS_FILE = "tests-"; - private static final String METADATA_PREFIX = "meta-"; + public static final String METADATA_PREFIX = "meta-"; public static final String METADATA_NAME_FORMAT = METADATA_PREFIX + "%s.dat"; @@ -166,8 +167,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp */ public static final Setting COMPRESS_SETTING = Setting.boolSetting("compress", true, Setting.Property.NodeScope); - private final Settings settings; - private final boolean compress; private final RateLimiter snapshotRateLimiter; @@ -201,12 +200,13 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp /** * Constructs new BlobStoreRepository * @param metadata The metadata for this repository including name and settings - * @param settings Settings for the node this repository object is created on * @param threadPool Threadpool to run long running repository manipulations on asynchronously 
*/ - protected BlobStoreRepository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry, - ThreadPool threadPool, BlobPath basePath) { - this.settings = settings; + protected BlobStoreRepository( + final RepositoryMetaData metadata, + final NamedXContentRegistry namedXContentRegistry, + final ThreadPool threadPool, + final BlobPath basePath) { this.metadata = metadata; this.threadPool = threadPool; this.compress = COMPRESS_SETTING.get(metadata.settings()); @@ -357,23 +357,13 @@ public RepositoryMetaData getMetadata() { @Override public void initializeSnapshot(SnapshotId snapshotId, List indices, MetaData clusterMetaData) { - if (isReadOnly()) { - throw new RepositoryException(metadata.name(), "cannot create snapshot in a readonly repository"); - } try { - final String snapshotName = snapshotId.getName(); - // check if the snapshot name already exists in the repository - final RepositoryData repositoryData = getRepositoryData(); - if (repositoryData.getSnapshotIds().stream().anyMatch(s -> s.getName().equals(snapshotName))) { - throw new InvalidSnapshotNameException(metadata.name(), snapshotId.getName(), "snapshot with the same name already exists"); - } - // Write Global MetaData - globalMetaDataFormat.write(clusterMetaData, blobContainer(), snapshotId.getUUID()); + globalMetaDataFormat.write(clusterMetaData, blobContainer(), snapshotId.getUUID(), true); // write the index metadata for each index in the snapshot for (IndexId index : indices) { - indexMetaDataFormat.write(clusterMetaData.index(index.getName()), indexContainer(index), snapshotId.getUUID()); + indexMetaDataFormat.write(clusterMetaData.index(index.getName()), indexContainer(index), snapshotId.getUUID(), true); } } catch (IOException ex) { throw new SnapshotCreationException(metadata.name(), snapshotId, ex); @@ -609,14 +599,34 @@ public SnapshotInfo finalizeSnapshot(final SnapshotId snapshotId, final List shardFailures, final long repositoryStateId, final boolean includeGlobalState, + final MetaData clusterMetaData, final Map userMetadata) { SnapshotInfo blobStoreSnapshot = new SnapshotInfo(snapshotId, indices.stream().map(IndexId::getName).collect(Collectors.toList()), startTime, failure, threadPool.absoluteTimeInMillis(), totalShards, shardFailures, includeGlobalState, userMetadata); + + try { + // We ignore all FileAlreadyExistsException here since otherwise a master failover while in this method will + // mean that no snap-${uuid}.dat blob is ever written for this snapshot. This is safe because any updated version of the + // index or global metadata will be compatible with the segments written in this snapshot as well. 
+ // Failing on an already existing index-${repoGeneration} below ensures that the index.latest blob is not updated in a way that + // decrements the generation it points at + + // Write Global MetaData + globalMetaDataFormat.write(clusterMetaData, blobContainer(), snapshotId.getUUID(), false); + + // write the index metadata for each index in the snapshot + for (IndexId index : indices) { + indexMetaDataFormat.write(clusterMetaData.index(index.getName()), indexContainer(index), snapshotId.getUUID(), false); + } + } catch (IOException ex) { + throw new SnapshotException(metadata.name(), snapshotId, "failed to write metadata for snapshot", ex); + } + try { final RepositoryData updatedRepositoryData = getRepositoryData().addSnapshot(snapshotId, blobStoreSnapshot.state(), indices); - snapshotFormat.write(blobStoreSnapshot, blobContainer(), snapshotId.getUUID()); + snapshotFormat.write(blobStoreSnapshot, blobContainer(), snapshotId.getUUID(), false); writeIndexGen(updatedRepositoryData, repositoryStateId); } catch (FileAlreadyExistsException ex) { // if another master was elected and took over finalizing the snapshot, it is possible @@ -678,8 +688,7 @@ private BlobContainer shardContainer(IndexId indexId, ShardId shardId) { * @return rate limiter or null of no throttling is needed */ private RateLimiter getRateLimiter(Settings repositorySettings, String setting, ByteSizeValue defaultRate) { - ByteSizeValue maxSnapshotBytesPerSec = repositorySettings.getAsBytesSize(setting, - settings.getAsBytesSize(setting, defaultRate)); + ByteSizeValue maxSnapshotBytesPerSec = repositorySettings.getAsBytesSize(setting, defaultRate); if (maxSnapshotBytesPerSec.getBytes() <= 0) { return null; } else { @@ -885,9 +894,15 @@ private void writeAtomic(final String blobName, final BytesReference bytesRef, b @Override public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, - IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus, ActionListener listener) { final ShardId shardId = store.shardId(); final long startTime = threadPool.absoluteTimeInMillis(); + final StepListener snapshotDoneListener = new StepListener<>(); + snapshotDoneListener.whenComplete(listener::onResponse, e -> { + snapshotStatus.moveToFailed(threadPool.absoluteTimeInMillis(), ExceptionsHelper.detailedMessage(e)); + listener.onFailure(e instanceof IndexShardSnapshotFailedException ? (IndexShardSnapshotFailedException) e + : new IndexShardSnapshotFailedException(store.shardId(), e)); + }); try { logger.debug("[{}] [{}] snapshot to [{}] ...", shardId, snapshotId, metadata.name()); @@ -909,132 +924,145 @@ public void snapshotShard(Store store, MapperService mapperService, SnapshotId s } final List indexCommitPointFiles = new ArrayList<>(); + ArrayList filesToSnapshot = new ArrayList<>(); store.incRef(); + final Collection fileNames; + final Store.MetadataSnapshot metadataFromStore; try { - ArrayList filesToSnapshot = new ArrayList<>(); - final Store.MetadataSnapshot metadata; // TODO apparently we don't use the MetadataSnapshot#.recoveryDiff(...) 
here but we should - final Collection fileNames; try { logger.trace( "[{}] [{}] Loading store metadata using index commit [{}]", shardId, snapshotId, snapshotIndexCommit); - metadata = store.getMetadata(snapshotIndexCommit); + metadataFromStore = store.getMetadata(snapshotIndexCommit); fileNames = snapshotIndexCommit.getFileNames(); } catch (IOException e) { throw new IndexShardSnapshotFailedException(shardId, "Failed to get store file metadata", e); } - int indexIncrementalFileCount = 0; - int indexTotalNumberOfFiles = 0; - long indexIncrementalSize = 0; - long indexTotalFileCount = 0; - for (String fileName : fileNames) { - if (snapshotStatus.isAborted()) { - logger.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileName); - throw new IndexShardSnapshotFailedException(shardId, "Aborted"); - } + } finally { + store.decRef(); + } + int indexIncrementalFileCount = 0; + int indexTotalNumberOfFiles = 0; + long indexIncrementalSize = 0; + long indexTotalFileCount = 0; + for (String fileName : fileNames) { + if (snapshotStatus.isAborted()) { + logger.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileName); + throw new IndexShardSnapshotFailedException(shardId, "Aborted"); + } - logger.trace("[{}] [{}] Processing [{}]", shardId, snapshotId, fileName); - final StoreFileMetaData md = metadata.get(fileName); - BlobStoreIndexShardSnapshot.FileInfo existingFileInfo = null; - List filesInfo = snapshots.findPhysicalIndexFiles(fileName); - if (filesInfo != null) { - for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesInfo) { - if (fileInfo.isSame(md)) { - // a commit point file with the same name, size and checksum was already copied to repository - // we will reuse it for this snapshot - existingFileInfo = fileInfo; - break; - } + logger.trace("[{}] [{}] Processing [{}]", shardId, snapshotId, fileName); + final StoreFileMetaData md = metadataFromStore.get(fileName); + BlobStoreIndexShardSnapshot.FileInfo existingFileInfo = null; + List filesInfo = snapshots.findPhysicalIndexFiles(fileName); + if (filesInfo != null) { + for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesInfo) { + if (fileInfo.isSame(md)) { + // a commit point file with the same name, size and checksum was already copied to repository + // we will reuse it for this snapshot + existingFileInfo = fileInfo; + break; } } - - indexTotalFileCount += md.length(); - indexTotalNumberOfFiles++; - - if (existingFileInfo == null) { - indexIncrementalFileCount++; - indexIncrementalSize += md.length(); - // create a new FileInfo - BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = - new BlobStoreIndexShardSnapshot.FileInfo(DATA_BLOB_PREFIX + UUIDs.randomBase64UUID(), md, chunkSize()); - indexCommitPointFiles.add(snapshotFileInfo); - filesToSnapshot.add(snapshotFileInfo); - } else { - indexCommitPointFiles.add(existingFileInfo); - } } - snapshotStatus.moveToStarted(startTime, indexIncrementalFileCount, - indexTotalNumberOfFiles, indexIncrementalSize, indexTotalFileCount); - - for (BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo : filesToSnapshot) { - try { - snapshotFile(snapshotFileInfo, indexId, shardId, snapshotId, snapshotStatus, store); - } catch (IOException e) { - throw new IndexShardSnapshotFailedException(shardId, "Failed to perform snapshot (index files)", e); - } + indexTotalFileCount += md.length(); + indexTotalNumberOfFiles++; + + if (existingFileInfo == null) { + indexIncrementalFileCount++; + indexIncrementalSize += md.length(); + // create a new FileInfo + 
BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = + new BlobStoreIndexShardSnapshot.FileInfo(DATA_BLOB_PREFIX + UUIDs.randomBase64UUID(), md, chunkSize()); + indexCommitPointFiles.add(snapshotFileInfo); + filesToSnapshot.add(snapshotFileInfo); + } else { + indexCommitPointFiles.add(existingFileInfo); } - } finally { - store.decRef(); } - final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(snapshotIndexCommit.getGeneration()); + snapshotStatus.moveToStarted(startTime, indexIncrementalFileCount, + indexTotalNumberOfFiles, indexIncrementalSize, indexTotalFileCount); - // now create and write the commit point - final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getName(), - lastSnapshotStatus.getIndexVersion(), - indexCommitPointFiles, - lastSnapshotStatus.getStartTime(), - threadPool.absoluteTimeInMillis() - lastSnapshotStatus.getStartTime(), - lastSnapshotStatus.getIncrementalFileCount(), - lastSnapshotStatus.getIncrementalSize() - ); + assert indexIncrementalFileCount == filesToSnapshot.size(); - logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId); - try { - indexShardSnapshotFormat.write(snapshot, shardContainer, snapshotId.getUUID()); - } catch (IOException e) { - throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e); - } + final StepListener> allFilesUploadedListener = new StepListener<>(); + allFilesUploadedListener.whenComplete(v -> { + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = + snapshotStatus.moveToFinalize(snapshotIndexCommit.getGeneration()); - // delete all files that are not referenced by any commit point - // build a new BlobStoreIndexShardSnapshot, that includes this one and all the saved ones - List newSnapshotsList = new ArrayList<>(); - newSnapshotsList.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles())); - for (SnapshotFiles point : snapshots) { - newSnapshotsList.add(point); - } - final String indexGeneration = Long.toString(fileListGeneration + 1); - final List blobsToDelete; - try { - final BlobStoreIndexShardSnapshots updatedSnapshots = new BlobStoreIndexShardSnapshots(newSnapshotsList); - indexShardSnapshotsFormat.writeAtomic(updatedSnapshots, shardContainer, indexGeneration); - // Delete all previous index-N blobs - blobsToDelete = - blobs.keySet().stream().filter(blob -> blob.startsWith(SNAPSHOT_INDEX_PREFIX)).collect(Collectors.toList()); - assert blobsToDelete.stream().mapToLong(b -> Long.parseLong(b.replaceFirst(SNAPSHOT_INDEX_PREFIX, ""))) - .max().orElse(-1L) < Long.parseLong(indexGeneration) - : "Tried to delete an index-N blob newer than the current generation [" + indexGeneration + "] when deleting index-N" + - " blobs " + blobsToDelete; - } catch (IOException e) { - throw new IndexShardSnapshotFailedException(shardId, - "Failed to finalize snapshot creation [" + snapshotId + "] with shard index [" - + indexShardSnapshotsFormat.blobName(indexGeneration) + "]", e); + // now create and write the commit point + final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getName(), + lastSnapshotStatus.getIndexVersion(), + indexCommitPointFiles, + lastSnapshotStatus.getStartTime(), + threadPool.absoluteTimeInMillis() - lastSnapshotStatus.getStartTime(), + lastSnapshotStatus.getIncrementalFileCount(), + lastSnapshotStatus.getIncrementalSize() + ); + + logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId); + try { + 
indexShardSnapshotFormat.write(snapshot, shardContainer, snapshotId.getUUID(), false); + } catch (IOException e) { + throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e); + } + // delete all files that are not referenced by any commit point + // build a new BlobStoreIndexShardSnapshot, that includes this one and all the saved ones + List newSnapshotsList = new ArrayList<>(); + newSnapshotsList.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles())); + for (SnapshotFiles point : snapshots) { + newSnapshotsList.add(point); + } + final String indexGeneration = Long.toString(fileListGeneration + 1); + final List blobsToDelete; + try { + final BlobStoreIndexShardSnapshots updatedSnapshots = new BlobStoreIndexShardSnapshots(newSnapshotsList); + indexShardSnapshotsFormat.writeAtomic(updatedSnapshots, shardContainer, indexGeneration); + // Delete all previous index-N blobs + blobsToDelete = + blobs.keySet().stream().filter(blob -> blob.startsWith(SNAPSHOT_INDEX_PREFIX)).collect(Collectors.toList()); + assert blobsToDelete.stream().mapToLong(b -> Long.parseLong(b.replaceFirst(SNAPSHOT_INDEX_PREFIX, ""))) + .max().orElse(-1L) < Long.parseLong(indexGeneration) + : "Tried to delete an index-N blob newer than the current generation [" + indexGeneration + + "] when deleting index-N blobs " + blobsToDelete; + } catch (IOException e) { + throw new IndexShardSnapshotFailedException(shardId, + "Failed to finalize snapshot creation [" + snapshotId + "] with shard index [" + + indexShardSnapshotsFormat.blobName(indexGeneration) + "]", e); + } + try { + shardContainer.deleteBlobsIgnoringIfNotExists(blobsToDelete); + } catch (IOException e) { + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete old index-N blobs during finalization", + snapshotId, shardId), e); + } + snapshotStatus.moveToDone(threadPool.absoluteTimeInMillis()); + snapshotDoneListener.onResponse(null); + }, snapshotDoneListener::onFailure); + if (indexIncrementalFileCount == 0) { + allFilesUploadedListener.onResponse(Collections.emptyList()); + return; } - try { - shardContainer.deleteBlobsIgnoringIfNotExists(blobsToDelete); - } catch (IOException e) { - logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete old index-N blobs during finalization", - snapshotId, shardId), e); + final GroupedActionListener filesListener = + new GroupedActionListener<>(allFilesUploadedListener, indexIncrementalFileCount); + final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); + for (BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo : filesToSnapshot) { + executor.execute(new ActionRunnable<>(filesListener) { + @Override + protected void doRun() { + try { + snapshotFile(snapshotFileInfo, indexId, shardId, snapshotId, snapshotStatus, store); + filesListener.onResponse(null); + } catch (IOException e) { + throw new IndexShardSnapshotFailedException(shardId, "Failed to perform snapshot (index files)", e); + } + } + }); } - snapshotStatus.moveToDone(threadPool.absoluteTimeInMillis()); } catch (Exception e) { - snapshotStatus.moveToFailed(threadPool.absoluteTimeInMillis(), ExceptionsHelper.detailedMessage(e)); - if (e instanceof IndexShardSnapshotFailedException) { - throw (IndexShardSnapshotFailedException) e; - } else { - throw new IndexShardSnapshotFailedException(store.shardId(), e); - } + snapshotDoneListener.onFailure(e); } } @@ -1221,6 +1249,7 @@ private void snapshotFile(BlobStoreIndexShardSnapshot.FileInfo fileInfo, IndexId IndexShardSnapshotStatus 
snapshotStatus, Store store) throws IOException { final BlobContainer shardContainer = shardContainer(indexId, shardId); final String file = fileInfo.physicalName(); + store.incRef(); try (IndexInput indexInput = store.openVerifyingInput(file, IOContext.READONCE, fileInfo.metadata())) { for (int i = 0; i < fileInfo.numberOfParts(); i++) { final long partBytes = fileInfo.partBytes(i); @@ -1260,6 +1289,8 @@ private void checkAborted() { failStoreIfCorrupted(store, t); snapshotStatus.addProcessedFile(0); throw t; + } finally { + store.decRef(); } } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java index bd2b4900ece71..4d8a25ca8d50d 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -175,15 +175,16 @@ public void writeAtomic(T obj, BlobContainer blobContainer, String name) throws *

* The blob will be compressed and checksum will be written if required. * - * @param obj object to be serialized - * @param blobContainer blob container - * @param name blob name + * @param obj object to be serialized + * @param blobContainer blob container + * @param name blob name + * @param failIfAlreadyExists Whether to fail if the blob already exists */ - public void write(T obj, BlobContainer blobContainer, String name) throws IOException { + public void write(T obj, BlobContainer blobContainer, String name, boolean failIfAlreadyExists) throws IOException { final String blobName = blobName(name); writeTo(obj, blobName, bytesArray -> { try (InputStream stream = bytesArray.streamInput()) { - blobContainer.writeBlob(blobName, stream, bytesArray.length(), true); + blobContainer.writeBlob(blobName, stream, bytesArray.length(), failIfAlreadyExists); } }); } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/package-info.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/package-info.java index 0a66df3cf8521..5cc98f6c3e99b 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/package-info.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/package-info.java @@ -118,17 +118,19 @@ * *

Creating a snapshot in the repository happens in the three steps described in detail below.

 *
- * Initializing a Snapshot in the Repository
+ * Initializing a Snapshot in the Repository (Mixed Version Clusters only)
 *
- * Creating a snapshot in the repository starts with a call to {@link org.elasticsearch.repositories.Repository#initializeSnapshot} which
- * the blob store repository implements via the following actions:
+ * In mixed version clusters that contain a node older than
+ * {@link org.elasticsearch.snapshots.SnapshotsService#NO_REPO_INITIALIZE_VERSION}, creating a snapshot in the repository starts with a
+ * call to {@link org.elasticsearch.repositories.Repository#initializeSnapshot} which the blob store repository implements via the
+ * following actions:
 *
 *   1. Verify that no snapshot by the requested name exists.
 *   2. Write a blob containing the cluster metadata to the root of the blob store repository at {@code /meta-${snapshot-uuid}.dat}
 *   3. Write the metadata for each index to a blob in that index's directory at
 *      {@code /indices/${index-snapshot-uuid}/meta-${snapshot-uuid}.dat}
 *
- * TODO: This behavior is problematic, adjust these docs once https://github.com/elastic/elasticsearch/issues/41581 is fixed
+ * TODO: Remove this section once BwC logic it references is removed
 *
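A rough sketch of the blob names this step produces (the helpers below are hypothetical; the real names are built by {@code BlobStoreRepository} and {@code ChecksumBlobStoreFormat}):

// Hypothetical helpers mirroring the repository layout described above.
static String globalMetaDataBlob(String snapshotUuid) {
    return "meta-" + snapshotUuid + ".dat";                                    // written at the repository root
}
static String indexMetaDataBlob(String indexSnapshotUuid, String snapshotUuid) {
    return "indices/" + indexSnapshotUuid + "/meta-" + snapshotUuid + ".dat";  // written once per index in the snapshot
}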

Writing Shard Data (Segments)

 *
@@ -164,6 +166,9 @@
 * to finalizing the snapshot by invoking {@link org.elasticsearch.repositories.Repository#finalizeSnapshot}. This method executes the
 * following actions in order:
 *
+ *   1. Write a blob containing the cluster metadata to the root of the blob store repository at {@code /meta-${snapshot-uuid}.dat}
+ *   2. Write the metadata for each index to a blob in that index's directory at
+ *      {@code /indices/${index-snapshot-uuid}/meta-${snapshot-uuid}.dat}
 *   3. Write the {@link org.elasticsearch.snapshots.SnapshotInfo} blob for the given snapshot to the key {@code /snap-${snapshot-uuid}.dat}
 *      directly under the repository root.
  7. Write an updated {@code RepositoryData} blob to the key {@code /index-${N+1}} using the {@code N} determined when initializing the diff --git a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java index f338e0ee4cb08..61558e4f42efa 100644 --- a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java @@ -71,7 +71,7 @@ public class FsRepository extends BlobStoreRepository { */ public FsRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, ThreadPool threadPool) { - super(metadata, environment.settings(), namedXContentRegistry, threadPool, BlobPath.cleanPath()); + super(metadata, namedXContentRegistry, threadPool, BlobPath.cleanPath()); this.environment = environment; String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings()); if (location.isEmpty()) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index 1fd2b3b5133a0..07006d395890f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -37,6 +37,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MappedFieldType.Relation; import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -46,7 +47,6 @@ import org.elasticsearch.search.aggregations.bucket.MultiBucketAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; -import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; @@ -68,7 +68,7 @@ /** * A builder for histograms on date fields. */ -public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuilder +public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuilder implements MultiBucketAggregationBuilder, DateIntervalConsumer { public static final String NAME = "date_histogram"; @@ -95,7 +95,7 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil private static final ObjectParser PARSER; static { PARSER = new ObjectParser<>(DateHistogramAggregationBuilder.NAME); - ValuesSourceParserHelper.declareNumericFields(PARSER, true, true, true); + ValuesSourceParserHelper.declareAnyFields(PARSER, true, true, true); DateIntervalWrapper.declareIntervalFields(PARSER); @@ -131,7 +131,7 @@ public static DateHistogramAggregationBuilder parse(String aggregationName, XCon /** Create a new builder with the given name. 
*/ public DateHistogramAggregationBuilder(String name) { - super(name, ValuesSourceType.NUMERIC, ValueType.DATE); + super(name, ValuesSourceType.ANY, ValueType.DATE); } protected DateHistogramAggregationBuilder(DateHistogramAggregationBuilder clone, @@ -152,7 +152,7 @@ protected AggregationBuilder shallowCopy(Builder factoriesBuilder, Map innerBuild(SearchContext context, ValuesSourceConfig config, + protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { final ZoneId tz = timeZone(); final Rounding rounding = dateHistogramInterval.createRounding(tz); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index 419125b5f4e47..210012c20a781 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -20,13 +20,13 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.common.Rounding; +import org.elasticsearch.index.mapper.RangeType; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; -import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.internal.SearchContext; @@ -36,7 +36,7 @@ import java.util.Map; public final class DateHistogramAggregatorFactory - extends ValuesSourceAggregatorFactory { + extends ValuesSourceAggregatorFactory { private final long offset; private final BucketOrder order; @@ -46,7 +46,7 @@ public final class DateHistogramAggregatorFactory private final Rounding rounding; private final Rounding shardRounding; - public DateHistogramAggregatorFactory(String name, ValuesSourceConfig config, + public DateHistogramAggregatorFactory(String name, ValuesSourceConfig config, long offset, BucketOrder order, boolean keyed, long minDocCount, Rounding rounding, Rounding shardRounding, ExtendedBounds extendedBounds, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, @@ -66,12 +66,34 @@ public long minDocCount() { } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket, + protected ValuesSource resolveMissingAny(Object missing) { + if (missing instanceof Number) { + return ValuesSource.Numeric.EMPTY; + } + throw new IllegalArgumentException("Only numeric missing values are supported for date histogram aggregation, found [" + + missing + "]"); + } + + @Override + protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator parent, boolean collectsFromSingleBucket, List pipelineAggregators, Map metaData) throws 
IOException { if (collectsFromSingleBucket == false) { return asMultiBucketAggregator(this, context, parent); } - return createAggregator(valuesSource, parent, pipelineAggregators, metaData); + if (valuesSource instanceof ValuesSource.Numeric) { + return createAggregator((ValuesSource.Numeric) valuesSource, parent, pipelineAggregators, metaData); + } else if (valuesSource instanceof ValuesSource.Range) { + ValuesSource.Range rangeValueSource = (ValuesSource.Range) valuesSource; + if (rangeValueSource.rangeType() != RangeType.DATE) { + throw new IllegalArgumentException("Expected date range type but found range type [" + rangeValueSource.rangeType().name + + "]"); + } + return createRangeAggregator((ValuesSource.Range) valuesSource, parent, pipelineAggregators, metaData); + } + else { + throw new IllegalArgumentException("Expected one of [Date, Range] values source, found [" + + valuesSource.toString() + "]"); + } } private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List pipelineAggregators, @@ -80,6 +102,13 @@ private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregato valuesSource, config.format(), context, parent, pipelineAggregators, metaData); } + private Aggregator createRangeAggregator(ValuesSource.Range valuesSource, Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + return new DateRangeHistogramAggregator(name, factories, rounding, shardRounding, offset, order, keyed, minDocCount, extendedBounds, + valuesSource, config.format(), context, parent, pipelineAggregators, metaData); + } + @Override protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregator.java new file mode 100644 index 0000000000000..9eed2a542f9dd --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregator.java @@ -0,0 +1,195 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
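For context, a hedged sketch of the request this dispatch enables: a {@code date_histogram} aggregation over a {@code date_range} field. The field and aggregation names are illustrative, and {@code searchSourceBuilder} is assumed to exist.

// Assumes "booking_dates" is mapped as a date_range field; names are illustrative.
DateHistogramAggregationBuilder agg = AggregationBuilders
    .dateHistogram("bookings_per_month")
    .field("booking_dates")
    .calendarInterval(DateHistogramInterval.MONTH);
searchSourceBuilder.aggregation(agg);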
+ */ +package org.elasticsearch.search.aggregations.bucket.histogram; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Rounding; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.util.LongHash; +import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalOrder; +import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; +import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +/** + * An aggregator for date values. Every date is rounded down using a configured + * {@link Rounding}. + * + * @see Rounding + */ +class DateRangeHistogramAggregator extends BucketsAggregator { + + private final ValuesSource.Range valuesSource; + private final DocValueFormat formatter; + private final Rounding rounding; + private final Rounding shardRounding; + private final BucketOrder order; + private final boolean keyed; + + private final long minDocCount; + private final ExtendedBounds extendedBounds; + + private final LongHash bucketOrds; + private long offset; + + DateRangeHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, Rounding shardRounding, + long offset, BucketOrder order, boolean keyed, + long minDocCount, @Nullable ExtendedBounds extendedBounds, @Nullable ValuesSource.Range valuesSource, + DocValueFormat formatter, SearchContext aggregationContext, + Aggregator parent, List pipelineAggregators, + Map metaData) throws IOException { + + super(name, factories, aggregationContext, parent, pipelineAggregators, metaData); + this.rounding = rounding; + this.shardRounding = shardRounding; + this.offset = offset; + this.order = InternalOrder.validate(order, this); + this.keyed = keyed; + this.minDocCount = minDocCount; + this.extendedBounds = extendedBounds; + this.valuesSource = valuesSource; + this.formatter = formatter; + + bucketOrds = new LongHash(1, aggregationContext.bigArrays()); + } + + @Override + public ScoreMode scoreMode() { + if (valuesSource != null && valuesSource.needsScores()) { + return ScoreMode.COMPLETE; + } + return super.scoreMode(); + } + + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, + final LeafBucketCollector sub) throws IOException { + if (valuesSource == null) { + return LeafBucketCollector.NO_OP_COLLECTOR; + } + final SortedBinaryDocValues values = valuesSource.bytesValues(ctx); + final RangeType rangeType = valuesSource.rangeType(); + return new LeafBucketCollectorBase(sub, values) { + 
@Override + public void collect(int doc, long bucket) throws IOException { + assert bucket == 0; + if (values.advanceExact(doc)) { + // Is it possible for valuesCount to be > 1 here? Multiple ranges are encoded into the same BytesRef in the binary doc + // values, so it isn't clear what we'd be iterating over. + final int valuesCount = values.docValueCount(); + assert valuesCount == 1 : "Value count for ranges should always be 1"; + long previousKey = Long.MIN_VALUE; + + for (int i = 0; i < valuesCount; i++) { + BytesRef encodedRanges = values.nextValue(); + List ranges = rangeType.decodeRanges(encodedRanges); + long previousFrom = Long.MIN_VALUE; + for (RangeFieldMapper.Range range : ranges) { + final Long from = (Long) range.getFrom(); + // The encoding should ensure that this assert is always true. + assert from >= previousFrom : "Start of range not >= previous start"; + final Long to = (Long) range.getTo(); + final long startKey = offsetAwareRounding(shardRounding, from, offset); + final long endKey = offsetAwareRounding(shardRounding, to, offset); + for (long key = startKey > previousKey ? startKey : previousKey; key <= endKey; + key = shardRounding.nextRoundingValue(key)) { + if (key == previousKey) { + continue; + } + // Bucket collection identical to NumericHistogramAggregator, could be refactored + long bucketOrd = bucketOrds.add(key); + if (bucketOrd < 0) { // already seen + bucketOrd = -1 - bucketOrd; + collectExistingBucket(sub, doc, bucketOrd); + } else { + collectBucket(sub, doc, bucketOrd); + } + } + if (endKey > previousKey) { + previousKey = endKey; + } + } + + } + } + } + }; + } + + private long offsetAwareRounding(Rounding rounding, long value, long offset) { + return rounding.round(value - offset) + offset; + } + + @Override + public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { + assert owningBucketOrdinal == 0; + consumeBucketsAndMaybeBreak((int) bucketOrds.size()); + + List buckets = new ArrayList<>((int) bucketOrds.size()); + for (long i = 0; i < bucketOrds.size(); i++) { + buckets.add(new InternalDateHistogram.Bucket(bucketOrds.get(i), bucketDocCount(i), keyed, formatter, bucketAggregations(i))); + } + + // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order + CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator(this)); + + // value source will be null for unmapped fields + // Important: use `rounding` here, not `shardRounding` + InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 + ? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) + : null; + return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, formatter, keyed, + pipelineAggregators(), metaData()); + } + + @Override + public InternalAggregation buildEmptyAggregation() { + InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 + ? 
new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) + : null; + return new InternalDateHistogram(name, Collections.emptyList(), order, minDocCount, offset, emptyBucketInfo, formatter, keyed, + pipelineAggregators(), metaData()); + } + + @Override + public void doClose() { + Releasables.close(bucketOrds); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java index ecb65df433a69..081d2055c2d89 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -34,7 +35,6 @@ import org.elasticsearch.search.aggregations.bucket.MultiBucketAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; -import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; @@ -48,9 +48,10 @@ import java.util.Objects; /** - * A builder for histograms on numeric fields. + * A builder for histograms on numeric fields. This builder can operate on either base numeric fields, or numeric range fields. IP range + * fields are unsupported, and will throw at the factory layer. */ -public class HistogramAggregationBuilder extends ValuesSourceAggregationBuilder +public class HistogramAggregationBuilder extends ValuesSourceAggregationBuilder implements MultiBucketAggregationBuilder { public static final String NAME = "histogram"; @@ -65,7 +66,7 @@ public class HistogramAggregationBuilder extends ValuesSourceAggregationBuilder< private static final ObjectParser PARSER; static { PARSER = new ObjectParser<>(HistogramAggregationBuilder.NAME); - ValuesSourceParserHelper.declareNumericFields(PARSER, true, true, false); + ValuesSourceParserHelper.declareAnyFields(PARSER, true, true); PARSER.declareDouble(HistogramAggregationBuilder::interval, Histogram.INTERVAL_FIELD); @@ -95,9 +96,15 @@ public static HistogramAggregationBuilder parse(String aggregationName, XContent private boolean keyed = false; private long minDocCount = 0; + @Override + protected ValuesSourceType resolveScriptAny(Script script) { + // TODO: No idea how we'd support Range scripts here. + return ValuesSourceType.NUMERIC; + } + /** Create a new builder with the given name. 
*/ public HistogramAggregationBuilder(String name) { - super(name, ValuesSourceType.NUMERIC, ValueType.DOUBLE); + super(name, ValuesSourceType.ANY, ValueType.NUMERIC); } protected HistogramAggregationBuilder(HistogramAggregationBuilder clone, Builder factoriesBuilder, Map metaData) { @@ -118,7 +125,7 @@ protected AggregationBuilder shallowCopy(Builder factoriesBuilder, Map innerBuild(SearchContext context, ValuesSourceConfig config, + protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { return new HistogramAggregatorFactory(name, config, interval, offset, order, keyed, minDocCount, minBound, maxBound, context, parent, subFactoriesBuilder, metaData); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java index b6828c8e84d1b..6fac7e514be9f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java @@ -22,10 +22,9 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; -import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.internal.SearchContext; @@ -34,7 +33,11 @@ import java.util.List; import java.util.Map; -public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFactory { +/** + * Constructs the per-shard aggregator instance for histogram aggregation. Selects the numeric or range field implementation based on the + * field type. 
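As with the date variant, the plain histogram can now target a numeric range field. A minimal usage sketch, assuming an {@code integer_range} or {@code double_range} field named "price_range" (illustrative only):

// Each range document contributes to every bucket its range overlaps.
HistogramAggregationBuilder agg = AggregationBuilders
    .histogram("price_buckets")
    .field("price_range")
    .interval(50);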
+ */ +public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFactory { private final double interval, offset; private final BucketOrder order; @@ -42,10 +45,19 @@ public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFact private final long minDocCount; private final double minBound, maxBound; - public HistogramAggregatorFactory(String name, ValuesSourceConfig config, double interval, double offset, - BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound, - SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { + @Override + protected ValuesSource resolveMissingAny(Object missing) { + if (missing instanceof Number) { + return ValuesSource.Numeric.EMPTY; + } + throw new IllegalArgumentException("Only numeric missing values are supported for histogram aggregation, found [" + + missing + "]"); + } + + public HistogramAggregatorFactory(String name, ValuesSourceConfig config, double interval, double offset, + BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound, + SearchContext context, AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); this.interval = interval; this.offset = offset; @@ -61,24 +73,34 @@ public long minDocCount() { } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket, + protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator parent, boolean collectsFromSingleBucket, List pipelineAggregators, Map metaData) throws IOException { if (collectsFromSingleBucket == false) { return asMultiBucketAggregator(this, context, parent); } - return createAggregator(valuesSource, parent, pipelineAggregators, metaData); - } - - private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List pipelineAggregators, - Map metaData) throws IOException { - - return new HistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound, maxBound, valuesSource, - config.format(), context, parent, pipelineAggregators, metaData); + if (valuesSource instanceof ValuesSource.Numeric) { + return new NumericHistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound, maxBound, + (ValuesSource.Numeric) valuesSource, config.format(), context, parent, pipelineAggregators, metaData); + } else if (valuesSource instanceof ValuesSource.Range) { + ValuesSource.Range rangeValueSource = (ValuesSource.Range) valuesSource; + if (rangeValueSource.rangeType().isNumeric() == false) { + throw new IllegalArgumentException("Expected numeric range type but found non-numeric range [" + + rangeValueSource.rangeType().name + "]"); + } + return new RangeHistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound, maxBound, + (ValuesSource.Range) valuesSource, config.format(), context, parent, pipelineAggregators, + metaData); + } + else { + throw new IllegalArgumentException("Expected one of [Numeric, Range] values source, found [" + + valuesSource.toString() + "]"); + } } @Override protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { - return createAggregator(null, parent, pipelineAggregators, metaData); + return new 
NumericHistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound, maxBound, + null, config.format(), context, parent, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/NumericHistogramAggregator.java similarity index 92% rename from server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/NumericHistogramAggregator.java index 1295cec2e4b6d..b63cf94a98085 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/NumericHistogramAggregator.java @@ -52,7 +52,7 @@ * written as {@code interval * x + offset} and yet is less than or equal to * {@code value}. */ -class HistogramAggregator extends BucketsAggregator { +class NumericHistogramAggregator extends BucketsAggregator { private final ValuesSource.Numeric valuesSource; private final DocValueFormat formatter; @@ -64,11 +64,11 @@ class HistogramAggregator extends BucketsAggregator { private final LongHash bucketOrds; - HistogramAggregator(String name, AggregatorFactories factories, double interval, double offset, - BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound, - @Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter, - SearchContext context, Aggregator parent, - List pipelineAggregators, Map metaData) throws IOException { + NumericHistogramAggregator(String name, AggregatorFactories factories, double interval, double offset, + BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound, + @Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter, + SearchContext context, Aggregator parent, + List pipelineAggregators, Map metaData) throws IOException { super(name, factories, context, parent, pipelineAggregators, metaData); if (interval <= 0) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/RangeHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/RangeHistogramAggregator.java new file mode 100644 index 0000000000000..1a722dc951418 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/RangeHistogramAggregator.java @@ -0,0 +1,175 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.bucket.histogram; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.util.LongHash; +import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalOrder; +import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; +import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class RangeHistogramAggregator extends BucketsAggregator { + private final ValuesSource.Range valuesSource; + private final DocValueFormat formatter; + private final double interval, offset; + private final BucketOrder order; + private final boolean keyed; + private final long minDocCount; + private final double minBound, maxBound; + + private final LongHash bucketOrds; + + RangeHistogramAggregator(String name, AggregatorFactories factories, double interval, double offset, + BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound, + @Nullable ValuesSource.Range valuesSource, DocValueFormat formatter, + SearchContext context, Aggregator parent, + List pipelineAggregators, Map metaData) throws IOException { + + super(name, factories, context, parent, pipelineAggregators, metaData); + if (interval <= 0) { + throw new IllegalArgumentException("interval must be positive, got: " + interval); + } + this.interval = interval; + this.offset = offset; + this.order = InternalOrder.validate(order, this); + this.keyed = keyed; + this.minDocCount = minDocCount; + this.minBound = minBound; + this.maxBound = maxBound; + this.valuesSource = valuesSource; + this.formatter = formatter; + + bucketOrds = new LongHash(1, context.bigArrays()); + } + + @Override + protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { + if (valuesSource == null) { + return LeafBucketCollector.NO_OP_COLLECTOR; + } + final SortedBinaryDocValues values = valuesSource.bytesValues(ctx); + final RangeType rangeType = valuesSource.rangeType(); + return new LeafBucketCollectorBase(sub, values) { + @Override + public void collect(int doc, long bucket) throws IOException { + assert bucket == 0; + if (values.advanceExact(doc)) { + // Is it possible for valuesCount to be > 1 here? Multiple ranges are encoded into the same BytesRef in the binary doc + // values, so it isn't clear what we'd be iterating over. 
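// Illustration only, not part of this patch: which bucket keys a single decoded range contributes to.
// With interval = 5 and offset = 0, the range [3, 17] lands in the buckets starting at 0, 5, 10 and 15.
// The helper below is a hypothetical standalone version of the key arithmetic used in collect().
static List<Double> bucketStartsForRange(double from, double to, double interval, double offset) {
    List<Double> starts = new ArrayList<>();
    double startKey = Math.floor((from - offset) / interval);
    double endKey = Math.floor((to - offset) / interval);
    for (double key = startKey; key <= endKey; key++) {
        starts.add(key * interval + offset); // scaled key converted back to the bucket's lower bound
    }
    return starts;
}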
+ final int valuesCount = values.docValueCount(); + assert valuesCount == 1 : "Value count for ranges should always be 1"; + double previousKey = Double.NEGATIVE_INFINITY; + + for (int i = 0; i < valuesCount; i++) { + BytesRef encodedRanges = values.nextValue(); + List ranges = rangeType.decodeRanges(encodedRanges); + double previousFrom = Double.NEGATIVE_INFINITY; + for (RangeFieldMapper.Range range : ranges) { + final Double from = rangeType.doubleValue(range.getFrom()); + // The encoding should ensure that this assert is always true. + assert from >= previousFrom : "Start of range not >= previous start"; + final Double to = rangeType.doubleValue(range.getTo()); + final double startKey = Math.floor((from - offset) / interval); + final double endKey = Math.floor((to - offset) / interval); + for (double key = startKey > previousKey ? startKey : previousKey; key <= endKey; key++) { + if (key == previousKey) { + continue; + } + // Bucket collection identical to NumericHistogramAggregator, could be refactored + long bucketOrd = bucketOrds.add(Double.doubleToLongBits(key)); + if (bucketOrd < 0) { // already seen + bucketOrd = -1 - bucketOrd; + collectExistingBucket(sub, doc, bucketOrd); + } else { + collectBucket(sub, doc, bucketOrd); + } + } + if (endKey > previousKey) { + previousKey = endKey; + } + } + + } + } + } + }; + } + + // TODO: buildAggregation and buildEmptyAggregation are literally just copied out of NumericHistogramAggregator. We could refactor + // this to an abstract super class, if we wanted to. Might be overkill. + @Override + public InternalAggregation buildAggregation(long bucket) throws IOException { + assert bucket == 0; + consumeBucketsAndMaybeBreak((int) bucketOrds.size()); + List buckets = new ArrayList<>((int) bucketOrds.size()); + for (long i = 0; i < bucketOrds.size(); i++) { + double roundKey = Double.longBitsToDouble(bucketOrds.get(i)); + double key = roundKey * interval + offset; + buckets.add(new InternalHistogram.Bucket(key, bucketDocCount(i), keyed, formatter, bucketAggregations(i))); + } + + // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order + CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator(this)); + + InternalHistogram.EmptyBucketInfo emptyBucketInfo = null; + if (minDocCount == 0) { + emptyBucketInfo = new InternalHistogram.EmptyBucketInfo(interval, offset, minBound, maxBound, buildEmptySubAggregations()); + } + return new InternalHistogram(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, pipelineAggregators(), + metaData()); + } + + @Override + public InternalAggregation buildEmptyAggregation() { + InternalHistogram.EmptyBucketInfo emptyBucketInfo = null; + if (minDocCount == 0) { + emptyBucketInfo = new InternalHistogram.EmptyBucketInfo(interval, offset, minBound, maxBound, buildEmptySubAggregations()); + } + return new InternalHistogram(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed, + pipelineAggregators(), metaData()); + } + + @Override + public void doClose() { + Releasables.close(bucketOrds); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java index de729b619dcd4..c0fd5f26eb559 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.missing; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; @@ -78,7 +79,7 @@ protected void innerWriteTo(StreamOutput out) { } @Override - protected boolean serializeTargetValueType() { + protected boolean serializeTargetValueType(Version version) { return true; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java index 75b32f8abe062..dab9cf34dbb7c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.search.aggregations.bucket.significant; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -155,7 +156,7 @@ protected void innerWriteTo(StreamOutput out) throws IOException { } @Override - protected boolean serializeTargetValueType() { + protected boolean serializeTargetValueType(Version version) { return true; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java index 285869dd2e0cf..f22eaf4d28a59 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.search.aggregations.bucket.terms; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -94,7 +95,7 @@ public RareTermsAggregationBuilder(StreamInput in) throws IOException { } @Override - protected boolean serializeTargetValueType() { + protected boolean serializeTargetValueType(Version version) { return true; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java index a124feb115b19..7d5bda9ef1b81 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.search.aggregations.bucket.terms; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -140,7 +141,7 @@ public TermsAggregationBuilder(StreamInput in) throws IOException { } @Override - protected boolean serializeTargetValueType() { + protected boolean 
serializeTargetValueType(Version version) { return true; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java index 8d927e2fa59eb..0cc2b7d09c0c2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -96,7 +97,7 @@ protected void innerWriteTo(StreamOutput out) throws IOException { } @Override - protected boolean serializeTargetValueType() { + protected boolean serializeTargetValueType(Version version) { return true; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalCardinality.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalCardinality.java index bfe82c6bef659..c3132a299042e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalCardinality.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalCardinality.java @@ -80,7 +80,7 @@ public long getValue() { return counts == null ? 0 : counts.cardinality(0); } - HyperLogLogPlusPlus getCounts() { + public HyperLogLogPlusPlus getCounts() { return counts; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java index ccf8ef8ba3dca..845fab414a3ac 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; @@ -78,7 +79,7 @@ protected void innerWriteTo(StreamOutput out) { } @Override - protected boolean serializeTargetValueType() { + protected boolean serializeTargetValueType(Version version) { return true; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/MissingValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/MissingValues.java index d7b56af2439e0..c61091fd2a12c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/MissingValues.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/MissingValues.java @@ -49,6 +49,11 @@ public SortedBinaryDocValues bytesValues(LeafReaderContext context) throws IOExc SortedBinaryDocValues values = valuesSource.bytesValues(context); return replaceMissing(values, missing); } + + @Override + public String toString() { + return "anon ValuesSource.Bytes of [" + super.toString() + "]"; + } }; } @@ -82,6 +87,10 @@ public BytesRef nextValue() throws IOException { return missing; } } + @Override + public String toString() { + return "anon SortedBinaryDocValues of [" + super.toString() + "]"; + } }; } @@ 
-111,6 +120,10 @@ public SortedNumericDoubleValues doubleValues(LeafReaderContext context) throws final SortedNumericDoubleValues values = valuesSource.doubleValues(context); return replaceMissing(values, missing.doubleValue()); } + @Override + public String toString() { + return "anon ValuesSource.Numeric of [" + super.toString() + "]"; + } }; } @@ -145,6 +158,11 @@ public boolean advanceExact(int doc) throws IOException { return true; } + @Override + public String toString() { + return "anon SortedNumericDocValues of [" + super.toString() + "]"; + } + }; } @@ -179,6 +197,11 @@ public int docValueCount() { return count == 0 ? 1 : count; } + @Override + public String toString() { + return "anon SortedNumericDoubleValues of [" + super.toString() + "]"; + } + }; } @@ -209,6 +232,12 @@ public LongUnaryOperator globalOrdinalsMapping(LeafReaderContext context) throws valuesSource.globalOrdinalsValues(context), valuesSource.globalOrdinalsMapping(context), missing); } + + @Override + public String toString() { + return "anon ValuesSource.Bytes.WithOrdinals of [" + super.toString() + "]"; + } + }; } @@ -263,6 +292,12 @@ public boolean advanceExact(int doc) throws IOException { // the document does not have a value return true; } + + @Override + public String toString() { + return "anon AbstractSortedDocValues of [" + super.toString() + "]"; + } + }; } @@ -316,6 +351,11 @@ public boolean advanceExact(int doc) throws IOException { // the document does not have a value return true; } + + @Override + public String toString() { + return "anon AbstractSortedDocValues of [" + super.toString() + "]"; + } }; } @@ -369,6 +409,11 @@ public MultiGeoPointValues geoPointValues(LeafReaderContext context) { final MultiGeoPointValues values = valuesSource.geoPointValues(context); return replaceMissing(values, missing); } + + @Override + public String toString() { + return "anon ValuesSource.GeoPoint of [" + super.toString() + "]"; + } }; } @@ -402,6 +447,11 @@ public GeoPoint nextValue() throws IOException { return missing; } } + + @Override + public String toString() { + return "anon MultiGeoPointValues of [" + super.toString() + "]"; + } }; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java index fc23f72eddc9c..d130b385be89e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java @@ -26,6 +26,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData; +import org.elasticsearch.index.fielddata.plain.BinaryDVIndexFieldData; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.search.DocValueFormat; @@ -48,7 +49,8 @@ public enum ValueType implements Writeable { // TODO: what is the difference between "number" and "numeric"? 
NUMERIC((byte) 7, "numeric", "numeric", ValuesSourceType.NUMERIC, IndexNumericFieldData.class, DocValueFormat.RAW), GEOPOINT((byte) 8, "geo_point", "geo_point", ValuesSourceType.GEOPOINT, IndexGeoPointFieldData.class, DocValueFormat.GEOHASH), - BOOLEAN((byte) 9, "boolean", "boolean", ValuesSourceType.NUMERIC, IndexNumericFieldData.class, DocValueFormat.BOOLEAN); + BOOLEAN((byte) 9, "boolean", "boolean", ValuesSourceType.NUMERIC, IndexNumericFieldData.class, DocValueFormat.BOOLEAN), + RANGE((byte) 10, "range", "range", ValuesSourceType.RANGE, BinaryDVIndexFieldData.class, DocValueFormat.RAW); final String description; final ValuesSourceType valuesSourceType; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java index 7fd38288a821b..19a607a0f177c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java @@ -42,6 +42,7 @@ import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.fielddata.SortingBinaryDocValues; import org.elasticsearch.index.fielddata.SortingNumericDoubleValues; +import org.elasticsearch.index.mapper.RangeType; import org.elasticsearch.script.AggregationScript; import org.elasticsearch.search.aggregations.support.ValuesSource.WithScript.BytesValues; import org.elasticsearch.search.aggregations.support.values.ScriptBytesValues; @@ -65,6 +66,28 @@ public boolean needsScores() { return false; } + public static class Range extends ValuesSource { + private final RangeType rangeType; + protected final IndexFieldData indexFieldData; + + public Range(IndexFieldData indexFieldData, RangeType rangeType) { + this.indexFieldData = indexFieldData; + this.rangeType = rangeType; + } + + @Override + public SortedBinaryDocValues bytesValues(LeafReaderContext context) { + return indexFieldData.load(context).getBytesValues(); + } + + @Override + public DocValueBits docsWithValue(LeafReaderContext context) throws IOException { + final SortedBinaryDocValues bytes = bytesValues(context); + return org.elasticsearch.index.fielddata.FieldData.docsWithValue(bytes); + } + + public RangeType rangeType() { return rangeType; } + } public abstract static class Bytes extends ValuesSource { @Override @@ -193,6 +216,7 @@ public FieldData(IndexFieldData indexFieldData) { public SortedBinaryDocValues bytesValues(LeafReaderContext context) { return indexFieldData.load(context).getBytesValues(); } + } public static class Script extends Bytes { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index 11969b3f7dbeb..c0a8cc8a741fa 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.search.aggregations.support; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -61,7 +62,7 @@ protected LeafOnly(StreamInput in, ValuesSourceType valuesSourceType, ValueType /** * Read an aggregation 
from a stream that serializes its targetValueType. This should only be used by subclasses that override - * {@link #serializeTargetValueType()} to return true. + * {@link #serializeTargetValueType(Version)} to return true. */ protected LeafOnly(StreamInput in, ValuesSourceType valuesSourceType) throws IOException { super(in, valuesSourceType); @@ -108,24 +109,31 @@ protected ValuesSourceAggregationBuilder(ValuesSourceAggregationBuilder } /** - * Read an aggregation from a stream that does not serialize its targetValueType. This should be used by most subclasses. + * Read an aggregation from a stream that has a sensible default for TargetValueType. This should be used by most subclasses. + * Subclasses needing to maintain backward compatibility to a version that did not serialize TargetValueType should use this + * constructor, providing the old, constant value for TargetValueType and override {@link #serializeTargetValueType(Version)} to return + * true only for versions that support the serialization. */ protected ValuesSourceAggregationBuilder(StreamInput in, ValuesSourceType valuesSourceType, ValueType targetValueType) throws IOException { super(in); - assert false == serializeTargetValueType() : "Wrong read constructor called for subclass that provides its targetValueType"; this.valuesSourceType = valuesSourceType; - this.targetValueType = targetValueType; + if (serializeTargetValueType(in.getVersion())) { + this.targetValueType = in.readOptionalWriteable(ValueType::readFromStream); + } else { + this.targetValueType = targetValueType; + } read(in); } /** * Read an aggregation from a stream that serializes its targetValueType. This should only be used by subclasses that override - * {@link #serializeTargetValueType()} to return true. + * {@link #serializeTargetValueType(Version)} to return true. */ protected ValuesSourceAggregationBuilder(StreamInput in, ValuesSourceType valuesSourceType) throws IOException { super(in); - assert serializeTargetValueType() : "Wrong read constructor called for subclass that serializes its targetValueType"; + // TODO: Can we get rid of this constructor and always use the three value version? Does this assert provide any value? + assert serializeTargetValueType(in.getVersion()) : "Wrong read constructor called for subclass that serializes its targetValueType"; this.valuesSourceType = valuesSourceType; this.targetValueType = in.readOptionalWriteable(ValueType::readFromStream); read(in); @@ -149,7 +157,7 @@ private void read(StreamInput in) throws IOException { @Override protected final void doWriteTo(StreamOutput out) throws IOException { - if (serializeTargetValueType()) { + if (serializeTargetValueType(out.getVersion())) { out.writeOptionalWriteable(targetValueType); } out.writeOptionalString(field); @@ -177,8 +185,9 @@ protected final void doWriteTo(StreamOutput out) throws IOException { /** * Should this builder serialize its targetValueType? Defaults to false. All subclasses that override this to true should use the three * argument read constructor rather than the four argument version. + * @param version For backwards compatibility, subclasses can change behavior based on the version */ - protected boolean serializeTargetValueType() { + protected boolean serializeTargetValueType(Version version) { return false; } @@ -306,10 +315,31 @@ protected final ValuesSourceAggregatorFactory doBuild(SearchContext context, return factory; } + /** + * Provide a hook for aggregations to have finer grained control of the ValuesSourceType for script values. 
This will only be called if + * the user did not supply a type hint for the script. The script object is provided for reference. + * + * @param script - The user supplied script + * @return The ValuesSourceType we expect this script to yield. + */ + protected ValuesSourceType resolveScriptAny(Script script) { + return ValuesSourceType.BYTES; + } + + /** + * Provide a hook for aggregations to have finer grained control of the ValueType for script values. This will only be called if the + * user did not supply a type hint for the script. The script object is provided for reference + * @param script - the user supplied script + * @return The ValueType we expect this script to yield + */ + protected ValueType defaultValueType(Script script) { + return valueType; + } + protected ValuesSourceConfig resolveConfig(SearchContext context) { ValueType valueType = this.valueType != null ? this.valueType : targetValueType; return ValuesSourceConfig.resolve(context.getQueryShardContext(), - valueType, field, script, missing, timeZone, format); + valueType, field, script, missing, timeZone, format, this::resolveScriptAny); } protected abstract ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java index 919d1b752e22c..d906260c75694 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java @@ -28,6 +28,7 @@ import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.RangeFieldMapper; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.script.AggregationScript; import org.elasticsearch.script.Script; @@ -48,12 +49,27 @@ public class ValuesSourceConfig { * Resolve a {@link ValuesSourceConfig} given configuration parameters. */ public static ValuesSourceConfig resolve( - QueryShardContext context, - ValueType valueType, - String field, Script script, - Object missing, - ZoneId timeZone, - String format) { + QueryShardContext context, + ValueType valueType, + String field, Script script, + Object missing, + ZoneId timeZone, + String format) { + return resolve(context, valueType, field, script, missing, timeZone, format, s -> ValuesSourceType.BYTES); + } + + /** + * Resolve a {@link ValuesSourceConfig} given configuration parameters. 
+ */ + public static ValuesSourceConfig resolve( + QueryShardContext context, + ValueType valueType, + String field, Script script, + Object missing, + ZoneId timeZone, + String format, + Function resolveScriptAny + ) { if (field == null) { if (script == null) { @@ -67,7 +83,7 @@ public static ValuesSourceConfig resolve( // we need to have a specific value source // type to know how to handle the script values, so we fallback // on Bytes - valuesSourceType = ValuesSourceType.BYTES; + valuesSourceType = resolveScriptAny.apply(script); } ValuesSourceConfig config = new ValuesSourceConfig<>(valuesSourceType); config.missing(missing); @@ -96,18 +112,21 @@ public static ValuesSourceConfig resolve( IndexFieldData indexFieldData = context.getForField(fieldType); ValuesSourceConfig config; - if (valueType == null) { - if (indexFieldData instanceof IndexNumericFieldData) { - config = new ValuesSourceConfig<>(ValuesSourceType.NUMERIC); - } else if (indexFieldData instanceof IndexGeoPointFieldData) { - config = new ValuesSourceConfig<>(ValuesSourceType.GEOPOINT); - } else { + if (indexFieldData instanceof IndexNumericFieldData) { + config = new ValuesSourceConfig<>(ValuesSourceType.NUMERIC); + } else if (indexFieldData instanceof IndexGeoPointFieldData) { + config = new ValuesSourceConfig<>(ValuesSourceType.GEOPOINT); + } else if (fieldType instanceof RangeFieldMapper.RangeFieldType) { + config = new ValuesSourceConfig<>(ValuesSourceType.RANGE); + } else { + if (valueType == null) { config = new ValuesSourceConfig<>(ValuesSourceType.BYTES); + } else { + config = new ValuesSourceConfig<>(valueType.getValuesSourceType()); } - } else { - config = new ValuesSourceConfig<>(valueType.getValuesSourceType()); } + config.fieldContext(new FieldContext(field, indexFieldData, fieldType)); config.missing(missing); config.timezone(timeZone); @@ -303,6 +322,9 @@ private VS originalValuesSource() { if (valueSourceType() == ValuesSourceType.GEOPOINT) { return (VS) geoPointField(); } + if (valueSourceType() == ValuesSourceType.RANGE) { + return (VS) rangeField(); + } // falling back to bytes values return (VS) bytesField(); } @@ -352,4 +374,14 @@ private ValuesSource.GeoPoint geoPointField() { return new ValuesSource.GeoPoint.Fielddata((IndexGeoPointFieldData) fieldContext().indexFieldData()); } + + private ValuesSource rangeField() { + MappedFieldType fieldType = fieldContext.fieldType(); + + if (fieldType instanceof RangeFieldMapper.RangeFieldType == false) { + throw new IllegalStateException("Asked for range ValuesSource, but field is of type " + fieldType.name()); + } + RangeFieldMapper.RangeFieldType rangeFieldType = (RangeFieldMapper.RangeFieldType)fieldType; + return new ValuesSource.Range(fieldContext().indexFieldData(), rangeFieldType.rangeType()); + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParserHelper.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParserHelper.java index 24bdffaa3fa89..567862ca92e3e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParserHelper.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParserHelper.java @@ -34,9 +34,15 @@ public final class ValuesSourceParserHelper { private ValuesSourceParserHelper() {} // utility class, no instantiation public static void declareAnyFields( - AbstractObjectParser, T> objectParser, - boolean scriptable, boolean formattable) { - declareFields(objectParser, scriptable, 
formattable, false, null); + AbstractObjectParser, T> objectParser, + boolean scriptable, boolean formattable) { + declareAnyFields(objectParser, scriptable, formattable, false); + } + + public static void declareAnyFields( + AbstractObjectParser, T> objectParser, + boolean scriptable, boolean formattable, boolean timezoneAware) { + declareFields(objectParser, scriptable, formattable, timezoneAware, null); } public static void declareNumericFields( diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceType.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceType.java index a4da3e3e3c320..93398abe99e9a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceType.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceType.java @@ -30,7 +30,8 @@ public enum ValuesSourceType implements Writeable { ANY, NUMERIC, BYTES, - GEOPOINT; + GEOPOINT, + RANGE; public static ValuesSourceType fromString(String name) { return valueOf(name.trim().toUpperCase(Locale.ROOT)); diff --git a/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java b/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java index 5ae6cc739c362..751c1cd8bfbe7 100644 --- a/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java +++ b/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java @@ -33,13 +33,11 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.FieldDoc; -import org.apache.lucene.search.FilterCollector; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; @@ -238,7 +236,15 @@ private SimpleTopDocsCollectorContext(IndexReader reader, this.sortAndFormats = sortAndFormats; final TopDocsCollector topDocsCollector; - if (trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_DISABLED) { + + if ((sortAndFormats == null || SortField.FIELD_SCORE.equals(sortAndFormats.sort.getSort()[0])) + && hasInfMaxScore(query)) { + // disable max score optimization since we have a mandatory clause + // that doesn't track the maximum score + topDocsCollector = createCollector(sortAndFormats, numHits, searchAfter, Integer.MAX_VALUE); + topDocsSupplier = new CachedSupplier<>(topDocsCollector::topDocs); + totalHitsSupplier = () -> topDocsSupplier.get().totalHits; + } else if (trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_DISABLED) { // don't compute hit counts via the collector topDocsCollector = createCollector(sortAndFormats, numHits, searchAfter, 1); topDocsSupplier = new CachedSupplier<>(topDocsCollector::topDocs); @@ -274,27 +280,7 @@ private SimpleTopDocsCollectorContext(IndexReader reader, maxScoreSupplier = () -> Float.NaN; } - final Collector collector = MultiCollector.wrap(topDocsCollector, maxScoreCollector); - if (sortAndFormats == null || - SortField.FIELD_SCORE.equals(sortAndFormats.sort.getSort()[0])) { - if (hasInfMaxScore(query)) { - // disable max score optimization since we have a mandatory clause - // that doesn't track the maximum score - this.collector = new 
FilterCollector(collector) { - @Override - public ScoreMode scoreMode() { - if (in.scoreMode() == ScoreMode.TOP_SCORES) { - return ScoreMode.COMPLETE; - } - return in.scoreMode(); - } - }; - } else { - this.collector = collector; - } - } else { - this.collector = collector; - } + this.collector = MultiCollector.wrap(topDocsCollector, maxScoreCollector); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 9815c93121378..06a8bb74c1648 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -23,7 +23,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; @@ -53,9 +52,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.engine.SnapshotFailedEngineException; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; @@ -80,7 +78,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.concurrent.Executor; import java.util.function.Function; import java.util.stream.Collectors; @@ -298,46 +295,33 @@ private void startNewSnapshots(SnapshotsInProgress snapshotsInProgress) { } private void startNewShards(SnapshotsInProgress.Entry entry, Map startedShards) { - final Snapshot snapshot = entry.snapshot(); - final Map indicesMap = entry.indices().stream().collect(Collectors.toMap(IndexId::getName, Function.identity())); - final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); - for (final Map.Entry shardEntry : startedShards.entrySet()) { - final ShardId shardId = shardEntry.getKey(); - final IndexId indexId = indicesMap.get(shardId.getIndexName()); - assert indexId != null; - executor.execute(new AbstractRunnable() { - - private final SetOnce failure = new SetOnce<>(); - - @Override - public void doRun() { - final IndexShard indexShard = - indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()); - snapshot(indexShard, snapshot, indexId, shardEntry.getValue()); - } - - @Override - public void onFailure(Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", shardId, snapshot), e); - failure.set(e); - } - - @Override - public void onRejection(Exception e) { - failure.set(e); - } - - @Override - public void onAfter() { - final Exception exception = failure.get(); - if (exception != null) { - notifyFailedSnapshotShard(snapshot, shardId, ExceptionsHelper.detailedMessage(exception)); - } else { + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { + final Snapshot snapshot = entry.snapshot(); + final Map indicesMap = + entry.indices().stream().collect(Collectors.toMap(IndexId::getName, Function.identity())); + for (final Map.Entry shardEntry : 
startedShards.entrySet()) { + final ShardId shardId = shardEntry.getKey(); + final IndexShardSnapshotStatus snapshotStatus = shardEntry.getValue(); + final IndexId indexId = indicesMap.get(shardId.getIndexName()); + assert indexId != null; + snapshot(shardId, snapshot, indexId, snapshotStatus, new ActionListener<>() { + @Override + public void onResponse(final Void aVoid) { + if (logger.isDebugEnabled()) { + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); + logger.debug("snapshot ({}) completed to {} with {}", snapshot, snapshot.getRepository(), lastSnapshotStatus); + } notifySuccessfulSnapshotShard(snapshot, shardId); } - } - }); - } + + @Override + public void onFailure(Exception e) { + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", shardId, snapshot), e); + notifyFailedSnapshotShard(snapshot, shardId, ExceptionsHelper.detailedMessage(e)); + } + }); + } + }); } /** @@ -346,38 +330,37 @@ public void onAfter() { * @param snapshot snapshot * @param snapshotStatus snapshot status */ - private void snapshot(final IndexShard indexShard, final Snapshot snapshot, final IndexId indexId, - final IndexShardSnapshotStatus snapshotStatus) { - final ShardId shardId = indexShard.shardId(); - if (indexShard.routingEntry().primary() == false) { - throw new IndexShardSnapshotFailedException(shardId, "snapshot should be performed only on primary"); - } - if (indexShard.routingEntry().relocating()) { - // do not snapshot when in the process of relocation of primaries so we won't get conflicts - throw new IndexShardSnapshotFailedException(shardId, "cannot snapshot while relocating"); - } + private void snapshot(final ShardId shardId, final Snapshot snapshot, final IndexId indexId, + final IndexShardSnapshotStatus snapshotStatus, ActionListener listener) { + try { + final IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()); + if (indexShard.routingEntry().primary() == false) { + throw new IndexShardSnapshotFailedException(shardId, "snapshot should be performed only on primary"); + } + if (indexShard.routingEntry().relocating()) { + // do not snapshot when in the process of relocation of primaries so we won't get conflicts + throw new IndexShardSnapshotFailedException(shardId, "cannot snapshot while relocating"); + } - final IndexShardState indexShardState = indexShard.state(); - if (indexShardState == IndexShardState.CREATED || indexShardState == IndexShardState.RECOVERING) { - // shard has just been created, or still recovering - throw new IndexShardSnapshotFailedException(shardId, "shard didn't fully recover yet"); - } + final IndexShardState indexShardState = indexShard.state(); + if (indexShardState == IndexShardState.CREATED || indexShardState == IndexShardState.RECOVERING) { + // shard has just been created, or still recovering + throw new IndexShardSnapshotFailedException(shardId, "shard didn't fully recover yet"); + } - final Repository repository = repositoriesService.repository(snapshot.getRepository()); - try { - // we flush first to make sure we get the latest writes snapshotted - try (Engine.IndexCommitRef snapshotRef = indexShard.acquireLastIndexCommit(true)) { + final Repository repository = repositoriesService.repository(snapshot.getRepository()); + Engine.IndexCommitRef snapshotRef = null; + try { + // we flush first to make sure we get the latest writes snapshotted + snapshotRef = indexShard.acquireLastIndexCommit(true); repository.snapshotShard(indexShard.store(), 
indexShard.mapperService(), snapshot.getSnapshotId(), indexId, - snapshotRef.getIndexCommit(), snapshotStatus); - if (logger.isDebugEnabled()) { - final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); - logger.debug("snapshot ({}) completed to {} with {}", snapshot, repository, lastSnapshotStatus); - } + snapshotRef.getIndexCommit(), snapshotStatus, ActionListener.runBefore(listener, snapshotRef::close)); + } catch (Exception e) { + IOUtils.close(snapshotRef); + throw e; } - } catch (SnapshotFailedEngineException | IndexShardSnapshotFailedException e) { - throw e; } catch (Exception e) { - throw new IndexShardSnapshotFailedException(shardId, "Failed to snapshot", e); + listener.onFailure(e); } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 427b75e302ca6..bbd6938802530 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -26,6 +26,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; @@ -68,6 +69,7 @@ import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; +import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.threadpool.ThreadPool; @@ -108,12 +110,19 @@ * the {@link SnapshotShardsService#sendSnapshotShardUpdate(Snapshot, ShardId, ShardSnapshotStatus)} method
 * When last shard is completed master node in {@link SnapshotShardsService#innerUpdateSnapshotState} method marks the snapshot * as completed
- * After cluster state is updated, the {@link #endSnapshot(SnapshotsInProgress.Entry)} finalizes snapshot in the repository,
+ * After cluster state is updated, the {@link #endSnapshot(SnapshotsInProgress.Entry, MetaData)} finalizes snapshot in the repository, * notifies all {@link #snapshotCompletionListeners} that snapshot is completed, and finally calls * {@link #removeSnapshotFromClusterState(Snapshot, SnapshotInfo, Exception)} to remove snapshot from cluster state
  13. * */ public class SnapshotsService extends AbstractLifecycleComponent implements ClusterStateApplier { + + /** + * Minimum node version which does not use {@link Repository#initializeSnapshot(SnapshotId, List, MetaData)} to write snapshot metadata + * when starting a snapshot. + */ + public static final Version NO_REPO_INITIALIZE_VERSION = Version.V_8_0_0; + private static final Logger logger = LogManager.getLogger(SnapshotsService.class); private final ClusterService clusterService; @@ -398,24 +407,29 @@ protected void doRun() { assert initializingSnapshots.contains(snapshot.snapshot()); Repository repository = repositoriesService.repository(snapshot.snapshot().getRepository()); - MetaData metaData = clusterState.metaData(); - if (!snapshot.includeGlobalState()) { - // Remove global state from the cluster state - MetaData.Builder builder = MetaData.builder(); - for (IndexId index : snapshot.indices()) { - builder.put(metaData.index(index.getName()), false); - } - metaData = builder.build(); + if (repository.isReadOnly()) { + throw new RepositoryException(repository.getMetadata().name(), "cannot create snapshot in a readonly repository"); + } + final String snapshotName = snapshot.snapshot().getSnapshotId().getName(); + // check if the snapshot name already exists in the repository + if (repository.getRepositoryData().getSnapshotIds().stream().anyMatch(s -> s.getName().equals(snapshotName))) { + throw new InvalidSnapshotNameException( + repository.getMetadata().name(), snapshotName, "snapshot with the same name already exists"); + } + if (clusterState.nodes().getMinNodeVersion().onOrAfter(NO_REPO_INITIALIZE_VERSION) == false) { + // In mixed version clusters we initialize the snapshot in the repository so that in case of a master failover to an + // older version master node snapshot finalization (that assumes initializeSnapshot was called) produces a valid + // snapshot. 
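// Illustrative sketch (not part of this change): the version check above follows the usual mixed-version
// BWC pattern, where the legacy code path is kept for as long as any node in the cluster predates a
// cutoff version. The class and method names below are hypothetical and only show the shape of the check;
// the real cutoff used in this change is NO_REPO_INITIALIZE_VERSION (Version.V_8_0_0).
class MinNodeVersionGateSketch {
    // Hypothetical cutoff for behaviour that only fully-upgraded clusters understand.
    static final org.elasticsearch.Version CUTOFF = org.elasticsearch.Version.V_8_0_0;

    // Returns true when at least one node in the cluster is older than the cutoff,
    // i.e. the legacy behaviour must still be used.
    static boolean mustUseLegacyPath(org.elasticsearch.cluster.ClusterState state) {
        return state.nodes().getMinNodeVersion().onOrAfter(CUTOFF) == false;
    }
}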
+ repository.initializeSnapshot( + snapshot.snapshot().getSnapshotId(), snapshot.indices(), metaDataForSnapshot(snapshot, clusterState.metaData())); } - - repository.initializeSnapshot(snapshot.snapshot().getSnapshotId(), snapshot.indices(), metaData); snapshotCreated = true; logger.info("snapshot [{}] started", snapshot.snapshot()); if (snapshot.indices().isEmpty()) { // No indices in this snapshot - we are done userCreateSnapshotListener.onResponse(snapshot.snapshot()); - endSnapshot(snapshot); + endSnapshot(snapshot, clusterState.metaData()); return; } clusterService.submitStateUpdateTask("update_snapshot [" + snapshot.snapshot() + "]", new ClusterStateUpdateTask() { @@ -498,7 +512,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS assert snapshotsInProgress != null; final SnapshotsInProgress.Entry entry = snapshotsInProgress.snapshot(snapshot.snapshot()); assert entry != null; - endSnapshot(entry); + endSnapshot(entry, newState.metaData()); } } }); @@ -556,6 +570,7 @@ private void cleanupAfterError(Exception exception) { Collections.emptyList(), snapshot.getRepositoryStateId(), snapshot.includeGlobalState(), + metaDataForSnapshot(snapshot, clusterService.state().metaData()), snapshot.userMetadata()); } catch (Exception inner) { inner.addSuppressed(exception); @@ -565,7 +580,18 @@ private void cleanupAfterError(Exception exception) { } userCreateSnapshotListener.onFailure(e); } + } + private static MetaData metaDataForSnapshot(SnapshotsInProgress.Entry snapshot, MetaData metaData) { + if (snapshot.includeGlobalState() == false) { + // Remove global state from the cluster state + MetaData.Builder builder = MetaData.builder(); + for (IndexId index : snapshot.indices()) { + builder.put(metaData.index(index.getName()), false); + } + metaData = builder.build(); + } + return metaData; } private static SnapshotInfo inProgressSnapshot(SnapshotsInProgress.Entry entry) { @@ -713,7 +739,7 @@ public void applyClusterState(ClusterChangedEvent event) { entry -> entry.state().completed() || initializingSnapshots.contains(entry.snapshot()) == false && (entry.state() == State.INIT || completed(entry.shards().values())) - ).forEach(this::endSnapshot); + ).forEach(entry -> endSnapshot(entry, event.state().metaData())); } if (newMaster) { finalizeSnapshotDeletionFromPreviousMaster(event); @@ -960,7 +986,7 @@ private static Tuple, Set> indicesWithMissingShards( * * @param entry snapshot */ - private void endSnapshot(final SnapshotsInProgress.Entry entry) { + private void endSnapshot(SnapshotsInProgress.Entry entry, MetaData metaData) { if (endingSnapshots.add(entry.snapshot()) == false) { return; } @@ -988,6 +1014,7 @@ protected void doRun() { unmodifiableList(shardFailures), entry.getRepositoryStateId(), entry.includeGlobalState(), + metaDataForSnapshot(entry, metaData), entry.userMetadata()); removeSnapshotFromClusterState(snapshot, snapshotInfo, null); logger.info("snapshot [{}] completed with state [{}]", snapshot, snapshotInfo.state()); diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 6f00d8e00d225..8605a1ae29798 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -601,12 +601,15 @@ public void onException(TcpChannel channel, Exception e) { "cancelled key exception caught on transport layer [{}], disconnecting from relevant node", channel), e); // close the 
channel as safe measure, which will cause a node to be disconnected if relevant CloseableChannel.closeChannel(channel); - } else if (e instanceof TcpTransport.HttpOnTransportException) { + } else if (e instanceof HttpRequestOnTransportException) { // in case we are able to return data, serialize the exception content and sent it back to the client if (channel.isOpen()) { BytesArray message = new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8)); outboundHandler.sendBytes(channel, message, ActionListener.wrap(() -> CloseableChannel.closeChannel(channel))); } + } else if (e instanceof StreamCorruptedException) { + logger.warn(() -> new ParameterizedMessage("{}, [{}], closing connection", e.getMessage(), channel)); + CloseableChannel.closeChannel(channel); } else { logger.warn(() -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e); // close the channel, which will cause a node to be disconnected if relevant @@ -671,7 +674,7 @@ public void inboundMessage(TcpChannel channel, BytesReference message) { * @param bytesReference the bytes available to consume * @return the number of bytes consumed * @throws StreamCorruptedException if the message header format is not recognized - * @throws TcpTransport.HttpOnTransportException if the message header appears to be an HTTP message + * @throws HttpRequestOnTransportException if the message header appears to be an HTTP message * @throws IllegalArgumentException if the message length is greater that the maximum allowed frame size. * This is dependent on the available memory. */ @@ -693,7 +696,7 @@ public int consumeNetworkReads(TcpChannel channel, BytesReference bytesReference * @param networkBytes the will be read * @return the message decoded * @throws StreamCorruptedException if the message header format is not recognized - * @throws TcpTransport.HttpOnTransportException if the message header appears to be an HTTP message + * @throws HttpRequestOnTransportException if the message header appears to be an HTTP message * @throws IllegalArgumentException if the message length is greater that the maximum allowed frame size. * This is dependent on the available memory. */ @@ -720,7 +723,7 @@ static BytesReference decodeFrame(BytesReference networkBytes) throws IOExceptio * @param networkBytes the will be read * @return the length of the message * @throws StreamCorruptedException if the message header format is not recognized - * @throws TcpTransport.HttpOnTransportException if the message header appears to be an HTTP message + * @throws HttpRequestOnTransportException if the message header appears to be an HTTP message * @throws IllegalArgumentException if the message length is greater that the maximum allowed frame size. * This is dependent on the available memory. 
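// Illustrative sketch (not part of this change): the appearsToBeTLS check introduced below relies on the
// TLS record layout. A TLS/SSLv3 record begins with a one-byte content type (0x16 for "handshake", i.e. a
// ClientHello) followed by a protocol version whose major byte is 0x03 for SSLv3 and all TLS 1.x versions,
// so peeking at the first two bytes is enough to tell an accidental TLS connection apart from the 'E','S'
// transport header. The class and method names here are hypothetical.
class TlsProbeSketch {
    static boolean looksLikeTlsHandshake(byte[] header) {
        // 0x16 = handshake record type, 0x03 = major version byte shared by SSLv3 and TLS 1.x
        return header.length >= 2 && header[0] == 0x16 && header[1] == 0x03;
    }
}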
*/ @@ -734,15 +737,26 @@ public static int readMessageLength(BytesReference networkBytes) throws IOExcept private static int readHeaderBuffer(BytesReference headerBuffer) throws IOException { if (headerBuffer.get(0) != 'E' || headerBuffer.get(1) != 'S') { - if (appearsToBeHTTP(headerBuffer)) { - throw new TcpTransport.HttpOnTransportException("This is not an HTTP port"); + if (appearsToBeHTTPRequest(headerBuffer)) { + throw new HttpRequestOnTransportException("This is not an HTTP port"); + } + + if (appearsToBeHTTPResponse(headerBuffer)) { + throw new StreamCorruptedException("received HTTP response on transport port, ensure that transport port (not " + + "HTTP port) of a remote node is specified in the configuration"); + } + + String firstBytes = "(" + + Integer.toHexString(headerBuffer.get(0) & 0xFF) + "," + + Integer.toHexString(headerBuffer.get(1) & 0xFF) + "," + + Integer.toHexString(headerBuffer.get(2) & 0xFF) + "," + + Integer.toHexString(headerBuffer.get(3) & 0xFF) + ")"; + + if (appearsToBeTLS(headerBuffer)) { + throw new StreamCorruptedException("SSL/TLS request received but SSL/TLS is not enabled on this node, got " + firstBytes); } - throw new StreamCorruptedException("invalid internal transport message format, got (" - + Integer.toHexString(headerBuffer.get(0) & 0xFF) + "," - + Integer.toHexString(headerBuffer.get(1) & 0xFF) + "," - + Integer.toHexString(headerBuffer.get(2) & 0xFF) + "," - + Integer.toHexString(headerBuffer.get(3) & 0xFF) + ")"); + throw new StreamCorruptedException("invalid internal transport message format, got " + firstBytes); } final int messageLength = headerBuffer.getInt(TcpHeader.MARKER_BYTES_SIZE); @@ -763,7 +777,7 @@ private static int readHeaderBuffer(BytesReference headerBuffer) throws IOExcept return messageLength; } - private static boolean appearsToBeHTTP(BytesReference headerBuffer) { + private static boolean appearsToBeHTTPRequest(BytesReference headerBuffer) { return bufferStartsWith(headerBuffer, "GET") || bufferStartsWith(headerBuffer, "POST") || bufferStartsWith(headerBuffer, "PUT") || @@ -775,6 +789,14 @@ private static boolean appearsToBeHTTP(BytesReference headerBuffer) { bufferStartsWith(headerBuffer, "TRACE"); } + private static boolean appearsToBeHTTPResponse(BytesReference headerBuffer) { + return bufferStartsWith(headerBuffer, "HTTP"); + } + + private static boolean appearsToBeTLS(BytesReference headerBuffer) { + return headerBuffer.get(0) == 0x16 && headerBuffer.get(1) == 0x03; + } + private static boolean bufferStartsWith(BytesReference buffer, String method) { char[] chars = method.toCharArray(); for (int i = 0; i < chars.length; i++) { @@ -789,9 +811,9 @@ private static boolean bufferStartsWith(BytesReference buffer, String method) { * A helper exception to mark an incoming connection as potentially being HTTP * so an appropriate error code can be returned */ - public static class HttpOnTransportException extends ElasticsearchException { + public static class HttpRequestOnTransportException extends ElasticsearchException { - private HttpOnTransportException(String msg) { + private HttpRequestOnTransportException(String msg) { super(msg); } @@ -800,7 +822,7 @@ public RestStatus status() { return RestStatus.BAD_REQUEST; } - public HttpOnTransportException(StreamInput in) throws IOException { + public HttpRequestOnTransportException(StreamInput in) throws IOException { super(in); } } diff --git a/server/src/test/java/org/apache/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java 
b/server/src/test/java/org/apache/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java index dc21ed6a2f799..4629cbb143b00 100644 --- a/server/src/test/java/org/apache/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java +++ b/server/src/test/java/org/apache/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java @@ -24,6 +24,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; import java.io.IOException; import java.util.Collections; @@ -84,7 +85,7 @@ protected final int dimension() { protected abstract String fieldName(); - protected abstract RangeFieldMapper.RangeType rangeType(); + protected abstract RangeType rangeType(); protected abstract static class AbstractRange extends Range { diff --git a/server/src/test/java/org/apache/lucene/queries/BinaryDocValuesRangeQueryTests.java b/server/src/test/java/org/apache/lucene/queries/BinaryDocValuesRangeQueryTests.java index 921d1ed5f1f23..c214aaaf37475 100644 --- a/server/src/test/java/org/apache/lucene/queries/BinaryDocValuesRangeQueryTests.java +++ b/server/src/test/java/org/apache/lucene/queries/BinaryDocValuesRangeQueryTests.java @@ -27,6 +27,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -41,7 +42,7 @@ public class BinaryDocValuesRangeQueryTests extends ESTestCase { public void testBasics() throws Exception { String fieldName = "long_field"; - RangeFieldMapper.RangeType rangeType = RangeFieldMapper.RangeType.LONG; + RangeType rangeType = RangeType.LONG; try (Directory dir = newDirectory()) { try (RandomIndexWriter writer = new RandomIndexWriter(random(), dir)) { // intersects (within) @@ -127,7 +128,7 @@ public void testBasics() throws Exception { public void testNoField() throws IOException { String fieldName = "long_field"; - RangeFieldMapper.RangeType rangeType = RangeFieldMapper.RangeType.LONG; + RangeType rangeType = RangeType.LONG; // no field in index try (Directory dir = newDirectory()) { diff --git a/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java b/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java index 9d05e119cbb78..b7b2107320e39 100644 --- a/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java +++ b/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java @@ -40,8 +40,6 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.similarities.BM25Similarity; -import org.apache.lucene.search.similarities.ClassicSimilarity; -import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.store.Directory; import org.elasticsearch.test.ESTestCase; @@ -208,8 +206,7 @@ public Term[] toTerms(String[] fields, String term) { } public IndexSearcher setSimilarity(IndexSearcher searcher) { - Similarity similarity = random().nextBoolean() ? 
new BM25Similarity() : new ClassicSimilarity(); - searcher.setSimilarity(similarity); + searcher.setSimilarity(new BM25Similarity()); return searcher; } diff --git a/server/src/test/java/org/apache/lucene/queries/DoubleRandomBinaryDocValuesRangeQueryTests.java b/server/src/test/java/org/apache/lucene/queries/DoubleRandomBinaryDocValuesRangeQueryTests.java index 984b1d72ef843..61add8be2a9d6 100644 --- a/server/src/test/java/org/apache/lucene/queries/DoubleRandomBinaryDocValuesRangeQueryTests.java +++ b/server/src/test/java/org/apache/lucene/queries/DoubleRandomBinaryDocValuesRangeQueryTests.java @@ -18,7 +18,7 @@ */ package org.apache.lucene.queries; -import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; public class DoubleRandomBinaryDocValuesRangeQueryTests extends BaseRandomBinaryDocValuesRangeQueryTestCase { @@ -28,8 +28,8 @@ protected String fieldName() { } @Override - protected RangeFieldMapper.RangeType rangeType() { - return RangeFieldMapper.RangeType.DOUBLE; + protected RangeType rangeType() { + return RangeType.DOUBLE; } @Override diff --git a/server/src/test/java/org/apache/lucene/queries/FloatRandomBinaryDocValuesRangeQueryTests.java b/server/src/test/java/org/apache/lucene/queries/FloatRandomBinaryDocValuesRangeQueryTests.java index a7f877392cf43..09755f165af94 100644 --- a/server/src/test/java/org/apache/lucene/queries/FloatRandomBinaryDocValuesRangeQueryTests.java +++ b/server/src/test/java/org/apache/lucene/queries/FloatRandomBinaryDocValuesRangeQueryTests.java @@ -18,7 +18,7 @@ */ package org.apache.lucene.queries; -import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; public class FloatRandomBinaryDocValuesRangeQueryTests extends BaseRandomBinaryDocValuesRangeQueryTestCase { @@ -28,8 +28,8 @@ protected String fieldName() { } @Override - protected RangeFieldMapper.RangeType rangeType() { - return RangeFieldMapper.RangeType.FLOAT; + protected RangeType rangeType() { + return RangeType.FLOAT; } @Override diff --git a/server/src/test/java/org/apache/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java b/server/src/test/java/org/apache/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java index ec468fd8d9b89..b70616f9b1f52 100644 --- a/server/src/test/java/org/apache/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java +++ b/server/src/test/java/org/apache/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java @@ -20,7 +20,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.FutureArrays; -import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; import java.net.InetAddress; import java.net.UnknownHostException; @@ -34,8 +34,8 @@ protected String fieldName() { } @Override - protected RangeFieldMapper.RangeType rangeType() { - return RangeFieldMapper.RangeType.IP; + protected RangeType rangeType() { + return RangeType.IP; } @Override diff --git a/server/src/test/java/org/apache/lucene/queries/IntegerRandomBinaryDocValuesRangeQueryTests.java b/server/src/test/java/org/apache/lucene/queries/IntegerRandomBinaryDocValuesRangeQueryTests.java index 1d04cdbaaca86..13c9bd5d32602 100644 --- a/server/src/test/java/org/apache/lucene/queries/IntegerRandomBinaryDocValuesRangeQueryTests.java +++ b/server/src/test/java/org/apache/lucene/queries/IntegerRandomBinaryDocValuesRangeQueryTests.java @@ -19,7 +19,7 @@ package org.apache.lucene.queries; import 
org.apache.lucene.util.TestUtil; -import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; public class IntegerRandomBinaryDocValuesRangeQueryTests extends BaseRandomBinaryDocValuesRangeQueryTestCase { @@ -29,8 +29,8 @@ protected String fieldName() { } @Override - protected RangeFieldMapper.RangeType rangeType() { - return RangeFieldMapper.RangeType.INTEGER; + protected RangeType rangeType() { + return RangeType.INTEGER; } @Override diff --git a/server/src/test/java/org/apache/lucene/queries/LongRandomBinaryDocValuesRangeQueryTests.java b/server/src/test/java/org/apache/lucene/queries/LongRandomBinaryDocValuesRangeQueryTests.java index e506c2c269028..6a8428ab9d3cb 100644 --- a/server/src/test/java/org/apache/lucene/queries/LongRandomBinaryDocValuesRangeQueryTests.java +++ b/server/src/test/java/org/apache/lucene/queries/LongRandomBinaryDocValuesRangeQueryTests.java @@ -19,7 +19,7 @@ package org.apache.lucene.queries; import org.apache.lucene.util.TestUtil; -import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; public class LongRandomBinaryDocValuesRangeQueryTests extends BaseRandomBinaryDocValuesRangeQueryTestCase { @@ -29,8 +29,8 @@ protected String fieldName() { } @Override - protected RangeFieldMapper.RangeType rangeType() { - return RangeFieldMapper.RangeType.LONG; + protected RangeType rangeType() { + return RangeType.LONG; } @Override diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 61d8532b5652a..411dc02f8adff 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -787,7 +787,7 @@ public void testIds() { ids.put(122, null); ids.put(123, org.elasticsearch.ResourceAlreadyExistsException.class); ids.put(124, null); - ids.put(125, TcpTransport.HttpOnTransportException.class); + ids.put(125, TcpTransport.HttpRequestOnTransportException.class); ids.put(126, org.elasticsearch.index.mapper.MapperParsingException.class); ids.put(127, org.elasticsearch.search.SearchContextException.class); ids.put(128, org.elasticsearch.search.builder.SearchSourceBuilderException.class); diff --git a/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java b/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java index cd3735b4843e6..4f9b63fb75e6c 100644 --- a/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java +++ b/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java @@ -171,6 +171,23 @@ public void testRunAfter() { } } + public void testRunBefore() { + { + AtomicBoolean afterSuccess = new AtomicBoolean(); + ActionListener listener = + ActionListener.runBefore(ActionListener.wrap(r -> {}, e -> {}), () -> afterSuccess.set(true)); + listener.onResponse(null); + assertThat(afterSuccess.get(), equalTo(true)); + } + { + AtomicBoolean afterFailure = new AtomicBoolean(); + ActionListener listener = + ActionListener.runBefore(ActionListener.wrap(r -> {}, e -> {}), () -> afterFailure.set(true)); + listener.onFailure(null); + assertThat(afterFailure.get(), equalTo(true)); + } + } + public void testNotifyOnce() { AtomicInteger onResponseTimes = new AtomicInteger(); AtomicInteger onFailureTimes = new AtomicInteger(); diff --git a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java 
b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java index 16df17bef1ada..174b164aead26 100644 --- a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java @@ -83,7 +83,7 @@ private AbstractSearchAsyncAction createAction(SearchRequest return null; }; - return new AbstractSearchAsyncAction("test", null, null, nodeIdToConnection, + return new AbstractSearchAsyncAction("test", logger, null, nodeIdToConnection, Collections.singletonMap("foo", new AliasFilter(new MatchAllQueryBuilder())), Collections.singletonMap("foo", 2.0f), Collections.singletonMap("name", Sets.newHashSet("bar", "baz")), null, request, listener, new GroupShardsIterator<>( @@ -239,6 +239,29 @@ public void run() { assertEquals(requestIds, releasedContexts); } + public void testShardNotAvailableWithDisallowPartialFailures() { + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false); + AtomicReference exception = new AtomicReference<>(); + ActionListener listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set); + int numShards = randomIntBetween(2, 10); + InitialSearchPhase.ArraySearchPhaseResults phaseResults = + new InitialSearchPhase.ArraySearchPhaseResults<>(numShards); + AbstractSearchAsyncAction action = createAction(searchRequest, phaseResults, listener, false, new AtomicLong()); + // skip one to avoid the "all shards failed" failure. + SearchShardIterator skipIterator = new SearchShardIterator(null, null, Collections.emptyList(), null); + skipIterator.resetAndSkip(); + action.skipShard(skipIterator); + // expect at least 2 shards, so onPhaseDone should report failure. 
+ action.onPhaseDone(); + assertThat(exception.get(), instanceOf(SearchPhaseExecutionException.class)); + SearchPhaseExecutionException searchPhaseExecutionException = (SearchPhaseExecutionException)exception.get(); + assertEquals("Partial shards failure (" + (numShards - 1) + " shards unavailable)", + searchPhaseExecutionException.getMessage()); + assertEquals("test", searchPhaseExecutionException.getPhaseName()); + assertEquals(0, searchPhaseExecutionException.shardFailures().length); + assertEquals(0, searchPhaseExecutionException.getSuppressed().length); + } + private static InitialSearchPhase.ArraySearchPhaseResults phaseResults(Set requestIds, List> nodeLookups, int numFailures) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index 4c48ce7b36068..f1236cfd252a6 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.DiskUsage; import org.elasticsearch.cluster.ESAllocationTestCase; -import org.elasticsearch.cluster.MockInternalClusterInfoService.DevNullClusterInfo; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -1002,4 +1001,20 @@ public void logShardStates(ClusterState state) { rn.shardsWithState(RELOCATING), rn.shardsWithState(STARTED)); } + + /** + * ClusterInfo that always reports /dev/null for the shards' data paths. 
+ */ + static class DevNullClusterInfo extends ClusterInfo { + DevNullClusterInfo(ImmutableOpenMap leastAvailableSpaceUsage, + ImmutableOpenMap mostAvailableSpaceUsage, + ImmutableOpenMap shardSizes) { + super(leastAvailableSpaceUsage, mostAvailableSpaceUsage, shardSizes, null); + } + + @Override + public String getDataPath(ShardRouting shardRouting) { + return "/dev/null"; + } + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index 8f790b41dad04..3f1975f35369a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -25,7 +25,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.DiskUsage; import org.elasticsearch.cluster.ESAllocationTestCase; -import org.elasticsearch.cluster.MockInternalClusterInfoService.DevNullClusterInfo; +import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDeciderTests.DevNullClusterInfo; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java index c7e47e98de3cb..389c54bb128c7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java @@ -16,191 +16,179 @@ * specific language governing permissions and limitations * under the License. 
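// Illustrative sketch (not part of this change), relating to the rewritten MockDiskUsagesIT below: the three
// disk watermarks the test exercises have different consequences, and (as the test itself notes) they must
// all be expressed either as absolute bytes or as percentages, never a mix. The values below are arbitrary
// example percentages; the class and method names are hypothetical.
class DiskWatermarkSettingsSketch {
    static org.elasticsearch.common.settings.Settings percentageBasedWatermarks() {
        return org.elasticsearch.common.settings.Settings.builder()
            // low: stop allocating new shard copies to a node whose disk usage exceeds this
            .put("cluster.routing.allocation.disk.watermark.low", "85%")
            // high: actively relocate shards away from a node whose disk usage exceeds this
            .put("cluster.routing.allocation.disk.watermark.high", "90%")
            // flood_stage: apply a read-only block to indices with a shard on such a node
            .put("cluster.routing.allocation.disk.watermark.flood_stage", "95%")
            .build();
    }
}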
*/ - package org.elasticsearch.cluster.routing.allocation.decider; -import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.DiskUsage; import org.elasticsearch.cluster.MockInternalClusterInfoService; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.RoutingNode; -import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.env.Environment; +import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import java.util.ArrayList; +import java.nio.file.Path; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; +import static org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING; +import static org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING; +import static org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING; +import static org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.startsWith; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class MockDiskUsagesIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - // Use the mock internal cluster info service, which has fake-able disk usages return Collections.singletonList(MockInternalClusterInfoService.TestPlugin.class); } + @Override + public Settings indexSettings() { + // ensure that indices do not use custom data paths + return Settings.builder().put(super.indexSettings()).putNull(IndexMetaData.SETTING_DATA_PATH).build(); + } + + private static FsInfo.Path setDiskUsage(FsInfo.Path original, long totalBytes, long freeBytes) { + return new 
FsInfo.Path(original.getPath(), original.getMount(), totalBytes, freeBytes, freeBytes); + } + public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception { - List nodes = internalCluster().startNodes(3); + for (int i = 0; i < 3; i++) { + // ensure that each node has a single data path + internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), createTempDir())); + } - // Start with all nodes at 50% usage - final MockInternalClusterInfoService cis = (MockInternalClusterInfoService) - internalCluster().getInstance(ClusterInfoService.class, internalCluster().getMasterName()); - cis.setUpdateFrequency(TimeValue.timeValueMillis(200)); - cis.onMaster(); - cis.setN1Usage(nodes.get(0), new DiskUsage(nodes.get(0), "n1", "/dev/null", 100, 50)); - cis.setN2Usage(nodes.get(1), new DiskUsage(nodes.get(1), "n2", "/dev/null", 100, 50)); - cis.setN3Usage(nodes.get(2), new DiskUsage(nodes.get(2), "n3", "/dev/null", 100, 50)); + final List nodeIds = StreamSupport.stream(client().admin().cluster().prepareState().get().getState() + .getRoutingNodes().spliterator(), false).map(RoutingNode::nodeId).collect(Collectors.toList()); + + final MockInternalClusterInfoService clusterInfoService = getMockInternalClusterInfoService(); + clusterInfoService.setUpdateFrequency(TimeValue.timeValueMillis(200)); + clusterInfoService.onMaster(); + + // prevent any effects from in-flight recoveries, since we are only simulating a 100-byte disk + clusterInfoService.shardSizeFunction = shardRouting -> 0L; + + // start with all nodes below the watermark + clusterInfoService.diskUsageFunction = (discoveryNode, fsInfoPath) -> setDiskUsage(fsInfoPath, 100, between(10, 100)); final boolean watermarkBytes = randomBoolean(); // we have to consistently use bytes or percentage for the disk watermark settings - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "20b" : "80%") - .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "10b" : "90%") - .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), - watermarkBytes ? "0b" : "100%") - .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "1ms")).get(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() + .put(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "10b" : "90%") + .put(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "10b" : "90%") + .put(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), watermarkBytes ? 
"0b" : "100%") + .put(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "1ms"))); // Create an index with 10 shards so we can check allocation for it - prepareCreate("test").setSettings(Settings.builder() - .put("number_of_shards", 10) - .put("number_of_replicas", 0)).get(); + assertAcked(prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 10).put("number_of_replicas", 0))); ensureGreen("test"); - // Block until the "fake" cluster info is retrieved at least once assertBusy(() -> { - final ClusterInfo info = cis.getClusterInfo(); - logger.info("--> got: {} nodes", info.getNodeLeastAvailableDiskUsages().size()); - assertThat(info.getNodeLeastAvailableDiskUsages().size(), greaterThan(0)); + final Map shardCountByNodeId = getShardCountByNodeId(); + assertThat("node0 has at least 3 shards", shardCountByNodeId.get(nodeIds.get(0)), greaterThanOrEqualTo(3)); + assertThat("node1 has at least 3 shards", shardCountByNodeId.get(nodeIds.get(1)), greaterThanOrEqualTo(3)); + assertThat("node2 has at least 3 shards", shardCountByNodeId.get(nodeIds.get(2)), greaterThanOrEqualTo(3)); }); - final List realNodeNames = new ArrayList<>(); - { - final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); - for (final RoutingNode node : clusterState.getRoutingNodes()) { - realNodeNames.add(node.nodeId()); - logger.info("--> node {} has {} shards", - node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); - } - } - - // Update the disk usages so one node has now passed the high watermark - cis.setN1Usage(realNodeNames.get(0), new DiskUsage(nodes.get(0), "n1", "_na_", 100, 50)); - cis.setN2Usage(realNodeNames.get(1), new DiskUsage(nodes.get(1), "n2", "_na_", 100, 50)); - cis.setN3Usage(realNodeNames.get(2), new DiskUsage(nodes.get(2), "n3", "_na_", 100, 0)); // nothing free on node3 + // move node2 above high watermark + clusterInfoService.diskUsageFunction = (discoveryNode, fsInfoPath) -> setDiskUsage(fsInfoPath, 100, + discoveryNode.getId().equals(nodeIds.get(2)) ? 
between(0, 9) : between(10, 100)); - logger.info("--> waiting for shards to relocate off node [{}]", realNodeNames.get(2)); + logger.info("--> waiting for shards to relocate off node [{}]", nodeIds.get(2)); assertBusy(() -> { - final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); - final Map nodesToShardCount = new HashMap<>(); - for (final RoutingNode node : clusterState.getRoutingNodes()) { - logger.info("--> node {} has {} shards", - node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); - nodesToShardCount.put(node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); - } - assertThat("node1 has 5 shards", nodesToShardCount.get(realNodeNames.get(0)), equalTo(5)); - assertThat("node2 has 5 shards", nodesToShardCount.get(realNodeNames.get(1)), equalTo(5)); - assertThat("node3 has 0 shards", nodesToShardCount.get(realNodeNames.get(2)), equalTo(0)); + final Map shardCountByNodeId = getShardCountByNodeId(); + assertThat("node0 has 5 shards", shardCountByNodeId.get(nodeIds.get(0)), equalTo(5)); + assertThat("node1 has 5 shards", shardCountByNodeId.get(nodeIds.get(1)), equalTo(5)); + assertThat("node2 has 0 shards", shardCountByNodeId.get(nodeIds.get(2)), equalTo(0)); }); - // Update the disk usages so one node is now back under the high watermark - cis.setN1Usage(realNodeNames.get(0), new DiskUsage(nodes.get(0), "n1", "_na_", 100, 50)); - cis.setN2Usage(realNodeNames.get(1), new DiskUsage(nodes.get(1), "n2", "_na_", 100, 50)); - cis.setN3Usage(realNodeNames.get(2), new DiskUsage(nodes.get(2), "n3", "_na_", 100, 50)); // node3 has free space now + // move all nodes below watermark again + clusterInfoService.diskUsageFunction = (discoveryNode, fsInfoPath) -> setDiskUsage(fsInfoPath, 100, between(10, 100)); - logger.info("--> waiting for shards to rebalance back onto node [{}]", realNodeNames.get(2)); + logger.info("--> waiting for shards to rebalance back onto node [{}]", nodeIds.get(2)); assertBusy(() -> { - final Map nodesToShardCount = new HashMap<>(); - final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); - for (final RoutingNode node : clusterState.getRoutingNodes()) { - logger.info("--> node {} has {} shards", - node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); - nodesToShardCount.put(node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); - } - assertThat("node1 has at least 3 shards", nodesToShardCount.get(realNodeNames.get(0)), greaterThanOrEqualTo(3)); - assertThat("node2 has at least 3 shards", nodesToShardCount.get(realNodeNames.get(1)), greaterThanOrEqualTo(3)); - assertThat("node3 has at least 3 shards", nodesToShardCount.get(realNodeNames.get(2)), greaterThanOrEqualTo(3)); + final Map shardCountByNodeId = getShardCountByNodeId(); + assertThat("node0 has at least 3 shards", shardCountByNodeId.get(nodeIds.get(0)), greaterThanOrEqualTo(3)); + assertThat("node1 has at least 3 shards", shardCountByNodeId.get(nodeIds.get(1)), greaterThanOrEqualTo(3)); + assertThat("node2 has at least 3 shards", shardCountByNodeId.get(nodeIds.get(2)), greaterThanOrEqualTo(3)); }); } public void testAutomaticReleaseOfIndexBlock() throws Exception { - List nodes = internalCluster().startNodes(3); + for (int i = 0; i < 3; i++) { + // ensure that each node has a single data path + internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), 
createTempDir())); + } - // Wait for all 3 nodes to be up - assertBusy(() -> { - NodesStatsResponse resp = client().admin().cluster().prepareNodesStats().get(); - assertThat(resp.getNodes().size(), equalTo(3)); - }); + final List nodeIds = StreamSupport.stream(client().admin().cluster().prepareState().get().getState() + .getRoutingNodes().spliterator(), false).map(RoutingNode::nodeId).collect(Collectors.toList()); - // Start with all nodes at 50% usage - final MockInternalClusterInfoService cis = (MockInternalClusterInfoService) - internalCluster().getInstance(ClusterInfoService.class, internalCluster().getMasterName()); - cis.setUpdateFrequency(TimeValue.timeValueMillis(100)); - cis.onMaster(); - cis.setN1Usage(nodes.get(0), new DiskUsage(nodes.get(0), "n1", "/dev/null", 100, 50)); - cis.setN2Usage(nodes.get(1), new DiskUsage(nodes.get(1), "n2", "/dev/null", 100, 50)); - cis.setN3Usage(nodes.get(2), new DiskUsage(nodes.get(2), "n3", "/dev/null", 100, 50)); + final MockInternalClusterInfoService clusterInfoService = getMockInternalClusterInfoService(); + clusterInfoService.setUpdateFrequency(TimeValue.timeValueMillis(200)); + clusterInfoService.onMaster(); + + // prevent any effects from in-flight recoveries, since we are only simulating a 100-byte disk + clusterInfoService.shardSizeFunction = shardRouting -> 0L; + + // start with all nodes below the low watermark + clusterInfoService.diskUsageFunction = (discoveryNode, fsInfoPath) -> setDiskUsage(fsInfoPath, 100, between(15, 100)); final boolean watermarkBytes = randomBoolean(); // we have to consistently use bytes or percentage for the disk watermark settings - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "15b" : "85%") - .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "10b" : "90%") - .put( - DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), - watermarkBytes ? "5b" : "95%") - .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "150ms")).get(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() + .put(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "10b" : "90%") + .put(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "10b" : "90%") + .put(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), watermarkBytes ? 
"5b" : "95%") + .put(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "150ms"))); + // Create an index with 6 shards so we can check allocation for it prepareCreate("test").setSettings(Settings.builder() .put("number_of_shards", 6) .put("number_of_replicas", 0)).get(); ensureGreen("test"); - // Block until the "fake" cluster info is retrieved at least once - assertBusy(() -> { - ClusterInfo info = cis.getClusterInfo(); - logger.info("--> got: {} nodes", info.getNodeLeastAvailableDiskUsages().size()); - assertThat(info.getNodeLeastAvailableDiskUsages().size(), greaterThan(0)); - }); - - final List realNodeNames = new ArrayList<>(); - ClusterStateResponse resp = client().admin().cluster().prepareState().get(); - for (RoutingNode node : resp.getState().getRoutingNodes()) { - realNodeNames.add(node.nodeId()); - logger.info("--> node {} has {} shards", - node.nodeId(), resp.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); + { + final Map shardCountByNodeId = getShardCountByNodeId(); + assertThat("node0 has 2 shards", shardCountByNodeId.get(nodeIds.get(0)), equalTo(2)); + assertThat("node1 has 2 shards", shardCountByNodeId.get(nodeIds.get(1)), equalTo(2)); + assertThat("node2 has 2 shards", shardCountByNodeId.get(nodeIds.get(2)), equalTo(2)); } - client().prepareIndex("test", "doc", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); - assertSearchHits(client().prepareSearch().get(), "1"); + client().prepareIndex("test", "doc", "1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + assertSearchHits(client().prepareSearch("test").get(), "1"); - // Block all nodes so that re-balancing does not occur (BalancedShardsAllocator) - cis.setN1Usage(realNodeNames.get(0), new DiskUsage(nodes.get(0), "n1", "_na_", 100, 3)); - cis.setN2Usage(realNodeNames.get(1), new DiskUsage(nodes.get(1), "n2", "_na_", 100, 3)); - cis.setN3Usage(realNodeNames.get(2), new DiskUsage(nodes.get(2), "n3", "_na_", 100, 3)); + // Move all nodes above the low watermark so no shard movement can occur, and at least one node above the flood stage watermark so + // the index is blocked + clusterInfoService.diskUsageFunction = (discoveryNode, fsInfoPath) -> setDiskUsage(fsInfoPath, 100, + discoveryNode.getId().equals(nodeIds.get(2)) ? 
between(0, 4) : between(0, 14)); - // Wait until index "test" is blocked - assertBusy(() -> assertBlocked(client().prepareIndex().setIndex("test").setType("doc").setId("1").setSource("foo", "bar"), + assertBusy(() -> assertBlocked( + client().prepareIndex().setIndex("test").setType("doc").setId("1").setSource("foo", "bar"), IndexMetaData.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK)); assertFalse(client().admin().cluster().prepareHealth("test").setWaitForEvents(Priority.LANGUID).get().isTimedOut()); @@ -208,23 +196,234 @@ public void testAutomaticReleaseOfIndexBlock() throws Exception { // Cannot add further documents assertBlocked(client().prepareIndex().setIndex("test").setType("doc").setId("2").setSource("foo", "bar"), IndexMetaData.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK); - assertSearchHits(client().prepareSearch().get(), "1"); + assertSearchHits(client().prepareSearch("test").get(), "1"); + + logger.info("--> index is confirmed read-only, releasing disk space"); - // Update the disk usages so all nodes are back under the high and flood watermarks - cis.setN1Usage(realNodeNames.get(0), new DiskUsage(nodes.get(0), "n1", "_na_", 100, 11)); - cis.setN2Usage(realNodeNames.get(1), new DiskUsage(nodes.get(1), "n2", "_na_", 100, 11)); - cis.setN3Usage(realNodeNames.get(2), new DiskUsage(nodes.get(2), "n3", "_na_", 100, 11)); + // Move all nodes below the high watermark so that the index is unblocked + clusterInfoService.diskUsageFunction = (discoveryNode, fsInfoPath) -> setDiskUsage(fsInfoPath, 100, between(10, 100)); // Attempt to create a new document until DiskUsageMonitor unblocks the index assertBusy(() -> { try { - client().prepareIndex("test", "doc", "3").setSource("{\"foo\": \"bar\"}", XContentType.JSON) + client().prepareIndex("test", "doc", "3").setSource("foo", "bar") .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); } catch (ClusterBlockException e) { throw new AssertionError("retrying", e); } }); - assertSearchHits(client().prepareSearch().get(), "1", "3"); + assertSearchHits(client().prepareSearch("test").get(), "1", "3"); + } + + public void testOnlyMovesEnoughShardsToDropBelowHighWatermark() throws Exception { + for (int i = 0; i < 3; i++) { + // ensure that each node has a single data path + internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), createTempDir())); + } + + final MockInternalClusterInfoService clusterInfoService = getMockInternalClusterInfoService(); + + final AtomicReference masterAppliedClusterState = new AtomicReference<>(); + internalCluster().getCurrentMasterNodeInstance(ClusterService.class).addListener(event -> { + masterAppliedClusterState.set(event.state()); + clusterInfoService.refresh(); // so that a subsequent reroute sees disk usage according to the current state + }); + + // shards are 1 byte large + clusterInfoService.shardSizeFunction = shardRouting -> 1L; + + // start with all nodes below the watermark + clusterInfoService.diskUsageFunction = (discoveryNode, fsInfoPath) -> setDiskUsage(fsInfoPath, 1000L, 1000L); + + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() + .put(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "90%") + .put(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "90%") + .put(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "100%") + .put(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "1ms"))); + + final List nodeIds = 
StreamSupport.stream(client().admin().cluster().prepareState().get().getState() + .getRoutingNodes().spliterator(), false).map(RoutingNode::nodeId).collect(Collectors.toList()); + + assertAcked(prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 6).put("number_of_replicas", 0))); + + ensureGreen("test"); + + assertBusy(() -> { + final Map shardCountByNodeId = getShardCountByNodeId(); + assertThat("node0 has 2 shards", shardCountByNodeId.get(nodeIds.get(0)), equalTo(2)); + assertThat("node1 has 2 shards", shardCountByNodeId.get(nodeIds.get(1)), equalTo(2)); + assertThat("node2 has 2 shards", shardCountByNodeId.get(nodeIds.get(2)), equalTo(2)); + }); + + // disable rebalancing, or else we might move too many shards away and then rebalance them back again + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE))); + + // node2 suddenly has 99 bytes free, less than 10%, but moving one shard is enough to bring it up to 100 bytes free: + clusterInfoService.diskUsageFunction = (discoveryNode, fsInfoPath) -> setDiskUsage(fsInfoPath, 1000L, + discoveryNode.getId().equals(nodeIds.get(2)) + ? 101L - masterAppliedClusterState.get().getRoutingNodes().node(nodeIds.get(2)).numberOfOwningShards() + : 1000L); + + clusterInfoService.refresh(); + + logger.info("--> waiting for shards to relocate off node [{}]", nodeIds.get(2)); + + // must wait for relocation to start + assertBusy(() -> assertThat("node2 has 1 shard", getShardCountByNodeId().get(nodeIds.get(2)), equalTo(1))); + + // ensure that relocations finished without moving any more shards + ensureGreen("test"); + assertThat("node2 has 1 shard", getShardCountByNodeId().get(nodeIds.get(2)), equalTo(1)); + } + + public void testDoesNotExceedLowWatermarkWhenRebalancing() throws Exception { + for (int i = 0; i < 3; i++) { + // ensure that each node has a single data path + internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), createTempDir())); + } + + final AtomicReference masterAppliedClusterState = new AtomicReference<>(); + + final MockInternalClusterInfoService clusterInfoService = getMockInternalClusterInfoService(); + + final List nodeIds = StreamSupport.stream(client().admin().cluster().prepareState().get().getState() + .getRoutingNodes().spliterator(), false).map(RoutingNode::nodeId).collect(Collectors.toList()); + + internalCluster().getCurrentMasterNodeInstance(ClusterService.class).addListener(event -> { + assertThat(event.state().getRoutingNodes().node(nodeIds.get(2)).size(), lessThanOrEqualTo(1)); + masterAppliedClusterState.set(event.state()); + clusterInfoService.refresh(); // so that a subsequent reroute sees disk usage according to the current state + }); + + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() + .put(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "85%") + .put(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "100%") + .put(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "100%"))); + + // shards are 1 byte large + clusterInfoService.shardSizeFunction = shardRouting -> 1L; + + // node 2 only has space for one shard + clusterInfoService.diskUsageFunction = (discoveryNode, fsInfoPath) -> setDiskUsage(fsInfoPath, 1000L, + discoveryNode.getId().equals(nodeIds.get(2)) + ? 
150L - masterAppliedClusterState.get().getRoutingNodes().node(nodeIds.get(2)).numberOfOwningShards() + : 1000L); + + assertAcked(prepareCreate("test").setSettings(Settings.builder() + .put("number_of_shards", 6) + .put("number_of_replicas", 0) + .put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getConcreteSettingForNamespace("_id").getKey(), nodeIds.get(2)))); + ensureGreen("test"); + + assertBusy(() -> { + final Map shardCountByNodeId = getShardCountByNodeId(); + assertThat("node0 has 3 shards", shardCountByNodeId.get(nodeIds.get(0)), equalTo(3)); + assertThat("node1 has 3 shards", shardCountByNodeId.get(nodeIds.get(1)), equalTo(3)); + assertThat("node2 has 0 shards", shardCountByNodeId.get(nodeIds.get(2)), equalTo(0)); + }); + + assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() + .putNull(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getConcreteSettingForNamespace("_id").getKey()))); + + logger.info("--> waiting for shards to relocate onto node [{}]", nodeIds.get(2)); + + ensureGreen("test"); + assertThat("node2 has 1 shard", getShardCountByNodeId().get(nodeIds.get(2)), equalTo(1)); + } + + public void testMovesShardsOffSpecificDataPathAboveWatermark() throws Exception { + + // start one node with two data paths + final Path pathOverWatermark = createTempDir(); + final Settings.Builder twoPathSettings = Settings.builder(); + if (randomBoolean()) { + twoPathSettings.putList(Environment.PATH_DATA_SETTING.getKey(), createTempDir().toString(), pathOverWatermark.toString()); + } else { + twoPathSettings.putList(Environment.PATH_DATA_SETTING.getKey(), pathOverWatermark.toString(), createTempDir().toString()); + } + internalCluster().startNode(twoPathSettings); + final String nodeWithTwoPaths = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0).getNode().getId(); + + // other two nodes have one data path each + internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), createTempDir())); + internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), createTempDir())); + + final MockInternalClusterInfoService clusterInfoService = getMockInternalClusterInfoService(); + + // prevent any effects from in-flight recoveries, since we are only simulating a 100-byte disk + clusterInfoService.shardSizeFunction = shardRouting -> 0L; + + // start with all paths below the watermark + clusterInfoService.diskUsageFunction = (discoveryNode, fsInfoPath) -> setDiskUsage(fsInfoPath, 100, between(10, 100)); + + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() + .put(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "90%") + .put(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "90%") + .put(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "100%"))); + + final List nodeIds = StreamSupport.stream(client().admin().cluster().prepareState().get().getState() + .getRoutingNodes().spliterator(), false).map(RoutingNode::nodeId).collect(Collectors.toList()); + + assertAcked(prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 6).put("number_of_replicas", 0))); + + ensureGreen("test"); + + { + final Map shardCountByNodeId = getShardCountByNodeId(); + assertThat("node0 has 2 shards", shardCountByNodeId.get(nodeIds.get(0)), equalTo(2)); + assertThat("node1 has 2 shards", shardCountByNodeId.get(nodeIds.get(1)), equalTo(2)); + assertThat("node2 has 2 shards", 
shardCountByNodeId.get(nodeIds.get(2)), equalTo(2)); + } + + final long shardsOnGoodPath = Arrays.stream(client().admin().indices().prepareStats("test").get().getShards()) + .filter(shardStats -> shardStats.getShardRouting().currentNodeId().equals(nodeWithTwoPaths) + && shardStats.getDataPath().startsWith(pathOverWatermark.toString()) == false).count(); + logger.info("--> shards on good path: [{}]", shardsOnGoodPath); + + // one of the paths on node0 suddenly exceeds the high watermark + clusterInfoService.diskUsageFunction = (discoveryNode, fsInfoPath) -> setDiskUsage(fsInfoPath, 100L, + fsInfoPath.getPath().startsWith(pathOverWatermark.toString()) ? between(0, 9) : between(10, 100)); + + // disable rebalancing, or else we might move shards back onto the over-full path since we're not faking that + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE))); + + clusterInfoService.refresh(); + + logger.info("--> waiting for shards to relocate off path [{}]", pathOverWatermark); + + assertBusy(() -> { + for (final ShardStats shardStats : client().admin().indices().prepareStats("test").get().getShards()) { + assertThat(shardStats.getDataPath(), not(startsWith(pathOverWatermark.toString()))); + } + }); + + ensureGreen("test"); + + for (final ShardStats shardStats : client().admin().indices().prepareStats("test").get().getShards()) { + assertThat(shardStats.getDataPath(), not(startsWith(pathOverWatermark.toString()))); + } + + assertThat("should not have moved any shards off of the path that wasn't too full", + Arrays.stream(client().admin().indices().prepareStats("test").get().getShards()) + .filter(shardStats -> shardStats.getShardRouting().currentNodeId().equals(nodeWithTwoPaths) + && shardStats.getDataPath().startsWith(pathOverWatermark.toString()) == false).count(), equalTo(shardsOnGoodPath)); + } + + private Map getShardCountByNodeId() { + final Map shardCountByNodeId = new HashMap<>(); + final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + for (final RoutingNode node : clusterState.getRoutingNodes()) { + logger.info("----> node {} has {} shards", + node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); + shardCountByNodeId.put(node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); + } + return shardCountByNodeId; + } + + private MockInternalClusterInfoService getMockInternalClusterInfoService() { + return (MockInternalClusterInfoService) internalCluster().getCurrentMasterNodeInstance(ClusterInfoService.class); } } diff --git a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index efb46db59de96..6c0f6b0751a65 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -800,9 +800,9 @@ public void testValidate() { assertEquals("Failed to parse value [true] for setting [index.number_of_replicas]", e.getMessage()); e = expectThrows(IllegalArgumentException.class, () -> - settings.validate("index.similarity.classic.type", Settings.builder().put("index.similarity.classic.type", "mine").build(), + settings.validate("index.similarity.boolean.type", Settings.builder().put("index.similarity.boolean.type", 
"mine").build(), false)); - assertEquals("illegal value for [index.similarity.classic] cannot redefine built-in similarity", e.getMessage()); + assertEquals("illegal value for [index.similarity.boolean] cannot redefine built-in similarity", e.getMessage()); } public void testValidateSecureSettings() { diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java index 42c846d01f5a5..9f2ed63b6da61 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java @@ -24,9 +24,13 @@ import org.hamcrest.Matchers; import java.util.Arrays; +import java.util.List; +import java.util.Set; +import java.util.function.Supplier; import static java.util.Collections.emptySet; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.is; public class SettingsModuleTests extends ModuleTestCase { @@ -185,4 +189,31 @@ public void testMutuallyExclusiveScopes() { assertThat(e.getMessage(), containsString("Cannot register setting [foo.bar] twice")); } } + + public void testPluginSettingWithoutNamespace() { + final String key = randomAlphaOfLength(8); + final Setting setting = Setting.simpleString(key, Property.NodeScope); + runSettingWithoutNamespaceTest( + key, () -> new SettingsModule(Settings.EMPTY, List.of(setting), List.of(), Set.of(), Set.of(), Set.of())); + } + + public void testClusterSettingWithoutNamespace() { + final String key = randomAlphaOfLength(8); + final Setting setting = Setting.simpleString(key, Property.NodeScope); + runSettingWithoutNamespaceTest( + key, () -> new SettingsModule(Settings.EMPTY, List.of(), List.of(), Set.of(), Set.of(setting), Set.of())); + } + + public void testIndexSettingWithoutNamespace() { + final String key = randomAlphaOfLength(8); + final Setting setting = Setting.simpleString(key, Property.IndexScope); + runSettingWithoutNamespaceTest( + key, () -> new SettingsModule(Settings.EMPTY, List.of(), List.of(), Set.of(), Set.of(), Set.of(setting))); + } + + private void runSettingWithoutNamespaceTest(final String key, final Supplier supplier) { + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, supplier::get); + assertThat(e, hasToString(containsString("setting [" + key + "] is not in any namespace, its name must contain a dot"))); + } + } diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 63408c2669131..c850b9d61da5b 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -375,7 +375,7 @@ public void testRecoverBrokenIndexMetadata() throws Exception { final IndexMetaData brokenMeta = IndexMetaData.builder(metaData).settings(Settings.builder().put(metaData.getSettings()) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.minimumIndexCompatibilityVersion().id) // this is invalid but should be archived - .put("index.similarity.BM25.type", "classic") + .put("index.similarity.BM25.type", "boolean") // this one is not validated ahead of time and breaks allocation .put("index.analysis.filter.myCollator.type", "icu_collation") ).build(); @@ -397,7 +397,7 @@ public void testRecoverBrokenIndexMetadata() throws Exception { 
state = client().admin().cluster().prepareState().get().getState(); assertEquals(IndexMetaData.State.CLOSE, state.getMetaData().index(metaData.getIndex()).getState()); - assertEquals("classic", state.getMetaData().index(metaData.getIndex()).getSettings().get("archived.index.similarity.BM25.type")); + assertEquals("boolean", state.getMetaData().index(metaData.getIndex()).getSettings().get("archived.index.similarity.BM25.type")); // try to open it with the broken setting - fail again! ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> client().admin().indices().prepareOpen("test").get()); assertEquals(ex.getMessage(), "Failed to verify index " + metaData.getIndex()); diff --git a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java index c58ec6a4becbb..3bfc649820fee 100644 --- a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java +++ b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java @@ -57,6 +57,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Supplier; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; @@ -272,8 +273,13 @@ public void testReleaseInListener() throws IOException { public void testConnectionClose() throws Exception { final Settings settings = Settings.builder().build(); final HttpRequest httpRequest; - final boolean close = randomBoolean(); - if (randomBoolean()) { + final boolean brokenRequest = randomBoolean(); + final boolean close = brokenRequest || randomBoolean(); + if (brokenRequest) { + httpRequest = new TestRequest(() -> { + throw new IllegalArgumentException("Can't parse HTTP version"); + }, RestRequest.Method.GET, "/"); + } else if (randomBoolean()) { httpRequest = new TestRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); if (close) { httpRequest.getHeaders().put(DefaultRestChannel.CONNECTION, Collections.singletonList(DefaultRestChannel.CLOSE)); @@ -399,18 +405,21 @@ private TestResponse executeRequest(final Settings settings, final String origin private static class TestRequest implements HttpRequest { - private final HttpVersion version; + private final Supplier version; private final RestRequest.Method method; private final String uri; private HashMap> headers = new HashMap<>(); - private TestRequest(HttpVersion version, RestRequest.Method method, String uri) { - - this.version = version; + private TestRequest(Supplier versionSupplier, RestRequest.Method method, String uri) { + this.version = versionSupplier; this.method = method; this.uri = uri; } + private TestRequest(HttpVersion version, RestRequest.Method method, String uri) { + this(() -> version, method, uri); + } + @Override public RestRequest.Method method() { return method; @@ -438,7 +447,7 @@ public List strictCookies() { @Override public HttpVersion protocolVersion() { - return version; + return version.get(); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index ed8611aaee588..8704bd8be3490 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -81,6 +81,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; 
import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; @@ -4515,7 +4516,6 @@ public void testRestoreLocalHistoryFromTranslog() throws IOException { final EngineConfig engineConfig; final SeqNoStats prevSeqNoStats; final List prevDocs; - final int totalTranslogOps; try (InternalEngine engine = createEngine(store, createTempDir(), globalCheckpoint::get)) { engineConfig = engine.config(); for (final long seqNo : seqNos) { @@ -4534,16 +4534,19 @@ public void testRestoreLocalHistoryFromTranslog() throws IOException { engine.syncTranslog(); prevSeqNoStats = engine.getSeqNoStats(globalCheckpoint.get()); prevDocs = getDocIds(engine, true); - totalTranslogOps = engine.getTranslog().totalOperations(); } try (InternalEngine engine = new InternalEngine(engineConfig)) { + final Translog.TranslogGeneration currentTranslogGeneration = new Translog.TranslogGeneration( + engine.getTranslog().getTranslogUUID(), engine.getTranslog().currentFileGeneration()); + engine.recoverFromTranslog(translogHandler, globalCheckpoint.get()); engine.restoreLocalHistoryFromTranslog(translogHandler); assertThat(getDocIds(engine, true), equalTo(prevDocs)); SeqNoStats seqNoStats = engine.getSeqNoStats(globalCheckpoint.get()); assertThat(seqNoStats.getLocalCheckpoint(), equalTo(prevSeqNoStats.getLocalCheckpoint())); assertThat(seqNoStats.getMaxSeqNo(), equalTo(prevSeqNoStats.getMaxSeqNo())); - assertThat(engine.getTranslog().totalOperations(), equalTo(totalTranslogOps)); + try (Translog.Snapshot snapshot = engine.getTranslog().newSnapshotFromGen(currentTranslogGeneration, Long.MAX_VALUE)) { + assertThat("restore from local translog must not add operations to translog", snapshot, SnapshotMatchers.size(0)); + } } assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, createMapperService("test")); } @@ -5059,6 +5062,34 @@ public void testShouldPeriodicallyFlush() throws Exception { } } + public void testShouldPeriodicallyFlushAfterMerge() throws Exception { + assertThat("Empty engine does not need flushing", engine.shouldPeriodicallyFlush(), equalTo(false)); + ParsedDocument doc = + testParsedDocument(Integer.toString(0), null, testDocumentWithTextField(), SOURCE, null); + engine.index(indexForDoc(doc)); + engine.refresh("test"); + assertThat("Not exceeded translog flush threshold yet", engine.shouldPeriodicallyFlush(), equalTo(false)); + final IndexSettings indexSettings = engine.config().getIndexSettings(); + final IndexMetaData indexMetaData = IndexMetaData.builder(indexSettings.getIndexMetaData()) + .settings(Settings.builder().put(indexSettings.getSettings()) + .put(IndexSettings.INDEX_FLUSH_AFTER_MERGE_THRESHOLD_SIZE_SETTING.getKey(), "0b")).build(); + indexSettings.updateIndexMetaData(indexMetaData); + engine.onSettingsChanged(); + assertThat(engine.getTranslog().stats().getUncommittedOperations(), equalTo(1)); + assertThat(engine.shouldPeriodicallyFlush(), equalTo(false)); + doc = testParsedDocument(Integer.toString(1), null, testDocumentWithTextField(), SOURCE, null); + engine.index(indexForDoc(doc)); + assertThat(engine.getTranslog().stats().getUncommittedOperations(), equalTo(2)); + engine.refresh("test"); + engine.forceMerge(false, 1, false, false, false); + assertBusy(() -> { + // the merge listener runs concurrently after the force merge returned + assertThat(engine.shouldPeriodicallyFlush(), equalTo(true)); + }); + 
engine.flush(); + assertThat(engine.shouldPeriodicallyFlush(), equalTo(false)); + } + public void testStressShouldPeriodicallyFlush() throws Exception { final long flushThreshold = randomLongBetween(120, 5000); final long generationThreshold = randomLongBetween(1000, 5000); @@ -5978,4 +6009,78 @@ public void testAlwaysRecordReplicaOrPeerRecoveryOperationsToTranslog() throws E equalTo(seqNos)); } } + + public void testNoOpFailure() throws IOException { + engine.close(); + final Settings settings = Settings.builder() + .put(defaultSettings.getSettings()) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( + IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build()); + try (Store store = createStore(); + Engine engine = createEngine((dir, iwc) -> new IndexWriter(dir, iwc) { + + @Override + public long addDocument(Iterable doc) throws IOException { + throw new IllegalArgumentException("fatal"); + } + + }, + null, + null, + config(indexSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null))) { + final Engine.NoOp op = new Engine.NoOp(0, 0, PRIMARY, System.currentTimeMillis(), "test"); + final IllegalArgumentException e = expectThrows(IllegalArgumentException. class, () -> engine.noOp(op)); + assertThat(e.getMessage(), equalTo("fatal")); + assertTrue(engine.isClosed.get()); + assertThat(engine.failedEngine.get(), not(nullValue())); + assertThat(engine.failedEngine.get(), instanceOf(IllegalArgumentException.class)); + assertThat(engine.failedEngine.get().getMessage(), equalTo("fatal")); + } + } + + public void testDeleteFailureSoftDeletesEnabledDocAlreadyDeleted() throws IOException { + runTestDeleteFailure(true, InternalEngine::delete); + } + + public void testDeleteFailureSoftDeletesEnabled() throws IOException { + runTestDeleteFailure(true, (engine, op) -> {}); + } + + public void testDeleteFailureSoftDeletesDisabled() throws IOException { + runTestDeleteFailure(false, (engine, op) -> {}); + } + + private void runTestDeleteFailure( + final boolean softDeletesEnabled, + final CheckedBiConsumer consumer) throws IOException { + engine.close(); + final Settings settings = Settings.builder() + .put(defaultSettings.getSettings()) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), softDeletesEnabled).build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( + IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build()); + final AtomicReference iw = new AtomicReference<>(); + try (Store store = createStore(); + InternalEngine engine = createEngine( + (dir, iwc) -> { + iw.set(new ThrowingIndexWriter(dir, iwc)); + return iw.get(); + }, + null, + null, + config(indexSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null))) { + engine.index(new Engine.Index(newUid("0"), primaryTerm.get(), InternalEngineTests.createParsedDoc("0", null))); + final Engine.Delete op = new Engine.Delete("_doc", "0", newUid("0"), primaryTerm.get()); + consumer.accept(engine, op); + iw.get().setThrowFailure(() -> new IllegalArgumentException("fatal")); + final IllegalArgumentException e = expectThrows(IllegalArgumentException. 
class, () -> engine.delete(op)); + assertThat(e.getMessage(), equalTo("fatal")); + assertTrue(engine.isClosed.get()); + assertThat(engine.failedEngine.get(), not(nullValue())); + assertThat(engine.failedEngine.get(), instanceOf(IllegalArgumentException.class)); + assertThat(engine.failedEngine.get().getMessage(), equalTo("fatal")); + } + } + } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BinaryRangeUtilTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BinaryRangeUtilTests.java index 20d4af1f0b600..546cbbb2eaa6b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BinaryRangeUtilTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BinaryRangeUtilTests.java @@ -19,8 +19,14 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.singleton; + public class BinaryRangeUtilTests extends ESTestCase { public void testBasics() { @@ -140,6 +146,81 @@ public void testEncode_Float() { } } + public void testDecodeLong() { + long[] cases = new long[] { Long.MIN_VALUE, -2049, -2048, -128, -3, -1, 0, 1, 3, 125, 2048, 2049, Long.MAX_VALUE}; + for (long expected : cases) { + byte[] encoded = BinaryRangeUtil.encodeLong(expected); + int offset = 0; + int length = RangeType.LengthType.VARIABLE.readLength(encoded, offset); + assertEquals(expected, BinaryRangeUtil.decodeLong(encoded, offset, length)); + } + } + + public void testDecodeLongRanges() throws IOException { + int iters = randomIntBetween(32, 1024); + for (int i = 0; i < iters; i++) { + long start = randomLong(); + long end = randomLongBetween(start + 1, Long.MAX_VALUE); + RangeFieldMapper.Range expected = new RangeFieldMapper.Range(RangeType.LONG, start, end, true, true); + List decoded = BinaryRangeUtil.decodeLongRanges(BinaryRangeUtil.encodeLongRanges(singleton(expected))); + assertEquals(1, decoded.size()); + RangeFieldMapper.Range actual = decoded.get(0); + assertEquals(expected, actual); + } + } + + public void testDecodeDoubleRanges() throws IOException { + int iters = randomIntBetween(32, 1024); + for (int i = 0; i < iters; i++) { + double start = randomDouble(); + double end = randomDoubleBetween(Math.nextUp(start), Double.MAX_VALUE, false); + RangeFieldMapper.Range expected = new RangeFieldMapper.Range(RangeType.DOUBLE, start, end, true, true); + List decoded = BinaryRangeUtil.decodeDoubleRanges(BinaryRangeUtil.encodeDoubleRanges( + singleton(expected))); + assertEquals(1, decoded.size()); + RangeFieldMapper.Range actual = decoded.get(0); + assertEquals(expected, actual); + } + } + + public void testDecodeFloatRanges() throws IOException { + int iters = randomIntBetween(32, 1024); + for (int i = 0; i < iters; i++) { + float start = randomFloat(); + // for some reason, ESTestCase doesn't provide randomFloatBetween + float end = randomFloat(); + if (start > end) { + float temp = start; + start = end; + end = temp; + } + RangeFieldMapper.Range expected = new RangeFieldMapper.Range(RangeType.FLOAT, start, end, true, true); + List decoded = BinaryRangeUtil.decodeFloatRanges(BinaryRangeUtil.encodeFloatRanges( + singleton(expected))); + assertEquals(1, decoded.size()); + RangeFieldMapper.Range actual = decoded.get(0); + assertEquals(expected, actual); + } + } + + public void testDecodeIPRanges() throws IOException { + RangeFieldMapper.Range[] cases = { + 
createIPRange("192.168.0.1", "192.168.0.100"), + createIPRange("::ffff:c0a8:107", "2001:db8::") + }; + for (RangeFieldMapper.Range expected : cases) { + List decoded = BinaryRangeUtil.decodeIPRanges(BinaryRangeUtil.encodeIPRanges(singleton(expected))); + assertEquals(1, decoded.size()); + RangeFieldMapper.Range actual = decoded.get(0); + assertEquals(expected, actual); + } + } + + private RangeFieldMapper.Range createIPRange(String start, String end) { + return new RangeFieldMapper.Range(RangeType.IP, InetAddresses.forString(start), InetAddresses.forString(end), + true, true); + } + private static int normalize(int cmp) { if (cmp < 0) { return -1; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java index 65dcd396ed740..913a5d65669fc 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java @@ -416,7 +416,7 @@ public void doTestNoBounds(String type) throws IOException { public void testIllegalArguments() throws Exception { XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", RangeFieldMapper.RangeType.INTEGER.name) + .startObject("properties").startObject("field").field("type", RangeType.INTEGER.name) .field("format", DATE_FORMAT).endObject().endObject().endObject().endObject(); ThrowingRunnable runnable = () -> parser.parse("type", new CompressedXContent(Strings.toString(mapping))); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java index 699f85f1b12b1..fb7386446c1b0 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java @@ -69,7 +69,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws public void testIntegerRangeQuery() throws Exception { Query query = new QueryStringQueryBuilder(INTEGER_RANGE_FIELD_NAME + ":[-450 TO 45000]").toQuery(createShardContext()); Query range = IntRange.newIntersectsQuery(INTEGER_RANGE_FIELD_NAME, new int[]{-450}, new int[]{45000}); - Query dv = RangeFieldMapper.RangeType.INTEGER.dvRangeQuery(INTEGER_RANGE_FIELD_NAME, + Query dv = RangeType.INTEGER.dvRangeQuery(INTEGER_RANGE_FIELD_NAME, BinaryDocValuesRangeQuery.QueryType.INTERSECTS, -450, 45000, true, true); assertEquals(new IndexOrDocValuesQuery(range, dv), query); } @@ -77,7 +77,7 @@ public void testIntegerRangeQuery() throws Exception { public void testLongRangeQuery() throws Exception { Query query = new QueryStringQueryBuilder(LONG_RANGE_FIELD_NAME + ":[-450 TO 45000]").toQuery(createShardContext()); Query range = LongRange.newIntersectsQuery(LONG_RANGE_FIELD_NAME, new long[]{-450}, new long[]{45000}); - Query dv = RangeFieldMapper.RangeType.LONG.dvRangeQuery(LONG_RANGE_FIELD_NAME, + Query dv = RangeType.LONG.dvRangeQuery(LONG_RANGE_FIELD_NAME, BinaryDocValuesRangeQuery.QueryType.INTERSECTS, -450, 45000, true, true); assertEquals(new IndexOrDocValuesQuery(range, dv), query); } @@ -85,7 +85,7 @@ public void testLongRangeQuery() throws Exception { public void testFloatRangeQuery() throws Exception { Query query = new 
QueryStringQueryBuilder(FLOAT_RANGE_FIELD_NAME + ":[-450 TO 45000]").toQuery(createShardContext()); Query range = FloatRange.newIntersectsQuery(FLOAT_RANGE_FIELD_NAME, new float[]{-450}, new float[]{45000}); - Query dv = RangeFieldMapper.RangeType.FLOAT.dvRangeQuery(FLOAT_RANGE_FIELD_NAME, + Query dv = RangeType.FLOAT.dvRangeQuery(FLOAT_RANGE_FIELD_NAME, BinaryDocValuesRangeQuery.QueryType.INTERSECTS, -450.0f, 45000.0f, true, true); assertEquals(new IndexOrDocValuesQuery(range, dv), query); } @@ -93,7 +93,7 @@ public void testFloatRangeQuery() throws Exception { public void testDoubleRangeQuery() throws Exception { Query query = new QueryStringQueryBuilder(DOUBLE_RANGE_FIELD_NAME + ":[-450 TO 45000]").toQuery(createShardContext()); Query range = DoubleRange.newIntersectsQuery(DOUBLE_RANGE_FIELD_NAME, new double[]{-450}, new double[]{45000}); - Query dv = RangeFieldMapper.RangeType.DOUBLE.dvRangeQuery(DOUBLE_RANGE_FIELD_NAME, + Query dv = RangeType.DOUBLE.dvRangeQuery(DOUBLE_RANGE_FIELD_NAME, BinaryDocValuesRangeQuery.QueryType.INTERSECTS, -450.0, 45000.0, true, true); assertEquals(new IndexOrDocValuesQuery(range, dv), query); } @@ -106,7 +106,7 @@ public void testDateRangeQuery() throws Exception { Query range = LongRange.newIntersectsQuery(DATE_RANGE_FIELD_NAME, new long[]{ parser.parse("2010-01-01", () -> 0).toEpochMilli()}, new long[]{ parser.parse("2018-01-01", () -> 0).toEpochMilli()}); - Query dv = RangeFieldMapper.RangeType.DATE.dvRangeQuery(DATE_RANGE_FIELD_NAME, + Query dv = RangeType.DATE.dvRangeQuery(DATE_RANGE_FIELD_NAME, BinaryDocValuesRangeQuery.QueryType.INTERSECTS, parser.parse("2010-01-01", () -> 0).toEpochMilli(), parser.parse("2018-01-01", () -> 0).toEpochMilli(), true, true); @@ -118,7 +118,7 @@ public void testIPRangeQuery() throws Exception { InetAddress upper = InetAddresses.forString("192.168.0.5"); Query query = new QueryStringQueryBuilder(IP_RANGE_FIELD_NAME + ":[192.168.0.1 TO 192.168.0.5]").toQuery(createShardContext()); Query range = InetAddressRange.newIntersectsQuery(IP_RANGE_FIELD_NAME, lower, upper); - Query dv = RangeFieldMapper.RangeType.IP.dvRangeQuery(IP_RANGE_FIELD_NAME, + Query dv = RangeType.IP.dvRangeQuery(IP_RANGE_FIELD_NAME, BinaryDocValuesRangeQuery.QueryType.INTERSECTS, lower, upper, true, true); assertEquals(new IndexOrDocValuesQuery(range, dv), query); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java index 7783c90de82a4..40f24bebba00d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java @@ -40,7 +40,6 @@ import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.RangeFieldMapper.RangeFieldType; -import org.elasticsearch.index.mapper.RangeFieldMapper.RangeType; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.test.IndexSettingsModule; import org.joda.time.DateTime; @@ -434,9 +433,9 @@ private Object nextTo(Object from) throws Exception { } public void testParseIp() { - assertEquals(InetAddresses.forString("::1"), RangeFieldMapper.RangeType.IP.parse(InetAddresses.forString("::1"), randomBoolean())); - assertEquals(InetAddresses.forString("::1"), RangeFieldMapper.RangeType.IP.parse("::1", randomBoolean())); - assertEquals(InetAddresses.forString("::1"), RangeFieldMapper.RangeType.IP.parse(new 
BytesRef("::1"), randomBoolean())); + assertEquals(InetAddresses.forString("::1"), RangeType.IP.parse(InetAddresses.forString("::1"), randomBoolean())); + assertEquals(InetAddresses.forString("::1"), RangeType.IP.parse("::1", randomBoolean())); + assertEquals(InetAddresses.forString("::1"), RangeType.IP.parse(new BytesRef("::1"), randomBoolean())); } public void testTermQuery() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java index 65dce061a3daf..5f67cca02cd2e 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java @@ -31,7 +31,9 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; @@ -226,4 +228,35 @@ private void runGlobalCheckpointSyncTest( } } + public void testPersistGlobalCheckpoint() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(2); + Settings.Builder indexSettings = Settings.builder() + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), randomTimeValue(100, 1000, "ms")) + .put("index.number_of_replicas", randomIntBetween(0, 1)); + if (randomBoolean()) { + indexSettings.put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC) + .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), randomTimeValue(100, 1000, "ms")); + } + prepareCreate("test", indexSettings).get(); + if (randomBoolean()) { + ensureGreen("test"); + } + int numDocs = randomIntBetween(1, 20); + for (int i = 0; i < numDocs; i++) { + client().prepareIndex("test", "test", Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + } + ensureGreen("test"); + assertBusy(() -> { + for (IndicesService indicesService : internalCluster().getDataNodeInstances(IndicesService.class)) { + for (IndexService indexService : indicesService) { + for (IndexShard shard : indexService) { + final SeqNoStats seqNoStats = shard.seqNoStats(); + assertThat(seqNoStats.getLocalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo())); + assertThat(shard.getLastKnownGlobalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo())); + assertThat(shard.getLastSyncedGlobalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo())); + } + } + } + }); + } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 8947f4e9905e4..0d92bc3802f63 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -79,10 +79,12 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.DocIdSeqNoAndSource; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine.DeleteResult; +import 
org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.engine.InternalEngineFactory; @@ -4131,4 +4133,39 @@ protected void ensureMaxSeqNoEqualsToGlobalCheckpoint(SeqNoStats seqNoStats) { assertThat(readonlyShard.docStats().getCount(), equalTo(numDocs)); closeShards(readonlyShard); } + + public void testCloseShardWhileEngineIsWarming() throws Exception { + CountDownLatch warmerStarted = new CountDownLatch(1); + CountDownLatch warmerBlocking = new CountDownLatch(1); + IndexShard shard = newShard(true, Settings.EMPTY, config -> { + Engine.Warmer warmer = reader -> { + try { + warmerStarted.countDown(); + warmerBlocking.await(); + config.getWarmer().warm(reader); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + }; + EngineConfig configWithWarmer = new EngineConfig(config.getShardId(), config.getAllocationId(), config.getThreadPool(), + config.getIndexSettings(), warmer, config.getStore(), config.getMergePolicy(), config.getAnalyzer(), + config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), + config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), + config.getExternalRefreshListener(), config.getInternalRefreshListener(), config.getIndexSort(), + config.getCircuitBreakerService(), config.getGlobalCheckpointSupplier(), config.retentionLeasesSupplier(), + config.getPrimaryTermSupplier(), config.getTombstoneDocSupplier()); + return new InternalEngine(configWithWarmer); + }); + Thread recoveryThread = new Thread(() -> expectThrows(AlreadyClosedException.class, () -> recoverShardFromStore(shard))); + recoveryThread.start(); + try { + warmerStarted.await(); + shard.close("testing", false); + assertThat(shard.state, equalTo(IndexShardState.CLOSED)); + } finally { + warmerBlocking.countDown(); + } + recoveryThread.join(); + shard.store().close(); + } } diff --git a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java index d1fae9acceb67..01d97ea8824c5 100644 --- a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java +++ b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java @@ -63,21 +63,6 @@ public void testResolveDefaultSimilarities() { assertThat(similarityService.getSimilarity("BM25").get(), instanceOf(LegacyBM25Similarity.class)); assertThat(similarityService.getSimilarity("boolean").get(), instanceOf(BooleanSimilarity.class)); assertThat(similarityService.getSimilarity("default"), equalTo(null)); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> similarityService.getSimilarity("classic")); - assertEquals("The [classic] similarity may not be used anymore. Please use the [BM25] similarity or build a custom [scripted] " - + "similarity instead.", e.getMessage()); - } - - public void testResolveSimilaritiesFromMapping_classicIsForbidden() throws IOException { - Settings indexSettings = Settings.builder() - .put("index.similarity.my_similarity.type", "classic") - .put("index.similarity.my_similarity.discount_overlaps", false) - .build(); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> createIndex("foo", indexSettings)); - assertEquals("The [classic] similarity may not be used anymore. 
Please use the [BM25] similarity or build a custom [scripted] " - + "similarity instead.", e.getMessage()); } public void testResolveSimilaritiesFromMapping_bm25() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 24acf3bad8e03..daa6fdfe625e4 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -3305,21 +3305,25 @@ public void testSyncConcurrently() throws Exception { } assertNotNull(location); long globalCheckpoint = lastGlobalCheckpoint.get(); + final boolean synced; if (randomBoolean()) { - translog.ensureSynced(location); + synced = translog.ensureSynced(location); } else { translog.sync(); + synced = true; } for (Translog.Operation op : ops) { assertThat("seq# " + op.seqNo() + " was not marked as persisted", persistedSeqNos, hasItem(op.seqNo())); } Checkpoint checkpoint = translog.getLastSyncedCheckpoint(); assertThat(checkpoint.offset, greaterThanOrEqualTo(location.translogLocation)); - assertThat(checkpoint.globalCheckpoint, greaterThanOrEqualTo(globalCheckpoint)); for (Translog.Operation op : ops) { assertThat(checkpoint.minSeqNo, lessThanOrEqualTo(op.seqNo())); assertThat(checkpoint.maxSeqNo, greaterThanOrEqualTo(op.seqNo())); } + if (synced) { + assertThat(checkpoint.globalCheckpoint, greaterThanOrEqualTo(globalCheckpoint)); + } } catch (Exception e) { throw new AssertionError(e); } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 039e6da224a67..995e0a53ec234 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -92,6 +92,7 @@ import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.engine.MockEngineSupport; +import org.elasticsearch.test.junit.annotations.TestIssueLogging; import org.elasticsearch.test.store.MockFSIndexStore; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.test.transport.StubbableTransport; @@ -856,6 +857,7 @@ public void sendRequest(Transport.Connection connection, long requestId, String } } + @TestIssueLogging(value = "org.elasticsearch:DEBUG", issueUrl = "https://github.com/elastic/elasticsearch/issues/45953") public void testHistoryRetention() throws Exception { internalCluster().startNodes(3); @@ -907,8 +909,9 @@ public void testHistoryRetention() throws Exception { recoveryStates.removeIf(r -> r.getTimer().getStartNanoTime() <= desyncNanoTime); assertThat(recoveryStates, hasSize(1)); - assertThat(recoveryStates.get(0).getIndex().totalFileCount(), is(0)); - assertThat(recoveryStates.get(0).getTranslog().recoveredOperations(), greaterThan(0)); + final RecoveryState recoveryState = recoveryStates.get(0); + assertThat(recoveryState.toString(), recoveryState.getIndex().totalFileCount(), is(0)); + assertThat(recoveryState.getTranslog().recoveredOperations(), greaterThan(0)); } public void testDoNotInfinitelyWaitForMapping() { diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index 81eb2bc20eaa0..3db51082b7e9d 
100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.NoOpEngine; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.seqno.SeqNoStats; @@ -173,8 +174,14 @@ public void testPrepareIndexForPeerRecovery() throws Exception { long globalCheckpoint = populateRandomData(shard).getGlobalCheckpoint(); Optional safeCommit = shard.store().findSafeIndexCommit(globalCheckpoint); assertTrue(safeCommit.isPresent()); + final Translog.TranslogGeneration recoveringTranslogGeneration; + try (Engine.IndexCommitRef commitRef = shard.acquireSafeIndexCommit()) { + recoveringTranslogGeneration = new Translog.TranslogGeneration( + commitRef.getIndexCommit().getUserData().get(Translog.TRANSLOG_UUID_KEY), + Long.parseLong(commitRef.getIndexCommit().getUserData().get(Translog.TRANSLOG_GENERATION_KEY))); + } int expectedTotalLocal = 0; - try (Translog.Snapshot snapshot = getTranslog(shard).newSnapshotFromMinSeqNo(safeCommit.get().localCheckpoint + 1)) { + try (Translog.Snapshot snapshot = getTranslog(shard).newSnapshotFromGen(recoveringTranslogGeneration, globalCheckpoint)) { Translog.Operation op; while ((op = snapshot.next()) != null) { if (op.seqNo() <= globalCheckpoint) { diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java index d79763a9f6eab..7a1bcefea9d95 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java @@ -161,7 +161,7 @@ public void initializeSnapshot(SnapshotId snapshotId, List indices, Met @Override public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long startTime, String failure, int totalShards, List shardFailures, long repositoryStateId, - boolean includeGlobalState, Map userMetadata) { + boolean includeGlobalState, MetaData metaData, Map userMetadata) { return null; } @@ -202,7 +202,7 @@ public boolean isReadOnly() { @Override public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, IndexCommit - snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { + snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus, ActionListener listener) { } diff --git a/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java b/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java index dd4ca7bfd20e4..b89635af97d47 100644 --- a/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java +++ b/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; import java.io.IOException; @@ -37,23 +36,22 @@ import static org.hamcrest.Matchers.instanceOf; 
public class FsBlobStoreRepositoryIT extends ESBlobStoreRepositoryIntegTestCase { + @Override - protected void createTestRepository(String name, boolean verify) { - assertAcked(client().admin().cluster().preparePutRepository(name) - .setVerify(verify) - .setType("fs").setSettings(Settings.builder() - .put("location", randomRepoPath()) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); + protected String repositoryType() { + return FsRepository.TYPE; } @Override - protected void afterCreationCheck(Repository repository) { - assertThat(repository, instanceOf(FsRepository.class)); + protected Settings repositorySettings() { + return Settings.builder() + .put(super.repositorySettings()) + .put("location", randomRepoPath()) + .build(); } public void testMissingDirectoriesNotCreatedInReadonlyRepository() throws IOException, ExecutionException, InterruptedException { - final String repoName = randomAsciiName(); + final String repoName = randomName(); final Path repoPath = randomRepoPath(); logger.info("--> creating repository {} at {}", repoName, repoPath); @@ -63,13 +61,13 @@ public void testMissingDirectoriesNotCreatedInReadonlyRepository() throws IOExce .put("compress", randomBoolean()) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); - String indexName = randomAsciiName(); + final String indexName = randomName(); int docCount = iterations(10, 1000); logger.info("--> create random index {} with {} records", indexName, docCount); addRandomDocuments(indexName, docCount); assertHitCount(client().prepareSearch(indexName).setSize(0).get(), docCount); - final String snapshotName = randomAsciiName(); + final String snapshotName = randomName(); logger.info("--> create snapshot {}:{}", repoName, snapshotName); assertSuccessfulSnapshot(client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName) .setWaitForCompletion(true).setIndices(indexName)); diff --git a/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java index 1dc7a6263d37b..6c48a19cbb5e6 100644 --- a/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java @@ -35,6 +35,7 @@ import org.apache.lucene.util.IOSupplier; import org.apache.lucene.util.TestUtil; import org.elasticsearch.Version; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -99,10 +100,12 @@ public void testSnapshotAndRestore() throws IOException, InterruptedException { IndexId indexId = new IndexId(idxSettings.getIndex().getName(), idxSettings.getUUID()); IndexCommit indexCommit = Lucene.getIndexCommit(Lucene.readSegmentInfos(store.directory()), store.directory()); + final PlainActionFuture future1 = PlainActionFuture.newFuture(); runGeneric(threadPool, () -> { IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing(); repository.snapshotShard(store, null, snapshotId, indexId, indexCommit, - snapshotStatus); + snapshotStatus, future1); + future1.actionGet(); IndexShardSnapshotStatus.Copy copy = snapshotStatus.asCopy(); assertEquals(copy.getTotalFileCount(), copy.getIncrementalFileCount()); }); @@ -124,9 +127,11 @@ public void testSnapshotAndRestore() throws IOException, 
InterruptedException { SnapshotId incSnapshotId = new SnapshotId("test1", "test1"); IndexCommit incIndexCommit = Lucene.getIndexCommit(Lucene.readSegmentInfos(store.directory()), store.directory()); Collection commitFileNames = incIndexCommit.getFileNames(); + final PlainActionFuture future2 = PlainActionFuture.newFuture(); runGeneric(threadPool, () -> { IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing(); - repository.snapshotShard(store, null, incSnapshotId, indexId, incIndexCommit, snapshotStatus); + repository.snapshotShard(store, null, incSnapshotId, indexId, incIndexCommit, snapshotStatus, future2); + future2.actionGet(); IndexShardSnapshotStatus.Copy copy = snapshotStatus.asCopy(); assertEquals(2, copy.getIncrementalFileCount()); assertEquals(commitFileNames.size(), copy.getTotalFileCount()); @@ -198,4 +203,5 @@ private int indexDocs(Directory directory) throws IOException { return docs; } } + } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java index 17581b9458413..4eba98dd49b9d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java @@ -70,7 +70,7 @@ public void testMatchNoDocsDeprecatedInterval() throws IOException { histogram -> { assertEquals(0, histogram.getBuckets().size()); assertFalse(AggregationInspectionHelper.hasValue(histogram)); - } + }, false ); assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } @@ -78,11 +78,11 @@ public void testMatchNoDocsDeprecatedInterval() throws IOException { public void testMatchNoDocs() throws IOException { testBothCases(new MatchNoDocsQuery(), dataset, aggregation -> aggregation.calendarInterval(DateHistogramInterval.YEAR).field(DATE_FIELD), - histogram -> assertEquals(0, histogram.getBuckets().size()) + histogram -> assertEquals(0, histogram.getBuckets().size()), false ); testBothCases(new MatchNoDocsQuery(), dataset, aggregation -> aggregation.fixedInterval(new DateHistogramInterval("365d")).field(DATE_FIELD), - histogram -> assertEquals(0, histogram.getBuckets().size()) + histogram -> assertEquals(0, histogram.getBuckets().size()), false ); } @@ -94,21 +94,21 @@ public void testMatchAllDocsDeprecatedInterval() throws IOException { histogram -> { assertEquals(6, histogram.getBuckets().size()); assertTrue(AggregationInspectionHelper.hasValue(histogram)); - } + }, false ); testSearchAndReduceCase(query, dataset, aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.YEAR).field(DATE_FIELD), histogram -> { assertEquals(8, histogram.getBuckets().size()); assertTrue(AggregationInspectionHelper.hasValue(histogram)); - } + }, false ); testBothCases(query, dataset, aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.YEAR).field(DATE_FIELD).minDocCount(1L), histogram -> { assertEquals(6, histogram.getBuckets().size()); assertTrue(AggregationInspectionHelper.hasValue(histogram)); - } + }, false ); assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } @@ -122,33 +122,33 @@ public void testMatchAllDocs() throws IOException { } testSearchAndReduceCase(query, foo, aggregation -> 
aggregation.fixedInterval(new DateHistogramInterval("365d")).field(DATE_FIELD).order(BucketOrder.count(false)), - histogram -> assertEquals(8, histogram.getBuckets().size()) + histogram -> assertEquals(8, histogram.getBuckets().size()), false ); testSearchCase(query, dataset, aggregation -> aggregation.calendarInterval(DateHistogramInterval.YEAR).field(DATE_FIELD), - histogram -> assertEquals(6, histogram.getBuckets().size()) + histogram -> assertEquals(6, histogram.getBuckets().size()), false ); testSearchAndReduceCase(query, dataset, aggregation -> aggregation.calendarInterval(DateHistogramInterval.YEAR).field(DATE_FIELD), - histogram -> assertEquals(8, histogram.getBuckets().size()) + histogram -> assertEquals(8, histogram.getBuckets().size()), false ); testBothCases(query, dataset, aggregation -> aggregation.calendarInterval(DateHistogramInterval.YEAR).field(DATE_FIELD).minDocCount(1L), - histogram -> assertEquals(6, histogram.getBuckets().size()) + histogram -> assertEquals(6, histogram.getBuckets().size()), false ); testSearchCase(query, dataset, aggregation -> aggregation.fixedInterval(new DateHistogramInterval("365d")).field(DATE_FIELD), - histogram -> assertEquals(6, histogram.getBuckets().size()) + histogram -> assertEquals(6, histogram.getBuckets().size()), false ); testSearchAndReduceCase(query, dataset, aggregation -> aggregation.fixedInterval(new DateHistogramInterval("365d")).field(DATE_FIELD), - histogram -> assertEquals(8, histogram.getBuckets().size()) + histogram -> assertEquals(8, histogram.getBuckets().size()), false ); testBothCases(query, dataset, aggregation -> aggregation.fixedInterval(new DateHistogramInterval("365d")).field(DATE_FIELD).minDocCount(1L), - histogram -> assertEquals(6, histogram.getBuckets().size()) + histogram -> assertEquals(6, histogram.getBuckets().size()), false ); } @@ -162,10 +162,10 @@ public void testNoDocsDeprecatedInterval() throws IOException { histogram -> { assertEquals(0, histogram.getBuckets().size()); assertFalse(AggregationInspectionHelper.hasValue(histogram)); - } + }, false ); testSearchAndReduceCase(query, dates, aggregation, - histogram -> assertNull(histogram) + histogram -> assertNull(histogram), false ); assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } @@ -176,19 +176,19 @@ public void testNoDocs() throws IOException { Consumer aggregation = agg -> agg.calendarInterval(DateHistogramInterval.YEAR).field(DATE_FIELD); testSearchCase(query, dates, aggregation, - histogram -> assertEquals(0, histogram.getBuckets().size()) + histogram -> assertEquals(0, histogram.getBuckets().size()), false ); testSearchAndReduceCase(query, dates, aggregation, - histogram -> assertNull(histogram) + histogram -> assertNull(histogram), false ); aggregation = agg -> agg.fixedInterval(new DateHistogramInterval("365d")).field(DATE_FIELD); testSearchCase(query, dates, aggregation, - histogram -> assertEquals(0, histogram.getBuckets().size()) + histogram -> assertEquals(0, histogram.getBuckets().size()), false ); testSearchAndReduceCase(query, dates, aggregation, - histogram -> assertNull(histogram) + histogram -> assertNull(histogram), false ); } @@ -198,7 +198,7 @@ public void testAggregateWrongFieldDeprecated() throws IOException { histogram -> { assertEquals(0, histogram.getBuckets().size()); assertFalse(AggregationInspectionHelper.hasValue(histogram)); - } + }, false ); assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] 
in the future."); } @@ -206,11 +206,11 @@ public void testAggregateWrongFieldDeprecated() throws IOException { public void testAggregateWrongField() throws IOException { testBothCases(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.calendarInterval(DateHistogramInterval.YEAR).field("wrong_field"), - histogram -> assertEquals(0, histogram.getBuckets().size()) + histogram -> assertEquals(0, histogram.getBuckets().size()), false ); testBothCases(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.fixedInterval(new DateHistogramInterval("365d")).field("wrong_field"), - histogram -> assertEquals(0, histogram.getBuckets().size()) + histogram -> assertEquals(0, histogram.getBuckets().size()), false ); } @@ -232,7 +232,7 @@ public void testIntervalYearDeprecated() throws IOException { bucket = buckets.get(2); assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString()); assertEquals(1, bucket.getDocCount()); - } + }, false ); assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } @@ -255,7 +255,7 @@ public void testIntervalYear() throws IOException { bucket = buckets.get(2); assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString()); assertEquals(1, bucket.getDocCount()); - } + }, false ); } @@ -278,7 +278,7 @@ public void testIntervalMonthDeprecated() throws IOException { bucket = buckets.get(2); assertEquals("2017-03-01T00:00:00.000Z", bucket.getKeyAsString()); assertEquals(3, bucket.getDocCount()); - } + }, false ); assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } @@ -302,7 +302,7 @@ public void testIntervalMonth() throws IOException { bucket = buckets.get(2); assertEquals("2017-03-01T00:00:00.000Z", bucket.getKeyAsString()); assertEquals(3, bucket.getDocCount()); - } + }, false ); } @@ -337,7 +337,7 @@ public void testIntervalDayDeprecated() throws IOException { bucket = buckets.get(3); assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString()); assertEquals(1, bucket.getDocCount()); - } + }, false ); assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } @@ -373,7 +373,7 @@ public void testIntervalDay() throws IOException { bucket = buckets.get(3); assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString()); assertEquals(1, bucket.getDocCount()); - } + }, false ); testBothCases(new MatchAllDocsQuery(), Arrays.asList( @@ -405,7 +405,7 @@ public void testIntervalDay() throws IOException { bucket = buckets.get(3); assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString()); assertEquals(1, bucket.getDocCount()); - } + }, false ); } @@ -451,7 +451,7 @@ public void testIntervalHourDeprecated() throws IOException { bucket = buckets.get(5); assertEquals("2017-02-01T16:00:00.000Z", bucket.getKeyAsString()); assertEquals(3, bucket.getDocCount()); - } + }, false ); assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } @@ -498,7 +498,7 @@ public void testIntervalHour() throws IOException { bucket = buckets.get(5); assertEquals("2017-02-01T16:00:00.000Z", bucket.getKeyAsString()); assertEquals(3, bucket.getDocCount()); - } + }, false ); testBothCases(new MatchAllDocsQuery(), Arrays.asList( @@ -541,7 +541,7 @@ public void testIntervalHour() throws IOException { bucket = buckets.get(5); assertEquals("2017-02-01T16:00:00.000Z", bucket.getKeyAsString()); assertEquals(3, 
bucket.getDocCount()); - } + }, false ); } @@ -570,7 +570,7 @@ public void testIntervalMinuteDeprecated() throws IOException { bucket = buckets.get(2); assertEquals("2017-02-01T09:16:00.000Z", bucket.getKeyAsString()); assertEquals(2, bucket.getDocCount()); - } + }, false ); assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } @@ -600,7 +600,7 @@ public void testIntervalMinute() throws IOException { bucket = buckets.get(2); assertEquals("2017-02-01T09:16:00.000Z", bucket.getKeyAsString()); assertEquals(2, bucket.getDocCount()); - } + }, false ); testBothCases(new MatchAllDocsQuery(), Arrays.asList( @@ -626,7 +626,7 @@ public void testIntervalMinute() throws IOException { bucket = buckets.get(2); assertEquals("2017-02-01T09:16:00.000Z", bucket.getKeyAsString()); assertEquals(2, bucket.getDocCount()); - } + }, false ); } @@ -656,7 +656,7 @@ public void testIntervalSecondDeprecated() throws IOException { bucket = buckets.get(2); assertEquals("2017-02-01T00:00:37.000Z", bucket.getKeyAsString()); assertEquals(3, bucket.getDocCount()); - } + }, false ); assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } @@ -687,7 +687,7 @@ public void testIntervalSecond() throws IOException { bucket = buckets.get(2); assertEquals("2017-02-01T00:00:37.000Z", bucket.getKeyAsString()); assertEquals(3, bucket.getDocCount()); - } + }, false ); testBothCases(new MatchAllDocsQuery(), Arrays.asList( @@ -714,7 +714,64 @@ public void testIntervalSecond() throws IOException { bucket = buckets.get(2); assertEquals("2017-02-01T00:00:37.000Z", bucket.getKeyAsString()); assertEquals(3, bucket.getDocCount()); - } + }, false + ); + } + + public void testNanosIntervalSecond() throws IOException { + testBothCases(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01T00:00:05.015298384Z", + "2017-02-01T00:00:11.299954583Z", + "2017-02-01T00:00:11.074986434Z", + "2017-02-01T00:00:37.688314602Z", + "2017-02-01T00:00:37.210328172Z", + "2017-02-01T00:00:37.380889483Z" + ), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.SECOND).field(DATE_FIELD).minDocCount(1L), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(3, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:05.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T00:00:11.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-01T00:00:37.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + }, true + ); + testBothCases(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01T00:00:05.015298384Z", + "2017-02-01T00:00:11.299954583Z", + "2017-02-01T00:00:11.074986434Z", + "2017-02-01T00:00:37.688314602Z", + "2017-02-01T00:00:37.210328172Z", + "2017-02-01T00:00:37.380889483Z" + ), + aggregation -> aggregation.fixedInterval(new DateHistogramInterval("1000ms")).field(DATE_FIELD).minDocCount(1L), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(3, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:05.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T00:00:11.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + 
bucket = buckets.get(2); + assertEquals("2017-02-01T00:00:37.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + }, true ); } @@ -750,7 +807,7 @@ public void testMinDocCountDeprecated() throws IOException { bucket = buckets.get(3); assertEquals("2017-02-01T00:00:20.000Z", bucket.getKeyAsString()); assertEquals(1, bucket.getDocCount()); - } + }, false ); // 5 sec interval with minDocCount = 3 @@ -763,7 +820,7 @@ public void testMinDocCountDeprecated() throws IOException { Histogram.Bucket bucket = buckets.get(0); assertEquals("2017-02-01T00:00:10.000Z", bucket.getKeyAsString()); assertEquals(3, bucket.getDocCount()); - } + }, false ); assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } @@ -800,7 +857,7 @@ public void testMinDocCount() throws IOException { bucket = buckets.get(3); assertEquals("2017-02-01T00:00:20.000Z", bucket.getKeyAsString()); assertEquals(1, bucket.getDocCount()); - } + }, false ); // 5 sec interval with minDocCount = 3 @@ -813,7 +870,7 @@ public void testMinDocCount() throws IOException { Histogram.Bucket bucket = buckets.get(0); assertEquals("2017-02-01T00:00:10.000Z", bucket.getKeyAsString()); assertEquals(3, bucket.getDocCount()); - } + }, false ); } @@ -827,15 +884,15 @@ public void testMaxBucket() throws IOException { expectThrows(TooManyBucketsException.class, () -> testSearchCase(query, timestamps, aggregation -> aggregation.fixedInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD), - histogram -> {}, 2)); + histogram -> {}, 2, false)); expectThrows(TooManyBucketsException.class, () -> testSearchAndReduceCase(query, timestamps, aggregation -> aggregation.fixedInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD), - histogram -> {}, 2)); + histogram -> {}, 2, false)); expectThrows(TooManyBucketsException.class, () -> testSearchAndReduceCase(query, timestamps, aggregation -> aggregation.fixedInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD).minDocCount(0L), - histogram -> {}, 100)); + histogram -> {}, 100, false)); expectThrows(TooManyBucketsException.class, () -> testSearchAndReduceCase(query, timestamps, aggregation -> @@ -846,7 +903,7 @@ public void testMaxBucket() throws IOException { .fixedInterval(DateHistogramInterval.seconds(5)) .field(DATE_FIELD) ), - histogram -> {}, 5)); + histogram -> {}, 5, false)); } public void testMaxBucketDeprecated() throws IOException { @@ -859,15 +916,15 @@ public void testMaxBucketDeprecated() throws IOException { expectThrows(TooManyBucketsException.class, () -> testSearchCase(query, timestamps, aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD), - histogram -> {}, 2)); + histogram -> {}, 2, false)); expectThrows(TooManyBucketsException.class, () -> testSearchAndReduceCase(query, timestamps, aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD), - histogram -> {}, 2)); + histogram -> {}, 2, false)); expectThrows(TooManyBucketsException.class, () -> testSearchAndReduceCase(query, timestamps, aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD).minDocCount(0L), - histogram -> {}, 100)); + histogram -> {}, 100, false)); expectThrows(TooManyBucketsException.class, () -> testSearchAndReduceCase(query, timestamps, aggregation -> @@ -878,7 +935,7 @@ public void testMaxBucketDeprecated() throws IOException { .dateHistogramInterval(DateHistogramInterval.seconds(5)) 
.field(DATE_FIELD) ), - histogram -> {}, 5)); + histogram -> {}, 5, false)); assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } @@ -894,7 +951,7 @@ public void testFixedWithCalendar() throws IOException { "2017-02-05" ), aggregation -> aggregation.fixedInterval(DateHistogramInterval.WEEK).field(DATE_FIELD), - histogram -> {} + histogram -> {}, false )); assertThat(e.getMessage(), equalTo("failed to parse setting [date_histogram.fixedInterval] with value [1w] as a time value: " + "unit is missing or unrecognized")); @@ -912,7 +969,7 @@ public void testCalendarWithFixed() throws IOException { "2017-02-05" ), aggregation -> aggregation.calendarInterval(new DateHistogramInterval("5d")).field(DATE_FIELD), - histogram -> {} + histogram -> {}, false )); assertThat(e.getMessage(), equalTo("The supplied interval [5d] could not be parsed as a calendar interval.")); } @@ -931,7 +988,7 @@ public void testCalendarAndThenFixed() throws IOException { aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY) .fixedInterval(new DateHistogramInterval("2d")) .field(DATE_FIELD), - histogram -> {} + histogram -> {}, false )); assertThat(e.getMessage(), equalTo("Cannot use [fixed_interval] with [calendar_interval] configuration option.")); } @@ -950,7 +1007,7 @@ public void testFixedAndThenCalendar() throws IOException { aggregation -> aggregation.fixedInterval(new DateHistogramInterval("2d")) .calendarInterval(DateHistogramInterval.DAY) .field(DATE_FIELD), - histogram -> {} + histogram -> {}, false )); assertThat(e.getMessage(), equalTo("Cannot use [calendar_interval] with [fixed_interval] configuration option.")); } @@ -969,7 +1026,7 @@ public void testNewThenLegacy() throws IOException { aggregation -> aggregation.fixedInterval(new DateHistogramInterval("2d")) .dateHistogramInterval(DateHistogramInterval.DAY) .field(DATE_FIELD), - histogram -> {} + histogram -> {}, false )); assertThat(e.getMessage(), equalTo("Cannot use [interval] with [fixed_interval] or [calendar_interval] configuration options.")); @@ -986,7 +1043,7 @@ public void testNewThenLegacy() throws IOException { aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY) .dateHistogramInterval(DateHistogramInterval.DAY) .field(DATE_FIELD), - histogram -> {} + histogram -> {}, false )); assertThat(e.getMessage(), equalTo("Cannot use [interval] with [fixed_interval] or [calendar_interval] configuration options.")); @@ -1003,7 +1060,7 @@ public void testNewThenLegacy() throws IOException { aggregation -> aggregation.fixedInterval(new DateHistogramInterval("2d")) .interval(1000) .field(DATE_FIELD), - histogram -> {} + histogram -> {}, false )); assertThat(e.getMessage(), equalTo("Cannot use [interval] with [fixed_interval] or [calendar_interval] configuration options.")); @@ -1020,7 +1077,7 @@ public void testNewThenLegacy() throws IOException { aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY) .interval(1000) .field(DATE_FIELD), - histogram -> {} + histogram -> {}, false )); assertThat(e.getMessage(), equalTo("Cannot use [interval] with [fixed_interval] or [calendar_interval] configuration options.")); } @@ -1039,7 +1096,7 @@ public void testLegacyThenNew() throws IOException { aggregation -> aggregation .dateHistogramInterval(DateHistogramInterval.DAY) .fixedInterval(new DateHistogramInterval("2d")) .field(DATE_FIELD), - histogram -> {} + histogram -> {}, false )); assertThat(e.getMessage(), equalTo("Cannot use [fixed_interval] 
with [interval] configuration option.")); @@ -1056,7 +1113,7 @@ public void testLegacyThenNew() throws IOException { aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.DAY) .calendarInterval(DateHistogramInterval.DAY) .field(DATE_FIELD), - histogram -> {} + histogram -> {}, false )); assertThat(e.getMessage(), equalTo("Cannot use [calendar_interval] with [interval] configuration option.")); @@ -1073,7 +1130,7 @@ public void testLegacyThenNew() throws IOException { aggregation -> aggregation.interval(1000) .fixedInterval(new DateHistogramInterval("2d")) .field(DATE_FIELD), - histogram -> {} + histogram -> {}, false )); assertThat(e.getMessage(), equalTo("Cannot use [fixed_interval] with [interval] configuration option.")); @@ -1090,7 +1147,7 @@ public void testLegacyThenNew() throws IOException { aggregation -> aggregation.interval(1000) .calendarInterval(DateHistogramInterval.DAY) .field(DATE_FIELD), - histogram -> {} + histogram -> {}, false )); assertThat(e.getMessage(), equalTo("Cannot use [calendar_interval] with [interval] configuration option.")); @@ -1101,7 +1158,7 @@ public void testIllegalInterval() throws IOException { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> testSearchCase(new MatchAllDocsQuery(), Collections.emptyList(), aggregation -> aggregation.dateHistogramInterval(new DateHistogramInterval("foobar")).field(DATE_FIELD), - histogram -> {} + histogram -> {}, false )); assertThat(e.getMessage(), equalTo("Unable to parse interval [foobar]")); assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); @@ -1109,50 +1166,59 @@ public void testIllegalInterval() throws IOException { private void testSearchCase(Query query, List dataset, Consumer configure, - Consumer verify) throws IOException { - testSearchCase(query, dataset, configure, verify, 10000); + Consumer verify, boolean useNanosecondResolution) throws IOException { + testSearchCase(query, dataset, configure, verify, 10000, useNanosecondResolution); } private void testSearchCase(Query query, List dataset, Consumer configure, Consumer verify, - int maxBucket) throws IOException { - executeTestCase(false, query, dataset, configure, verify, maxBucket); + int maxBucket, boolean useNanosecondResolution) throws IOException { + executeTestCase(false, query, dataset, configure, verify, maxBucket, useNanosecondResolution); } private void testSearchAndReduceCase(Query query, List dataset, Consumer configure, - Consumer verify) throws IOException { - testSearchAndReduceCase(query, dataset, configure, verify, 1000); + Consumer verify, boolean useNanosecondResolution) throws IOException { + testSearchAndReduceCase(query, dataset, configure, verify, 1000, useNanosecondResolution); } private void testSearchAndReduceCase(Query query, List dataset, Consumer configure, Consumer verify, - int maxBucket) throws IOException { - executeTestCase(true, query, dataset, configure, verify, maxBucket); + int maxBucket, boolean useNanosecondResolution) throws IOException { + executeTestCase(true, query, dataset, configure, verify, maxBucket, useNanosecondResolution); } private void testBothCases(Query query, List dataset, Consumer configure, - Consumer verify) throws IOException { - testBothCases(query, dataset, configure, verify, 10000); + Consumer verify, boolean useNanosecondResolution) throws IOException { + testBothCases(query, dataset, configure, verify, 10000, useNanosecondResolution); } private void testBothCases(Query 
query, List dataset, Consumer configure, Consumer verify, - int maxBucket) throws IOException { - testSearchCase(query, dataset, configure, verify, maxBucket); - testSearchAndReduceCase(query, dataset, configure, verify, maxBucket); + int maxBucket, boolean useNanosecondResolution) throws IOException { + testSearchCase(query, dataset, configure, verify, maxBucket, useNanosecondResolution); + testSearchAndReduceCase(query, dataset, configure, verify, maxBucket, useNanosecondResolution); } - private void executeTestCase(boolean reduced, Query query, List dataset, + private void executeTestCase(boolean reduced, + Query query, + List dataset, Consumer configure, Consumer verify, - int maxBucket) throws IOException { + int maxBucket, boolean useNanosecondResolution) throws IOException { try (Directory directory = newDirectory()) { + DateFieldMapper.Builder builder = new DateFieldMapper.Builder("_name"); + if (useNanosecondResolution) { + builder.withResolution(DateFieldMapper.Resolution.NANOSECONDS); + } + DateFieldMapper.DateFieldType fieldType = builder.fieldType(); + fieldType.setHasDocValues(true); + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { Document document = new Document(); for (String date : dataset) { @@ -1160,7 +1226,7 @@ private void executeTestCase(boolean reduced, Query query, List dataset, indexWriter.commit(); } - long instant = asLong(date); + long instant = asLong(date, fieldType); document.add(new SortedNumericDocValuesField(DATE_FIELD, instant)); document.add(new LongPoint(INSTANT_FIELD, instant)); indexWriter.addDocument(document); @@ -1176,9 +1242,6 @@ private void executeTestCase(boolean reduced, Query query, List dataset, configure.accept(aggregationBuilder); } - DateFieldMapper.Builder builder = new DateFieldMapper.Builder("_name"); - DateFieldMapper.DateFieldType fieldType = builder.fieldType(); - fieldType.setHasDocValues(true); fieldType.setName(aggregationBuilder.field()); InternalDateHistogram histogram; @@ -1195,4 +1258,8 @@ private void executeTestCase(boolean reduced, Query query, List dataset, private static long asLong(String dateTime) { return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); } + + private static long asLong(String dateTime, DateFieldMapper.DateFieldType fieldType) { + return fieldType.parse(dateTime); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java new file mode 100644 index 0000000000000..1a629b745b4e1 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java @@ -0,0 +1,684 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.histogram; + +import org.apache.lucene.document.BinaryDocValuesField; +import org.apache.lucene.document.Document; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.queries.BinaryDocValuesRangeQuery; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; +import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; + +import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.util.function.Consumer; + +import static java.util.Collections.singleton; + +public class DateRangeHistogramAggregatorTests extends AggregatorTestCase { + + public static final String FIELD_NAME = "fieldName"; + + public void testBasics() throws Exception { + RangeFieldMapper.Range range = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-01T12:14:36"), + asLong("2019-08-01T15:07:22"), true, true); + testCase( + new MatchAllDocsQuery(), + builder -> builder.calendarInterval(DateHistogramInterval.DAY), + writer -> writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range))))), + histo -> { + assertEquals(1, histo.getBuckets().size()); + assertTrue(AggregationInspectionHelper.hasValue(histo)); + } + ); + } + + public void testUnsupportedRangeType() throws Exception { + RangeType rangeType = RangeType.LONG; + final String fieldName = "field"; + + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + Document doc = new Document(); + BytesRef encodedRange = + rangeType.encodeRanges(singleton(new RangeFieldMapper.Range(rangeType, 12234, 89765, true, true))); + doc.add(new BinaryDocValuesField(fieldName, encodedRange)); + w.addDocument(doc); + + DateHistogramAggregationBuilder aggBuilder = new DateHistogramAggregationBuilder("my_agg") + .field(fieldName) + .calendarInterval(DateHistogramInterval.MONTH); + + MappedFieldType fieldType = new RangeFieldMapper.Builder(fieldName, rangeType).fieldType(); + fieldType.setName(fieldName); + + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + expectThrows(IllegalArgumentException.class, () -> createAggregator(aggBuilder, searcher, fieldType)); + } + } + } + + /* + * Test calendar interval behaves correctly on months over 30 days + */ + public void testLongMonthsCalendarInterval() throws Exception { + RangeFieldMapper.Range julyRange = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T00:00:00"), + asLong("2019-07-31T23:59:59"), true, true); + RangeFieldMapper.Range augustRange = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-01T00:00:00"), + asLong("2019-08-31T23:59:59"), true, true); 
+ RangeFieldMapper.Range septemberRange = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-09-01T00:00:00"), + asLong("2019-09-30T23:59:59"), true, true); + + // Calendar interval case - three months, three buckets + testCase( + new MatchAllDocsQuery(), + builder -> builder.calendarInterval(DateHistogramInterval.MONTH), + writer -> { + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(julyRange))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(augustRange))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(septemberRange))))); + }, + histo -> { + assertEquals(3, histo.getBuckets().size()); + + assertEquals(asZDT("2019-07-01T00:00:00"), histo.getBuckets().get(0).getKey()); + assertEquals(1, histo.getBuckets().get(0).getDocCount()); + + assertEquals(asZDT("2019-08-01T00:00:00"), histo.getBuckets().get(1).getKey()); + assertEquals(1, histo.getBuckets().get(1).getDocCount()); + + assertEquals(asZDT("2019-09-01T00:00:00"), histo.getBuckets().get(2).getKey()); + assertEquals(1, histo.getBuckets().get(2).getDocCount()); + + assertTrue(AggregationInspectionHelper.hasValue(histo)); + } + ); + } + + /* + * Test fixed interval 30d behaves correctly with months over 30 days + */ + public void testLongMonthsFixedInterval() throws Exception { + RangeFieldMapper.Range julyRange = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T00:00:00"), + asLong("2019-07-31T23:59:59"), true, true); + RangeFieldMapper.Range augustRange = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-01T00:00:00"), + asLong("2019-08-31T23:59:59"), true, true); + RangeFieldMapper.Range septemberRange = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-09-01T00:00:00"), + asLong("2019-09-30T23:59:59"), true, true); + + // Fixed interval case - 4 periods of 30 days + testCase( + new MatchAllDocsQuery(), + builder -> builder.fixedInterval(new DateHistogramInterval("30d")), + writer -> { + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(julyRange))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(augustRange))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(septemberRange))))); + }, + histo -> { + assertEquals(4, histo.getBuckets().size()); + + assertEquals(asZDT("2019-06-13T00:00:00"), histo.getBuckets().get(0).getKey()); + assertEquals(1, histo.getBuckets().get(0).getDocCount()); + + assertEquals(asZDT("2019-07-13T00:00:00"), histo.getBuckets().get(1).getKey()); + assertEquals(2, histo.getBuckets().get(1).getDocCount()); + + assertEquals(asZDT("2019-08-12T00:00:00"), histo.getBuckets().get(2).getKey()); + assertEquals(2, histo.getBuckets().get(2).getDocCount()); + + assertEquals(asZDT("2019-09-11T00:00:00"), histo.getBuckets().get(3).getKey()); + assertEquals(1, histo.getBuckets().get(3).getDocCount()); + + assertTrue(AggregationInspectionHelper.hasValue(histo)); + } + ); + } + + public void testOffsetCalendarInterval() throws Exception { + + RangeFieldMapper.Range range1 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T03:15:00"), + asLong("2019-07-01T03:20:00"), true, true); + RangeFieldMapper.Range range2 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T03:45:00"), + asLong("2019-07-01T03:50:00"), true,
true); + RangeFieldMapper.Range range3 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T03:55:00"), + asLong("2019-07-01T04:05:00"), true, true); + RangeFieldMapper.Range range4 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T04:17:00"), + asLong("2019-07-01T04:19:00"), true, true); + RangeFieldMapper.Range range5 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T04:55:00"), + asLong("2019-07-01T05:05:00"), true, true); + + // No offset, just to make sure the ranges line up as expected + testCase( + new MatchAllDocsQuery(), + builder -> builder.calendarInterval(DateHistogramInterval.HOUR), + writer -> { + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5))))); + }, + histo -> { + assertEquals(3, histo.getBuckets().size()); + + assertEquals(asZDT("2019-07-01T03:00:00"), histo.getBuckets().get(0).getKey()); + assertEquals(3, histo.getBuckets().get(0).getDocCount()); + + assertEquals(asZDT("2019-07-01T04:00:00"), histo.getBuckets().get(1).getKey()); + assertEquals(3, histo.getBuckets().get(1).getDocCount()); + + assertEquals(asZDT("2019-07-01T05:00:00"), histo.getBuckets().get(2).getKey()); + assertEquals(1, histo.getBuckets().get(2).getDocCount()); + + assertTrue(AggregationInspectionHelper.hasValue(histo)); + } + ); + + // 10 minute offset should shift all data into one bucket + testCase( + new MatchAllDocsQuery(), + builder -> builder.calendarInterval(DateHistogramInterval.HOUR).offset("10m"), + writer -> { + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5))))); + }, + histo -> { + assertEquals(2, histo.getBuckets().size()); + + assertEquals(asZDT("2019-07-01T03:10:00"), histo.getBuckets().get(0).getKey()); + assertEquals(3, histo.getBuckets().get(0).getDocCount()); + + assertEquals(asZDT("2019-07-01T04:10:00"), histo.getBuckets().get(1).getKey()); + assertEquals(2, histo.getBuckets().get(1).getDocCount()); + + assertTrue(AggregationInspectionHelper.hasValue(histo)); + } + ); + } + + public void testOffsetFixedInterval() throws Exception { + + RangeFieldMapper.Range range1 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T03:15:00"), + asLong("2019-07-01T03:20:00"), true, true); + RangeFieldMapper.Range range2 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T03:45:00"), + asLong("2019-07-01T03:50:00"), true, true); + RangeFieldMapper.Range range3 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T03:55:00"), + asLong("2019-07-01T04:05:00"), true, 
true); + RangeFieldMapper.Range range4 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T04:17:00"), + asLong("2019-07-01T04:19:00"), true, true); + RangeFieldMapper.Range range5 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T04:55:00"), + asLong("2019-07-01T05:05:00"), true, true); + + // No offset, just to make sure the ranges line up as expected + testCase( + new MatchAllDocsQuery(), + builder -> builder.fixedInterval(new DateHistogramInterval("1h")), + writer -> { + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5))))); + }, + histo -> { + assertEquals(3, histo.getBuckets().size()); + + assertEquals(asZDT("2019-07-01T03:00:00"), histo.getBuckets().get(0).getKey()); + assertEquals(3, histo.getBuckets().get(0).getDocCount()); + + assertEquals(asZDT("2019-07-01T04:00:00"), histo.getBuckets().get(1).getKey()); + assertEquals(3, histo.getBuckets().get(1).getDocCount()); + + assertEquals(asZDT("2019-07-01T05:00:00"), histo.getBuckets().get(2).getKey()); + assertEquals(1, histo.getBuckets().get(2).getDocCount()); + + assertTrue(AggregationInspectionHelper.hasValue(histo)); + } + ); + + // 10 minute offset should shift all data into one bucket + testCase( + new MatchAllDocsQuery(), + builder -> builder.fixedInterval(new DateHistogramInterval("1h")).offset("10m"), + writer -> { + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5))))); + }, + histo -> { + assertEquals(2, histo.getBuckets().size()); + + assertEquals(asZDT("2019-07-01T03:10:00"), histo.getBuckets().get(0).getKey()); + assertEquals(3, histo.getBuckets().get(0).getDocCount()); + + assertEquals(asZDT("2019-07-01T04:10:00"), histo.getBuckets().get(1).getKey()); + assertEquals(2, histo.getBuckets().get(1).getDocCount()); + + assertTrue(AggregationInspectionHelper.hasValue(histo)); + } + ); + } + + /* + * Test that when incrementing the rounded bucket key, offsets are correctly taken into account at the <1hour scale + */ + public void testNextRoundingValueOffsetHours() throws Exception { + RangeFieldMapper.Range range1 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T03:15:00"), + asLong("2019-07-01T03:20:00"), true, true); + RangeFieldMapper.Range range2 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T04:15:00"), + asLong("2019-07-01T04:20:00"), true, true); + RangeFieldMapper.Range range3 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T05:15:00"), + asLong("2019-07-01T05:20:00"), true, true); + 
RangeFieldMapper.Range range4 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T06:15:00"), + asLong("2019-07-01T06:20:00"), true, true); + RangeFieldMapper.Range range5 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T07:15:00"), + asLong("2019-07-01T07:20:00"), true, true); + RangeFieldMapper.Range range6 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T08:15:00"), + asLong("2019-07-01T08:20:00"), true, true); + + testCase( + new MatchAllDocsQuery(), + builder -> builder.fixedInterval(new DateHistogramInterval("1h")).offset("13m"), + writer -> { + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range6))))); + }, + histo -> { + assertEquals(6, histo.getBuckets().size()); + + assertEquals(asZDT("2019-07-01T03:13:00"), histo.getBuckets().get(0).getKey()); + assertEquals(1, histo.getBuckets().get(0).getDocCount()); + + assertEquals(asZDT("2019-07-01T04:13:00"), histo.getBuckets().get(1).getKey()); + assertEquals(1, histo.getBuckets().get(1).getDocCount()); + + assertEquals(asZDT("2019-07-01T05:13:00"), histo.getBuckets().get(2).getKey()); + assertEquals(1, histo.getBuckets().get(2).getDocCount()); + + assertEquals(asZDT("2019-07-01T06:13:00"), histo.getBuckets().get(3).getKey()); + assertEquals(1, histo.getBuckets().get(3).getDocCount()); + + assertEquals(asZDT("2019-07-01T07:13:00"), histo.getBuckets().get(4).getKey()); + assertEquals(1, histo.getBuckets().get(4).getDocCount()); + + assertEquals(asZDT("2019-07-01T08:13:00"), histo.getBuckets().get(5).getKey()); + assertEquals(1, histo.getBuckets().get(5).getDocCount()); + + assertTrue(AggregationInspectionHelper.hasValue(histo)); + } + ); + + testCase( + new MatchAllDocsQuery(), + builder -> builder.calendarInterval(DateHistogramInterval.HOUR).offset("13m"), + writer -> { + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range6))))); + }, + histo -> { + assertEquals(6, histo.getBuckets().size()); + + assertEquals(asZDT("2019-07-01T03:13:00"), histo.getBuckets().get(0).getKey()); + assertEquals(1, histo.getBuckets().get(0).getDocCount()); + + assertEquals(asZDT("2019-07-01T04:13:00"), histo.getBuckets().get(1).getKey()); + assertEquals(1, histo.getBuckets().get(1).getDocCount()); + + 
assertEquals(asZDT("2019-07-01T05:13:00"), histo.getBuckets().get(2).getKey()); + assertEquals(1, histo.getBuckets().get(2).getDocCount()); + + assertEquals(asZDT("2019-07-01T06:13:00"), histo.getBuckets().get(3).getKey()); + assertEquals(1, histo.getBuckets().get(3).getDocCount()); + + assertEquals(asZDT("2019-07-01T07:13:00"), histo.getBuckets().get(4).getKey()); + assertEquals(1, histo.getBuckets().get(4).getDocCount()); + + assertEquals(asZDT("2019-07-01T08:13:00"), histo.getBuckets().get(5).getKey()); + assertEquals(1, histo.getBuckets().get(5).getDocCount()); + + assertTrue(AggregationInspectionHelper.hasValue(histo)); + } + ); + } + + /* + * Test that when incrementing the rounded bucket key, offsets are correctly taken into account when interval is on date scale and + * offset is on time scale + */ + public void testNextRoundingValueOffsetDays() throws Exception { + RangeFieldMapper.Range range1 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T03:15:00"), + asLong("2019-07-01T03:20:00"), true, true); + RangeFieldMapper.Range range2 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-02T04:15:00"), + asLong("2019-07-02T04:20:00"), true, true); + RangeFieldMapper.Range range3 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-03T05:15:00"), + asLong("2019-07-03T05:20:00"), true, true); + RangeFieldMapper.Range range4 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-04T06:15:00"), + asLong("2019-07-04T06:20:00"), true, true); + RangeFieldMapper.Range range5 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-05T07:15:00"), + asLong("2019-07-05T07:20:00"), true, true); + RangeFieldMapper.Range range6 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-06T08:15:00"), + asLong("2019-07-06T08:20:00"), true, true); + + testCase( + new MatchAllDocsQuery(), + builder -> builder.fixedInterval(new DateHistogramInterval("1d")).offset("36h"), + writer -> { + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range6))))); + }, + histo -> { + assertEquals(6, histo.getBuckets().size()); + + assertEquals(asZDT("2019-06-30T12:00:00"), histo.getBuckets().get(0).getKey()); + assertEquals(1, histo.getBuckets().get(0).getDocCount()); + + assertEquals(asZDT("2019-07-01T12:00:00"), histo.getBuckets().get(1).getKey()); + assertEquals(1, histo.getBuckets().get(1).getDocCount()); + + assertEquals(asZDT("2019-07-02T12:00:00"), histo.getBuckets().get(2).getKey()); + assertEquals(1, histo.getBuckets().get(2).getDocCount()); + + assertEquals(asZDT("2019-07-03T12:00:00"), histo.getBuckets().get(3).getKey()); + assertEquals(1, histo.getBuckets().get(3).getDocCount()); + + assertEquals(asZDT("2019-07-04T12:00:00"), histo.getBuckets().get(4).getKey()); + assertEquals(1, histo.getBuckets().get(4).getDocCount()); + + assertEquals(asZDT("2019-07-05T12:00:00"), histo.getBuckets().get(5).getKey()); + assertEquals(1, 
histo.getBuckets().get(5).getDocCount()); + + assertTrue(AggregationInspectionHelper.hasValue(histo)); + } + ); + + testCase( + new MatchAllDocsQuery(), + builder -> builder.calendarInterval(DateHistogramInterval.DAY).offset("12h"), + writer -> { + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range6))))); + }, + histo -> { + assertEquals(6, histo.getBuckets().size()); + + assertEquals(asZDT("2019-06-30T12:00:00"), histo.getBuckets().get(0).getKey()); + assertEquals(1, histo.getBuckets().get(0).getDocCount()); + + assertEquals(asZDT("2019-07-01T12:00:00"), histo.getBuckets().get(1).getKey()); + assertEquals(1, histo.getBuckets().get(1).getDocCount()); + + assertEquals(asZDT("2019-07-02T12:00:00"), histo.getBuckets().get(2).getKey()); + assertEquals(1, histo.getBuckets().get(2).getDocCount()); + + assertEquals(asZDT("2019-07-03T12:00:00"), histo.getBuckets().get(3).getKey()); + assertEquals(1, histo.getBuckets().get(3).getDocCount()); + + assertEquals(asZDT("2019-07-04T12:00:00"), histo.getBuckets().get(4).getKey()); + assertEquals(1, histo.getBuckets().get(4).getDocCount()); + + assertEquals(asZDT("2019-07-05T12:00:00"), histo.getBuckets().get(5).getKey()); + assertEquals(1, histo.getBuckets().get(5).getDocCount()); + + assertTrue(AggregationInspectionHelper.hasValue(histo)); + } + ); + } + + public void testMinDocCount() throws Exception { + RangeFieldMapper.Range range1 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-01T12:14:36"), + asLong("2019-08-01T15:07:22"), true, true); + RangeFieldMapper.Range range2 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T12:14:36"), + asLong("2019-08-02T15:07:22"), true, true); + RangeFieldMapper.Range range3 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T12:14:36"), + asLong("2019-08-02T15:07:22"), true, true); + RangeFieldMapper.Range range4 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T12:14:36"), + asLong("2019-08-03T15:07:22"), true, true); + + // Guard case, make sure the agg buckets as expected without min doc count + testCase( + new MatchAllDocsQuery(), + builder -> builder.calendarInterval(DateHistogramInterval.DAY), + writer -> { + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4))))); + }, + histo -> { + assertEquals(3, histo.getBuckets().size()); + + assertEquals(asZDT("2019-08-01T00:00:00"), histo.getBuckets().get(0).getKey()); + assertEquals(1, histo.getBuckets().get(0).getDocCount()); + + 
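+ // range2, range3 and range4 all overlap 2019-08-02 while only range4 crosses into 2019-08-03, hence the + // remaining two buckets below count 3 and 1 documents respectively.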
assertEquals(asZDT("2019-08-02T00:00:00"), histo.getBuckets().get(1).getKey()); + assertEquals(3, histo.getBuckets().get(1).getDocCount()); + + assertEquals(asZDT("2019-08-03T00:00:00"), histo.getBuckets().get(2).getKey()); + assertEquals(1, histo.getBuckets().get(2).getDocCount()); + + assertTrue(AggregationInspectionHelper.hasValue(histo)); + } + ); + + testCase( + new MatchAllDocsQuery(), + builder -> builder.calendarInterval(DateHistogramInterval.DAY).minDocCount(2), + writer -> { + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4))))); + }, + histo -> { + assertEquals(1, histo.getBuckets().size()); + + assertEquals(asZDT("2019-08-02T00:00:00"), histo.getBuckets().get(0).getKey()); + assertEquals(3, histo.getBuckets().get(0).getDocCount()); + + assertTrue(AggregationInspectionHelper.hasValue(histo)); + } + ); + } + + public void testIntersectQuery() throws Exception { + RangeFieldMapper.Range range1 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T02:15:00"), + asLong("2019-08-02T02:45:00"), true, true); + RangeFieldMapper.Range range2 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T05:15:00"), + asLong("2019-08-02T05:45:00"), true, true); + + RangeFieldMapper.Range range3 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T03:15:00"), + asLong("2019-08-02T03:45:00"), true, true); + RangeFieldMapper.Range range4 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T04:15:00"), + asLong("2019-08-02T04:45:00"), true, true); + RangeFieldMapper.Range range5 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T03:30:00"), + asLong("2019-08-02T04:30:00"), true, true); + + RangeFieldMapper.Range range6 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T02:15:00"), + asLong("2019-08-02T03:45:00"), true, true); + RangeFieldMapper.Range range7 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T04:15:00"), + asLong("2019-08-02T05:45:00"), true, true); + RangeFieldMapper.Range range8 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T02:30:00"), + asLong("2019-08-02T05:30:00"), true, true); + + Query query = RangeType.DATE.dvRangeQuery(FIELD_NAME, BinaryDocValuesRangeQuery.QueryType.INTERSECTS, asLong("2019-08-02T03:00:00"), + asLong("2019-08-02T05:00:00"), true, true); + + + testCase( + query, + builder -> builder.calendarInterval(DateHistogramInterval.HOUR).minDocCount(2), + writer -> { + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, 
RangeType.DATE.encodeRanges(singleton(range6))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range7))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range8))))); + }, + histo -> { + assertEquals(4, histo.getBuckets().size()); + + assertEquals(asZDT("2019-08-02T02:00:00"), histo.getBuckets().get(0).getKey()); + assertEquals(2, histo.getBuckets().get(0).getDocCount()); + + assertEquals(asZDT("2019-08-02T03:00:00"), histo.getBuckets().get(1).getKey()); + assertEquals(4, histo.getBuckets().get(1).getDocCount()); + + assertEquals(asZDT("2019-08-02T04:00:00"), histo.getBuckets().get(2).getKey()); + assertEquals(4, histo.getBuckets().get(2).getDocCount()); + + assertEquals(asZDT("2019-08-02T05:00:00"), histo.getBuckets().get(3).getKey()); + assertEquals(2, histo.getBuckets().get(3).getDocCount()); + + assertTrue(AggregationInspectionHelper.hasValue(histo)); + } + ); + } + + public void testWithinQuery() throws Exception { + RangeFieldMapper.Range range1 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T02:15:00"), + asLong("2019-08-02T02:45:00"), true, true); + RangeFieldMapper.Range range2 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T05:15:00"), + asLong("2019-08-02T05:45:00"), true, true); + + RangeFieldMapper.Range range3 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T03:15:00"), + asLong("2019-08-02T03:45:00"), true, true); + RangeFieldMapper.Range range4 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T04:15:00"), + asLong("2019-08-02T04:45:00"), true, true); + RangeFieldMapper.Range range5 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T03:30:00"), + asLong("2019-08-02T04:30:00"), true, true); + + RangeFieldMapper.Range range6 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T02:15:00"), + asLong("2019-08-02T03:45:00"), true, true); + RangeFieldMapper.Range range7 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T04:15:00"), + asLong("2019-08-02T05:45:00"), true, true); + RangeFieldMapper.Range range8 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T02:30:00"), + asLong("2019-08-02T05:30:00"), true, true); + + Query query = RangeType.DATE.dvRangeQuery(FIELD_NAME, BinaryDocValuesRangeQuery.QueryType.WITHIN, asLong("2019-08-02T03:00:00"), + asLong("2019-08-02T05:00:00"), true, true); + + + testCase( + query, + builder -> builder.calendarInterval(DateHistogramInterval.HOUR).minDocCount(2), + writer -> { + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range6))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range7))))); + writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range8))))); + }, + 
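+ // Only range3, range4 and range5 fall entirely within the queried window [03:00, 05:00]; range5 straddles the + // 03:00/04:00 hour boundary, so each of the two hourly buckets asserted below holds two documents.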
histo -> { + assertEquals(2, histo.getBuckets().size()); + + assertEquals(asZDT("2019-08-02T03:00:00"), histo.getBuckets().get(0).getKey()); + assertEquals(2, histo.getBuckets().get(0).getDocCount()); + + assertEquals(asZDT("2019-08-02T04:00:00"), histo.getBuckets().get(1).getKey()); + assertEquals(2, histo.getBuckets().get(1).getDocCount()); + + assertTrue(AggregationInspectionHelper.hasValue(histo)); + } + ); + } + + private void testCase(Query query, + Consumer<DateHistogramAggregationBuilder> configure, + CheckedConsumer<RandomIndexWriter, IOException> buildIndex, + Consumer<InternalDateHistogram> verify) throws IOException { + MappedFieldType fieldType = new RangeFieldMapper.Builder(FIELD_NAME, RangeType.DATE).fieldType(); + fieldType.setName(FIELD_NAME); + final DateHistogramAggregationBuilder aggregationBuilder = new DateHistogramAggregationBuilder("_name").field(FIELD_NAME); + if (configure != null) { + configure.accept(aggregationBuilder); + } + testCase(aggregationBuilder, query, buildIndex, verify, fieldType); + } + + private void testCase(DateHistogramAggregationBuilder aggregationBuilder, Query query, + CheckedConsumer<RandomIndexWriter, IOException> buildIndex, Consumer<InternalDateHistogram> verify, + MappedFieldType fieldType) throws IOException { + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + buildIndex.accept(indexWriter); + indexWriter.close(); + + IndexReader indexReader = DirectoryReader.open(directory); + IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + + InternalDateHistogram histogram = searchAndReduce(indexSearcher, query, aggregationBuilder, fieldType); + verify.accept(histogram); + + indexReader.close(); + directory.close(); + } + + private static long asLong(String dateTime) { + return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + } + + private static ZonedDateTime asZDT(String dateTime) { + return Instant.ofEpochMilli(asLong(dateTime)).atZone(ZoneOffset.UTC); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/NumericHistogramAggregatorTests.java similarity index 89% rename from server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/NumericHistogramAggregatorTests.java index 624870f6e47a6..e3d1b931c71d5 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/NumericHistogramAggregatorTests.java @@ -34,10 +34,9 @@ import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; - import static org.hamcrest.Matchers.containsString; -public class HistogramAggregatorTests extends AggregatorTestCase { +public class NumericHistogramAggregatorTests extends AggregatorTestCase { public void testLongs() throws Exception { try (Directory dir = newDirectory(); @@ -300,6 +299,44 @@ public void testOffset() throws Exception { } } + public void testRandomOffset() throws Exception { + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + // Note, these values are carefully chosen to ensure that no matter what offset we pick, no two can end up in the 
same bucket + for (double value : new double[] {9.3, 3.2, -5}) { + Document doc = new Document(); + doc.add(new SortedNumericDocValuesField("field", NumericUtils.doubleToSortableLong(value))); + w.addDocument(doc); + } + + final double offset = randomDouble(); + final double interval = 5; + final double expectedOffset = offset % interval; + HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg") + .field("field") + .interval(interval) + .offset(offset); + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE); + fieldType.setName("field"); + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + assertEquals(3, histogram.getBuckets().size()); + + assertEquals(-10 + expectedOffset, histogram.getBuckets().get(0).getKey()); + assertEquals(1, histogram.getBuckets().get(0).getDocCount()); + + assertEquals(expectedOffset, histogram.getBuckets().get(1).getKey()); + assertEquals(1, histogram.getBuckets().get(1).getDocCount()); + + assertEquals(5 + expectedOffset, histogram.getBuckets().get(2).getKey()); + assertEquals(1, histogram.getBuckets().get(2).getDocCount()); + + assertTrue(AggregationInspectionHelper.hasValue(histogram)); + } + } + } + public void testExtendedBounds() throws Exception { try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/RangeHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/RangeHistogramAggregatorTests.java new file mode 100644 index 0000000000000..73dd41e640dda --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/RangeHistogramAggregatorTests.java @@ -0,0 +1,445 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.bucket.histogram; + +import org.apache.lucene.document.BinaryDocValuesField; +import org.apache.lucene.document.Document; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; +import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.junit.Rule; +import org.junit.rules.ExpectedException; + +import java.util.Collections; +import java.util.Set; + +public class RangeHistogramAggregatorTests extends AggregatorTestCase { + + @Rule + public final ExpectedException expectedException = ExpectedException.none(); + + public void testDoubles() throws Exception { + RangeType rangeType = RangeType.DOUBLE; + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] { + new RangeFieldMapper.Range(rangeType, 1.0D, 5.0D, true, true), // bucket 0 5 + new RangeFieldMapper.Range(rangeType, -3.1, 4.2, true, true), // bucket -5, 0 + new RangeFieldMapper.Range(rangeType, 4.2, 13.3, true, true), // bucket 0, 5, 10 + new RangeFieldMapper.Range(rangeType, 42.5, 49.3, true, true), // bucket 40, 45 + }) { + Document doc = new Document(); + BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range)); + doc.add(new BinaryDocValuesField("field", encodedRange)); + w.addDocument(doc); + } + + HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg") + .field("field") + .interval(5); + MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType(); + fieldType.setName("field"); + + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + assertEquals(6, histogram.getBuckets().size()); + + assertEquals(-5d, histogram.getBuckets().get(0).getKey()); + assertEquals(1, histogram.getBuckets().get(0).getDocCount()); + + assertEquals(0d, histogram.getBuckets().get(1).getKey()); + assertEquals(3, histogram.getBuckets().get(1).getDocCount()); + + assertEquals(5d, histogram.getBuckets().get(2).getKey()); + assertEquals(2, histogram.getBuckets().get(2).getDocCount()); + + assertEquals(10d, histogram.getBuckets().get(3).getKey()); + assertEquals(1, histogram.getBuckets().get(3).getDocCount()); + + assertEquals(40d, histogram.getBuckets().get(4).getKey()); + assertEquals(1, histogram.getBuckets().get(4).getDocCount()); + + assertEquals(45d, histogram.getBuckets().get(5).getKey()); + assertEquals(1, histogram.getBuckets().get(5).getDocCount()); + } + } + } + + public void testLongs() throws Exception { + RangeType rangeType = RangeType.LONG; + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] { + new RangeFieldMapper.Range(rangeType, 1L, 5L, true, true), // bucket 0 5 + new RangeFieldMapper.Range(rangeType, -3L, 4L, true, true), // bucket -5, 0 + new RangeFieldMapper.Range(rangeType, 4L, 13L, true, 
true), // bucket 0, 5, 10 + new RangeFieldMapper.Range(rangeType, 42L, 49L, true, true), // bucket 40, 45 + }) { + Document doc = new Document(); + BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range)); + doc.add(new BinaryDocValuesField("field", encodedRange)); + w.addDocument(doc); + } + + HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg") + .field("field") + .interval(5); + MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType(); + fieldType.setName("field"); + + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + assertEquals(6, histogram.getBuckets().size()); + + assertEquals(-5d, histogram.getBuckets().get(0).getKey()); + assertEquals(1, histogram.getBuckets().get(0).getDocCount()); + + assertEquals(0d, histogram.getBuckets().get(1).getKey()); + assertEquals(3, histogram.getBuckets().get(1).getDocCount()); + + assertEquals(5d, histogram.getBuckets().get(2).getKey()); + assertEquals(2, histogram.getBuckets().get(2).getDocCount()); + + assertEquals(10d, histogram.getBuckets().get(3).getKey()); + assertEquals(1, histogram.getBuckets().get(3).getDocCount()); + + assertEquals(40d, histogram.getBuckets().get(4).getKey()); + assertEquals(1, histogram.getBuckets().get(4).getDocCount()); + + assertEquals(45d, histogram.getBuckets().get(5).getKey()); + assertEquals(1, histogram.getBuckets().get(5).getDocCount()); + } + } + } + + public void testMultipleRanges() throws Exception { + RangeType rangeType = RangeType.LONG; + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + Document doc = new Document(); + BytesRef encodedRange = rangeType.encodeRanges(Set.of( + new RangeFieldMapper.Range(rangeType, 1L, 5L, true, true), // bucket 0 5 + new RangeFieldMapper.Range(rangeType, -3L, 4L, true, true), // bucket -5, 0 + new RangeFieldMapper.Range(rangeType, 4L, 13L, true, true), // bucket 0, 5, 10 + new RangeFieldMapper.Range(rangeType, 42L, 49L, true, true) // bucket 40, 45 + )); + doc.add(new BinaryDocValuesField("field", encodedRange)); + w.addDocument(doc); + + HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg") + .field("field") + .interval(5); + MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType(); + fieldType.setName("field"); + + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + assertEquals(6, histogram.getBuckets().size()); + + assertEquals(-5d, histogram.getBuckets().get(0).getKey()); + assertEquals(1, histogram.getBuckets().get(0).getDocCount()); + + assertEquals(0d, histogram.getBuckets().get(1).getKey()); + assertEquals(1, histogram.getBuckets().get(1).getDocCount()); + + assertEquals(5d, histogram.getBuckets().get(2).getKey()); + assertEquals(1, histogram.getBuckets().get(2).getDocCount()); + + assertEquals(10d, histogram.getBuckets().get(3).getKey()); + assertEquals(1, histogram.getBuckets().get(3).getDocCount()); + + assertEquals(40d, histogram.getBuckets().get(4).getKey()); + assertEquals(1, histogram.getBuckets().get(4).getDocCount()); + + assertEquals(45d, histogram.getBuckets().get(5).getKey()); + assertEquals(1, histogram.getBuckets().get(5).getDocCount()); + } + } + + } + + public void 
testMultipleRangesLotsOfOverlap() throws Exception { + RangeType rangeType = RangeType.LONG; + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + Document doc = new Document(); + BytesRef encodedRange = rangeType.encodeRanges(Set.of( + new RangeFieldMapper.Range(rangeType, 1L, 2L, true, true), // bucket 0 + new RangeFieldMapper.Range(rangeType, 1L, 4L, true, true), // bucket 0 + new RangeFieldMapper.Range(rangeType, 1L, 13L, true, true), // bucket 0, 5, 10 + new RangeFieldMapper.Range(rangeType, 1L, 5L, true, true) // bucket 0, 5 + )); + doc.add(new BinaryDocValuesField("field", encodedRange)); + w.addDocument(doc); + + HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg") + .field("field") + .interval(5); + MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType(); + fieldType.setName("field"); + + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + assertEquals(3, histogram.getBuckets().size()); + + assertEquals(0d, histogram.getBuckets().get(0).getKey()); + assertEquals(1, histogram.getBuckets().get(0).getDocCount()); + + assertEquals(5d, histogram.getBuckets().get(1).getKey()); + assertEquals(1, histogram.getBuckets().get(1).getDocCount()); + + assertEquals(10d, histogram.getBuckets().get(2).getKey()); + assertEquals(1, histogram.getBuckets().get(2).getDocCount()); + } + } + + } + + public void testLongsIrrationalInterval() throws Exception { + RangeType rangeType = RangeType.LONG; + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] { + new RangeFieldMapper.Range(rangeType, 1L, 5L, true, true), // bucket 0 5 + new RangeFieldMapper.Range(rangeType, -3L, 4L, true, true), // bucket -5, 0 + new RangeFieldMapper.Range(rangeType, 4L, 13L, true, true), // bucket 0, 5, 10 + }) { + Document doc = new Document(); + BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range)); + doc.add(new BinaryDocValuesField("field", encodedRange)); + w.addDocument(doc); + } + + HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg") + .field("field") + .interval(Math.PI); + MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType(); + fieldType.setName("field"); + + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + assertEquals(6, histogram.getBuckets().size()); + + assertEquals(-1 * Math.PI, histogram.getBuckets().get(0).getKey()); + assertEquals(1, histogram.getBuckets().get(0).getDocCount()); + + assertEquals(0 * Math.PI, histogram.getBuckets().get(1).getKey()); + assertEquals(2, histogram.getBuckets().get(1).getDocCount()); + + assertEquals(1 * Math.PI, histogram.getBuckets().get(2).getKey()); + assertEquals(3, histogram.getBuckets().get(2).getDocCount()); + + assertEquals(2 * Math.PI, histogram.getBuckets().get(3).getKey()); + assertEquals(1, histogram.getBuckets().get(3).getDocCount()); + + assertEquals(3 * Math.PI, histogram.getBuckets().get(4).getKey()); + assertEquals(1, histogram.getBuckets().get(4).getDocCount()); + + assertEquals(4 * Math.PI, histogram.getBuckets().get(5).getKey()); + assertEquals(1, 
histogram.getBuckets().get(5).getDocCount()); + } + } + } + + public void testMinDocCount() throws Exception { + RangeType rangeType = RangeType.LONG; + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] { + new RangeFieldMapper.Range(rangeType, -14L, -11L, true, true), // bucket -15 + new RangeFieldMapper.Range(rangeType, 0L, 9L, true, true), // bucket 0, 5 + new RangeFieldMapper.Range(rangeType, 6L, 12L, true, true), // bucket 5, 10 + new RangeFieldMapper.Range(rangeType, 13L, 14L, true, true), // bucket 10 + }) { + Document doc = new Document(); + BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range)); + doc.add(new BinaryDocValuesField("field", encodedRange)); + w.addDocument(doc); + } + + HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg") + .field("field") + .interval(5) + .minDocCount(2); + MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType(); + fieldType.setName("field"); + + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + InternalHistogram histogram = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + assertEquals(2, histogram.getBuckets().size()); + + assertEquals(5d, histogram.getBuckets().get(0).getKey()); + assertEquals(2, histogram.getBuckets().get(0).getDocCount()); + + assertEquals(10d, histogram.getBuckets().get(1).getKey()); + assertEquals(2, histogram.getBuckets().get(1).getDocCount()); + } + } + } + + public void testOffset() throws Exception { + RangeType rangeType = RangeType.DOUBLE; + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] { + new RangeFieldMapper.Range(rangeType, 1.0D, 5.0D, true, true), // bucket -1, 4 + new RangeFieldMapper.Range(rangeType, -3.1, 4.2, true, true), // bucket -6 -1 4 + new RangeFieldMapper.Range(rangeType, 4.2, 13.3, true, true), // bucket 4, 9 + new RangeFieldMapper.Range(rangeType, 42.5, 49.3, true, true), // bucket 39, 44, 49 + }) { + Document doc = new Document(); + BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range)); + doc.add(new BinaryDocValuesField("field", encodedRange)); + w.addDocument(doc); + } + + HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg") + .field("field") + .interval(5) + .offset(4); + MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType(); + fieldType.setName("field"); + + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + //assertEquals(7, histogram.getBuckets().size()); + + assertEquals(-6d, histogram.getBuckets().get(0).getKey()); + assertEquals(1, histogram.getBuckets().get(0).getDocCount()); + + assertEquals(-1d, histogram.getBuckets().get(1).getKey()); + assertEquals(2, histogram.getBuckets().get(1).getDocCount()); + + assertEquals(4d, histogram.getBuckets().get(2).getKey()); + assertEquals(3, histogram.getBuckets().get(2).getDocCount()); + + assertEquals(9d, histogram.getBuckets().get(3).getKey()); + assertEquals(1, histogram.getBuckets().get(3).getDocCount()); + + assertEquals(39d, histogram.getBuckets().get(4).getKey()); + assertEquals(1, 
histogram.getBuckets().get(4).getDocCount()); + + assertEquals(44d, histogram.getBuckets().get(5).getKey()); + assertEquals(1, histogram.getBuckets().get(5).getDocCount()); + + assertEquals(49d, histogram.getBuckets().get(6).getKey()); + assertEquals(1, histogram.getBuckets().get(6).getDocCount()); + } + } + } + + public void testOffsetGtInterval() throws Exception { + RangeType rangeType = RangeType.DOUBLE; + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] { + new RangeFieldMapper.Range(rangeType, 1.0D, 5.0D, true, true), // bucket 0 5 + new RangeFieldMapper.Range(rangeType, -3.1, 4.2, true, true), // bucket -5, 0 + new RangeFieldMapper.Range(rangeType, 4.2, 13.3, true, true), // bucket 0, 5, 10 + new RangeFieldMapper.Range(rangeType, 42.5, 49.3, true, true), // bucket 40, 45 + }) { + Document doc = new Document(); + BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range)); + doc.add(new BinaryDocValuesField("field", encodedRange)); + w.addDocument(doc); + } + + // I'd like to randomize the offset here, like I did in the test for the numeric side, but there's no way I can think of to + // construct the intervals such that they wouldn't "slosh" between buckets. + final double offset = 20; + final double interval = 5; + final double expectedOffset = offset % interval; + + HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg") + .field("field") + .interval(interval) + .offset(offset); + MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType(); + fieldType.setName("field"); + + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + assertEquals(6, histogram.getBuckets().size()); + + assertEquals(-5d + expectedOffset, histogram.getBuckets().get(0).getKey()); + assertEquals(1, histogram.getBuckets().get(0).getDocCount()); + + assertEquals(0d + expectedOffset, histogram.getBuckets().get(1).getKey()); + assertEquals(3, histogram.getBuckets().get(1).getDocCount()); + + assertEquals(5d + expectedOffset, histogram.getBuckets().get(2).getKey()); + assertEquals(2, histogram.getBuckets().get(2).getDocCount()); + + assertEquals(10d + expectedOffset, histogram.getBuckets().get(3).getKey()); + assertEquals(1, histogram.getBuckets().get(3).getDocCount()); + + assertEquals(40d + expectedOffset, histogram.getBuckets().get(4).getKey()); + assertEquals(1, histogram.getBuckets().get(4).getDocCount()); + + assertEquals(45d + expectedOffset, histogram.getBuckets().get(5).getKey()); + assertEquals(1, histogram.getBuckets().get(5).getDocCount()); + } + } + } + + + public void testIpRangesUnsupported() throws Exception { + RangeType rangeType = RangeType.IP; + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + Document doc = new Document(); + BytesRef encodedRange = + rangeType.encodeRanges(Collections.singleton(new RangeFieldMapper.Range(rangeType, InetAddresses.forString("10.0.0.1"), + InetAddresses.forString("10.0.0.10"), true, true))); + doc.add(new BinaryDocValuesField("field", encodedRange)); + w.addDocument(doc); + + HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg") + .field("field") + .interval(5); + MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType(); 
+ fieldType.setName("field"); + + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + expectedException.expect(IllegalArgumentException.class); + search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + } + } + + } + +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregatorTests.java index daaeb94d8fae9..dbc3ac3b490c2 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregatorTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.missing; +import org.apache.lucene.document.BinaryDocValuesField; import org.apache.lucene.document.Document; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; @@ -30,11 +31,13 @@ import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; -import org.elasticsearch.search.aggregations.support.ValueType; import java.io.IOException; +import java.util.Collections; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; @@ -85,6 +88,34 @@ public void testMatchSparse() throws IOException { }); } + public void testMatchSparseRangeField() throws IOException { + int numDocs = randomIntBetween(100, 200); + final AtomicInteger count = new AtomicInteger(); + final String fieldName = "field"; + RangeType rangeType = RangeType.DOUBLE; + final BinaryDocValuesField field = new BinaryDocValuesField(fieldName, rangeType.encodeRanges(Collections.singleton( + new RangeFieldMapper.Range(rangeType, 1.0D, 5.0D, true, true)))); + MappedFieldType fieldType = new RangeFieldMapper.Builder(fieldName, rangeType).fieldType(); + fieldType.setName(fieldName); + testBothCases(numDocs, + fieldName, + Queries.newMatchAllQuery(), + doc -> { + if (randomBoolean()) { + doc.add(new SortedNumericDocValuesField("another_field", randomLong())); + count.incrementAndGet(); + } else { + doc.add(field); + } + }, + internalMissing -> { + assertEquals(internalMissing.getDocCount(), count.get()); + count.set(0); + assertTrue(AggregationInspectionHelper.hasValue(internalMissing)); + }, fieldType); + } + + public void testMissingField() throws IOException { int numDocs = randomIntBetween(10, 20); testBothCases(numDocs, @@ -104,8 +135,22 @@ private void testBothCases(int numDocs, Query query, Consumer consumer, Consumer verify) throws IOException { - executeTestCase(numDocs, fieldName, query, consumer, verify, false); - executeTestCase(numDocs, fieldName, query, consumer, verify, true); + NumberFieldMapper.Builder mapperBuilder = new NumberFieldMapper.Builder("_name", + NumberFieldMapper.NumberType.LONG); + final MappedFieldType fieldType = mapperBuilder.fieldType(); + fieldType.setHasDocValues(true); + fieldType.setName(fieldName); + testBothCases(numDocs, fieldName, query, consumer, verify, fieldType); + } + + private void testBothCases(int numDocs, + String fieldName, + Query query, 
+ Consumer<Document> consumer, + Consumer<InternalMissing> verify, + MappedFieldType fieldType) throws IOException { + executeTestCase(numDocs, fieldName, query, consumer, verify, false, fieldType); + executeTestCase(numDocs, fieldName, query, consumer, verify, true, fieldType); } @@ -114,7 +159,8 @@ private void executeTestCase(int numDocs, Query query, Consumer<Document> consumer, Consumer<InternalMissing> verify, - boolean reduced) throws IOException { + boolean reduced, + MappedFieldType fieldType) throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { Document document = new Document(); @@ -131,16 +177,9 @@ private void executeTestCase(int numDocs, try (IndexReader indexReader = DirectoryReader.open(directory)) { IndexSearcher indexSearcher = newSearcher(indexReader, true, true); - MissingAggregationBuilder builder = - new MissingAggregationBuilder("_name", ValueType.LONG); + MissingAggregationBuilder builder = new MissingAggregationBuilder("_name", null); builder.field(fieldName); - NumberFieldMapper.Builder mapperBuilder = new NumberFieldMapper.Builder("_name", - NumberFieldMapper.NumberType.LONG); - MappedFieldType fieldType = mapperBuilder.fieldType(); - fieldType.setHasDocValues(true); - fieldType.setName(builder.field()); - InternalMissing missing; if (reduced) { missing = searchAndReduce(indexSearcher, query, builder, fieldType); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorTests.java index 72403f8f7820b..6cb4826ead2c4 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.bucket.significant; import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.document.BinaryDocValuesField; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.StoredField; @@ -40,9 +41,12 @@ import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; +import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; import org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregatorFactory.ExecutionMode; import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; @@ -50,6 +54,7 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.function.Function; @@ -256,6 +261,44 @@ public void testUnmapped() throws IOException { } } + /** + * Uses the significant terms aggregation on a range field + */ + public void testRangeField() throws IOException { + RangeType rangeType = RangeType.DOUBLE; + final RangeFieldMapper.Range range1 = 
new RangeFieldMapper.Range(rangeType, 1.0D, 5.0D, true, true); + final RangeFieldMapper.Range range2 = new RangeFieldMapper.Range(rangeType, 6.0D, 10.0D, true, true); + final String fieldName = "rangeField"; + MappedFieldType fieldType = new RangeFieldMapper.Builder(fieldName, rangeType).fieldType(); + fieldType.setName(fieldName); + + IndexWriterConfig indexWriterConfig = newIndexWriterConfig(); + indexWriterConfig.setMaxBufferedDocs(100); + indexWriterConfig.setRAMBufferSizeMB(100); // flush on open to have a single segment + try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, indexWriterConfig)) { + for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] { + new RangeFieldMapper.Range(rangeType, 1L, 5L, true, true), + new RangeFieldMapper.Range(rangeType, -3L, 4L, true, true), + new RangeFieldMapper.Range(rangeType, 4L, 13L, true, true), + new RangeFieldMapper.Range(rangeType, 42L, 49L, true, true), + }) { + Document doc = new Document(); + BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range)); + doc.add(new BinaryDocValuesField("field", encodedRange)); + w.addDocument(doc); + } + + // Attempt aggregation on range field + SignificantTermsAggregationBuilder sigAgg = new SignificantTermsAggregationBuilder("sig_text", null).field(fieldName); + sigAgg.executionHint(randomExecutionHint()); + + try (IndexReader reader = DirectoryReader.open(w)) { + IndexSearcher indexSearcher = newIndexSearcher(reader); + expectThrows(AggregationExecutionException.class, () -> createAggregator(sigAgg, indexSearcher, fieldType)); + } + } + } + public void testFieldAlias() throws IOException { TextFieldType textFieldType = new TextFieldType(); textFieldType.setName("text"); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java index a0d48b7ab778f..ef678df3dca72 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.search.aggregations.bucket.terms; +import org.apache.lucene.document.BinaryDocValuesField; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.LongPoint; @@ -44,11 +45,14 @@ import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorTestCase; @@ -285,6 +289,36 @@ public void testUnmapped() throws Exception { } } + public void testRangeField() throws Exception { + RangeType rangeType = RangeType.DOUBLE; + try (Directory directory = newDirectory()) { + try (RandomIndexWriter indexWriter = new 
RandomIndexWriter(random(), directory)) { + for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] { + new RangeFieldMapper.Range(rangeType, 1.0D, 5.0D, true, true), // bucket 0 5 + new RangeFieldMapper.Range(rangeType, -3.1, 4.2, true, true), // bucket -5, 0 + new RangeFieldMapper.Range(rangeType, 4.2, 13.3, true, true), // bucket 0, 5, 10 + new RangeFieldMapper.Range(rangeType, 42.5, 49.3, true, true), // bucket 40, 45 + }) { + Document doc = new Document(); + BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range)); + doc.add(new BinaryDocValuesField("field", encodedRange)); + indexWriter.addDocument(doc); + } + MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType(); + fieldType.setName("field"); + + try (IndexReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) { + IndexSearcher indexSearcher = newIndexSearcher(indexReader); + RareTermsAggregationBuilder aggregationBuilder = new RareTermsAggregationBuilder("_name", null) + .field("field"); + expectThrows(AggregationExecutionException.class, + () -> createAggregator(aggregationBuilder, indexSearcher, fieldType)); + } + } + } + } + + public void testNestedTerms() throws IOException { Query query = new MatchAllDocsQuery(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index 18e633617e35e..727c3ea3a87ae 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.search.aggregations.bucket.terms; +import org.apache.lucene.document.BinaryDocValuesField; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.InetAddressPoint; @@ -49,6 +50,8 @@ import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; import org.elasticsearch.index.mapper.Uid; @@ -85,6 +88,7 @@ import java.io.IOException; import java.net.InetAddress; import java.util.ArrayList; +import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.Iterator; @@ -888,6 +892,34 @@ public void testUnmappedWithMissing() throws Exception { } } + public void testRangeField() throws Exception { + try (Directory directory = newDirectory()) { + double start = randomDouble(); + double end = randomDoubleBetween(Math.nextUp(start), Double.MAX_VALUE, false); + RangeType rangeType = RangeType.DOUBLE; + final RangeFieldMapper.Range range = new RangeFieldMapper.Range(rangeType, start, end, true, true); + final String fieldName = "field"; + final BinaryDocValuesField field = new BinaryDocValuesField(fieldName, rangeType.encodeRanges(Collections.singleton(range))); + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + Document document = new Document(); + document.add(field); + indexWriter.addDocument(document); + try (IndexReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) { + MappedFieldType 
fieldType = new RangeFieldMapper.Builder(fieldName, rangeType).fieldType(); + fieldType.setHasDocValues(true); + fieldType.setName(fieldName); + + IndexSearcher indexSearcher = newIndexSearcher(indexReader); + TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name", null) .field(fieldName); + // Note - other places we throw IllegalArgumentException + expectThrows(AggregationExecutionException.class, () -> { + createAggregator(aggregationBuilder, indexSearcher, fieldType); + }); + } + } + } + } + public void testGeoPointField() throws Exception { try (Directory directory = newDirectory()) { GeoPoint point = RandomGeoGenerator.randomPoint(random()); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java index 6cc2aa1a4593e..53a01d08bdcf2 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.apache.lucene.document.BinaryDocValuesField; import org.apache.lucene.document.IntPoint; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; @@ -34,12 +35,14 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; -import org.elasticsearch.search.aggregations.support.ValueType; import java.io.IOException; import java.util.Arrays; +import java.util.Set; import java.util.function.Consumer; import static java.util.Collections.singleton; @@ -54,6 +57,25 @@ public void testNoDocs() throws IOException { }); } + public void testRangeFieldValues() throws IOException { + RangeType rangeType = RangeType.DOUBLE; + final RangeFieldMapper.Range range1 = new RangeFieldMapper.Range(rangeType, 1.0D, 5.0D, true, true); + final RangeFieldMapper.Range range2 = new RangeFieldMapper.Range(rangeType, 6.0D, 10.0D, true, true); + final String fieldName = "rangeField"; + MappedFieldType fieldType = new RangeFieldMapper.Builder(fieldName, rangeType).fieldType(); + fieldType.setName(fieldName); + final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name", null).field(fieldName); + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(singleton(range1))))); + iw.addDocument(singleton(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(singleton(range1))))); + iw.addDocument(singleton(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(singleton(range2))))); + iw.addDocument(singleton(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(Set.of(range1, range2))))); + }, card -> { + assertEquals(3.0, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); + }, fieldType); + } + public void testNoMatchingField() throws IOException { testCase(new MatchAllDocsQuery(), iw -> { iw.addDocument(singleton(new 
SortedNumericDocValuesField("wrong_number", 7))); @@ -155,8 +177,7 @@ private void testCase(Query query, CheckedConsumer { + iw.addDocument(singleton(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(singleton(range1))))); + iw.addDocument(singleton(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(singleton(range1))))); + iw.addDocument(singleton(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(singleton(range2))))); + iw.addDocument(singleton(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(Set.of(range1, range2))))); + }, count -> { + assertEquals(4.0, count.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(count)); + }, fieldType); + } + private void testCase(Query query, ValueType valueType, CheckedConsumer indexer, @@ -215,6 +238,8 @@ private static MappedFieldType createMappedFieldType(ValueType valueType) { return new IpFieldMapper.Builder("_name").fieldType(); case GEOPOINT: return new GeoPointFieldMapper.Builder("_name").fieldType(); + case RANGE: + return new RangeFieldMapper.Builder("_name", RangeType.DOUBLE).fieldType(); default: throw new IllegalArgumentException("Test does not support value type [" + valueType + "]"); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceTypeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceTypeTests.java index d2f73aab3aaa3..42c276e0c4efb 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceTypeTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceTypeTests.java @@ -37,6 +37,7 @@ public void testValidOrdinals() { assertThat(ValuesSourceType.NUMERIC.ordinal(), equalTo(1)); assertThat(ValuesSourceType.BYTES.ordinal(), equalTo(2)); assertThat(ValuesSourceType.GEOPOINT.ordinal(), equalTo(3)); + assertThat(ValuesSourceType.RANGE.ordinal(), equalTo(4)); } @Override @@ -45,6 +46,7 @@ public void testFromString() { assertThat(ValuesSourceType.fromString("numeric"), equalTo(ValuesSourceType.NUMERIC)); assertThat(ValuesSourceType.fromString("bytes"), equalTo(ValuesSourceType.BYTES)); assertThat(ValuesSourceType.fromString("geopoint"), equalTo(ValuesSourceType.GEOPOINT)); + assertThat(ValuesSourceType.fromString("range"), equalTo(ValuesSourceType.RANGE)); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> ValuesSourceType.fromString("does_not_exist")); assertThat(e.getMessage(), equalTo("No enum constant org.elasticsearch.search.aggregations.support.ValuesSourceType.DOES_NOT_EXIST")); @@ -57,6 +59,7 @@ public void testReadFrom() throws IOException { assertReadFromStream(1, ValuesSourceType.NUMERIC); assertReadFromStream(2, ValuesSourceType.BYTES); assertReadFromStream(3, ValuesSourceType.GEOPOINT); + assertReadFromStream(4, ValuesSourceType.RANGE); } @Override @@ -65,5 +68,6 @@ public void testWriteTo() throws IOException { assertWriteToStream(ValuesSourceType.NUMERIC, 1); assertWriteToStream(ValuesSourceType.BYTES, 2); assertWriteToStream(ValuesSourceType.GEOPOINT, 3); + assertWriteToStream(ValuesSourceType.RANGE, 4); } } diff --git a/server/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java b/server/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java index 92bf4d6acad2c..197e82ea3a47b 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java +++ 
b/server/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java @@ -100,7 +100,7 @@ public void testSimpleProfile() { ProfileResult histoAggResult = aggProfileResultsList.get(0); assertThat(histoAggResult, notNullValue()); assertThat(histoAggResult.getQueryName(), - equalTo("HistogramAggregator")); + equalTo("NumericHistogramAggregator")); assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); assertThat(histoAggResult.getProfiledChildren().size(), equalTo(0)); assertThat(histoAggResult.getTime(), greaterThan(0L)); @@ -145,7 +145,7 @@ public void testMultiLevelProfile() { ProfileResult histoAggResult = aggProfileResultsList.get(0); assertThat(histoAggResult, notNullValue()); assertThat(histoAggResult.getQueryName(), - equalTo("HistogramAggregator")); + equalTo("NumericHistogramAggregator")); assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); assertThat(histoAggResult.getTime(), greaterThan(0L)); Map histoBreakdown = histoAggResult.getTimeBreakdown(); @@ -215,7 +215,7 @@ public void testMultiLevelProfileBreadthFirst() { ProfileResult histoAggResult = aggProfileResultsList.get(0); assertThat(histoAggResult, notNullValue()); assertThat(histoAggResult.getQueryName(), - equalTo("HistogramAggregator")); + equalTo("NumericHistogramAggregator")); assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); assertThat(histoAggResult.getTime(), greaterThan(0L)); Map histoBreakdown = histoAggResult.getTimeBreakdown(); @@ -346,7 +346,7 @@ public void testComplexProfile() { ProfileResult histoAggResult = aggProfileResultsList.get(0); assertThat(histoAggResult, notNullValue()); assertThat(histoAggResult.getQueryName(), - equalTo("HistogramAggregator")); + equalTo("NumericHistogramAggregator")); assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); assertThat(histoAggResult.getTime(), greaterThan(0L)); Map histoBreakdown = histoAggResult.getTimeBreakdown(); diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java index 192e54506495f..2190e573707e6 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java @@ -606,15 +606,16 @@ public void testDisableTopScoreCollection() throws Exception { .build(); context.parsedQuery(new ParsedQuery(q)); - context.setSize(10); + context.setSize(3); + context.trackTotalHitsUpTo(3); + TopDocsCollectorContext topDocsContext = TopDocsCollectorContext.createTopDocsCollectorContext(context, reader, false); assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.COMPLETE); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertEquals(5, context.queryResult().topDocs().topDocs.totalHits.value); assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.EQUAL_TO); - assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(5)); - + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3)); context.sort(new SortAndFormats(new Sort(new SortField("other", SortField.Type.INT)), new DocValueFormat[] { DocValueFormat.RAW })); @@ -623,7 +624,7 @@ public void testDisableTopScoreCollection() throws Exception { assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.COMPLETE_NO_SCORES); QueryPhase.execute(context, contextSearcher, 
checkCancelled -> {}); assertEquals(5, context.queryResult().topDocs().topDocs.totalHits.value); - assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(5)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3)); assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.EQUAL_TO); reader.close(); diff --git a/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java b/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java index 882b3cc4b1e86..c00760899c4d2 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java @@ -116,8 +116,8 @@ public void testBlobStoreOperations() throws IOException { xContentRegistry(), true); // Write blobs in different formats - checksumSMILE.write(new BlobObj("checksum smile"), blobContainer, "check-smile"); - checksumSMILECompressed.write(new BlobObj("checksum smile compressed"), blobContainer, "check-smile-comp"); + checksumSMILE.write(new BlobObj("checksum smile"), blobContainer, "check-smile", true); + checksumSMILECompressed.write(new BlobObj("checksum smile compressed"), blobContainer, "check-smile-comp", true); // Assert that all checksum blobs can be read by all formats assertEquals(checksumSMILE.read(blobContainer, "check-smile").getText(), "checksum smile"); @@ -136,8 +136,8 @@ public void testCompressionIsApplied() throws IOException { ChecksumBlobStoreFormat checksumFormatComp = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent, xContentRegistry(), true); BlobObj blobObj = new BlobObj(veryRedundantText.toString()); - checksumFormatComp.write(blobObj, blobContainer, "blob-comp"); - checksumFormat.write(blobObj, blobContainer, "blob-not-comp"); + checksumFormatComp.write(blobObj, blobContainer, "blob-comp", true); + checksumFormat.write(blobObj, blobContainer, "blob-not-comp", true); Map blobs = blobContainer.listBlobsByPrefix("blob-"); assertEquals(blobs.size(), 2); assertThat(blobs.get("blob-not-comp").length(), greaterThan(blobs.get("blob-comp").length())); @@ -150,7 +150,7 @@ public void testBlobCorruption() throws IOException { BlobObj blobObj = new BlobObj(testString); ChecksumBlobStoreFormat checksumFormat = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent, xContentRegistry(), randomBoolean()); - checksumFormat.write(blobObj, blobContainer, "test-path"); + checksumFormat.write(blobObj, blobContainer, "test-path", true); assertEquals(checksumFormat.read(blobContainer, "test-path").getText(), testString); randomCorruption(blobContainer, "test-path"); try { diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 6ef892a74bb5a..b11cf9107e30b 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -47,7 +47,6 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.SnapshotsInProgress; @@ -110,7 +109,6 @@ import java.util.List; 
import java.util.Locale; import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -2538,28 +2536,15 @@ public void testCloseOrDeleteIndexDuringSnapshot() throws Exception { Client client = client(); - boolean allowPartial = randomBoolean(); logger.info("--> creating repository"); - // only block on repo init if we have partial snapshot or we run into deadlock when acquiring shard locks for index deletion/closing - boolean initBlocking = allowPartial || randomBoolean(); - if (initBlocking) { - assertAcked(client.admin().cluster().preparePutRepository("test-repo") - .setType("mock").setSettings(Settings.builder() - .put("location", randomRepoPath()) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put("block_on_init", true) - )); - } else { - assertAcked(client.admin().cluster().preparePutRepository("test-repo") - .setType("mock").setSettings(Settings.builder() - .put("location", randomRepoPath()) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put("block_on_data", true) - )); - } + assertAcked(client.admin().cluster().preparePutRepository("test-repo") + .setType("mock").setSettings(Settings.builder() + .put("location", randomRepoPath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put("block_on_data", true))); + createIndex("test-idx-1", "test-idx-2", "test-idx-3"); ensureGreen(); @@ -2575,70 +2560,40 @@ public void testCloseOrDeleteIndexDuringSnapshot() throws Exception { assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().getTotalHits().value, equalTo(100L)); assertThat(client.prepareSearch("test-idx-3").setSize(0).get().getHits().getTotalHits().value, equalTo(100L)); - logger.info("--> snapshot allow partial {}", allowPartial); + logger.info("--> snapshot"); ActionFuture future = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") - .setIndices("test-idx-*").setWaitForCompletion(true).setPartial(allowPartial).execute(); + .setIndices("test-idx-*").setWaitForCompletion(true).setPartial(false).execute(); logger.info("--> wait for block to kick in"); - if (initBlocking) { - waitForBlock(internalCluster().getMasterName(), "test-repo", TimeValue.timeValueMinutes(1)); - } else { - waitForBlockOnAnyDataNode("test-repo", TimeValue.timeValueMinutes(1)); - } - boolean closedOnPartial = false; + waitForBlockOnAnyDataNode("test-repo", TimeValue.timeValueMinutes(1)); + try { - if (allowPartial) { - // partial snapshots allow close / delete operations - if (randomBoolean()) { - logger.info("--> delete index while partial snapshot is running"); + // non-partial snapshots do not allow close / delete operations on indices where snapshot has not been completed + if (randomBoolean()) { + try { + logger.info("--> delete index while non-partial snapshot is running"); client.admin().indices().prepareDelete("test-idx-1").get(); - } else { - logger.info("--> close index while partial snapshot is running"); - closedOnPartial = true; - client.admin().indices().prepareClose("test-idx-1").get(); + fail("Expected deleting index to fail during snapshot"); + } catch (SnapshotInProgressException e) { + assertThat(e.getMessage(), containsString("Cannot delete indices that are being snapshotted: [[test-idx-1/")); } } else { - // non-partial snapshots 
do not allow close / delete operations on indices where snapshot has not been completed - if (randomBoolean()) { - try { - logger.info("--> delete index while non-partial snapshot is running"); - client.admin().indices().prepareDelete("test-idx-1").get(); - fail("Expected deleting index to fail during snapshot"); - } catch (SnapshotInProgressException e) { - assertThat(e.getMessage(), containsString("Cannot delete indices that are being snapshotted: [[test-idx-1/")); - } - } else { - try { - logger.info("--> close index while non-partial snapshot is running"); - client.admin().indices().prepareClose("test-idx-1").get(); - fail("Expected closing index to fail during snapshot"); - } catch (SnapshotInProgressException e) { - assertThat(e.getMessage(), containsString("Cannot close indices that are being snapshotted: [[test-idx-1/")); - } + try { + logger.info("--> close index while non-partial snapshot is running"); + client.admin().indices().prepareClose("test-idx-1").get(); + fail("Expected closing index to fail during snapshot"); + } catch (SnapshotInProgressException e) { + assertThat(e.getMessage(), containsString("Cannot close indices that are being snapshotted: [[test-idx-1/")); } } } finally { - if (initBlocking) { - logger.info("--> unblock running master node"); - unblockNode("test-repo", internalCluster().getMasterName()); - } else { - logger.info("--> unblock all data nodes"); - unblockAllDataNodes("test-repo"); - } + logger.info("--> unblock all data nodes"); + unblockAllDataNodes("test-repo"); } logger.info("--> waiting for snapshot to finish"); CreateSnapshotResponse createSnapshotResponse = future.get(); - if (allowPartial && closedOnPartial == false) { - logger.info("Deleted/Closed index during snapshot, but allow partial"); - assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo((SnapshotState.PARTIAL))); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat(createSnapshotResponse.getSnapshotInfo().failedShards(), greaterThan(0)); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), - lessThan(createSnapshotResponse.getSnapshotInfo().totalShards())); - } else { - logger.info("Snapshot successfully completed"); - assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo((SnapshotState.SUCCESS))); - } + logger.info("Snapshot successfully completed"); + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo((SnapshotState.SUCCESS))); } public void testCloseIndexDuringRestore() throws Exception { @@ -3493,7 +3448,7 @@ public void testSnapshotSucceedsAfterSnapshotFailure() throws Exception { assertThat(shardFailure.reason(), containsString("Random IOException")); } } - } catch (SnapshotCreationException | RepositoryException ex) { + } catch (SnapshotException | RepositoryException ex) { // sometimes, the snapshot will fail with a top level I/O exception assertThat(ExceptionsHelper.stackTrace(ex), containsString("Random IOException")); } @@ -3856,76 +3811,6 @@ public void testParallelRestoreOperationsFromSingleSnapshot() throws Exception { assertThat(client.prepareGet(restoredIndexName2, typeName, sameSourceIndex ? 
docId : docId2).get().isExists(), equalTo(true)); } - public void testAbortedSnapshotDuringInitDoesNotStart() throws Exception { - final Client client = client(); - - // Blocks on initialization - assertAcked(client.admin().cluster().preparePutRepository("repository") - .setType("mock").setSettings(Settings.builder() - .put("location", randomRepoPath()) - .put("block_on_init", true) - )); - - createIndex("test-idx"); - final int nbDocs = scaledRandomIntBetween(100, 500); - for (int i = 0; i < nbDocs; i++) { - index("test-idx", "_doc", Integer.toString(i), "foo", "bar" + i); - } - flushAndRefresh("test-idx"); - assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits().value, equalTo((long) nbDocs)); - - // Create a snapshot - client.admin().cluster().prepareCreateSnapshot("repository", "snap").execute(); - waitForBlock(internalCluster().getMasterName(), "repository", TimeValue.timeValueMinutes(1)); - boolean blocked = true; - - // Snapshot is initializing (and is blocked at this stage) - SnapshotsStatusResponse snapshotsStatus = client.admin().cluster().prepareSnapshotStatus("repository").setSnapshots("snap").get(); - assertThat(snapshotsStatus.getSnapshots().iterator().next().getState(), equalTo(State.INIT)); - - final List states = new CopyOnWriteArrayList<>(); - final ClusterStateListener listener = event -> { - SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE); - for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { - if ("snap".equals(entry.snapshot().getSnapshotId().getName())) { - states.add(entry.state()); - } - } - }; - - try { - // Record the upcoming states of the snapshot on all nodes - internalCluster().getInstances(ClusterService.class).forEach(clusterService -> clusterService.addListener(listener)); - - // Delete the snapshot while it is being initialized - ActionFuture delete = client.admin().cluster().prepareDeleteSnapshot("repository", "snap").execute(); - - // The deletion must set the snapshot in the ABORTED state - assertBusy(() -> { - SnapshotsStatusResponse status = - client.admin().cluster().prepareSnapshotStatus("repository").setSnapshots("snap").get(); - assertThat(status.getSnapshots().iterator().next().getState(), equalTo(State.ABORTED)); - }); - - // Now unblock the repository - unblockNode("repository", internalCluster().getMasterName()); - blocked = false; - - assertAcked(delete.get()); - expectThrows(SnapshotMissingException.class, () -> - client.admin().cluster().prepareGetSnapshots("repository").setSnapshots("snap").get() - .getSnapshots("repository")); - - assertFalse("Expecting snapshot state to be updated", states.isEmpty()); - assertFalse("Expecting snapshot to be aborted and not started at all", states.contains(State.STARTED)); - } finally { - internalCluster().getInstances(ClusterService.class).forEach(clusterService -> clusterService.removeListener(listener)); - if (blocked) { - unblockNode("repository", internalCluster().getMasterName()); - } - } - } - public void testRestoreIncreasesPrimaryTerms() { final String indexName = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT); createIndex(indexName, Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index 8e82a689f2b75..fb95c3d3d6028 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ 
b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -196,12 +196,14 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.elasticsearch.action.support.ActionTestUtils.assertNoFailureListener; import static org.elasticsearch.env.Environment.PATH_HOME_SETTING; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.mockito.Mockito.mock; public class SnapshotResiliencyTests extends ESTestCase { @@ -493,6 +495,85 @@ public void run() { assertThat(snapshotIds, either(hasSize(1)).or(hasSize(0))); } + public void testSuccessfulSnapshotWithConcurrentDynamicMappingUpdates() { + setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10)); + + String repoName = "repo"; + String snapshotName = "snapshot"; + final String index = "test"; + + final int shards = randomIntBetween(1, 10); + final int documents = randomIntBetween(2, 100); + TestClusterNode masterNode = + testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state()); + + final StepListener createSnapshotResponseStepListener = new StepListener<>(); + + continueOrDie(createRepoAndIndex(masterNode, repoName, index, shards), createIndexResponse -> { + final AtomicBoolean initiatedSnapshot = new AtomicBoolean(false); + for (int i = 0; i < documents; ++i) { + // Index a few documents with different field names so we trigger a dynamic mapping update for each of them + masterNode.client.bulk( + new BulkRequest().add(new IndexRequest(index).source(Map.of("foo" + i, "bar"))) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), + assertNoFailureListener( + bulkResponse -> { + assertFalse("Failures in bulkresponse: " + bulkResponse.buildFailureMessage(), bulkResponse.hasFailures()); + if (initiatedSnapshot.compareAndSet(false, true)) { + masterNode.client.admin().cluster().prepareCreateSnapshot(repoName, snapshotName) + .setWaitForCompletion(true).execute(createSnapshotResponseStepListener); + } + })); + } + }); + + final String restoredIndex = "restored"; + + final StepListener restoreSnapshotResponseStepListener = new StepListener<>(); + + continueOrDie(createSnapshotResponseStepListener, createSnapshotResponse -> masterNode.client.admin().cluster().restoreSnapshot( + new RestoreSnapshotRequest(repoName, snapshotName) + .renamePattern(index).renameReplacement(restoredIndex).waitForCompletion(true), restoreSnapshotResponseStepListener)); + + final StepListener searchResponseStepListener = new StepListener<>(); + + continueOrDie(restoreSnapshotResponseStepListener, restoreSnapshotResponse -> { + assertEquals(shards, restoreSnapshotResponse.getRestoreInfo().totalShards()); + masterNode.client.search( + new SearchRequest(restoredIndex).source(new SearchSourceBuilder().size(documents).trackTotalHits(true)), + searchResponseStepListener); + }); + + final AtomicBoolean documentCountVerified = new AtomicBoolean(); + + continueOrDie(searchResponseStepListener, r -> { + final long hitCount = r.getHits().getTotalHits().value; + assertThat( + "Documents were restored but the restored index mapping was older than some documents and misses some of their fields", + (int) hitCount, + lessThanOrEqualTo(((Map) 
masterNode.clusterService.state().metaData().index(restoredIndex).mapping() + .sourceAsMap().get("properties")).size()) + ); + documentCountVerified.set(true); + }); + + runUntil(documentCountVerified::get, TimeUnit.MINUTES.toMillis(5L)); + + assertNotNull(createSnapshotResponseStepListener.result()); + assertNotNull(restoreSnapshotResponseStepListener.result()); + SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); + final Repository repository = masterNode.repositoriesService.repository(repoName); + Collection snapshotIds = repository.getRepositoryData().getSnapshotIds(); + assertThat(snapshotIds, hasSize(1)); + + final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotIds.iterator().next()); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + assertThat(snapshotInfo.indices(), containsInAnyOrder(index)); + assertEquals(shards, snapshotInfo.successfulShards()); + assertEquals(0, snapshotInfo.failedShards()); + } + private StepListener createRepoAndIndex(TestClusterNode masterNode, String repoName, String index, int shards) { final AdminClient adminClient = masterNode.client.admin(); @@ -1050,7 +1131,7 @@ protected void assertSnapshotOrGenericThread() { } else { return metaData -> { final Repository repository = new MockEventuallyConsistentRepository( - metaData, environment, xContentRegistry(), deterministicTaskQueue.getThreadPool(), blobStoreContext); + metaData, xContentRegistry(), deterministicTaskQueue.getThreadPool(), blobStoreContext); repository.start(); return repository; }; diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java index 13a00119f29e7..d8e6cd7070966 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.unit.TimeValue; import java.util.List; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; @@ -76,7 +77,7 @@ public void testStatusApiConsistency() { assertEquals(snStatus.getStats().getTime(), snapshotInfo.endTime() - snapshotInfo.startTime()); } - public void testStatusAPICallInProgressSnapshot() throws InterruptedException { + public void testStatusAPICallInProgressSnapshot() throws Exception { Client client = client(); logger.info("--> creating repository"); @@ -101,7 +102,7 @@ public void testStatusAPICallInProgressSnapshot() throws InterruptedException { final List snapshotStatus = client.admin().cluster().snapshotsStatus( new SnapshotsStatusRequest("test-repo", new String[]{"test-snap"})).actionGet().getSnapshots(); - assertEquals(snapshotStatus.get(0).getState(), SnapshotsInProgress.State.STARTED); + assertBusy(() -> assertEquals(SnapshotsInProgress.State.STARTED, snapshotStatus.get(0).getState()), 1L, TimeUnit.MINUTES); logger.info("--> unblock all data nodes"); unblockAllDataNodes("test-repo"); diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepository.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepository.java index 9a2e4e246ec43..0b5d2c4f858d8 100644 --- 
a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepository.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepository.java @@ -34,7 +34,6 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.env.Environment; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.test.ESTestCase; @@ -69,9 +68,12 @@ public class MockEventuallyConsistentRepository extends BlobStoreRepository { private final NamedXContentRegistry namedXContentRegistry; - public MockEventuallyConsistentRepository(RepositoryMetaData metadata, Environment environment, - NamedXContentRegistry namedXContentRegistry, ThreadPool threadPool, Context context) { - super(metadata, environment.settings(), namedXContentRegistry, threadPool, BlobPath.cleanPath()); + public MockEventuallyConsistentRepository( + final RepositoryMetaData metadata, + final NamedXContentRegistry namedXContentRegistry, + final ThreadPool threadPool, + final Context context) { + super(metadata, namedXContentRegistry, threadPool, BlobPath.cleanPath()); this.context = context; this.namedXContentRegistry = namedXContentRegistry; } @@ -285,9 +287,11 @@ public void writeBlob(String blobName, InputStream inputStream, long blobSize, b // We do some checks in case there is a consistent state for a blob to prevent turning it inconsistent. final boolean hasConsistentContent = relevantActions.size() == 1 && relevantActions.get(0).operation == Operation.PUT; - if (BlobStoreRepository.INDEX_LATEST_BLOB.equals(blobName)) { + if (BlobStoreRepository.INDEX_LATEST_BLOB.equals(blobName) + || blobName.startsWith(BlobStoreRepository.METADATA_PREFIX)) { // TODO: Ensure that it is impossible to ever decrement the generation id stored in index.latest then assert that - // it never decrements here + // it never decrements here. Same goes for the metadata, ensure that we never overwrite newer with older + // metadata. 
} else if (blobName.startsWith(BlobStoreRepository.SNAPSHOT_PREFIX)) { if (hasConsistentContent) { if (basePath().buildAsString().equals(path().buildAsString())) { diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepositoryTests.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepositoryTests.java index 81934fe93bd8a..14d4a5ba60b97 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepositoryTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.snapshots.mockstore; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.blobstore.BlobContainer; @@ -62,7 +63,7 @@ public void setUp() throws Exception { public void testReadAfterWriteConsistently() throws IOException { MockEventuallyConsistentRepository.Context blobStoreContext = new MockEventuallyConsistentRepository.Context(); try (BlobStoreRepository repository = new MockEventuallyConsistentRepository( - new RepositoryMetaData("testRepo", "mockEventuallyConsistent", Settings.EMPTY), environment, + new RepositoryMetaData("testRepo", "mockEventuallyConsistent", Settings.EMPTY), xContentRegistry(), mock(ThreadPool.class), blobStoreContext)) { repository.start(); final BlobContainer blobContainer = repository.blobStore().blobContainer(repository.basePath()); @@ -82,7 +83,7 @@ public void testReadAfterWriteConsistently() throws IOException { public void testReadAfterWriteAfterReadThrows() throws IOException { MockEventuallyConsistentRepository.Context blobStoreContext = new MockEventuallyConsistentRepository.Context(); try (BlobStoreRepository repository = new MockEventuallyConsistentRepository( - new RepositoryMetaData("testRepo", "mockEventuallyConsistent", Settings.EMPTY), environment, + new RepositoryMetaData("testRepo", "mockEventuallyConsistent", Settings.EMPTY), xContentRegistry(), mock(ThreadPool.class), blobStoreContext)) { repository.start(); final BlobContainer blobContainer = repository.blobStore().blobContainer(repository.basePath()); @@ -98,7 +99,7 @@ public void testReadAfterWriteAfterReadThrows() throws IOException { public void testReadAfterDeleteAfterWriteThrows() throws IOException { MockEventuallyConsistentRepository.Context blobStoreContext = new MockEventuallyConsistentRepository.Context(); try (BlobStoreRepository repository = new MockEventuallyConsistentRepository( - new RepositoryMetaData("testRepo", "mockEventuallyConsistent", Settings.EMPTY), environment, + new RepositoryMetaData("testRepo", "mockEventuallyConsistent", Settings.EMPTY), xContentRegistry(), mock(ThreadPool.class), blobStoreContext)) { repository.start(); final BlobContainer blobContainer = repository.blobStore().blobContainer(repository.basePath()); @@ -116,7 +117,7 @@ public void testReadAfterDeleteAfterWriteThrows() throws IOException { public void testOverwriteRandomBlobFails() throws IOException { MockEventuallyConsistentRepository.Context blobStoreContext = new MockEventuallyConsistentRepository.Context(); try (BlobStoreRepository repository = new MockEventuallyConsistentRepository( - new RepositoryMetaData("testRepo", "mockEventuallyConsistent", Settings.EMPTY), environment, + new RepositoryMetaData("testRepo", "mockEventuallyConsistent", Settings.EMPTY), xContentRegistry(), 
mock(ThreadPool.class), blobStoreContext)) { repository.start(); final BlobContainer container = repository.blobStore().blobContainer(repository.basePath()); @@ -133,7 +134,7 @@ public void testOverwriteShardSnapBlobFails() throws IOException { MockEventuallyConsistentRepository.Context blobStoreContext = new MockEventuallyConsistentRepository.Context(); try (BlobStoreRepository repository = new MockEventuallyConsistentRepository( - new RepositoryMetaData("testRepo", "mockEventuallyConsistent", Settings.EMPTY), environment, + new RepositoryMetaData("testRepo", "mockEventuallyConsistent", Settings.EMPTY), xContentRegistry(), mock(ThreadPool.class), blobStoreContext)) { repository.start(); final BlobContainer container = @@ -151,26 +152,26 @@ public void testOverwriteSnapshotInfoBlob() { MockEventuallyConsistentRepository.Context blobStoreContext = new MockEventuallyConsistentRepository.Context(); try (BlobStoreRepository repository = new MockEventuallyConsistentRepository( - new RepositoryMetaData("testRepo", "mockEventuallyConsistent", Settings.EMPTY), environment, + new RepositoryMetaData("testRepo", "mockEventuallyConsistent", Settings.EMPTY), xContentRegistry(), mock(ThreadPool.class), blobStoreContext)) { repository.start(); // We create a snap- blob for snapshot "foo" in the first generation final SnapshotId snapshotId = new SnapshotId("foo", UUIDs.randomBase64UUID()); repository.finalizeSnapshot(snapshotId, Collections.emptyList(), 1L, null, 5, Collections.emptyList(), - -1L, false, Collections.emptyMap()); + -1L, false, MetaData.EMPTY_META_DATA, Collections.emptyMap()); // We try to write another snap- blob for "foo" in the next generation. It fails because the content differs. final AssertionError assertionError = expectThrows(AssertionError.class, () -> repository.finalizeSnapshot( snapshotId, Collections.emptyList(), 1L, null, 6, Collections.emptyList(), - 0, false, Collections.emptyMap())); + 0, false, MetaData.EMPTY_META_DATA, Collections.emptyMap())); assertThat(assertionError.getMessage(), equalTo("\nExpected: <6>\n but: was <5>")); // We try to write yet another snap- blob for "foo" in the next generation. // It passes cleanly because the content of the blob matches the previous write, except for the timestamps.
repository.finalizeSnapshot(snapshotId, Collections.emptyList(), 1L, null, 5, Collections.emptyList(), - 0, false, Collections.emptyMap()); + 0, false, MetaData.EMPTY_META_DATA, Collections.emptyMap()); } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java index bd0a5cc772fd7..fa33f8aef8679 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java @@ -24,7 +24,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; @@ -39,10 +38,8 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.RepositoryPlugin; -import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.fs.FsRepository; -import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; @@ -102,8 +99,6 @@ public long getFailureCount() { private final String randomPrefix; - private volatile boolean blockOnInitialization; - private volatile boolean blockOnControlFiles; private volatile boolean blockOnDataFiles; @@ -126,21 +121,12 @@ public MockRepository(RepositoryMetaData metadata, Environment environment, maximumNumberOfFailures = metadata.settings().getAsLong("max_failure_number", 100L); blockOnControlFiles = metadata.settings().getAsBoolean("block_on_control", false); blockOnDataFiles = metadata.settings().getAsBoolean("block_on_data", false); - blockOnInitialization = metadata.settings().getAsBoolean("block_on_init", false); blockAndFailOnWriteSnapFile = metadata.settings().getAsBoolean("block_on_snap", false); randomPrefix = metadata.settings().get("random", "default"); waitAfterUnblock = metadata.settings().getAsLong("wait_after_unblock", 0L); logger.info("starting mock repository with random prefix {}", randomPrefix); } - @Override - public void initializeSnapshot(SnapshotId snapshotId, List indices, MetaData clusterMetadata) { - if (blockOnInitialization) { - blockExecution(); - } - super.initializeSnapshot(snapshotId, indices, clusterMetadata); - } - private static RepositoryMetaData overrideSettings(RepositoryMetaData metadata, Environment environment) { // TODO: use another method of testing not being able to read the test file written by the master... 
// this is super duper hacky @@ -174,7 +160,6 @@ public synchronized void unblock() { // Clean blocking flags, so we wouldn't try to block again blockOnDataFiles = false; blockOnControlFiles = false; - blockOnInitialization = false; blockOnWriteIndexFile = false; blockAndFailOnWriteSnapFile = false; this.notifyAll(); @@ -200,7 +185,7 @@ private synchronized boolean blockExecution() { logger.debug("[{}] Blocking execution", metadata.name()); boolean wasBlocked = false; try { - while (blockOnDataFiles || blockOnControlFiles || blockOnInitialization || blockOnWriteIndexFile || + while (blockOnDataFiles || blockOnControlFiles || blockOnWriteIndexFile || blockAndFailOnWriteSnapFile) { blocked = true; this.wait(); diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index 17106508ae71a..136cd250f85dd 100644 --- a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -288,7 +288,7 @@ public void testInvalidHeader() throws IOException { } } - public void testHTTPHeader() throws IOException { + public void testHTTPRequest() throws IOException { String[] httpHeaders = {"GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH", "TRACE"}; for (String httpHeader : httpHeaders) { @@ -304,9 +304,54 @@ public void testHTTPHeader() throws IOException { TcpTransport.decodeFrame(bytes); fail("Expected exception"); } catch (Exception ex) { - assertThat(ex, instanceOf(TcpTransport.HttpOnTransportException.class)); + assertThat(ex, instanceOf(TcpTransport.HttpRequestOnTransportException.class)); assertEquals("This is not an HTTP port", ex.getMessage()); } } } + + public void testTLSHeader() throws IOException { + BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); + + streamOutput.write(0x16); + streamOutput.write(0x03); + byte byte1 = randomByte(); + streamOutput.write(byte1); + byte byte2 = randomByte(); + streamOutput.write(byte2); + streamOutput.write(randomByte()); + streamOutput.write(randomByte()); + streamOutput.write(randomByte()); + + try { + BytesReference bytes = streamOutput.bytes(); + TcpTransport.decodeFrame(bytes); + fail("Expected exception"); + } catch (Exception ex) { + assertThat(ex, instanceOf(StreamCorruptedException.class)); + String expected = "SSL/TLS request received but SSL/TLS is not enabled on this node, got (16,3," + + Integer.toHexString(byte1 & 0xFF) + "," + + Integer.toHexString(byte2 & 0xFF) + ")"; + assertEquals(expected, ex.getMessage()); + } + } + + public void testHTTPResponse() throws IOException { + BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); + streamOutput.write('H'); + streamOutput.write('T'); + streamOutput.write('T'); + streamOutput.write('P'); + streamOutput.write(randomByte()); + streamOutput.write(randomByte()); + + try { + TcpTransport.decodeFrame(streamOutput.bytes()); + fail("Expected exception"); + } catch (Exception ex) { + assertThat(ex, instanceOf(StreamCorruptedException.class)); + assertEquals("received HTTP response on transport port, ensure that transport port " + + "(not HTTP port) of a remote node is specified in the configuration", ex.getMessage()); + } + } } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java index dfe21ee429406..7ec046472e344 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java @@ -18,110 +18,80 @@ */ package org.elasticsearch.cluster; -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; -import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import java.util.Arrays; -import java.util.Collections; -import java.util.concurrent.CountDownLatch; +import java.util.List; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; - -/** - * Fake ClusterInfoService class that allows updating the nodes stats disk - * usage with fake values - */ public class MockInternalClusterInfoService extends InternalClusterInfoService { /** This is a marker plugin used to trigger MockNode to use this mock info service. 
*/ public static class TestPlugin extends Plugin {} - private final ClusterName clusterName; - private volatile NodeStats[] stats = new NodeStats[3]; + @Nullable // if no fakery should take place + public volatile Function shardSizeFunction; - /** Create a fake NodeStats for the given node and usage */ - public static NodeStats makeStats(String nodeName, DiskUsage usage) { - FsInfo.Path[] paths = new FsInfo.Path[1]; - FsInfo.Path path = new FsInfo.Path("/dev/null", null, - usage.getTotalBytes(), usage.getFreeBytes(), usage.getFreeBytes()); - paths[0] = path; - FsInfo fsInfo = new FsInfo(System.currentTimeMillis(), null, paths); - return new NodeStats( - new DiscoveryNode(nodeName, ESTestCase.buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), - System.currentTimeMillis(), - null, null, null, null, null, - fsInfo, - null, null, null, - null, null, null, null); - } + @Nullable // if no fakery should take place + public volatile BiFunction diskUsageFunction; public MockInternalClusterInfoService(Settings settings, ClusterService clusterService, ThreadPool threadPool, NodeClient client) { super(settings, clusterService, threadPool, client); - this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); - stats[0] = makeStats("node_t1", new DiskUsage("node_t1", "n1", "/dev/null", 100, 100)); - stats[1] = makeStats("node_t2", new DiskUsage("node_t2", "n2", "/dev/null", 100, 100)); - stats[2] = makeStats("node_t3", new DiskUsage("node_t3", "n3", "/dev/null", 100, 100)); - } - - public void setN1Usage(String nodeName, DiskUsage newUsage) { - stats[0] = makeStats(nodeName, newUsage); - } - - public void setN2Usage(String nodeName, DiskUsage newUsage) { - stats[1] = makeStats(nodeName, newUsage); - } - - public void setN3Usage(String nodeName, DiskUsage newUsage) { - stats[2] = makeStats(nodeName, newUsage); } @Override - public CountDownLatch updateNodeStats(final ActionListener listener) { - NodesStatsResponse response = new NodesStatsResponse(clusterName, Arrays.asList(stats), Collections.emptyList()); - listener.onResponse(response); - return new CountDownLatch(0); + public ClusterInfo getClusterInfo() { + final ClusterInfo clusterInfo = super.getClusterInfo(); + return new SizeFakingClusterInfo(clusterInfo); } @Override - public CountDownLatch updateIndicesStats(final ActionListener listener) { - // Not used, so noop - return new CountDownLatch(0); - } + List adjustNodesStats(List nodesStats) { + final BiFunction diskUsageFunction = this.diskUsageFunction; + if (diskUsageFunction == null) { + return nodesStats; + } - @Override - public ClusterInfo getClusterInfo() { - ClusterInfo clusterInfo = super.getClusterInfo(); - return new DevNullClusterInfo(clusterInfo.getNodeLeastAvailableDiskUsages(), - clusterInfo.getNodeMostAvailableDiskUsages(), clusterInfo.shardSizes); + return nodesStats.stream().map(nodeStats -> { + final DiscoveryNode discoveryNode = nodeStats.getNode(); + final FsInfo oldFsInfo = nodeStats.getFs(); + return new NodeStats(discoveryNode, nodeStats.getTimestamp(), nodeStats.getIndices(), nodeStats.getOs(), + nodeStats.getProcess(), nodeStats.getJvm(), nodeStats.getThreadPool(), new FsInfo(oldFsInfo.getTimestamp(), + oldFsInfo.getIoStats(), + StreamSupport.stream(oldFsInfo.spliterator(), false) + .map(fsInfoPath -> diskUsageFunction.apply(discoveryNode, fsInfoPath)) + .toArray(FsInfo.Path[]::new)), nodeStats.getTransport(), + nodeStats.getHttp(), nodeStats.getBreaker(), nodeStats.getScriptStats(), nodeStats.getDiscoveryStats(), + 
nodeStats.getIngestStats(), nodeStats.getAdaptiveSelectionStats()); + }).collect(Collectors.toList()); } - /** - * ClusterInfo that always points to DevNull. - */ - public static class DevNullClusterInfo extends ClusterInfo { - public DevNullClusterInfo(ImmutableOpenMap leastAvailableSpaceUsage, - ImmutableOpenMap mostAvailableSpaceUsage, ImmutableOpenMap shardSizes) { - super(leastAvailableSpaceUsage, mostAvailableSpaceUsage, shardSizes, null); + class SizeFakingClusterInfo extends ClusterInfo { + SizeFakingClusterInfo(ClusterInfo delegate) { + super(delegate.getNodeLeastAvailableDiskUsages(), delegate.getNodeMostAvailableDiskUsages(), + delegate.shardSizes, delegate.routingToDataPath); } @Override - public String getDataPath(ShardRouting shardRouting) { - return "/dev/null"; + public Long getShardSize(ShardRouting shardRouting) { + final Function shardSizeFunction = MockInternalClusterInfoService.this.shardSizeFunction; + if (shardSizeFunction == null) { + return super.getShardSize(shardRouting); + } + + return shardSizeFunction.apply(shardRouting); } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 95ff43e8b3c7d..b67108a16c19b 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -1130,34 +1130,38 @@ public static void assertMaxSeqNoInCommitUserData(Engine engine) throws Exceptio } public static void assertAtMostOneLuceneDocumentPerSequenceNumber(Engine engine) throws IOException { - if (engine.config().getIndexSettings().isSoftDeleteEnabled() == false || engine instanceof InternalEngine == false) { - return; + if (engine instanceof InternalEngine) { + try { + engine.refresh("test"); + try (Engine.Searcher searcher = engine.acquireSearcher("test")) { + assertAtMostOneLuceneDocumentPerSequenceNumber(engine.config().getIndexSettings(), searcher.getDirectoryReader()); + } + } catch (AlreadyClosedException ignored) { + // engine was closed + } } - try { - engine.refresh("test"); - try (Engine.Searcher searcher = engine.acquireSearcher("test")) { - DirectoryReader reader = Lucene.wrapAllDocsLive(searcher.getDirectoryReader()); - Set seqNos = new HashSet<>(); - for (LeafReaderContext leaf : reader.leaves()) { - NumericDocValues primaryTermDocValues = leaf.reader().getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME); - NumericDocValues seqNoDocValues = leaf.reader().getNumericDocValues(SeqNoFieldMapper.NAME); - int docId; - while ((docId = seqNoDocValues.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { - assertTrue(seqNoDocValues.advanceExact(docId)); - long seqNo = seqNoDocValues.longValue(); - assertThat(seqNo, greaterThanOrEqualTo(0L)); - if (primaryTermDocValues.advanceExact(docId)) { - if (seqNos.add(seqNo) == false) { - final IdOnlyFieldVisitor idFieldVisitor = new IdOnlyFieldVisitor(); - leaf.reader().document(docId, idFieldVisitor); - throw new AssertionError("found multiple documents for seq=" + seqNo + " id=" + idFieldVisitor.getId()); - } - } + } + + public static void assertAtMostOneLuceneDocumentPerSequenceNumber(IndexSettings indexSettings, + DirectoryReader reader) throws IOException { + Set seqNos = new HashSet<>(); + final DirectoryReader wrappedReader = indexSettings.isSoftDeleteEnabled() ? 
Lucene.wrapAllDocsLive(reader) : reader; + for (LeafReaderContext leaf : wrappedReader.leaves()) { + NumericDocValues primaryTermDocValues = leaf.reader().getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME); + NumericDocValues seqNoDocValues = leaf.reader().getNumericDocValues(SeqNoFieldMapper.NAME); + int docId; + while ((docId = seqNoDocValues.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { + assertTrue(seqNoDocValues.advanceExact(docId)); + long seqNo = seqNoDocValues.longValue(); + assertThat(seqNo, greaterThanOrEqualTo(0L)); + if (primaryTermDocValues.advanceExact(docId)) { + if (seqNos.add(seqNo) == false) { + final IdOnlyFieldVisitor idFieldVisitor = new IdOnlyFieldVisitor(); + leaf.reader().document(docId, idFieldVisitor); + throw new AssertionError("found multiple documents for seq=" + seqNo + " id=" + idFieldVisitor.getId()); } } } - } catch (AlreadyClosedException ignored) { - } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 4b621e5fe5153..cce9780b09223 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -375,7 +375,7 @@ protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMe indexSettings.getSettings(), "index"); mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY); SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap()); - final Engine.Warmer warmer = reader -> {}; + final Engine.Warmer warmer = createTestWarmer(indexSettings); ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); CircuitBreakerService breakerService = new HierarchyCircuitBreakerService(nodeSettings, clusterSettings); indexShard = new IndexShard( @@ -832,12 +832,14 @@ protected void snapshotShard(final IndexShard shard, final Snapshot snapshot, final Repository repository) throws IOException { final IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing(); + final PlainActionFuture future = PlainActionFuture.newFuture(); try (Engine.IndexCommitRef indexCommitRef = shard.acquireLastIndexCommit(true)) { Index index = shard.shardId().getIndex(); IndexId indexId = new IndexId(index.getName(), index.getUUID()); repository.snapshotShard(shard.store(), shard.mapperService(), snapshot.getSnapshotId(), indexId, - indexCommitRef.getIndexCommit(), snapshotStatus); + indexCommitRef.getIndexCommit(), snapshotStatus, future); + future.actionGet(); } final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); @@ -860,4 +862,17 @@ public static Translog getTranslog(IndexShard shard) { public static ReplicationTracker getReplicationTracker(IndexShard indexShard) { return indexShard.getReplicationTracker(); } + + public static Engine.Warmer createTestWarmer(IndexSettings indexSettings) { + return reader -> { + // This isn't a warmer but sometimes verify the content in the reader + if (randomBoolean()) { + try { + EngineTestCase.assertAtMostOneLuceneDocumentPerSequenceNumber(indexSettings, reader); + } catch (IOException e) { + throw new AssertionError(e); + } + } + }; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java 
b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java index 418cee00c0d21..417e4e98649af 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java @@ -100,7 +100,7 @@ public void initializeSnapshot(SnapshotId snapshotId, List indices, Met @Override public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long startTime, String failure, int totalShards, List shardFailures, long repositoryStateId, - boolean includeGlobalState, Map userMetadata) { + boolean includeGlobalState, MetaData metaData, Map userMetadata) { return null; } @@ -135,7 +135,7 @@ public boolean isReadOnly() { @Override public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, - IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus, ActionListener listener) { } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java index c889c5756bfff..e78975fdab515 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java @@ -26,9 +26,12 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.client.Client; import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; -import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.snapshots.SnapshotMissingException; import org.elasticsearch.snapshots.SnapshotRestoreException; @@ -47,42 +50,50 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; /** - * Basic integration tests for blob-based repository validation. + * Integration tests for {@link BlobStoreRepository} implementations. 
*/ public abstract class ESBlobStoreRepositoryIntegTestCase extends ESIntegTestCase { - protected abstract void createTestRepository(String name, boolean verify); - - protected void afterCreationCheck(Repository repository) { + protected abstract String repositoryType(); + protected Settings repositorySettings() { + final Settings.Builder settings = Settings.builder(); + settings.put("compress", randomBoolean()); + if (randomBoolean()) { + long size = 1 << randomIntBetween(7, 10); + settings.put("chunk_size", new ByteSizeValue(size, randomFrom(ByteSizeUnit.BYTES, ByteSizeUnit.KB))); + } + return settings.build(); } - protected void createAndCheckTestRepository(String name) { + protected final String createRepository(final String name) { final boolean verify = randomBoolean(); - createTestRepository(name, verify); - - final Iterable repositoriesServices = - internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class); - - for (RepositoriesService repositoriesService : repositoriesServices) { - final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(name); - afterCreationCheck(repository); - assertThat("blob store has to be lazy initialized", - repository.getBlobStore(), verify ? is(notNullValue()) : is(nullValue())); - } + logger.debug("--> creating repository [name: {}, verify: {}]", name, verify); + assertAcked(client().admin().cluster().preparePutRepository(name) + .setType(repositoryType()) + .setVerify(verify) + .setSettings(repositorySettings())); + + internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class).forEach(repositories -> { + assertThat(repositories.repository(name), notNullValue()); + assertThat(repositories.repository(name), instanceOf(BlobStoreRepository.class)); + assertThat(repositories.repository(name).isReadOnly(), is(false)); + BlobStore blobStore = ((BlobStoreRepository) repositories.repository(name)).getBlobStore(); + assertThat("blob store has to be lazy initialized", blobStore, verify ? 
is(notNullValue()) : is(nullValue())); + }); + return name; } public void testSnapshotAndRestore() throws Exception { - final String repoName = randomAsciiName(); - logger.info("--> creating repository {}", repoName); - createAndCheckTestRepository(repoName); + final String repoName = createRepository(randomName()); int indexCount = randomIntBetween(1, 5); int[] docCounts = new int[indexCount]; String[] indexNames = generateRandomNames(indexCount); @@ -93,7 +104,7 @@ public void testSnapshotAndRestore() throws Exception { assertHitCount(client().prepareSearch(indexNames[i]).setSize(0).get(), docCounts[i]); } - final String snapshotName = randomAsciiName(); + final String snapshotName = randomName(); logger.info("--> create snapshot {}:{}", repoName, snapshotName); assertSuccessfulSnapshot(client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName) .setWaitForCompletion(true).setIndices(indexNames)); @@ -153,13 +164,11 @@ public void testSnapshotAndRestore() throws Exception { } public void testMultipleSnapshotAndRollback() throws Exception { - String repoName = randomAsciiName(); - logger.info("--> creating repository {}", repoName); - createAndCheckTestRepository(repoName); + final String repoName = createRepository(randomName()); int iterationCount = randomIntBetween(2, 5); int[] docCounts = new int[iterationCount]; - String indexName = randomAsciiName(); - String snapshotName = randomAsciiName(); + String indexName = randomName(); + String snapshotName = randomName(); assertAcked(client().admin().indices().prepareCreate(indexName).get()); for (int i = 0; i < iterationCount; i++) { if (randomBoolean() && i > 0) { // don't delete on the first iteration @@ -210,12 +219,8 @@ public void testMultipleSnapshotAndRollback() throws Exception { } public void testIndicesDeletedFromRepository() throws Exception { + final String repoName = createRepository("test-repo"); Client client = client(); - - logger.info("--> creating repository"); - final String repoName = "test-repo"; - createAndCheckTestRepository(repoName); - createIndex("test-idx-1", "test-idx-2", "test-idx-3"); ensureGreen(); @@ -280,41 +285,39 @@ protected void addRandomDocuments(String name, int numDocs) throws ExecutionExce indexRandom(true, indexRequestBuilders); } - protected String[] generateRandomNames(int num) { + private String[] generateRandomNames(int num) { Set names = new HashSet<>(); for (int i = 0; i < num; i++) { String name; do { - name = randomAsciiName(); + name = randomName(); } while (names.contains(name)); names.add(name); } return names.toArray(new String[num]); } - public static CreateSnapshotResponse assertSuccessfulSnapshot(CreateSnapshotRequestBuilder requestBuilder) { + protected static void assertSuccessfulSnapshot(CreateSnapshotRequestBuilder requestBuilder) { CreateSnapshotResponse response = requestBuilder.get(); assertSuccessfulSnapshot(response); - return response; } - public static void assertSuccessfulSnapshot(CreateSnapshotResponse response) { + private static void assertSuccessfulSnapshot(CreateSnapshotResponse response) { assertThat(response.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(response.getSnapshotInfo().successfulShards(), equalTo(response.getSnapshotInfo().totalShards())); } - public static RestoreSnapshotResponse assertSuccessfulRestore(RestoreSnapshotRequestBuilder requestBuilder) { + private static void assertSuccessfulRestore(RestoreSnapshotRequestBuilder requestBuilder) { RestoreSnapshotResponse response = requestBuilder.get(); 
assertSuccessfulRestore(response); - return response; } - public static void assertSuccessfulRestore(RestoreSnapshotResponse response) { + private static void assertSuccessfulRestore(RestoreSnapshotResponse response) { assertThat(response.getRestoreInfo().successfulShards(), greaterThan(0)); assertThat(response.getRestoreInfo().successfulShards(), equalTo(response.getRestoreInfo().totalShards())); } - public static String randomAsciiName() { + protected static String randomName() { return randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 1a402577f407a..58ebc1be607ce 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -509,7 +509,7 @@ private TestCluster buildAndPutCluster(Scope currentClusterScope, long seed) thr return testCluster; } - private static void clearClusters() throws IOException { + private static void clearClusters() throws Exception { if (!clusters.isEmpty()) { IOUtils.close(clusters.values()); clusters.clear(); @@ -518,9 +518,9 @@ private static void clearClusters() throws IOException { restClient.close(); restClient = null; } - assertEquals(HttpChannelTaskHandler.INSTANCE.getNumChannels() + " channels still being tracked in " + - HttpChannelTaskHandler.class.getSimpleName() + " while there should be none", 0, - HttpChannelTaskHandler.INSTANCE.getNumChannels()); + assertBusy(() -> assertEquals(HttpChannelTaskHandler.INSTANCE.getNumChannels() + " channels still being tracked in " + + HttpChannelTaskHandler.class.getSimpleName() + " while there should be none", 0, + HttpChannelTaskHandler.INSTANCE.getNumChannels())); } private void afterInternal(boolean afterClass) throws Exception { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index b911a963073d2..857c32426c641 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -242,9 +242,6 @@ private static void setTestSysProps() { // Enable Netty leak detection and monitor logger for logged leak errors System.setProperty("io.netty.leakDetection.level", "paranoid"); - - // Disable direct buffer pooling - System.setProperty("io.netty.allocator.numDirectArenas", "0"); } protected final Logger logger = LogManager.getLogger(getClass()); diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index fe07450bbc10e..df4a318e1ffd2 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -34,6 +34,7 @@ project.copyRestSpec.from(xpackResources) { testClusters.integTest { extraConfigFile 'op-jwks.json', xpackProject('test:idp-fixture').file("oidc/op-jwks.json") + extraConfigFile 'testClient.crt', xpackProject('plugin:security').file("src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testClient.crt") setting 'xpack.security.enabled', 'true' setting 'xpack.security.authc.api_key.enabled', 'true' setting 'xpack.security.authc.token.enabled', 'true' @@ -53,6 +54,9 @@ testClusters.integTest { keystore 'xpack.security.authc.realms.oidc.oidc1.rp.client_secret', 'b07efb7a1cf6ec9462afe7b6d3ab55c6c7880262aa61ac28dded292aca47c9a2' setting 'xpack.security.authc.realms.oidc.oidc1.rp.response_type', 
'id_token' setting 'xpack.security.authc.realms.oidc.oidc1.claims.principal', 'sub' + setting 'xpack.security.authc.realms.pki.pki1.order', '3' + setting 'xpack.security.authc.realms.pki.pki1.certificate_authorities', '[ "testClient.crt" ]' + setting 'xpack.security.authc.realms.pki.pki1.delegation.enabled', 'true' user username: 'test_admin' } diff --git a/x-pack/docs/en/rest-api/security.asciidoc b/x-pack/docs/en/rest-api/security.asciidoc index 059dbc1e74716..d385ef29c4672 100644 --- a/x-pack/docs/en/rest-api/security.asciidoc +++ b/x-pack/docs/en/rest-api/security.asciidoc @@ -6,6 +6,7 @@ You can use the following APIs to perform security activities. * <> * <> +* <> * <> * <> * <> @@ -98,6 +99,7 @@ include::security/put-app-privileges.asciidoc[] include::security/create-role-mappings.asciidoc[] include::security/create-roles.asciidoc[] include::security/create-users.asciidoc[] +include::security/delegate-pki-authentication.asciidoc[] include::security/delete-app-privileges.asciidoc[] include::security/delete-role-mappings.asciidoc[] include::security/delete-roles.asciidoc[] diff --git a/x-pack/docs/en/rest-api/security/delegate-pki-authentication.asciidoc b/x-pack/docs/en/rest-api/security/delegate-pki-authentication.asciidoc new file mode 100644 index 0000000000000..92d82f1c273e9 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/delegate-pki-authentication.asciidoc @@ -0,0 +1,96 @@ +[role="xpack"] +[[security-api-delegate-pki-authentication]] +=== Delegate PKI authentication API +++++ +Delegate PKI authentication +++++ + +Implements the exchange of an _X509Certificate_ chain for an {es} access +token. + +[[security-api-delegate-pki-authentication-request]] +==== {api-request-title} + +`POST /_security/delegate_pki` + +[[security-api-delegate-pki-authentication-prereqs]] +==== {api-prereq-title} + +* To call this API, the (proxy) user must have the `delegate_pki` or the `all` +cluster privilege. The `kibana_system` built-in role already grants this +privilege. See {stack-ov}/security-privileges.html[Security privileges]. + +[[security-api-delegate-pki-authentication-desc]] +==== {api-description-title} + +This API implements the exchange of an _X509Certificate_ chain for an {es} +access token. The certificate chain is validated, according to RFC 5280, by +sequentially considering the trust configuration of every installed PKI realm +that has `delegation.enabled` set to `true` (default is `false`). A +successfully trusted client certificate is also subject to the validation of +the subject distinguished name according to that respective realm's +`username_pattern`. + +This API is called by *smart* and *trusted* proxies, such as {kib}, which +terminate the user's TLS session but still want to authenticate the user +by using a PKI realm--as if the user connected directly to {es}. For more +details, see <>. + +IMPORTANT: The association between the subject public key in the target +certificate and the corresponding private key is *not* validated. This is part +of the TLS authentication process and it is delegated to the proxy that calls +this API. The proxy is *trusted* to have performed the TLS authentication and +this API translates that authentication into an {es} access token. + +[[security-api-delegate-pki-authentication-request-body]] +==== {api-request-body-title} + +`x509_certificate_chain`:: +(Required, list of strings) The _X509Certificate_ chain, which is represented as +an ordered string array.
Each string in the array is the base64 encoding +(Section 4 of RFC 4648, not base64url-encoded) of the certificate's DER encoding. ++The first element is the target certificate, which contains the subject distinguished +name that is requesting access. This may be followed by additional certificates; +each subsequent certificate is used to certify the previous one. + + +[[security-api-delegate-pki-authentication-response-body]] +==== {api-response-body-title} + +`access_token`:: +(string) An access token associated with the subject distinguished name of the +client's certificate. + +`expires_in`:: +(time units) The amount of time (in seconds) before the token expires. + +`type`:: +(string) The type of token. + +[[security-api-delegate-pki-authentication-example]] +==== {api-examples-title} + +The following is an example request: + +[source, js] +------------------------------------------------------------ +POST /_security/delegate_pki +{ + "x509_certificate_chain": ["MIIDbTCCAlWgAwIBAgIJAIxTS7Qdho9jMA0GCSqGSIb3DQEBCwUAMFMxKzApBgNVBAMTIkVsYXN0aWNzZWFyY2ggVGVzdCBJbnRlcm1lZGlhdGUgQ0ExFjAUBgNVBAsTDUVsYXN0aWNzZWFyY2gxDDAKBgNVBAoTA29yZzAeFw0xOTA3MTkxMzMzNDFaFw0yMzA3MTgxMzMzNDFaMEoxIjAgBgNVBAMTGUVsYXN0aWNzZWFyY2ggVGVzdCBDbGllbnQxFjAUBgNVBAsTDUVsYXN0aWNzZWFyY2gxDDAKBgNVBAoTA29yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANHgMX2aX8t0nj4sGLNuKISmmXIYCj9RwRqS7L03l9Nng7kOKnhHu/nXDt7zMRJyHj+q6FAt5khlavYSVCQyrDybRuA5z31gOdqXerrjs2OXS5HSHNvoDAnHFsaYX/5geMewVTtc/vqpd7Ph/QtaKfmG2FK0JNQo0k24tcgCIcyMtBh6BA70yGBM0OT8GdOgd/d/mA7mRhaxIUMNYQzRYRsp4hMnnWoOTkR5Q8KSO3MKw9dPSpPe8EnwtJE10S3s5aXmgytru/xQqrFycPBNj4KbKVmqMP0G60CzXik5pr2LNvOFz3Qb6sYJtqeZF+JKgGWdaTC89m63+TEnUHqk0lcCAwEAAaNNMEswCQYDVR0TBAIwADAdBgNVHQ4EFgQU/+aAD6Q4mFq1vpHorC25/OY5zjcwHwYDVR0jBBgwFoAU8siFCiMiYZZm/95qFC75AG/LRE0wDQYJKoZIhvcNAQELBQADggEBAIRpCgDLpvXcgDHUk10uhxev21mlIbU+VP46ANnCuj0UELhTrdTuWvO1PAI4z+WbDUxryQfOOXO9R6D0dE5yR56L/J7d+KayW34zU7yRDZM7+rXpocdQ1Ex8mjP9HJ/Bf56YZTBQJpXeDrKow4FvtkI3bcIMkqmbG16LHQXeG3RS4ds4S4wCnE2nA6vIn9y+4R999q6y1VSBORrYULcDWxS54plHLEdiMr1vVallg82AGobS9GMcTL2U4Nx5IYZG7sbTk3LrDxVpVg/S2wLofEdOEwqCeHug/iOihNLJBabEW6z4TDLJAVW5KCY1DfhkYlBfHn7vxKkfKoCUK/yLWWI="] <1> +} +------------------------------------------------------------ +// CONSOLE +<1> A one-element certificate chain. + +Which returns the following response: + +[source,js] +-------------------------------------------------- +{ + "access_token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", + "type" : "Bearer", + "expires_in" : 1200 +} +-------------------------------------------------- +// TESTRESPONSE[s/dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==/$body.access_token/] diff --git a/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc b/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc index 7201a3f324d0e..1aa47ebab64cc 100644 --- a/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc @@ -39,13 +39,20 @@ pertain to retrieving api keys: `realm_name`:: (Optional, string) The name of an authentication realm. This parameter cannot be -used with either `id` or `name`. +used with either `id` or `name`, or when the `owner` flag is set to `true`. `username`:: (Optional, string) The username of a user. This parameter cannot be used with -either `id` or `name`. +either `id` or `name`, or when the `owner` flag is set to `true`. -NOTE: While all parameters are optional, at least one of them is required.
+`owner`:: +(Optional, boolean) A flag that can be used to query API keys owned +by the currently authenticated user. Defaults to `false`. +The `realm_name` and `username` parameters cannot be specified when this +parameter is set to `true` because they are assumed to be those of the currently authenticated user. + +NOTE: At least one of `id`, `name`, `username`, or `realm_name` must be specified + if `owner` is `false` (the default). [[security-api-get-api-key-example]] ==== {api-examples-title} @@ -114,6 +121,37 @@ GET /_security/api_key?username=myuser // CONSOLE // TEST[continued] +The following example retrieves all API keys owned by the currently authenticated user: + +[source,js] +-------------------------------------------------- +GET /_security/api_key?owner=true +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +The following example creates an API key: + +[source, js] +------------------------------------------------------------ +POST /_security/api_key +{ + "name": "my-api-key-1" +} +------------------------------------------------------------ +// CONSOLE + +The following example retrieves the API key identified by the specified `id` if +it is owned by the currently authenticated user: + +[source,js] +-------------------------------------------------- +GET /_security/api_key?id=VuaCfGcBCdbkQm-e5aOx&owner=true +-------------------------------------------------- +// CONSOLE +// TEST[s/VuaCfGcBCdbkQm-e5aOx/$body.id/] +// TEST[continued] + Finally, the following example retrieves all API keys for the user `myuser` in the `native1` realm immediately: diff --git a/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc b/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc index b6afc70715a55..b9390066a0d92 100644 --- a/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc @@ -66,6 +66,7 @@ A successful call returns an object with "cluster" and "index" fields. "cluster" : [ "all", "create_snapshot", + "delegate_pki", "manage", "manage_api_key", "manage_ccr", @@ -75,6 +76,7 @@ A successful call returns an object with "cluster" and "index" fields. "manage_ingest_pipelines", "manage_ml", "manage_oidc", + "manage_own_api_key", "manage_pipeline", "manage_rollup", "manage_saml", diff --git a/x-pack/docs/en/rest-api/security/invalidate-api-keys.asciidoc b/x-pack/docs/en/rest-api/security/invalidate-api-keys.asciidoc index a5cdcb1821e81..ecd79a0906cfd 100644 --- a/x-pack/docs/en/rest-api/security/invalidate-api-keys.asciidoc +++ b/x-pack/docs/en/rest-api/security/invalidate-api-keys.asciidoc @@ -40,13 +40,20 @@ pertain to invalidating api keys: `realm_name`:: (Optional, string) The name of an authentication realm. This parameter cannot be -used with either `id` or `name`. +used with either `id` or `name`, or when the `owner` flag is set to `true`. `username`:: (Optional, string) The username of a user. This parameter cannot be used with -either `id` or `name`. +either `id` or `name`, or when the `owner` flag is set to `true`. -NOTE: While all parameters are optional, at least one of them is required. +`owner`:: +(Optional, boolean) A flag that can be used to query API keys owned +by the currently authenticated user. Defaults to `false`. +The `realm_name` and `username` parameters cannot be specified when this +parameter is set to `true` because they are assumed to be those of the currently authenticated user.
+ +NOTE: At least one of `id`, `name`, `username`, or `realm_name` must be specified + if `owner` is `false` (the default). [[security-api-invalidate-api-key-response-body]] ==== {api-response-body-title} @@ -138,6 +145,32 @@ DELETE /_security/api_key // CONSOLE // TEST +The following example immediately invalidates the API key identified by the specified `id` if + it is owned by the currently authenticated user: + +[source,js] +-------------------------------------------------- +DELETE /_security/api_key +{ + "id" : "VuaCfGcBCdbkQm-e5aOx", + "owner" : "true" +} +-------------------------------------------------- +// CONSOLE + +The following example immediately invalidates all API keys owned by the currently authenticated + user: + +[source,js] +-------------------------------------------------- +DELETE /_security/api_key +{ + "owner" : "true" +} +-------------------------------------------------- +// CONSOLE +// TEST + Finally, the following example invalidates all API keys for the user `myuser` in the `native1` realm immediately: diff --git a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc index 58144d0b23c1a..a3fc1a6c0b0a1 100644 --- a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc @@ -2,26 +2,39 @@ [[configuring-pki-realm]] === Configuring a PKI realm -You can configure {es} to use Public Key Infrastructure (PKI) certificates -to authenticate users. This requires clients to present X.509 certificates. +You can configure {es} to use Public Key Infrastructure (PKI) certificates to +authenticate users. This requires clients connecting directly to {es} to +present X.509 certificates. The certificates must first be accepted for +authentication on the SSL/TLS layer on {es}. Only then are they optionally +further validated by a PKI realm. -NOTE: You cannot use PKI certificates to authenticate users in {kib}. +Users may also use PKI certificates to authenticate to {kib}; however, this +requires some <>. On +{es}, this configuration enables {kib} to act as a proxy for SSL/TLS +authentication and to submit the client certificates to {es} for further +validation by a PKI realm. + +For more general information, see {stack-ov}/pki-realm.html[PKI user authentication]. + +[float] +[role="xpack"] +[[pki-realm-for-direct-clients]] +==== PKI authentication for clients connecting directly to {es} To use PKI in {es}, you configure a PKI realm, enable client authentication on -the desired network layers (transport or http), and map the Distinguished Names -(DNs) from the user certificates to roles in the -<> or role-mapping file. +the desired network layers (transport or http), and map the Distinguished Name +(DN) from the Subject field in the user certificate to roles by using the +<> or the role-mapping file. You can also use a combination of PKI and username/password authentication. For example, you can enable SSL/TLS on the transport layer and define a PKI realm to require transport clients to authenticate with X.509 certificates, while still -authenticating HTTP traffic using username and password credentials.
You can +also set `xpack.security.transport.ssl.client_authentication` to `optional` to allow clients without certificates to authenticate with other credentials. -IMPORTANT: You must enable SSL/TLS and enable client authentication to use PKI. - -For more information, see {stack-ov}/pki-realm.html[PKI User Authentication]. +IMPORTANT: You must enable SSL/TLS with client authentication to use PKI when +clients connect directly to {es}. . Add a realm configuration for a `pki` realm to `elasticsearch.yml` under the `xpack.security.authc.realms.pki` namespace. @@ -43,17 +56,19 @@ xpack: order: 1 ------------------------------------------------------------ -With this configuration, any certificate trusted by the SSL/TLS layer is accepted -for authentication. The username is the common name (CN) extracted from the DN -of the certificate. +With this configuration, any certificate trusted by the {es} SSL/TLS layer is +accepted for authentication. The username is the common name (CN) extracted +from the DN in the Subject field of the end-entity certificate. This +configuration does not permit PKI authentication to {kib}. IMPORTANT: When you configure realms in `elasticsearch.yml`, only the realms you specify are used for authentication. If you also want to use the `native` or `file` realms, you must include them in the realm chain. -If you want to use something other than the CN of the DN as the username, you -can specify a regex to extract the desired username. For example, the regex in -the following configuration extracts the email address from the DN: +If you want to use something other than the CN of the Subject DN as the +username, you can specify a regex to extract the desired username. The regex is +applied on the Subject DN. For example, the regex in the following +configuration extracts the email address from the Subject DN: [source, yaml] ------------------------------------------------------------ @@ -65,23 +80,29 @@ xpack: pki1: username_pattern: "EMAILADDRESS=(.*?)(?:,|$)" ------------------------------------------------------------ + +NOTE: If the regex is too restrictive and does not match the Subject DN of the +client's certificate, then the realm does not authenticate the certificate. + -- -. Restart {es}. +. Restart {es} because realm configuration is not reloaded automatically. If +you're following through with the next steps, you might wish to hold the +restart for last. -. <>. +. <>. . Enable client authentication on the desired network layers (transport or http). + -- -The PKI realm relies on the TLS settings of the node's network interface. The -realm can be configured to be more restrictive than the underlying network -connection - that is, it is possible to configure the node such that some -connections are accepted by the network interface but then fail to be -authenticated by the PKI realm. However, the reverse is not possible. The PKI -realm cannot authenticate a connection that has been refused by the network -interface. +When clients connect directly to {es} and are not proxy-authenticated, the PKI +realm relies on the TLS settings of the node's network interface. The realm can +be configured to be more restrictive than the underlying network connection. +That is, it is possible to configure the node such that some connections +are accepted by the network interface but then fail to be authenticated by the +PKI realm. However, the reverse is not possible. The PKI realm cannot +authenticate a connection that has been refused by the network interface. 
In particular this means: @@ -96,14 +117,15 @@ In particular this means: used by the client. The relevant network interface (transport or http) must be configured to trust -any certificate that is to be used within the PKI realm. However, it possible to +any certificate that is to be used within the PKI realm. However, it is possible to configure the PKI realm to trust only a _subset_ of the certificates accepted by the network interface. This is useful when the SSL/TLS layer trusts clients with certificates that are signed by a different CA than the one that signs your users' certificates. -To configure the PKI realm with its own truststore, specify the `truststore.path` -option. For example: +To configure the PKI realm with its own truststore, specify the +`truststore.path` option. The path must be located within the Elasticsearch +configuration directory (`ES_PATH_CONF`). For example: [source, yaml] ------------------------------------------------------------ @@ -114,22 +136,33 @@ xpack: pki: pki1: truststore: - path: "/path/to/pki_truststore.jks" - password: "x-pack-test-password" + path: "pki1_truststore.jks" +------------------------------------------------------------ + +If the truststore is password protected, the password should be configured by +adding the appropriate `secure_password` setting to the {es} keystore. For +example, the following command adds the password for the example realm above: + +[source, shell] +------------------------------------------------------------ +bin/elasticsearch-keystore add \ +xpack.security.authc.realms.pki.pki1.truststore.secure_password ------------------------------------------------------------ The `certificate_authorities` option can be used as an alternative to the -`truststore.path` setting. +`truststore.path` setting when the certificate files are PEM formatted. +The setting accepts a list. The two options are mutually exclusive; they cannot be used +simultaneously. -- . Map roles for PKI users. + -- -You map roles for PKI users through the -<> or by using a file stored on -each node. When a user authenticates against a PKI realm, the privileges for -that user are the union of all privileges defined by the roles to which the -user is mapped. +You map roles for PKI users through the <> or by using a file stored on each node. Both configuration +options are merged. When a user authenticates against a PKI realm, the +privileges for that user are the union of all privileges defined by the roles +to which the user is mapped. You identify a user by the distinguished name in their certificate. For example, the following mapping configuration maps `John Doe` to the @@ -150,7 +183,11 @@ PUT /_security/role_mapping/users // CONSOLE <1> The distinguished name (DN) of a PKI user. -Or, alternatively, configured in a role-mapping file: +Or, alternatively, configured inside a role-mapping file. The file's path +defaults to `ES_PATH_CONF/role_mapping.yml`. You can specify a different path (which must be within +`ES_PATH_CONF`) by using the `files.role_mapping` realm setting (e.g. +`xpack.security.authc.realms.pki.pki1.files.role_mapping`): + [source, yaml] ------------------------------------------------------------ user: <1> @@ -163,7 +200,7 @@ The distinguished name for a PKI user follows X.500 naming conventions which place the most specific fields (like `cn` or `uid`) at the beginning of the name, and the most general fields (like `o` or `dc`) at the end of the name.
Some tools, such as _openssl_, may print out the subject name in a different - format. +format. One way that you can determine the correct DN for a certificate is to use the <> (use the relevant PKI @@ -179,3 +216,76 @@ NOTE: The PKI realm supports alternative to role mapping. -- + +[float] +[role="xpack"] +[[pki-realm-for-proxied-clients]] +==== PKI authentication for clients connecting to {kib} + +By default, the PKI realm relies on the node's network interface to perform the +SSL/TLS handshake and extract the client certificate. This behaviour requires +that clients connect directly to {es} so that their SSL connection is +terminated by the {es} node. If SSL/TLS authentication is to be performed +by {kib}, the PKI realm must be configured to permit delegation. + +Specifically, when clients presenting X.509 certificates connect to {kib}, +{kib} performs the SSL/TLS authentication. {kib} then forwards the client's +certificate chain, by calling an {es} API, to have it further validated by +the PKI realms that have been configured for delegation. + +To permit authentication delegation for a specific {es} PKI realm, start by +configuring the realm for the usual case, as detailed in the +<> +section. Note that you must explicitly configure a `truststore` (or, +equivalently, `certificate_authorities`) even though it is the same trust +configuration that you have configured on the network layer. Afterwards, +simply toggle the `delegation.enabled` realm setting to `true`. This realm is +now allowed to validate delegated PKI authentication (after restarting {es}). + +NOTE: PKI authentication delegation requires that the +`xpack.security.authc.token.enabled` setting be `true` and that SSL/TLS be +configured (without SSL/TLS client authentication). + +NOTE: {kib} also needs to be configured to allow PKI certificate authentication. + +A PKI realm with `delegation.enabled` still works unchanged for clients +connecting directly to {es}. Directly authenticated users and users that are PKI +authenticated by delegation to {kib} both follow the same +{stack-ov}/mapping-roles.html[role mapping rules] or +{stack-ov}/realm-chains.html#authorization_realms[authorization realms +configurations]. + +However, if you use the <>, +you can distinguish between users that are authenticated by delegation and +users that are authenticated directly. The former have the +extra fields `pki_delegated_by_user` and `pki_delegated_by_realm` in the user's +metadata. In the common setup, where authentication is delegated to {kib}, the +values of these fields are `kibana` and `reserved`, respectively. For example, +the following role mapping rule will assign the `role_for_pki1_direct` role to +all users that have been authenticated directly by the `pki1` realm, by +connecting to {es} instead of going through {kib}: + +[source,js] +-------------------------------------------------- +PUT /_security/role_mapping/direct_pki_only +{ + "roles" : [ "role_for_pki1_direct" ], + "rules" : { + "all": [ + { + "field": {"realm.name": "pki1"} + }, + { + "field": { + "metadata.pki_delegated_by_user": null <1> + } + } + ] + }, + "enabled": true +} +-------------------------------------------------- +// CONSOLE +<1> Only when this metadata field is set (it is *not* `null`) has the user been +authenticated in the delegation scenario; matching on `null` therefore selects only users who authenticated directly.
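To make the delegation flow above concrete, the following sketch shows how a trusted proxy might call the `POST /_security/delegate_pki` endpoint with the Elasticsearch low-level Java REST client. It is an illustration only, not part of this change: the host, port, certificate path, and the proxy's own credentials are assumptions, and a real proxy such as {kib} uses its own internal client.

[source,java]
------------------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.util.Base64;

public class DelegatePkiExample {
    public static void main(String[] args) throws Exception {
        // Load the end-entity certificate that the proxy received during its own
        // TLS handshake with the client. The file path is a placeholder for this sketch.
        CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
        X509Certificate clientCertificate;
        try (InputStream in = Files.newInputStream(Paths.get("testClient.crt"))) {
            clientCertificate = (X509Certificate) certificateFactory.generateCertificate(in);
        }

        // The API expects each element of x509_certificate_chain to be the base64
        // encoding of the certificate's DER bytes.
        String encodedCertificate = Base64.getEncoder().encodeToString(clientCertificate.getEncoded());

        // The proxy user would normally also configure its own credentials on the client
        // (a user holding the delegate_pki privilege); that setup is omitted here.
        // Host and port are assumptions for the example.
        try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200, "https")).build()) {
            Request request = new Request("POST", "/_security/delegate_pki");
            request.setJsonEntity("{\"x509_certificate_chain\":[\"" + encodedCertificate + "\"]}");
            Response response = restClient.performRequest(request);
            // The response body carries access_token, type, and expires_in, as documented above.
            System.out.println(response.getStatusLine());
        }
    }
}
------------------------------------------------------------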
+ diff --git a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc index a99e385bd8c25..cf8911238a02c 100644 --- a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc @@ -28,6 +28,11 @@ you are able to map users to both API-managed roles and file-managed roles NOTE: The PKI, LDAP, Kerberos and SAML realms support using <> as an alternative to role mapping. +NOTE: When <> is enabled, the roles +of the anonymous user are assigned to all the other users as well. + +NOTE: Users with no roles assigned will be unauthorized for any action. + [[mapping-roles-api]] ==== Using the role mapping API diff --git a/x-pack/docs/en/watcher/actions.asciidoc b/x-pack/docs/en/watcher/actions.asciidoc index 226bddfdcaa8f..52bd5d732b640 100644 --- a/x-pack/docs/en/watcher/actions.asciidoc +++ b/x-pack/docs/en/watcher/actions.asciidoc @@ -198,8 +198,10 @@ image::images/action-throttling.jpg[align="center"] You can use the `foreach` field in an action to trigger the configured action for every element within that array. -In order to protect from long running watches, after one hundred runs with an -foreach loop the execution is gracefully stopped. +In order to protect from long running watches, you can use the `max_iterations` +field to limit the maximum amount of runs that each watch executes. If this limit +is reached, the execution is gracefully stopped. If not set, this field defaults +to one hundred. [source,js] -------------------------------------------------- @@ -224,6 +226,7 @@ PUT _watcher/watch/log_event_watch "actions" : { "log_hits" : { "foreach" : "ctx.payload.hits.hits", <1> + "max_iterations" : 500, "logging" : { "text" : "Found id {{ctx.payload._id}} with field {{ctx.payload._source.my_field}}" } diff --git a/x-pack/plugin/analytics/build.gradle b/x-pack/plugin/analytics/build.gradle new file mode 100644 index 0000000000000..19799b750fd94 --- /dev/null +++ b/x-pack/plugin/analytics/build.gradle @@ -0,0 +1,26 @@ +evaluationDependsOn(xpackModule('core')) + +apply plugin: 'elasticsearch.esplugin' +esplugin { + name 'x-pack-analytics' + description 'Elasticsearch Expanded Pack Plugin - Analytics' + classname 'org.elasticsearch.xpack.analytics.AnalyticsPlugin' + extendedPlugins = ['x-pack-core'] +} +archivesBaseName = 'x-pack-analytics' + +compileJava.options.compilerArgs << "-Xlint:-rawtypes" +compileTestJava.options.compilerArgs << "-Xlint:-rawtypes" + + +dependencies { + compileOnly project(":server") + + compileOnly project(path: xpackModule('core'), configuration: 'default') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } +} + +integTest.enabled = false diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/AnalyticsPlugin.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/AnalyticsPlugin.java new file mode 100644 index 0000000000000..446f47ae7a12b --- /dev/null +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/AnalyticsPlugin.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.analytics; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; +import org.elasticsearch.xpack.core.analytics.action.AnalyticsStatsAction; +import org.elasticsearch.xpack.analytics.action.AnalyticsInfoTransportAction; +import org.elasticsearch.xpack.analytics.action.AnalyticsUsageTransportAction; +import org.elasticsearch.xpack.analytics.action.TransportAnalyticsStatsAction; +import org.elasticsearch.xpack.analytics.cumulativecardinality.CumulativeCardinalityPipelineAggregationBuilder; +import org.elasticsearch.xpack.analytics.cumulativecardinality.CumulativeCardinalityPipelineAggregator; + +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +import static java.util.Collections.singletonList; + +public class AnalyticsPlugin extends Plugin implements SearchPlugin, ActionPlugin { + + // TODO this should probably become more structured once Analytics plugin has more than just one agg + public static AtomicLong cumulativeCardUsage = new AtomicLong(0); + + public AnalyticsPlugin() { } + + public static XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } + + @Override + public List getPipelineAggregations() { + return singletonList(new PipelineAggregationSpec( + CumulativeCardinalityPipelineAggregationBuilder.NAME, + CumulativeCardinalityPipelineAggregationBuilder::new, + CumulativeCardinalityPipelineAggregator::new, + CumulativeCardinalityPipelineAggregationBuilder::parse)); + } + + @Override + public List> getActions() { + return Arrays.asList( + new ActionHandler<>(XPackUsageFeatureAction.ANALYTICS, AnalyticsUsageTransportAction.class), + new ActionHandler<>(XPackInfoFeatureAction.ANALYTICS, AnalyticsInfoTransportAction.class), + new ActionHandler<>(AnalyticsStatsAction.INSTANCE, TransportAnalyticsStatsAction.class)); + } +} diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/DataScienceAggregationBuilders.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/DataScienceAggregationBuilders.java new file mode 100644 index 0000000000000..014210f9a0485 --- /dev/null +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/DataScienceAggregationBuilders.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.analytics; + +import org.elasticsearch.xpack.analytics.cumulativecardinality.CumulativeCardinalityPipelineAggregationBuilder; + +public class DataScienceAggregationBuilders { + + public static CumulativeCardinalityPipelineAggregationBuilder cumulativeCaardinality(String name, String bucketsPath) { + return new CumulativeCardinalityPipelineAggregationBuilder(name, bucketsPath); + } +} diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/action/AnalyticsInfoTransportAction.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/action/AnalyticsInfoTransportAction.java new file mode 100644 index 0000000000000..5502d449d2851 --- /dev/null +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/action/AnalyticsInfoTransportAction.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.analytics.action; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureTransportAction; + +public class AnalyticsInfoTransportAction extends XPackInfoFeatureTransportAction { + + private final XPackLicenseState licenseState; + + @Inject + public AnalyticsInfoTransportAction(TransportService transportService, ActionFilters actionFilters, + XPackLicenseState licenseState) { + super(XPackInfoFeatureAction.ANALYTICS.name(), transportService, actionFilters); + this.licenseState = licenseState; + } + + @Override + public String name() { + return XPackField.ANALYTICS; + } + + @Override + public boolean available() { + return licenseState.isDataScienceAllowed(); + } + + @Override + public boolean enabled() { + return true; + } + +} diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/action/AnalyticsUsageTransportAction.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/action/AnalyticsUsageTransportAction.java new file mode 100644 index 0000000000000..998ac6b3f0b56 --- /dev/null +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/action/AnalyticsUsageTransportAction.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.analytics.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureTransportAction; +import org.elasticsearch.xpack.core.analytics.AnalyticsFeatureSetUsage; + +public class AnalyticsUsageTransportAction extends XPackUsageFeatureTransportAction { + private final XPackLicenseState licenseState; + + @Inject + public AnalyticsUsageTransportAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + XPackLicenseState licenseState) { + super(XPackUsageFeatureAction.ANALYTICS.name(), transportService, clusterService, + threadPool, actionFilters, indexNameExpressionResolver); + this.licenseState = licenseState; + } + + @Override + protected void masterOperation(Task task, XPackUsageRequest request, ClusterState state, + ActionListener listener) { + boolean available = licenseState.isDataScienceAllowed(); + + AnalyticsFeatureSetUsage usage = + new AnalyticsFeatureSetUsage(available, true); + listener.onResponse(new XPackUsageFeatureResponse(usage)); + } +} diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/action/TransportAnalyticsStatsAction.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/action/TransportAnalyticsStatsAction.java new file mode 100644 index 0000000000000..eaf3eb1e0242a --- /dev/null +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/action/TransportAnalyticsStatsAction.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.analytics.action; + +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.nodes.TransportNodesAction; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.analytics.action.AnalyticsStatsAction; +import org.elasticsearch.xpack.analytics.AnalyticsPlugin; + +import java.io.IOException; +import java.util.List; + +public class TransportAnalyticsStatsAction extends TransportNodesAction { + + + @Inject + public TransportAnalyticsStatsAction(TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters) { + super(AnalyticsStatsAction.NAME, threadPool, clusterService, transportService, actionFilters, + AnalyticsStatsAction.Request::new, AnalyticsStatsAction.NodeRequest::new, ThreadPool.Names.MANAGEMENT, + AnalyticsStatsAction.NodeResponse.class); + } + + @Override + protected AnalyticsStatsAction.Response newResponse(AnalyticsStatsAction.Request request, + List nodes, + List failures) { + return new AnalyticsStatsAction.Response(clusterService.getClusterName(), nodes, failures); + } + + @Override + protected AnalyticsStatsAction.NodeRequest newNodeRequest(AnalyticsStatsAction.Request request) { + return new AnalyticsStatsAction.NodeRequest(request); + } + + @Override + protected AnalyticsStatsAction.NodeResponse newNodeResponse(StreamInput in) throws IOException { + return new AnalyticsStatsAction.NodeResponse(in); + } + + @Override + protected AnalyticsStatsAction.NodeResponse nodeOperation(AnalyticsStatsAction.NodeRequest request, Task task) { + AnalyticsStatsAction.NodeResponse statsResponse = new AnalyticsStatsAction.NodeResponse(clusterService.localNode()); + statsResponse.setCumulativeCardinalityUsage(AnalyticsPlugin.cumulativeCardUsage.get()); + return statsResponse; + } + +} diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregationBuilder.java new file mode 100644 index 0000000000000..236745ddfad14 --- /dev/null +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregationBuilder.java @@ -0,0 +1,147 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.analytics.cumulativecardinality; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.BucketMetricsParser; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.analytics.AnalyticsPlugin; + +import java.io.IOException; +import java.util.Collection; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; + +import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregator.Parser.BUCKETS_PATH; +import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregator.Parser.FORMAT; + +public class CumulativeCardinalityPipelineAggregationBuilder + extends AbstractPipelineAggregationBuilder { + public static final String NAME = "cumulative_cardinality"; + + private String format; + + private static final Function> PARSER + = name -> { + ConstructingObjectParser parser = new ConstructingObjectParser<>( + CumulativeCardinalityPipelineAggregationBuilder.NAME, + false, + o -> new CumulativeCardinalityPipelineAggregationBuilder(name, (String) o[0])); + + parser.declareString(ConstructingObjectParser.constructorArg(), BUCKETS_PATH_FIELD); + parser.declareString(CumulativeCardinalityPipelineAggregationBuilder::format, FORMAT); + return parser; + }; + + public CumulativeCardinalityPipelineAggregationBuilder(String name, String bucketsPath) { + super(name, NAME, new String[] { bucketsPath }); + } + + /** + * Read from a stream. + */ + public CumulativeCardinalityPipelineAggregationBuilder(StreamInput in) throws IOException { + super(in, NAME); + format = in.readOptionalString(); + } + + @Override + protected final void doWriteTo(StreamOutput out) throws IOException { + out.writeOptionalString(format); + } + + /** + * Sets the format to use on the output of this aggregation. + */ + public CumulativeCardinalityPipelineAggregationBuilder format(String format) { + if (format == null) { + throw new IllegalArgumentException("[format] must not be null: [" + name + "]"); + } + this.format = format; + return this; + } + + /** + * Gets the format to use on the output of this aggregation. 
+ */ + public String format() { + return format; + } + + protected DocValueFormat formatter() { + if (format != null) { + return new DocValueFormat.Decimal(format); + } else { + return DocValueFormat.RAW; + } + } + + @Override + protected PipelineAggregator createInternal(Map metaData) { + return new CumulativeCardinalityPipelineAggregator(name, bucketsPaths, formatter(), metaData); + } + + @Override + public void doValidate(AggregatorFactory parent, Collection aggFactories, + Collection pipelineAggregatorFactories) { + if (bucketsPaths.length != 1) { + throw new IllegalStateException(BUCKETS_PATH.getPreferredName() + + " must contain a single entry for aggregation [" + name + "]"); + } + + validateSequentiallyOrderedParentAggs(parent, NAME, name); + } + + @Override + protected final XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { + if (format != null) { + builder.field(BucketMetricsParser.FORMAT.getPreferredName(), format); + } + return builder; + } + + public static CumulativeCardinalityPipelineAggregationBuilder parse(String aggName, XContentParser parser) { + if (AnalyticsPlugin.getLicenseState().isDataScienceAllowed() == false) { + throw LicenseUtils.newComplianceException(XPackField.ANALYTICS); + } + + // Increment usage here since it is a good boundary between internal and external, and should correlate 1:1 with + // usage and not internal instantiations + AnalyticsPlugin.cumulativeCardUsage.incrementAndGet(); + return PARSER.apply(aggName).apply(parser, null); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), format); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + if (super.equals(obj) == false) return false; + CumulativeCardinalityPipelineAggregationBuilder other = (CumulativeCardinalityPipelineAggregationBuilder) obj; + return Objects.equals(format, other.format); + } + + @Override + public String getWriteableName() { + return NAME; + } +} diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregator.java new file mode 100644 index 0000000000000..67cfbc6f04caf --- /dev/null +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregator.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.analytics.cumulativecardinality; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.AggregationExecutionException; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory; +import org.elasticsearch.search.aggregations.metrics.HyperLogLogPlusPlus; +import org.elasticsearch.search.aggregations.metrics.InternalCardinality; +import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.support.AggregationPath; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +public class CumulativeCardinalityPipelineAggregator extends PipelineAggregator { + private final DocValueFormat formatter; + + CumulativeCardinalityPipelineAggregator(String name, String[] bucketsPaths, DocValueFormat formatter, Map metadata) { + super(name, bucketsPaths, metadata); + this.formatter = formatter; + } + + /** + * Read from a stream. + */ + public CumulativeCardinalityPipelineAggregator(StreamInput in) throws IOException { + super(in); + formatter = in.readNamedWriteable(DocValueFormat.class); + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + out.writeNamedWriteable(formatter); + } + + @Override + public String getWriteableName() { + return CumulativeCardinalityPipelineAggregationBuilder.NAME; + } + + @Override + public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { + InternalMultiBucketAggregation + histo = (InternalMultiBucketAggregation) aggregation; + List buckets = histo.getBuckets(); + HistogramFactory factory = (HistogramFactory) histo; + List newBuckets = new ArrayList<>(buckets.size()); + HyperLogLogPlusPlus hll = null; + + try { + long cardinality = 0; + for (InternalMultiBucketAggregation.InternalBucket bucket : buckets) { + HyperLogLogPlusPlus bucketHll = resolveBucketValue(histo, bucket, bucketsPaths()[0]); + if (hll == null && bucketHll != null) { + // We have to create a new HLL because otherwise it will alter the + // existing cardinality sketch and bucket value + hll = new HyperLogLogPlusPlus(bucketHll.precision(), reduceContext.bigArrays(), 1); + } + if (bucketHll != null) { + hll.merge(0, bucketHll, 0); + cardinality = hll.cardinality(0); + } + + List aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false) + .map((p) -> (InternalAggregation) p) + .collect(Collectors.toList()); + aggs.add(new InternalSimpleLongValue(name(), cardinality, formatter, new ArrayList<>(), metaData())); + Bucket newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs)); + newBuckets.add(newBucket); + } + return 
factory.createAggregation(newBuckets); + } finally { + if (hll != null) { + hll.close(); + } + } + } + + private HyperLogLogPlusPlus resolveBucketValue(MultiBucketsAggregation agg, + InternalMultiBucketAggregation.InternalBucket bucket, + String aggPath) { + List aggPathsList = AggregationPath.parse(aggPath).getPathElementsAsStringList(); + Object propertyValue = bucket.getProperty(agg.getName(), aggPathsList); + if (propertyValue == null) { + throw new AggregationExecutionException(AbstractPipelineAggregationBuilder.BUCKETS_PATH_FIELD.getPreferredName() + + " must reference a cardinality aggregation"); + } + + if (propertyValue instanceof InternalCardinality) { + return ((InternalCardinality) propertyValue).getCounts(); + } + + String currentAggName; + if (aggPathsList.isEmpty()) { + currentAggName = agg.getName(); + } else { + currentAggName = aggPathsList.get(0); + } + + throw new AggregationExecutionException(AbstractPipelineAggregationBuilder.BUCKETS_PATH_FIELD.getPreferredName() + + " must reference a cardinality aggregation, got: [" + + propertyValue.getClass().getSimpleName() + "] at aggregation [" + currentAggName + "]"); + } + +} diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/InternalSimpleLongValue.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/InternalSimpleLongValue.java new file mode 100644 index 0000000000000..e8db75edad5d9 --- /dev/null +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/InternalSimpleLongValue.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.analytics.cumulativecardinality; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.SimpleValue; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class InternalSimpleLongValue extends InternalNumericMetricsAggregation.SingleValue implements SimpleValue { + public static final String NAME = "simple_long_value"; + protected final long value; + + public InternalSimpleLongValue(String name, long value, DocValueFormat formatter, List pipelineAggregators, + Map metaData) { + super(name, pipelineAggregators, metaData); + this.format = formatter; + this.value = value; + } + + /** + * Read from a stream. 
+ */ + public InternalSimpleLongValue(StreamInput in) throws IOException { + super(in); + format = in.readNamedWriteable(DocValueFormat.class); + value = in.readZLong(); + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + out.writeNamedWriteable(format); + out.writeZLong(value); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public double value() { + return value; + } + + public long getValue() { + return value; + } + + DocValueFormat formatter() { + return format; + } + + @Override + public InternalSimpleLongValue doReduce(List aggregations, ReduceContext reduceContext) { + throw new UnsupportedOperationException("Not supported"); + } + + @Override + public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + boolean hasValue = !(Double.isInfinite(value) || Double.isNaN(value)); + builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? value : null); + if (hasValue && format != DocValueFormat.RAW) { + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value).toString()); + } + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), value); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + if (super.equals(obj) == false) return false; + InternalSimpleLongValue other = (InternalSimpleLongValue) obj; + return Objects.equals(value, other.value); + } +} diff --git a/x-pack/plugin/analytics/src/main/plugin-metadata/plugin-security.policy b/x-pack/plugin/analytics/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/StubAggregatorFactory.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/StubAggregatorFactory.java new file mode 100644 index 0000000000000..edcd66ab422dd --- /dev/null +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/StubAggregatorFactory.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.analytics; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Test implementation for AggregatorFactory. 
+ */ +public class StubAggregatorFactory extends AggregatorFactory { + + private final Aggregator aggregator; + + private StubAggregatorFactory(SearchContext context, Aggregator aggregator) throws IOException { + super("_name", context, null, new AggregatorFactories.Builder(), Collections.emptyMap()); + this.aggregator = aggregator; + } + + @Override + protected Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket, List list, Map metaData) throws IOException { + return aggregator; + } + + public static StubAggregatorFactory createInstance() throws IOException { + BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + SearchContext searchContext = mock(SearchContext.class); + when(searchContext.bigArrays()).thenReturn(bigArrays); + + Aggregator aggregator = mock(Aggregator.class); + + return new StubAggregatorFactory(searchContext, aggregator); + } +} diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/action/AnalyticsInfoTransportActionTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/action/AnalyticsInfoTransportActionTests.java new file mode 100644 index 0000000000000..9eebdd03c1162 --- /dev/null +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/action/AnalyticsInfoTransportActionTests.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.analytics.action; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; +import org.elasticsearch.xpack.core.analytics.AnalyticsFeatureSetUsage; +import org.junit.Before; + +import static org.hamcrest.core.Is.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class AnalyticsInfoTransportActionTests extends ESTestCase { + + private XPackLicenseState licenseState; + + @Before + public void init() { + licenseState = mock(XPackLicenseState.class); + } + + public void testAvailable() throws Exception { + AnalyticsInfoTransportAction featureSet = new AnalyticsInfoTransportAction( + mock(TransportService.class), mock(ActionFilters.class), licenseState); + boolean available = randomBoolean(); + when(licenseState.isDataScienceAllowed()).thenReturn(available); + assertThat(featureSet.available(), is(available)); + + AnalyticsUsageTransportAction usageAction = new AnalyticsUsageTransportAction(mock(TransportService.class), null, null, + mock(ActionFilters.class), null, licenseState); + PlainActionFuture future = new PlainActionFuture<>(); + usageAction.masterOperation(null, null, null, future); + XPackFeatureSet.Usage usage = future.get().getUsage(); + assertThat(usage.available(), is(available)); + + BytesStreamOutput out = new BytesStreamOutput(); + usage.writeTo(out); + XPackFeatureSet.Usage serializedUsage = new AnalyticsFeatureSetUsage(out.bytes().streamInput()); + 
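+ // the availability flag must survive the wire-format round trip performed just above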
assertThat(serializedUsage.available(), is(available)); + } + + public void testEnabled() throws Exception { + AnalyticsInfoTransportAction featureSet = new AnalyticsInfoTransportAction( + mock(TransportService.class), mock(ActionFilters.class), licenseState); + assertThat(featureSet.enabled(), is(true)); + assertTrue(featureSet.enabled()); + + AnalyticsUsageTransportAction usageAction = new AnalyticsUsageTransportAction(mock(TransportService.class), + null, null, mock(ActionFilters.class), null, licenseState); + PlainActionFuture future = new PlainActionFuture<>(); + usageAction.masterOperation(null, null, null, future); + XPackFeatureSet.Usage usage = future.get().getUsage(); + assertTrue(usage.enabled()); + + BytesStreamOutput out = new BytesStreamOutput(); + usage.writeTo(out); + XPackFeatureSet.Usage serializedUsage = new AnalyticsFeatureSetUsage(out.bytes().streamInput()); + assertTrue(serializedUsage.enabled()); + } + +} diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/action/TransportAnalyticsStatsActionTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/action/TransportAnalyticsStatsActionTests.java new file mode 100644 index 0000000000000..a4c25fe653fdd --- /dev/null +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/action/TransportAnalyticsStatsActionTests.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.analytics.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.yaml.ObjectPath; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.analytics.action.AnalyticsStatsAction; +import org.junit.Before; + +import java.util.Arrays; +import java.util.Collections; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportAnalyticsStatsActionTests extends ESTestCase { + + private TransportAnalyticsStatsAction action; + + @Before + public void setupTransportAction() { + TransportService transportService = mock(TransportService.class); + ThreadPool threadPool = mock(ThreadPool.class); + + ClusterService clusterService = mock(ClusterService.class); + DiscoveryNode discoveryNode = new DiscoveryNode("nodeId", buildNewFakeTransportAddress(), Version.CURRENT); + when(clusterService.localNode()).thenReturn(discoveryNode); + + ClusterName clusterName = new ClusterName("cluster_name"); + when(clusterService.getClusterName()).thenReturn(clusterName); + + ClusterState clusterState = 
mock(ClusterState.class); + when(clusterState.getMetaData()).thenReturn(MetaData.EMPTY_META_DATA); + when(clusterService.state()).thenReturn(clusterState); + + + action = new TransportAnalyticsStatsAction(transportService, clusterService, threadPool, new + ActionFilters(Collections.emptySet())); + } + + public void testCumulativeCardStats() throws Exception { + AnalyticsStatsAction.Request request = new AnalyticsStatsAction.Request(); + AnalyticsStatsAction.NodeResponse nodeResponse1 = action.nodeOperation(new AnalyticsStatsAction.NodeRequest(request), null); + AnalyticsStatsAction.NodeResponse nodeResponse2 = action.nodeOperation(new AnalyticsStatsAction.NodeRequest(request), null); + + AnalyticsStatsAction.Response response = action.newResponse(request, + Arrays.asList(nodeResponse1, nodeResponse2), Collections.emptyList()); + + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + response.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + ObjectPath objectPath = ObjectPath.createFromXContent(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + assertThat(objectPath.evaluate("stats.0.cumulative_cardinality_usage"), equalTo(0)); + assertThat(objectPath.evaluate("stats.1.cumulative_cardinality_usage"), equalTo(0)); + } + } +} diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityAggregatorTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityAggregatorTests.java new file mode 100644 index 0000000000000..4513f4fa409f1 --- /dev/null +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityAggregatorTests.java @@ -0,0 +1,257 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.analytics.cumulativecardinality; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.Rounding; +import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregationExecutionException; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalOrder; +import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregatorFactory; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregatorFactory; +import org.elasticsearch.search.aggregations.metrics.CardinalityAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.support.ValueType; +import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; +import org.elasticsearch.search.aggregations.support.ValuesSourceType; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.xpack.analytics.StubAggregatorFactory; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.function.Consumer; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; + +public class CumulativeCardinalityAggregatorTests extends AggregatorTestCase { + + private static final String HISTO_FIELD = "histo"; + private static final String VALUE_FIELD = "value_field"; + + private static final List datasetTimes = Arrays.asList( + "2017-01-01T01:07:45", //1 + "2017-01-01T03:43:34", //1 + "2017-01-03T04:11:00", //3 + "2017-01-03T05:11:31", //1 + "2017-01-05T08:24:05", //5 + "2017-01-05T13:09:32", //1 + "2017-01-07T13:47:43", //7 + "2017-01-08T16:14:34", //1 + "2017-01-09T17:09:50", //9 + "2017-01-09T22:55:46");//10 + + 
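+ // datasetValues pairs one numeric value with each timestamp above; cumulativeCardinality lists the expected running count of distinct values for each of the nine daily histogram buckets (days without documents repeat the previous count)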
private static final List datasetValues = Arrays.asList(1,1,3,1,5,1,7,1,9,10); + private static final List cumulativeCardinality = Arrays.asList(1.0,1.0,2.0,2.0,3.0,3.0,4.0,4.0,6.0); + + public void testSimple() throws IOException { + + Query query = new MatchAllDocsQuery(); + + DateHistogramAggregationBuilder aggBuilder = new DateHistogramAggregationBuilder("histo"); + aggBuilder.calendarInterval(DateHistogramInterval.DAY).field(HISTO_FIELD); + aggBuilder.subAggregation(new CardinalityAggregationBuilder("the_cardinality", ValueType.NUMERIC).field(VALUE_FIELD)); + aggBuilder.subAggregation(new CumulativeCardinalityPipelineAggregationBuilder("cumulative_card", "the_cardinality")); + + executeTestCase(query, aggBuilder, histogram -> { + assertEquals(9, ((Histogram)histogram).getBuckets().size()); + List buckets = ((Histogram)histogram).getBuckets(); + int counter = 0; + for (Histogram.Bucket bucket : buckets) { + assertThat(((InternalSimpleLongValue) (bucket.getAggregations().get("cumulative_card"))).value(), + equalTo(cumulativeCardinality.get(counter))); + counter += 1; + } + }); + } + + public void testAllNull() throws IOException { + Query query = new MatchAllDocsQuery(); + + DateHistogramAggregationBuilder aggBuilder = new DateHistogramAggregationBuilder("histo"); + aggBuilder.calendarInterval(DateHistogramInterval.DAY).field(HISTO_FIELD); + aggBuilder.subAggregation(new CardinalityAggregationBuilder("the_cardinality", ValueType.NUMERIC).field("foo")); + aggBuilder.subAggregation(new CumulativeCardinalityPipelineAggregationBuilder("cumulative_card", "the_cardinality")); + + executeTestCase(query, aggBuilder, histogram -> { + assertEquals(9, ((Histogram)histogram).getBuckets().size()); + List buckets = ((Histogram)histogram).getBuckets(); + for (Histogram.Bucket bucket : buckets) { + assertThat(((InternalSimpleLongValue) (bucket.getAggregations().get("cumulative_card"))).value(), equalTo(0.0)); + } + }); + } + + public void testParentValidations() throws IOException { + ValuesSourceConfig valuesSource = new ValuesSourceConfig<>(ValuesSourceType.NUMERIC); + + // Histogram + Set aggBuilders = new HashSet<>(); + aggBuilders.add(new CumulativeCardinalityPipelineAggregationBuilder("cumulative_card", "sum")); + AggregatorFactory parent = new HistogramAggregatorFactory("name", valuesSource, 0.0d, 0.0d, + mock(InternalOrder.class), false, 0L, 0.0d, 1.0d, mock(SearchContext.class), null, + new AggregatorFactories.Builder(), Collections.emptyMap()); + CumulativeCardinalityPipelineAggregationBuilder builder + = new CumulativeCardinalityPipelineAggregationBuilder("name", "valid"); + builder.validate(parent, Collections.emptySet(), aggBuilders); + + // Date Histogram + aggBuilders.clear(); + aggBuilders.add(new CumulativeCardinalityPipelineAggregationBuilder("cumulative_card", "sum")); + parent = new DateHistogramAggregatorFactory("name", valuesSource, 0L, + mock(InternalOrder.class), false, 0L, mock(Rounding.class), mock(Rounding.class), + mock(ExtendedBounds.class), mock(SearchContext.class), mock(AggregatorFactory.class), + new AggregatorFactories.Builder(), Collections.emptyMap()); + builder = new CumulativeCardinalityPipelineAggregationBuilder("name", "valid"); + builder.validate(parent, Collections.emptySet(), aggBuilders); + + // Auto Date Histogram + ValuesSourceConfig numericVS = new ValuesSourceConfig<>(ValuesSourceType.NUMERIC); + aggBuilders.clear(); + aggBuilders.add(new CumulativeCardinalityPipelineAggregationBuilder("cumulative_card", "sum")); + 
AutoDateHistogramAggregationBuilder.RoundingInfo[] roundings = new AutoDateHistogramAggregationBuilder.RoundingInfo[1]; + parent = new AutoDateHistogramAggregatorFactory("name", numericVS, + 1, roundings, + mock(SearchContext.class), null, new AggregatorFactories.Builder(), Collections.emptyMap()); + builder = new CumulativeCardinalityPipelineAggregationBuilder("name", "valid"); + builder.validate(parent, Collections.emptySet(), aggBuilders); + + // Mocked "test" agg, should fail validation + aggBuilders.clear(); + aggBuilders.add(new CumulativeCardinalityPipelineAggregationBuilder("cumulative_card", "sum")); + StubAggregatorFactory parentFactory = StubAggregatorFactory.createInstance(); + + CumulativeCardinalityPipelineAggregationBuilder failBuilder + = new CumulativeCardinalityPipelineAggregationBuilder("name", "invalid_agg>metric"); + IllegalStateException ex = expectThrows(IllegalStateException.class, + () -> failBuilder.validate(parentFactory, Collections.emptySet(), aggBuilders)); + assertEquals("cumulative_cardinality aggregation [name] must have a histogram, date_histogram or auto_date_histogram as parent", + ex.getMessage()); + } + + public void testNonCardinalityAgg() { + Query query = new MatchAllDocsQuery(); + + DateHistogramAggregationBuilder aggBuilder = new DateHistogramAggregationBuilder("histo"); + aggBuilder.calendarInterval(DateHistogramInterval.DAY).field(HISTO_FIELD); + aggBuilder.subAggregation(new SumAggregationBuilder("the_sum").field("foo")); + aggBuilder.subAggregation(new CumulativeCardinalityPipelineAggregationBuilder("cumulative_card", "the_sum")); + + AggregationExecutionException e = expectThrows(AggregationExecutionException.class, + () -> executeTestCase(query, aggBuilder, histogram -> fail("Test should not have executed"))); + assertThat(e.getMessage(), equalTo("buckets_path must reference a cardinality aggregation, " + + "got: [InternalSum] at aggregation [the_sum]")); + } + + private void executeTestCase(Query query, AggregationBuilder aggBuilder, Consumer verify) throws IOException { + executeTestCase(query, aggBuilder, verify, indexWriter -> { + Document document = new Document(); + int counter = 0; + for (String date : datasetTimes) { + if (frequently()) { + indexWriter.commit(); + } + + long instant = asLong(date); + document.add(new SortedNumericDocValuesField(HISTO_FIELD, instant)); + document.add(new NumericDocValuesField(VALUE_FIELD, datasetValues.get(counter))); + indexWriter.addDocument(document); + document.clear(); + counter += 1; + } + }); + } + + private void executeTestCase(Query query, AggregationBuilder aggBuilder, Consumer verify, + CheckedConsumer setup) throws IOException { + + + try (Directory directory = newDirectory()) { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + setup.accept(indexWriter); + } + + try (IndexReader indexReader = DirectoryReader.open(directory)) { + IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + + DateFieldMapper.Builder builder = new DateFieldMapper.Builder("_name"); + DateFieldMapper.DateFieldType fieldType = builder.fieldType(); + fieldType.setHasDocValues(true); + fieldType.setName(HISTO_FIELD); + + MappedFieldType valueFieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG); + valueFieldType.setHasDocValues(true); + valueFieldType.setName("value_field"); + + InternalAggregation histogram; + histogram = searchAndReduce(indexSearcher, query, aggBuilder, fieldType, valueFieldType); + verify.accept(histogram); + } + } + } 
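+ // helpers below: asLong converts the ISO-8601 test timestamps to epoch milliseconds; getRandomSequentiallyOrderedParentAgg builds a randomly chosen histogram-family parent factory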
+ + private static long asLong(String dateTime) { + return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + } + + + private static AggregatorFactory getRandomSequentiallyOrderedParentAgg() throws IOException { + AggregatorFactory factory; + ValuesSourceConfig valuesSource = new ValuesSourceConfig<>(ValuesSourceType.NUMERIC); + ValuesSourceConfig numericVS = new ValuesSourceConfig<>(ValuesSourceType.NUMERIC); + switch (randomIntBetween(0, 2)) { + case 0: + factory = new HistogramAggregatorFactory("name", valuesSource, 0.0d, 0.0d, + mock(InternalOrder.class), false, 0L, 0.0d, 1.0d, mock(SearchContext.class), null, + new AggregatorFactories.Builder(), Collections.emptyMap()); + break; + case 1: + factory = new DateHistogramAggregatorFactory("name", valuesSource, 0L, + mock(InternalOrder.class), false, 0L, mock(Rounding.class), mock(Rounding.class), + mock(ExtendedBounds.class), mock(SearchContext.class), mock(AggregatorFactory.class), + new AggregatorFactories.Builder(), Collections.emptyMap()); + break; + case 2: + default: + AutoDateHistogramAggregationBuilder.RoundingInfo[] roundings = new AutoDateHistogramAggregationBuilder.RoundingInfo[1]; + factory = new AutoDateHistogramAggregatorFactory("name", numericVS, + 1, roundings, + mock(SearchContext.class), null, new AggregatorFactories.Builder(), Collections.emptyMap()); + } + + return factory; + } +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index ced081ec0edf6..ae025ae2a8d26 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -44,7 +44,6 @@ import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.seqno.RetentionLeaseActions; -import org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException; import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.shard.ShardId; @@ -439,7 +438,6 @@ protected Scheduler.Cancellable scheduleBackgroundRetentionLeaseRenewal(final Lo * going on. Log it, and renew again after another renew interval has passed. 
*/ final Throwable innerCause = ExceptionsHelper.unwrapCause(inner); - assert innerCause instanceof RetentionLeaseAlreadyExistsException == false; logRetentionLeaseFailure(retentionLeaseId, innerCause); })); } else { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java index 28aedc91ff9f9..c6101c0879d7f 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java @@ -398,6 +398,7 @@ static String[] extractLeaderShardHistoryUUIDs(Map ccrIndexMetaD IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING, IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING, IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING, + IndexSettings.INDEX_FLUSH_AFTER_MERGE_THRESHOLD_SIZE_SETTING, IndexSettings.INDEX_GC_DELETES_SETTING, IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD, IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING, diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index 0231681666f5a..39ae997b6a43c 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -252,11 +252,10 @@ public RepositoryData getRepositoryData() { public void initializeSnapshot(SnapshotId snapshotId, List indices, MetaData metaData) { throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); } - @Override public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long startTime, String failure, int totalShards, List shardFailures, long repositoryStateId, boolean includeGlobalState, - Map userMetadata) { + MetaData metaData, Map userMetadata) { throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); } @@ -296,7 +295,7 @@ public boolean isReadOnly() { @Override public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, - IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus, ActionListener listener) { throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index a123d3620e4c1..394da4937c200 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -70,6 +70,9 @@ public class XPackLicenseState { "Creating and Starting rollup jobs will no longer be allowed.", "Stopping/Deleting existing jobs, RollupCaps API and RollupSearch continue to function." }); + messages.put(XPackField.ANALYTICS, new String[] { + "Aggregations provided by Data Science plugin are no longer usable." 
+ }); EXPIRATION_MESSAGES = Collections.unmodifiableMap(messages); } @@ -744,6 +747,15 @@ public boolean isSpatialAllowed() { return localStatus.active; } + /** + * Datascience is always available as long as there is a valid license + * + * @return true if the license is active + */ + public synchronized boolean isDataScienceAllowed() { + return status.active; + } + public synchronized boolean isTrialLicense() { return status.mode == OperationMode.TRIAL; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java index f6c3124c9be9f..280e4a4344575 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java @@ -15,6 +15,7 @@ import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.SimpleFSDirectory; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -24,6 +25,7 @@ import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.ShardLock; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.ReadOnlyEngine; @@ -35,12 +37,15 @@ import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.Repository; +import java.io.Closeable; import java.io.IOException; import java.io.UncheckedIOException; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.function.Function; import java.util.function.Supplier; @@ -78,41 +83,63 @@ public void initializeSnapshot(SnapshotId snapshotId, List indices, Met // a _source only snapshot with a plain repository it will be just fine since we already set the // required engine, that the index is read-only and the mapping to a default mapping try { - MetaData.Builder builder = MetaData.builder(metaData); - for (IndexId indexId : indices) { - IndexMetaData index = metaData.index(indexId.getName()); - IndexMetaData.Builder indexMetadataBuilder = IndexMetaData.builder(index); - // for a minimal restore we basically disable indexing on all fields and only create an index - // that is valid from an operational perspective. ie. it will have all metadata fields like version/ - // seqID etc. and an indexed ID field such that we can potentially perform updates on them or delete documents. - ImmutableOpenMap mappings = index.getMappings(); - Iterator> iterator = mappings.iterator(); - while (iterator.hasNext()) { - ObjectObjectCursor next = iterator.next(); - // we don't need to obey any routing here stuff is read-only anyway and get is disabled - final String mapping = "{ \"" + next.key + "\": { \"enabled\": false, \"_meta\": " + next.value.source().string() - + " } }"; - indexMetadataBuilder.putMapping(next.key, mapping); - } - indexMetadataBuilder.settings(Settings.builder().put(index.getSettings()) - .put(SOURCE_ONLY.getKey(), true) - .put("index.blocks.write", true)); // read-only! 
- indexMetadataBuilder.settingsVersion(1 + indexMetadataBuilder.settingsVersion()); - builder.put(indexMetadataBuilder); - } - super.initializeSnapshot(snapshotId, indices, builder.build()); + super.initializeSnapshot(snapshotId, indices, metadataToSnapshot(indices, metaData)); + } catch (IOException ex) { + throw new UncheckedIOException(ex); + } + } + + @Override + public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long startTime, String failure, int totalShards, + List shardFailures, long repositoryStateId, boolean includeGlobalState, MetaData metaData, + Map userMetadata) { + // we process the index metadata at snapshot time. This means if somebody tries to restore + // a _source only snapshot with a plain repository it will be just fine since we already set the + // required engine, that the index is read-only and the mapping to a default mapping + try { + return super.finalizeSnapshot(snapshotId, indices, startTime, failure, totalShards, shardFailures, repositoryStateId, + includeGlobalState, metadataToSnapshot(indices, metaData), userMetadata); } catch (IOException ex) { throw new UncheckedIOException(ex); } } + private static MetaData metadataToSnapshot(List indices, MetaData metaData) throws IOException { + MetaData.Builder builder = MetaData.builder(metaData); + for (IndexId indexId : indices) { + IndexMetaData index = metaData.index(indexId.getName()); + IndexMetaData.Builder indexMetadataBuilder = IndexMetaData.builder(index); + // for a minimal restore we basically disable indexing on all fields and only create an index + // that is valid from an operational perspective. ie. it will have all metadata fields like version/ + // seqID etc. and an indexed ID field such that we can potentially perform updates on them or delete documents. + ImmutableOpenMap mappings = index.getMappings(); + Iterator> iterator = mappings.iterator(); + while (iterator.hasNext()) { + ObjectObjectCursor next = iterator.next(); + // we don't need to obey any routing here stuff is read-only anyway and get is disabled + final String mapping = "{ \"" + next.key + "\": { \"enabled\": false, \"_meta\": " + next.value.source().string() + + " } }"; + indexMetadataBuilder.putMapping(next.key, mapping); + } + indexMetadataBuilder.settings(Settings.builder().put(index.getSettings()) + .put(SOURCE_ONLY.getKey(), true) + .put("index.blocks.write", true)); // read-only! + indexMetadataBuilder.settingsVersion(1 + indexMetadataBuilder.settingsVersion()); + builder.put(indexMetadataBuilder); + } + return builder.build(); + } + + @Override public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, - IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus, ActionListener listener) { if (mapperService.documentMapper() != null // if there is no mapping this is null && mapperService.documentMapper().sourceMapper().isComplete() == false) { - throw new IllegalStateException("Can't snapshot _source only on an index that has incomplete source ie. has _source disabled " + - "or filters the source"); + listener.onFailure( + new IllegalStateException("Can't snapshot _source only on an index that has incomplete source ie. 
has _source disabled " + + "or filters the source")); + return; } Directory unwrap = FilterDirectory.unwrap(store.directory()); if (unwrap instanceof FSDirectory == false) { @@ -121,7 +148,10 @@ public void snapshotShard(Store store, MapperService mapperService, SnapshotId s Path dataPath = ((FSDirectory) unwrap).getDirectory().getParent(); // TODO should we have a snapshot tmp directory per shard that is maintained by the system? Path snapPath = dataPath.resolve(SNAPSHOT_DIR_NAME); - try (FSDirectory directory = new SimpleFSDirectory(snapPath)) { + final List toClose = new ArrayList<>(3); + try { + FSDirectory directory = new SimpleFSDirectory(snapPath); + toClose.add(directory); Store tempStore = new Store(store.shardId(), store.indexSettings(), directory, new ShardLock(store.shardId()) { @Override protected void closeInternal() { @@ -137,16 +167,20 @@ protected void closeInternal() { final long maxDoc = segmentInfos.totalMaxDoc(); tempStore.bootstrapNewHistory(maxDoc, maxDoc); store.incRef(); - try (DirectoryReader reader = DirectoryReader.open(tempStore.directory(), - Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, BlockTreeTermsReader.FSTLoadMode.OFF_HEAP.name()))) { - IndexCommit indexCommit = reader.getIndexCommit(); - super.snapshotShard(tempStore, mapperService, snapshotId, indexId, indexCommit, snapshotStatus); - } finally { - store.decRef(); - } + toClose.add(store::decRef); + DirectoryReader reader = DirectoryReader.open(tempStore.directory(), + Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, BlockTreeTermsReader.FSTLoadMode.OFF_HEAP.name())); + toClose.add(reader); + IndexCommit indexCommit = reader.getIndexCommit(); + super.snapshotShard(tempStore, mapperService, snapshotId, indexId, indexCommit, snapshotStatus, + ActionListener.runBefore(listener, () -> IOUtils.close(toClose))); } catch (IOException e) { - // why on earth does this super method not declare IOException - throw new UncheckedIOException(e); + try { + IOUtils.close(toClose); + } catch (IOException ex) { + e.addSuppressed(ex); + } + listener.onFailure(e); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index 8e2f3414d58b1..fbb919490d0ae 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -49,6 +49,7 @@ import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; import org.elasticsearch.xpack.core.dataframe.transforms.SyncConfig; import org.elasticsearch.xpack.core.dataframe.transforms.TimeSyncConfig; +import org.elasticsearch.xpack.core.analytics.AnalyticsFeatureSetUsage; import org.elasticsearch.xpack.core.deprecation.DeprecationInfoAction; import org.elasticsearch.xpack.core.flattened.FlattenedFeatureSetUsage; import org.elasticsearch.xpack.core.frozen.FrozenIndicesFeatureSetUsage; @@ -509,7 +510,9 @@ public List getNamedWriteables() { // Frozen indices new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.FROZEN_INDICES, FrozenIndicesFeatureSetUsage::new), // Spatial - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SPATIAL, SpatialFeatureSetUsage::new) + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SPATIAL, SpatialFeatureSetUsage::new), + // data science + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, 
XPackField.ANALYTICS, AnalyticsFeatureSetUsage::new) ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java index 904db89bb542a..0fab0b3e59a5a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java @@ -47,6 +47,8 @@ public final class XPackField { public static final String FROZEN_INDICES = "frozen_indices"; /** Name constant for spatial features. */ public static final String SPATIAL = "spatial"; + /** Name constant for the data science plugin. */ + public static final String ANALYTICS = "analytics"; private XPackField() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java index 2019256bb27e3..70742b3b40b4d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java @@ -38,10 +38,11 @@ public class XPackInfoFeatureAction extends ActionType public static final XPackInfoFeatureAction VOTING_ONLY = new XPackInfoFeatureAction(XPackField.VOTING_ONLY); public static final XPackInfoFeatureAction FROZEN_INDICES = new XPackInfoFeatureAction(XPackField.FROZEN_INDICES); public static final XPackInfoFeatureAction SPATIAL = new XPackInfoFeatureAction(XPackField.SPATIAL); + public static final XPackInfoFeatureAction ANALYTICS = new XPackInfoFeatureAction(XPackField.ANALYTICS); public static final List ALL = Arrays.asList( SECURITY, MONITORING, WATCHER, GRAPH, MACHINE_LEARNING, LOGSTASH, SQL, ROLLUP, INDEX_LIFECYCLE, CCR, DATA_FRAME, FLATTENED, - VECTORS, VOTING_ONLY, FROZEN_INDICES, SPATIAL + VECTORS, VOTING_ONLY, FROZEN_INDICES, SPATIAL, ANALYTICS ); private XPackInfoFeatureAction(String name) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java index e5a9eca8f1fc5..303e782946129 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java @@ -38,10 +38,11 @@ public class XPackUsageFeatureAction extends ActionType ALL = Arrays.asList( SECURITY, MONITORING, WATCHER, GRAPH, MACHINE_LEARNING, LOGSTASH, SQL, ROLLUP, INDEX_LIFECYCLE, CCR, DATA_FRAME, FLATTENED, - VECTORS, VOTING_ONLY, FROZEN_INDICES, SPATIAL + VECTORS, VOTING_ONLY, FROZEN_INDICES, SPATIAL, ANALYTICS ); private XPackUsageFeatureAction(String name) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/AnalyticsFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/AnalyticsFeatureSetUsage.java new file mode 100644 index 0000000000000..f7805703ad58a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/AnalyticsFeatureSetUsage.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.analytics; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; + +import java.io.IOException; +import java.util.Objects; + +public class AnalyticsFeatureSetUsage extends XPackFeatureSet.Usage { + + public AnalyticsFeatureSetUsage(boolean available, boolean enabled) { + super(XPackField.ANALYTICS, available, enabled); + } + + public AnalyticsFeatureSetUsage(StreamInput input) throws IOException { + super(input); + } + + @Override + public int hashCode() { + return Objects.hash(available, enabled); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + AnalyticsFeatureSetUsage other = (AnalyticsFeatureSetUsage) obj; + return Objects.equals(available, other.available) && + Objects.equals(enabled, other.enabled); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/action/AnalyticsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/action/AnalyticsStatsAction.java new file mode 100644 index 0000000000000..38f84aefc4317 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/action/AnalyticsStatsAction.java @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.analytics.action; + +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.support.nodes.BaseNodeRequest; +import org.elasticsearch.action.support.nodes.BaseNodeResponse; +import org.elasticsearch.action.support.nodes.BaseNodesRequest; +import org.elasticsearch.action.support.nodes.BaseNodesResponse; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +public class AnalyticsStatsAction extends ActionType { + public static final AnalyticsStatsAction INSTANCE = new AnalyticsStatsAction(); + public static final String NAME = "cluster:monitor/xpack/analytics/stats"; + + private AnalyticsStatsAction() { + super(NAME, Response::new); + } + + public static class Request extends BaseNodesRequest implements ToXContentObject { + + public Request() { + super((String[]) null); + } + + public Request(StreamInput in) throws IOException { + super(in); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + // Nothing to hash atm, so just use the action name + return Objects.hashCode(NAME); + } + + @Override + public boolean equals(Object 
obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + return true; + } + } + + public static class NodeRequest extends BaseNodeRequest { + public NodeRequest(StreamInput in) throws IOException { + super(in); + } + + public NodeRequest(Request request) { + + } + } + + public static class Response extends BaseNodesResponse implements Writeable, ToXContentObject { + public Response(StreamInput in) throws IOException { + super(in); + } + + public Response(ClusterName clusterName, List nodes, List failures) { + super(clusterName, nodes, failures); + } + + @Override + protected List readNodesFrom(StreamInput in) throws IOException { + return in.readList(NodeResponse::new); + } + + @Override + protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { + out.writeList(nodes); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray("stats"); + for (NodeResponse node : getNodes()) { + node.toXContent(builder, params); + } + builder.endArray(); + + return builder; + } + } + + public static class NodeResponse extends BaseNodeResponse implements ToXContentObject { + static ParseField CUMULATIVE_CARDINALITY_USAGE = new ParseField("cumulative_cardinality_usage"); + private long cumulativeCardinalityUsage; + + public NodeResponse(StreamInput in) throws IOException { + super(in); + cumulativeCardinalityUsage = in.readZLong(); + } + + public NodeResponse(DiscoveryNode node) { + super(node); + } + + public void setCumulativeCardinalityUsage(long cumulativeCardinalityUsage) { + this.cumulativeCardinalityUsage = cumulativeCardinalityUsage; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeZLong(cumulativeCardinalityUsage); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(CUMULATIVE_CARDINALITY_USAGE.getPreferredName(), cumulativeCardinalityUsage); + builder.endObject(); + return builder; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessage.java index 85327337730f4..f43c745a7971c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessage.java @@ -6,36 +6,59 @@ package org.elasticsearch.xpack.core.common.notifications; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import java.io.IOException; import java.util.Date; import java.util.Objects; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + public abstract class AbstractAuditMessage implements ToXContentObject { - public static final ParseField TYPE = new 
ParseField("audit_message"); public static final ParseField MESSAGE = new ParseField("message"); public static final ParseField LEVEL = new ParseField("level"); public static final ParseField TIMESTAMP = new ParseField("timestamp"); public static final ParseField NODE_NAME = new ParseField("node_name"); + protected static final ConstructingObjectParser createParser( + String name, AbstractAuditMessageFactory messageFactory, ParseField resourceField) { + + ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + name, + true, + a -> messageFactory.newMessage((String)a[0], (String)a[1], (Level)a[2], (Date)a[3], (String)a[4])); + + PARSER.declareString(optionalConstructorArg(), resourceField); + PARSER.declareString(constructorArg(), MESSAGE); + PARSER.declareField(constructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return Level.fromString(p.text()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, LEVEL, ObjectParser.ValueType.STRING); + PARSER.declareField(constructorArg(), + p -> TimeUtils.parseTimeField(p, TIMESTAMP.getPreferredName()), + TIMESTAMP, + ObjectParser.ValueType.VALUE); + PARSER.declareString(optionalConstructorArg(), NODE_NAME); + + return PARSER; + } + private final String resourceId; private final String message; private final Level level; private final Date timestamp; private final String nodeName; - public AbstractAuditMessage(String resourceId, String message, Level level, String nodeName) { - this.resourceId = resourceId; - this.message = Objects.requireNonNull(message); - this.level = Objects.requireNonNull(level); - this.timestamp = new Date(); - this.nodeName = nodeName; - } - protected AbstractAuditMessage(String resourceId, String message, Level level, Date timestamp, String nodeName) { this.resourceId = resourceId; this.message = Objects.requireNonNull(message); @@ -82,7 +105,7 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par @Override public int hashCode() { - return Objects.hash(resourceId, message, level, timestamp); + return Objects.hash(resourceId, message, level, timestamp, nodeName); } @Override @@ -98,25 +121,9 @@ public boolean equals(Object obj) { return Objects.equals(resourceId, other.resourceId) && Objects.equals(message, other.message) && Objects.equals(level, other.level) && - Objects.equals(timestamp, other.timestamp); + Objects.equals(timestamp, other.timestamp) && + Objects.equals(nodeName, other.nodeName); } protected abstract String getResourceField(); - - public abstract static class AbstractBuilder { - - public T info(String resourceId, String message, String nodeName) { - return newMessage(Level.INFO, resourceId, message, nodeName); - } - - public T warning(String resourceId, String message, String nodeName) { - return newMessage(Level.WARNING, resourceId, message, nodeName); - } - - public T error(String resourceId, String message, String nodeName) { - return newMessage(Level.ERROR, resourceId, message, nodeName); - } - - protected abstract T newMessage(Level level, String resourceId, String message, String nodeName); - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessageFactory.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessageFactory.java new file mode 100644 index 0000000000000..e4b547d3f2b10 --- /dev/null +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessageFactory.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.common.notifications; + +import java.util.Date; + +/** + * {@link AbstractAuditMessageFactory} interface provides means for creating audit messages. + * @param type of the audit message + */ +public interface AbstractAuditMessageFactory { + + T newMessage(String resourceId, String message, Level level, Date timestamp, String nodeName); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditor.java index 102e07dc4c14b..dbd0ead64cba5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditor.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Date; import java.util.Objects; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -28,37 +29,37 @@ public abstract class AbstractAuditor { private final String nodeName; private final String auditIndex; private final String executionOrigin; - private final AbstractAuditMessage.AbstractBuilder messageBuilder; + private final AbstractAuditMessageFactory messageFactory; - public AbstractAuditor(Client client, - String nodeName, - String auditIndex, - String executionOrigin, - AbstractAuditMessage.AbstractBuilder messageBuilder) { + protected AbstractAuditor(Client client, + String nodeName, + String auditIndex, + String executionOrigin, + AbstractAuditMessageFactory messageFactory) { this.client = Objects.requireNonNull(client); this.nodeName = Objects.requireNonNull(nodeName); this.auditIndex = auditIndex; this.executionOrigin = executionOrigin; - this.messageBuilder = Objects.requireNonNull(messageBuilder); + this.messageFactory = Objects.requireNonNull(messageFactory); } public void info(String resourceId, String message) { - indexDoc(messageBuilder.info(resourceId, message, nodeName)); + indexDoc(messageFactory.newMessage(resourceId, message, Level.INFO, new Date(), nodeName)); } public void warning(String resourceId, String message) { - indexDoc(messageBuilder.warning(resourceId, message, nodeName)); + indexDoc(messageFactory.newMessage(resourceId, message, Level.WARNING, new Date(), nodeName)); } public void error(String resourceId, String message) { - indexDoc(messageBuilder.error(resourceId, message, nodeName)); + indexDoc(messageFactory.newMessage(resourceId, message, Level.ERROR, new Date(), nodeName)); } - protected void onIndexResponse(IndexResponse response) { + private void onIndexResponse(IndexResponse response) { logger.trace("Successfully wrote audit message"); } - protected void onIndexFailure(Exception exception) { + private void onIndexFailure(Exception exception) { logger.debug("Failed to write audit message", exception); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessage.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessage.java index e0ebd8e97d9a0..c7a8804a14e38 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessage.java @@ -7,47 +7,19 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ObjectParser; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.xpack.core.common.notifications.AbstractAuditMessage; import org.elasticsearch.xpack.core.common.notifications.Level; -import org.elasticsearch.xpack.core.common.time.TimeUtils; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import java.util.Date; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; - public class DataFrameAuditMessage extends AbstractAuditMessage { private static final ParseField TRANSFORM_ID = new ParseField(DataFrameField.TRANSFORM_ID); - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "data_frame_audit_message", - true, - a -> new DataFrameAuditMessage((String)a[0], (String)a[1], (Level)a[2], (Date)a[3], (String)a[4])); - - static { - PARSER.declareString(optionalConstructorArg(), TRANSFORM_ID); - PARSER.declareString(constructorArg(), MESSAGE); - PARSER.declareField(constructorArg(), p -> { - if (p.currentToken() == XContentParser.Token.VALUE_STRING) { - return Level.fromString(p.text()); - } - throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); - }, LEVEL, ObjectParser.ValueType.STRING); - PARSER.declareField(constructorArg(), - p -> TimeUtils.parseTimeField(p, TIMESTAMP.getPreferredName()), - TIMESTAMP, - ObjectParser.ValueType.VALUE); - PARSER.declareString(optionalConstructorArg(), NODE_NAME); - } + public static final ConstructingObjectParser PARSER = + createParser("data_frame_audit_message", DataFrameAuditMessage::new, TRANSFORM_ID); - public DataFrameAuditMessage(String resourceId, String message, Level level, String nodeName) { - super(resourceId, message, level, nodeName); - } - - protected DataFrameAuditMessage(String resourceId, String message, Level level, Date timestamp, String nodeName) { + public DataFrameAuditMessage(String resourceId, String message, Level level, Date timestamp, String nodeName) { super(resourceId, message, level, timestamp, nodeName); } @@ -55,13 +27,4 @@ protected DataFrameAuditMessage(String resourceId, String message, Level level, protected String getResourceField() { return TRANSFORM_ID.getPreferredName(); } - - public static AbstractAuditMessage.AbstractBuilder builder() { - return new AbstractBuilder() { - @Override - protected DataFrameAuditMessage newMessage(Level level, String resourceId, String message, String nodeName) { - return new DataFrameAuditMessage(resourceId, message, level, nodeName); - } - }; - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/DataFrameAnalysis.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/DataFrameAnalysis.java index 0ea15b6f803b3..bc0e623cdeb49 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/DataFrameAnalysis.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/DataFrameAnalysis.java @@ -8,8 +8,8 @@ import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.xcontent.ToXContentObject; +import java.util.List; import java.util.Map; -import java.util.Set; public interface DataFrameAnalysis extends ToXContentObject, NamedWriteable { @@ -24,9 +24,9 @@ public interface DataFrameAnalysis extends ToXContentObject, NamedWriteable { boolean supportsCategoricalFields(); /** - * @return The set of fields that analyzed documents must have for the analysis to operate + * @return The names and types of the fields that analyzed documents must have for the analysis to operate */ - Set getRequiredFields(); + List getRequiredFields(); /** * @return {@code true} if this analysis supports data frame rows with missing values diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetection.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetection.java index 32a4789057292..35e3d234a7c0b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetection.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetection.java @@ -18,10 +18,10 @@ import java.io.IOException; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; -import java.util.Set; public class OutlierDetection implements DataFrameAnalysis { @@ -160,8 +160,8 @@ public boolean supportsCategoricalFields() { } @Override - public Set getRequiredFields() { - return Collections.emptySet(); + public List getRequiredFields() { + return Collections.emptyList(); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java index 9c779cc5ee747..04a5801ffa2a6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java @@ -17,9 +17,9 @@ import java.io.IOException; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Set; public class Regression implements DataFrameAnalysis { @@ -32,13 +32,15 @@ public class Regression implements DataFrameAnalysis { public static final ParseField MAXIMUM_NUMBER_TREES = new ParseField("maximum_number_trees"); public static final ParseField FEATURE_BAG_FRACTION = new ParseField("feature_bag_fraction"); public static final ParseField PREDICTION_FIELD_NAME = new ParseField("prediction_field_name"); + public static final ParseField TRAINING_PERCENT = new ParseField("training_percent"); private static final ConstructingObjectParser LENIENT_PARSER = createParser(true); private static final ConstructingObjectParser STRICT_PARSER = createParser(false); private static ConstructingObjectParser createParser(boolean lenient) { ConstructingObjectParser parser = new ConstructingObjectParser<>(NAME.getPreferredName(), lenient, - a -> new Regression((String) a[0], (Double) a[1], 
(Double) a[2], (Double) a[3], (Integer) a[4], (Double) a[5], (String) a[6])); + a -> new Regression((String) a[0], (Double) a[1], (Double) a[2], (Double) a[3], (Integer) a[4], (Double) a[5], (String) a[6], + (Double) a[7])); parser.declareString(ConstructingObjectParser.constructorArg(), DEPENDENT_VARIABLE); parser.declareDouble(ConstructingObjectParser.optionalConstructorArg(), LAMBDA); parser.declareDouble(ConstructingObjectParser.optionalConstructorArg(), GAMMA); @@ -46,6 +48,7 @@ private static ConstructingObjectParser createParser(boolean l parser.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAXIMUM_NUMBER_TREES); parser.declareDouble(ConstructingObjectParser.optionalConstructorArg(), FEATURE_BAG_FRACTION); parser.declareString(ConstructingObjectParser.optionalConstructorArg(), PREDICTION_FIELD_NAME); + parser.declareDouble(ConstructingObjectParser.optionalConstructorArg(), TRAINING_PERCENT); return parser; } @@ -60,9 +63,11 @@ public static Regression fromXContent(XContentParser parser, boolean ignoreUnkno private final Integer maximumNumberTrees; private final Double featureBagFraction; private final String predictionFieldName; + private final double trainingPercent; public Regression(String dependentVariable, @Nullable Double lambda, @Nullable Double gamma, @Nullable Double eta, - @Nullable Integer maximumNumberTrees, @Nullable Double featureBagFraction, @Nullable String predictionFieldName) { + @Nullable Integer maximumNumberTrees, @Nullable Double featureBagFraction, @Nullable String predictionFieldName, + @Nullable Double trainingPercent) { this.dependentVariable = Objects.requireNonNull(dependentVariable); if (lambda != null && lambda < 0) { @@ -91,10 +96,15 @@ public Regression(String dependentVariable, @Nullable Double lambda, @Nullable D this.featureBagFraction = featureBagFraction; this.predictionFieldName = predictionFieldName; + + if (trainingPercent != null && (trainingPercent < 1.0 || trainingPercent > 100.0)) { + throw ExceptionsHelper.badRequestException("[{}] must be a double in [1, 100]", TRAINING_PERCENT.getPreferredName()); + } + this.trainingPercent = trainingPercent == null ? 
100.0 : trainingPercent; } public Regression(String dependentVariable) { - this(dependentVariable, null, null, null, null, null, null); + this(dependentVariable, null, null, null, null, null, null, null); } public Regression(StreamInput in) throws IOException { @@ -105,6 +115,15 @@ public Regression(StreamInput in) throws IOException { maximumNumberTrees = in.readOptionalVInt(); featureBagFraction = in.readOptionalDouble(); predictionFieldName = in.readOptionalString(); + trainingPercent = in.readDouble(); + } + + public String getDependentVariable() { + return dependentVariable; + } + + public double getTrainingPercent() { + return trainingPercent; } @Override @@ -121,6 +140,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalVInt(maximumNumberTrees); out.writeOptionalDouble(featureBagFraction); out.writeOptionalString(predictionFieldName); + out.writeDouble(trainingPercent); } @Override @@ -145,6 +165,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (predictionFieldName != null) { builder.field(PREDICTION_FIELD_NAME.getPreferredName(), predictionFieldName); } + builder.field(TRAINING_PERCENT.getPreferredName(), trainingPercent); builder.endObject(); return builder; } @@ -180,8 +201,8 @@ public boolean supportsCategoricalFields() { } @Override - public Set getRequiredFields() { - return Collections.singleton(dependentVariable); + public List getRequiredFields() { + return Collections.singletonList(new RequiredField(dependentVariable, Types.numerical())); } @Override @@ -191,7 +212,8 @@ public boolean supportsMissingValues() { @Override public int hashCode() { - return Objects.hash(dependentVariable, lambda, gamma, eta, maximumNumberTrees, featureBagFraction, predictionFieldName); + return Objects.hash(dependentVariable, lambda, gamma, eta, maximumNumberTrees, featureBagFraction, predictionFieldName, + trainingPercent); } @Override @@ -205,6 +227,7 @@ public boolean equals(Object o) { && Objects.equals(eta, that.eta) && Objects.equals(maximumNumberTrees, that.maximumNumberTrees) && Objects.equals(featureBagFraction, that.featureBagFraction) - && Objects.equals(predictionFieldName, that.predictionFieldName); + && Objects.equals(predictionFieldName, that.predictionFieldName) + && trainingPercent == that.trainingPercent; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/RequiredField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/RequiredField.java new file mode 100644 index 0000000000000..bca96b1a1b13a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/RequiredField.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.dataframe.analyses; + +import java.util.Collections; +import java.util.Objects; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; + +public class RequiredField { + + private final String name; + + /** + * The required field must have one of those types. + * We use a sorted set to ensure types are reported alphabetically in error messages. 
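The Regression change above introduces an optional training_percent that must lie in [1, 100] and defaults to 100 when unset. A minimal standalone restatement of that validate-then-default step; the class and method names here are illustrative, not part of the x-pack code.

final class TrainingPercent {
    private TrainingPercent() {}

    /**
     * Mirrors the constructor check: null means "not set" and falls back to 100.0,
     * anything outside [1, 100] is rejected.
     */
    static double resolve(Double trainingPercent) {
        if (trainingPercent != null && (trainingPercent < 1.0 || trainingPercent > 100.0)) {
            throw new IllegalArgumentException("[training_percent] must be a double in [1, 100]");
        }
        return trainingPercent == null ? 100.0 : trainingPercent;
    }

    public static void main(String[] args) {
        System.out.println(resolve(null));   // 100.0
        System.out.println(resolve(75.0));   // 75.0
        try {
            resolve(0.5);                    // rejected
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}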
+ */ + private final SortedSet types; + + public RequiredField(String name, Set types) { + this.name = Objects.requireNonNull(name); + this.types = Collections.unmodifiableSortedSet(new TreeSet<>(types)); + } + + public String getName() { + return name; + } + + public SortedSet getTypes() { + return types; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Types.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Types.java new file mode 100644 index 0000000000000..ba7cac81d7fda --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Types.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.dataframe.analyses; + +import org.elasticsearch.index.mapper.NumberFieldMapper; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * Helper class that defines groups of types + */ +public final class Types { + + private Types() {} + + private static final Set CATEGORICAL_TYPES = Collections.unmodifiableSet(new HashSet<>(Arrays.asList("text", "keyword", "ip"))); + + private static final Set NUMERICAL_TYPES; + + static { + Set numericalTypes = Stream.of(NumberFieldMapper.NumberType.values()) + .map(NumberFieldMapper.NumberType::typeName) + .collect(Collectors.toSet()); + numericalTypes.add("scaled_float"); + NUMERICAL_TYPES = Collections.unmodifiableSet(numericalTypes); + } + + public static Set categorical() { + return CATEGORICAL_TYPES; + } + + public static Set numerical() { + return NUMERICAL_TYPES; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index f5e66fed8a882..9014724bf5ac5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -49,6 +49,9 @@ public final class Messages { public static final String DATAFEED_FREQUENCY_MUST_BE_MULTIPLE_OF_AGGREGATIONS_INTERVAL = "Datafeed frequency [{0}] must be a multiple of the aggregation interval [{1}]"; public static final String DATAFEED_ID_ALREADY_TAKEN = "A datafeed with id [{0}] already exists"; + public static final String DATAFEED_NEEDS_REMOTE_CLUSTER_SEARCH = "Datafeed [{0}] is configured with a remote index pattern(s) {1}" + + " but the current node [{2}] is not allowed to connect to remote clusters." 
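Types above centralises which mapping types count as categorical or numerical, and RequiredField pairs a field name with the set of types an analysis will accept (Regression now requires a numerical dependent variable). The sketch below is a simplified, self-contained version of that pairing; the numerical type list is hard-coded here rather than derived from NumberFieldMapper.

import java.util.Collections;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;

final class FieldRequirement {
    private final String name;
    // Sorted so error messages list the accepted types alphabetically, as in RequiredField.
    private final SortedSet<String> types;

    FieldRequirement(String name, Set<String> types) {
        this.name = name;
        this.types = Collections.unmodifiableSortedSet(new TreeSet<>(types));
    }

    boolean accepts(String mappingType) {
        return types.contains(mappingType);
    }

    @Override
    public String toString() {
        return name + " must be one of " + types;
    }

    public static void main(String[] args) {
        // Hard-coded stand-in for Types.numerical(); the real set is built from NumberFieldMapper plus scaled_float.
        Set<String> numerical = Set.of("long", "integer", "double", "float", "scaled_float");
        FieldRequirement dependentVariable = new FieldRequirement("price", numerical);
        System.out.println(dependentVariable);
        System.out.println(dependentVariable.accepts("keyword")); // false
    }
}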
+ + " Please enable cluster.remote.connect for all machine learning nodes."; public static final String DATA_FRAME_ANALYTICS_BAD_QUERY_FORMAT = "Data Frame Analytics config query is not parsable"; public static final String DATA_FRAME_ANALYTICS_BAD_FIELD_FILTER = "No field [{0}] could be detected"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java index 13dd077f605f7..267d85e7205d3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java @@ -467,6 +467,9 @@ public static void addDataFrameAnalyticsFields(XContentBuilder builder) throws I .startObject(Regression.PREDICTION_FIELD_NAME.getPreferredName()) .field(TYPE, KEYWORD) .endObject() + .startObject(Regression.TRAINING_PERCENT.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() .endObject() .endObject() .endObject() diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java index 91aa424a2483e..cba7a7f634d2e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java @@ -309,6 +309,7 @@ public final class ReservedFieldNames { Regression.MAXIMUM_NUMBER_TREES.getPreferredName(), Regression.FEATURE_BAG_FRACTION.getPreferredName(), Regression.PREDICTION_FIELD_NAME.getPreferredName(), + Regression.TRAINING_PERCENT.getPreferredName(), ElasticsearchMappings.CONFIG_TYPE, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AnomalyDetectionAuditMessage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AnomalyDetectionAuditMessage.java index 3c00a1f032b47..36c3828f3239b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AnomalyDetectionAuditMessage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AnomalyDetectionAuditMessage.java @@ -5,61 +5,26 @@ */ package org.elasticsearch.xpack.core.ml.notifications; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ObjectParser; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.xpack.core.common.notifications.AbstractAuditMessage; import org.elasticsearch.xpack.core.common.notifications.Level; import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.elasticsearch.xpack.core.common.time.TimeUtils; import java.util.Date; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; - public class AnomalyDetectionAuditMessage extends AbstractAuditMessage { - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "ml_audit_message", - true, - a -> new AnomalyDetectionAuditMessage((String)a[0], (String)a[1], (Level)a[2], (Date)a[3], (String)a[4])); - - static { - 
PARSER.declareString(optionalConstructorArg(), Job.ID); - PARSER.declareString(constructorArg(), MESSAGE); - PARSER.declareField(constructorArg(), p -> { - if (p.currentToken() == XContentParser.Token.VALUE_STRING) { - return Level.fromString(p.text()); - } - throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); - }, LEVEL, ObjectParser.ValueType.STRING); - PARSER.declareField(constructorArg(), - p -> TimeUtils.parseTimeField(p, TIMESTAMP.getPreferredName()), - TIMESTAMP, - ObjectParser.ValueType.VALUE); - PARSER.declareString(optionalConstructorArg(), NODE_NAME); - } + private static final ParseField JOB_ID = Job.ID; + public static final ConstructingObjectParser PARSER = + createParser("ml_audit_message", AnomalyDetectionAuditMessage::new, JOB_ID); - public AnomalyDetectionAuditMessage(String resourceId, String message, Level level, String nodeName) { - super(resourceId, message, level, nodeName); - } - - protected AnomalyDetectionAuditMessage(String resourceId, String message, Level level, Date timestamp, String nodeName) { + public AnomalyDetectionAuditMessage(String resourceId, String message, Level level, Date timestamp, String nodeName) { super(resourceId, message, level, timestamp, nodeName); } @Override protected String getResourceField() { - return Job.ID.getPreferredName(); - } - - public static AbstractBuilder builder() { - return new AbstractBuilder() { - @Override - protected AnomalyDetectionAuditMessage newMessage(Level level, String resourceId, String message, String nodeName) { - return new AnomalyDetectionAuditMessage(resourceId, message, level, nodeName); - } - }; + return JOB_ID.getPreferredName(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/DelegatePkiAuthenticationAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/DelegatePkiAuthenticationAction.java new file mode 100644 index 0000000000000..e8b6c26ff6749 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/DelegatePkiAuthenticationAction.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.security.action; + +import org.elasticsearch.action.ActionType; + +/** + * ActionType for delegating PKI authentication + */ +public class DelegatePkiAuthenticationAction extends ActionType { + + public static final String NAME = "cluster:admin/xpack/security/delegate_pki"; + public static final DelegatePkiAuthenticationAction INSTANCE = new DelegatePkiAuthenticationAction(); + + private DelegatePkiAuthenticationAction() { + super(NAME, DelegatePkiAuthenticationResponse::new); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/DelegatePkiAuthenticationRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/DelegatePkiAuthenticationRequest.java new file mode 100644 index 0000000000000..3e662f735fc20 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/DelegatePkiAuthenticationRequest.java @@ -0,0 +1,138 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
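Both DataFrameAuditMessage and AnomalyDetectionAuditMessage now obtain their parsers from a shared createParser helper on the base class, passing only a parser name, a five-argument constructor reference, and the resource-id field. The helper's body is outside this diff; the sketch below, built on plain JDK types, only illustrates the general shape of such a generic parser factory and is not the actual implementation.

import java.util.Date;
import java.util.Map;
import java.util.function.Function;

final class AuditParserFactory {
    private AuditParserFactory() {}

    /** Five-argument constructor shape shared by all audit messages. */
    interface MessageConstructor<T> {
        T build(String resourceId, String message, String level, Date timestamp, String nodeName);
    }

    /**
     * Builds a parser once per concrete message type; only the parser name, the constructor
     * reference and the resource-id key differ between subclasses. The name parameter is kept
     * only to mirror the shape of the real helper and is unused in this sketch.
     */
    static <T> Function<Map<String, Object>, T> createParser(String name,
                                                             MessageConstructor<T> constructor,
                                                             String resourceIdField) {
        return source -> constructor.build(
            (String) source.get(resourceIdField),
            (String) source.get("message"),
            (String) source.get("level"),
            (Date) source.get("timestamp"),
            (String) source.get("node_name"));
    }

    public static void main(String[] args) {
        Function<Map<String, Object>, String> parser =
            createParser("demo", (id, msg, level, ts, node) -> id + ": " + msg, "job_id");
        System.out.println(parser.apply(Map.of("job_id", "j1", "message", "started",
            "level", "info", "timestamp", new Date(), "node_name", "n1")));
    }
}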
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.security.action; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.xpack.core.ssl.CertParsingUtils; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.security.cert.CertificateEncodingException; +import java.security.cert.CertificateException; +import java.security.cert.CertificateFactory; +import java.security.cert.X509Certificate; +import java.util.Base64; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * The request object for {@code TransportDelegatePkiAuthenticationAction} containing the certificate chain for the target subject + * distinguished name to be granted an access token. + */ +public final class DelegatePkiAuthenticationRequest extends ActionRequest implements ToXContentObject { + + private static final ParseField X509_CERTIFICATE_CHAIN_FIELD = new ParseField("x509_certificate_chain"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "delegate_pki_request", false, a -> { + @SuppressWarnings("unchecked") + List certificates = (List) a[0]; + return new DelegatePkiAuthenticationRequest(certificates); + }); + + static { + PARSER.declareFieldArray(optionalConstructorArg(), (parser,c) -> { + try (ByteArrayInputStream bis = new ByteArrayInputStream(Base64.getDecoder().decode(parser.text()))) { + return (X509Certificate) CertificateFactory.getInstance("X.509").generateCertificate(bis); + } catch (CertificateException | IOException e) { + throw new RuntimeException(e); + } + }, X509_CERTIFICATE_CHAIN_FIELD, ValueType.STRING_ARRAY); + } + + public static DelegatePkiAuthenticationRequest fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + + private List certificateChain; + + public DelegatePkiAuthenticationRequest(List certificateChain) { + this.certificateChain = List.copyOf(certificateChain); + } + + public DelegatePkiAuthenticationRequest(StreamInput input) throws IOException { + super(input); + try { + final CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509"); + certificateChain = List.copyOf(input.readList(in -> { + try (ByteArrayInputStream bis = new ByteArrayInputStream(in.readByteArray())) { + return (X509Certificate) certificateFactory.generateCertificate(bis); + } catch (CertificateException e) { + throw new IOException(e); + } + })); + } catch (CertificateException e) { + throw new IOException(e); + } + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (certificateChain.isEmpty()) { + validationException = addValidationError("certificates chain must 
not be empty", validationException); + } else if (false == CertParsingUtils.isOrderedCertificateChain(certificateChain)) { + validationException = addValidationError("certificates chain must be an ordered chain", validationException); + } + return validationException; + } + + public List getCertificateChain() { + return certificateChain; + } + + @Override + public void writeTo(StreamOutput output) throws IOException { + super.writeTo(output); + output.writeCollection(certificateChain, (out, cert) -> { + try { + out.writeByteArray(cert.getEncoded()); + } catch (CertificateEncodingException e) { + throw new IOException(e); + } + }); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DelegatePkiAuthenticationRequest that = (DelegatePkiAuthenticationRequest) o; + return Objects.equals(certificateChain, that.certificateChain); + } + + @Override + public int hashCode() { + return Objects.hashCode(certificateChain); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject().startArray(X509_CERTIFICATE_CHAIN_FIELD.getPreferredName()); + try { + for (X509Certificate cert : certificateChain) { + builder.value(Base64.getEncoder().encodeToString(cert.getEncoded())); + } + } catch (CertificateEncodingException e) { + throw new IOException(e); + } + return builder.endArray().endObject(); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/DelegatePkiAuthenticationResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/DelegatePkiAuthenticationResponse.java new file mode 100644 index 0000000000000..4335d8f1cc667 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/DelegatePkiAuthenticationResponse.java @@ -0,0 +1,100 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.security.action; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * The response object for {@code TransportDelegatePkiAuthenticationAction} containing the issued access token. 
+ */ +public final class DelegatePkiAuthenticationResponse extends ActionResponse implements ToXContentObject { + + private static final ParseField ACCESS_TOKEN_FIELD = new ParseField("access_token"); + private static final ParseField TYPE_FIELD = new ParseField("type"); + private static final ParseField EXPIRES_IN_FIELD = new ParseField("expires_in"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "delegate_pki_response", true, a -> { + final String accessToken = (String) a[0]; + final String type = (String) a[1]; + if (false == "Bearer".equals(type)) { + throw new IllegalArgumentException("Unknown token type [" + type + "], only [Bearer] type permitted"); + } + final Long expiresIn = (Long) a[2]; + return new DelegatePkiAuthenticationResponse(accessToken, TimeValue.timeValueSeconds(expiresIn)); + }); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), ACCESS_TOKEN_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), TYPE_FIELD); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), EXPIRES_IN_FIELD); + } + + private String accessToken; + private TimeValue expiresIn; + + DelegatePkiAuthenticationResponse() { } + + public DelegatePkiAuthenticationResponse(String accessToken, TimeValue expiresIn) { + this.accessToken = Objects.requireNonNull(accessToken); + // always store expiration in seconds because this is how we "serialize" to JSON and we need to parse back + this.expiresIn = TimeValue.timeValueSeconds(Objects.requireNonNull(expiresIn).getSeconds()); + } + + public DelegatePkiAuthenticationResponse(StreamInput input) throws IOException { + super(input); + accessToken = input.readString(); + expiresIn = input.readTimeValue(); + } + + public String getAccessToken() { + return accessToken; + } + + public TimeValue getExpiresIn() { + return expiresIn; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(accessToken); + out.writeTimeValue(expiresIn); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DelegatePkiAuthenticationResponse that = (DelegatePkiAuthenticationResponse) o; + return Objects.equals(accessToken, that.accessToken) && + Objects.equals(expiresIn, that.expiresIn); + } + + @Override + public int hashCode() { + return Objects.hash(accessToken, expiresIn); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject() + .field(ACCESS_TOKEN_FIELD.getPreferredName(), accessToken) + .field(TYPE_FIELD.getPreferredName(), "Bearer") + .field(EXPIRES_IN_FIELD.getPreferredName(), expiresIn.getSeconds()); + return builder.endObject(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequest.java index 125602f68c5e2..f08cebe8141f4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequest.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.security.action; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.Nullable; @@ -14,6 +15,7 @@ 
import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; +import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -26,9 +28,10 @@ public final class GetApiKeyRequest extends ActionRequest { private final String userName; private final String apiKeyId; private final String apiKeyName; + private final boolean ownedByAuthenticatedUser; public GetApiKeyRequest() { - this(null, null, null, null); + this(null, null, null, null, false); } public GetApiKeyRequest(StreamInput in) throws IOException { @@ -37,14 +40,20 @@ public GetApiKeyRequest(StreamInput in) throws IOException { userName = in.readOptionalString(); apiKeyId = in.readOptionalString(); apiKeyName = in.readOptionalString(); + if (in.getVersion().onOrAfter(Version.V_7_4_0)) { + ownedByAuthenticatedUser = in.readOptionalBoolean(); + } else { + ownedByAuthenticatedUser = false; + } } public GetApiKeyRequest(@Nullable String realmName, @Nullable String userName, @Nullable String apiKeyId, - @Nullable String apiKeyName) { + @Nullable String apiKeyName, boolean ownedByAuthenticatedUser) { this.realmName = realmName; this.userName = userName; this.apiKeyId = apiKeyId; this.apiKeyName = apiKeyName; + this.ownedByAuthenticatedUser = ownedByAuthenticatedUser; } public String getRealmName() { @@ -63,13 +72,17 @@ public String getApiKeyName() { return apiKeyName; } + public boolean ownedByAuthenticatedUser() { + return ownedByAuthenticatedUser; + } + /** * Creates get API key request for given realm name * @param realmName realm name * @return {@link GetApiKeyRequest} */ public static GetApiKeyRequest usingRealmName(String realmName) { - return new GetApiKeyRequest(realmName, null, null, null); + return new GetApiKeyRequest(realmName, null, null, null, false); } /** @@ -78,7 +91,7 @@ public static GetApiKeyRequest usingRealmName(String realmName) { * @return {@link GetApiKeyRequest} */ public static GetApiKeyRequest usingUserName(String userName) { - return new GetApiKeyRequest(null, userName, null, null); + return new GetApiKeyRequest(null, userName, null, null, false); } /** @@ -88,34 +101,45 @@ public static GetApiKeyRequest usingUserName(String userName) { * @return {@link GetApiKeyRequest} */ public static GetApiKeyRequest usingRealmAndUserName(String realmName, String userName) { - return new GetApiKeyRequest(realmName, userName, null, null); + return new GetApiKeyRequest(realmName, userName, null, null, false); } /** * Creates get API key request for given api key id * @param apiKeyId api key id + * @param ownedByAuthenticatedUser set {@code true} if the request is only for the API keys owned by current authenticated user else + * {@code false} * @return {@link GetApiKeyRequest} */ - public static GetApiKeyRequest usingApiKeyId(String apiKeyId) { - return new GetApiKeyRequest(null, null, apiKeyId, null); + public static GetApiKeyRequest usingApiKeyId(String apiKeyId, boolean ownedByAuthenticatedUser) { + return new GetApiKeyRequest(null, null, apiKeyId, null, ownedByAuthenticatedUser); } /** * Creates get api key request for given api key name * @param apiKeyName api key name + * @param ownedByAuthenticatedUser set {@code true} if the request is only for the API keys owned by current authenticated user else + * {@code false} * @return {@link GetApiKeyRequest} */ - public static GetApiKeyRequest usingApiKeyName(String apiKeyName) { - return new GetApiKeyRequest(null, null, null, apiKeyName); + public static GetApiKeyRequest usingApiKeyName(String apiKeyName, boolean 
ownedByAuthenticatedUser) { + return new GetApiKeyRequest(null, null, null, apiKeyName, ownedByAuthenticatedUser); + } + + /** + * Creates get api key request to retrieve api key information for the api keys owned by the current authenticated user. + */ + public static GetApiKeyRequest forOwnedApiKeys() { + return new GetApiKeyRequest(null, null, null, null, true); } @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; if (Strings.hasText(realmName) == false && Strings.hasText(userName) == false && Strings.hasText(apiKeyId) == false - && Strings.hasText(apiKeyName) == false) { - validationException = addValidationError("One of [api key id, api key name, username, realm name] must be specified", - validationException); + && Strings.hasText(apiKeyName) == false && ownedByAuthenticatedUser == false) { + validationException = addValidationError("One of [api key id, api key name, username, realm name] must be specified if " + + "[owner] flag is false", validationException); } if (Strings.hasText(apiKeyId) || Strings.hasText(apiKeyName)) { if (Strings.hasText(realmName) || Strings.hasText(userName)) { @@ -124,6 +148,13 @@ public ActionRequestValidationException validate() { validationException); } } + if (ownedByAuthenticatedUser) { + if (Strings.hasText(realmName) || Strings.hasText(userName)) { + validationException = addValidationError( + "neither username nor realm-name may be specified when retrieving owned API keys", + validationException); + } + } if (Strings.hasText(apiKeyId) && Strings.hasText(apiKeyName)) { validationException = addValidationError("only one of [api key id, api key name] can be specified", validationException); } @@ -137,6 +168,29 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(userName); out.writeOptionalString(apiKeyId); out.writeOptionalString(apiKeyName); + if (out.getVersion().onOrAfter(Version.V_7_4_0)) { + out.writeOptionalBoolean(ownedByAuthenticatedUser); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + GetApiKeyRequest that = (GetApiKeyRequest) o; + return ownedByAuthenticatedUser == that.ownedByAuthenticatedUser && + Objects.equals(realmName, that.realmName) && + Objects.equals(userName, that.userName) && + Objects.equals(apiKeyId, that.apiKeyId) && + Objects.equals(apiKeyName, that.apiKeyName); } + @Override + public int hashCode() { + return Objects.hash(realmName, userName, apiKeyId, apiKeyName, ownedByAuthenticatedUser); } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequest.java index 15a2c87becd20..6d26133479adf 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequest.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.security.action; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.Nullable; @@ -14,6 +15,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; +import java.util.Objects; import static 
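The GetApiKeyRequest change above, and the parallel InvalidateApiKeyRequest change that follows, add an ownedByAuthenticatedUser flag: an otherwise empty request is valid only when the flag is set, and the flag cannot be combined with realm or user filters. A compact standalone restatement of those validation rules, using plain strings instead of the request classes:

import java.util.ArrayList;
import java.util.List;

final class ApiKeyRequestRules {
    private ApiKeyRequestRules() {}

    static List<String> validate(String realmName, String userName, String apiKeyId,
                                 String apiKeyName, boolean ownedByAuthenticatedUser) {
        List<String> errors = new ArrayList<>();
        boolean anyFilter = hasText(realmName) || hasText(userName) || hasText(apiKeyId) || hasText(apiKeyName);
        if (anyFilter == false && ownedByAuthenticatedUser == false) {
            errors.add("One of [api key id, api key name, username, realm name] must be specified if [owner] flag is false");
        }
        if ((hasText(apiKeyId) || hasText(apiKeyName)) && (hasText(realmName) || hasText(userName))) {
            errors.add("username or realm name must not be specified when the api key id or api key name is specified");
        }
        if (ownedByAuthenticatedUser && (hasText(realmName) || hasText(userName))) {
            errors.add("neither username nor realm-name may be specified when retrieving owned API keys");
        }
        if (hasText(apiKeyId) && hasText(apiKeyName)) {
            errors.add("only one of [api key id, api key name] can be specified");
        }
        return errors;
    }

    private static boolean hasText(String s) {
        return s != null && s.isBlank() == false;
    }

    public static void main(String[] args) {
        System.out.println(validate(null, null, null, null, true));    // empty: owner flag alone is valid
        System.out.println(validate("pki1", null, null, null, true));  // realm filter not allowed with owner flag
    }
}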
org.elasticsearch.action.ValidateActions.addValidationError; @@ -26,9 +28,10 @@ public final class InvalidateApiKeyRequest extends ActionRequest { private final String userName; private final String id; private final String name; + private final boolean ownedByAuthenticatedUser; public InvalidateApiKeyRequest() { - this(null, null, null, null); + this(null, null, null, null, false); } public InvalidateApiKeyRequest(StreamInput in) throws IOException { @@ -37,14 +40,20 @@ public InvalidateApiKeyRequest(StreamInput in) throws IOException { userName = in.readOptionalString(); id = in.readOptionalString(); name = in.readOptionalString(); + if (in.getVersion().onOrAfter(Version.V_7_4_0)) { + ownedByAuthenticatedUser = in.readOptionalBoolean(); + } else { + ownedByAuthenticatedUser = false; + } } public InvalidateApiKeyRequest(@Nullable String realmName, @Nullable String userName, @Nullable String id, - @Nullable String name) { + @Nullable String name, boolean ownedByAuthenticatedUser) { this.realmName = realmName; this.userName = userName; this.id = id; this.name = name; + this.ownedByAuthenticatedUser = ownedByAuthenticatedUser; } public String getRealmName() { @@ -63,65 +72,92 @@ public String getName() { return name; } + public boolean ownedByAuthenticatedUser() { + return ownedByAuthenticatedUser; + } + /** * Creates invalidate api key request for given realm name + * * @param realmName realm name * @return {@link InvalidateApiKeyRequest} */ public static InvalidateApiKeyRequest usingRealmName(String realmName) { - return new InvalidateApiKeyRequest(realmName, null, null, null); + return new InvalidateApiKeyRequest(realmName, null, null, null, false); } /** * Creates invalidate API key request for given user name + * * @param userName user name * @return {@link InvalidateApiKeyRequest} */ public static InvalidateApiKeyRequest usingUserName(String userName) { - return new InvalidateApiKeyRequest(null, userName, null, null); + return new InvalidateApiKeyRequest(null, userName, null, null, false); } /** * Creates invalidate API key request for given realm and user name + * * @param realmName realm name - * @param userName user name + * @param userName user name * @return {@link InvalidateApiKeyRequest} */ public static InvalidateApiKeyRequest usingRealmAndUserName(String realmName, String userName) { - return new InvalidateApiKeyRequest(realmName, userName, null, null); + return new InvalidateApiKeyRequest(realmName, userName, null, null, false); } /** * Creates invalidate API key request for given api key id + * * @param id api key id + * @param ownedByAuthenticatedUser set {@code true} if the request is only for the API keys owned by current authenticated user else + * {@code false} * @return {@link InvalidateApiKeyRequest} */ - public static InvalidateApiKeyRequest usingApiKeyId(String id) { - return new InvalidateApiKeyRequest(null, null, id, null); + public static InvalidateApiKeyRequest usingApiKeyId(String id, boolean ownedByAuthenticatedUser) { + return new InvalidateApiKeyRequest(null, null, id, null, ownedByAuthenticatedUser); } /** * Creates invalidate api key request for given api key name + * * @param name api key name + * @param ownedByAuthenticatedUser set {@code true} if the request is only for the API keys owned by current authenticated user else + * {@code false} * @return {@link InvalidateApiKeyRequest} */ - public static InvalidateApiKeyRequest usingApiKeyName(String name) { - return new InvalidateApiKeyRequest(null, null, null, name); + public static 
InvalidateApiKeyRequest usingApiKeyName(String name, boolean ownedByAuthenticatedUser) { + return new InvalidateApiKeyRequest(null, null, null, name, ownedByAuthenticatedUser); + } + + /** + * Creates invalidate api key request to invalidate api keys owned by the current authenticated user. + */ + public static InvalidateApiKeyRequest forOwnedApiKeys() { + return new InvalidateApiKeyRequest(null, null, null, null, true); } @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; if (Strings.hasText(realmName) == false && Strings.hasText(userName) == false && Strings.hasText(id) == false - && Strings.hasText(name) == false) { - validationException = addValidationError("One of [api key id, api key name, username, realm name] must be specified", - validationException); + && Strings.hasText(name) == false && ownedByAuthenticatedUser == false) { + validationException = addValidationError("One of [api key id, api key name, username, realm name] must be specified if " + + "[owner] flag is false", validationException); } if (Strings.hasText(id) || Strings.hasText(name)) { if (Strings.hasText(realmName) || Strings.hasText(userName)) { validationException = addValidationError( - "username or realm name must not be specified when the api key id or api key name is specified", - validationException); + "username or realm name must not be specified when the api key id or api key name is specified", + validationException); + } + } + if (ownedByAuthenticatedUser) { + if (Strings.hasText(realmName) || Strings.hasText(userName)) { + validationException = addValidationError( + "neither username nor realm-name may be specified when invalidating owned API keys", + validationException); } } if (Strings.hasText(id) && Strings.hasText(name)) { @@ -137,5 +173,29 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(userName); out.writeOptionalString(id); out.writeOptionalString(name); + if (out.getVersion().onOrAfter(Version.V_7_4_0)) { + out.writeOptionalBoolean(ownedByAuthenticatedUser); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + InvalidateApiKeyRequest that = (InvalidateApiKeyRequest) o; + return ownedByAuthenticatedUser == that.ownedByAuthenticatedUser && + Objects.equals(realmName, that.realmName) && + Objects.equals(userName, that.userName) && + Objects.equals(id, that.id) && + Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hash(realmName, userName, id, name, ownedByAuthenticatedUser); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/pki/PkiRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/pki/PkiRealmSettings.java index cd153c9009ed6..e9d203a3897ec 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/pki/PkiRealmSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/pki/PkiRealmSettings.java @@ -37,6 +37,10 @@ public final class PkiRealmSettings { RealmSettings.realmSettingPrefix(TYPE), "cache.max_users", key -> Setting.intSetting(key, DEFAULT_MAX_USERS, Setting.Property.NodeScope)); + public static final Setting.AffixSetting DELEGATION_ENABLED_SETTING = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(TYPE), "delegation.enabled", + key -> 
Setting.boolSetting(key, false, Setting.Property.NodeScope)); + public static final Setting.AffixSetting> TRUST_STORE_PATH; public static final Setting.AffixSetting> TRUST_STORE_TYPE; public static final Setting.AffixSetting TRUST_STORE_PASSWORD; @@ -72,6 +76,7 @@ public static Set> getSettings() { settings.add(USERNAME_PATTERN_SETTING); settings.add(CACHE_TTL_SETTING); settings.add(CACHE_MAX_USERS_SETTING); + settings.add(DELEGATION_ENABLED_SETTING); settings.add(TRUST_STORE_PATH); settings.add(TRUST_STORE_PASSWORD); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java index 6d6a01684760c..964cc1275b029 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java @@ -8,8 +8,8 @@ import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.Operations; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; -import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; import org.elasticsearch.xpack.core.security.support.Automatons; import java.util.ArrayList; @@ -34,14 +34,16 @@ private ClusterPermission(final Set clusterPrivileges, } /** - * Checks permission to a cluster action for a given request. + * Checks permission to a cluster action for a given request in the context of given + * authentication. 
* * @param action cluster action * @param request {@link TransportRequest} + * @param authentication {@link Authentication} * @return {@code true} if the access is allowed else returns {@code false} */ - public boolean check(final String action, final TransportRequest request) { - return checks.stream().anyMatch(permission -> permission.check(action, request)); + public boolean check(final String action, final TransportRequest request, final Authentication authentication) { + return checks.stream().anyMatch(permission -> permission.check(action, request, authentication)); } /** @@ -80,21 +82,15 @@ public static class Builder { public Builder add(final ClusterPrivilege clusterPrivilege, final Set allowedActionPatterns, final Set excludeActionPatterns) { this.clusterPrivileges.add(clusterPrivilege); - if (allowedActionPatterns.isEmpty() && excludeActionPatterns.isEmpty()) { - this.actionAutomatons.add(Automatons.EMPTY); - } else { - final Automaton allowedAutomaton = Automatons.patterns(allowedActionPatterns); - final Automaton excludedAutomaton = Automatons.patterns(excludeActionPatterns); - this.actionAutomatons.add(Automatons.minusAndMinimize(allowedAutomaton, excludedAutomaton)); - } + final Automaton actionAutomaton = createAutomaton(allowedActionPatterns, excludeActionPatterns); + this.actionAutomatons.add(actionAutomaton); return this; } - public Builder add(final ConfigurableClusterPrivilege configurableClusterPrivilege, final Predicate actionPredicate, + public Builder add(final ClusterPrivilege clusterPrivilege, final Set allowedActionPatterns, final Predicate requestPredicate) { - return add(configurableClusterPrivilege, new ActionRequestPredicatePermissionCheck(configurableClusterPrivilege, - actionPredicate, - requestPredicate)); + final Automaton actionAutomaton = createAutomaton(allowedActionPatterns, Set.of()); + return add(clusterPrivilege, new ActionRequestBasedPermissionCheck(clusterPrivilege, actionAutomaton, requestPredicate)); } public Builder add(final ClusterPrivilege clusterPrivilege, final PermissionCheck permissionCheck) { @@ -116,6 +112,21 @@ public ClusterPermission build() { } return new ClusterPermission(this.clusterPrivileges, checks); } + + private static Automaton createAutomaton(Set allowedActionPatterns, Set excludeActionPatterns) { + allowedActionPatterns = (allowedActionPatterns == null) ? Set.of() : allowedActionPatterns; + excludeActionPatterns = (excludeActionPatterns == null) ? Set.of() : excludeActionPatterns; + + if (allowedActionPatterns.isEmpty()) { + return Automatons.EMPTY; + } else if (excludeActionPatterns.isEmpty()) { + return Automatons.patterns(allowedActionPatterns); + } else { + final Automaton allowedAutomaton = Automatons.patterns(allowedActionPatterns); + final Automaton excludedAutomaton = Automatons.patterns(excludeActionPatterns); + return Automatons.minusAndMinimize(allowedAutomaton, excludedAutomaton); + } + } } /** @@ -124,13 +135,15 @@ public ClusterPermission build() { */ public interface PermissionCheck { /** - * Checks permission to a cluster action for a given request. + * Checks permission to a cluster action for a given request in the context of given + * authentication. 
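ClusterPermission.check and every PermissionCheck now receive the Authentication together with the action name and request, and the builder funnels allowed and excluded action patterns through one createAutomaton helper. The sketch below models the composition with plain java.util.function predicates instead of Lucene automatons; it is purely an illustration, not the x-pack implementation.

import java.util.List;
import java.util.function.BiPredicate;
import java.util.function.Predicate;

final class SimpleClusterPermission {

    /** One check is an action-name test AND a request/authentication test, as in ActionBasedPermissionCheck. */
    static final class Check {
        private final Predicate<String> actionPredicate;
        private final BiPredicate<Object, Object> requestAndAuthentication;

        Check(Predicate<String> actionPredicate, BiPredicate<Object, Object> requestAndAuthentication) {
            this.actionPredicate = actionPredicate;
            this.requestAndAuthentication = requestAndAuthentication;
        }

        boolean check(String action, Object request, Object authentication) {
            return actionPredicate.test(action) && requestAndAuthentication.test(request, authentication);
        }
    }

    private final List<Check> checks;

    SimpleClusterPermission(List<Check> checks) {
        this.checks = List.copyOf(checks);
    }

    /** The permission grants access if any single check allows the (action, request, authentication) triple. */
    boolean check(String action, Object request, Object authentication) {
        return checks.stream().anyMatch(c -> c.check(action, request, authentication));
    }

    public static void main(String[] args) {
        Check ownApiKeys = new Check(
            action -> action.startsWith("cluster:admin/xpack/security/api_key/"),
            (request, authentication) -> authentication != null);   // stand-in for a real request/authentication test
        SimpleClusterPermission permission = new SimpleClusterPermission(List.of(ownApiKeys));
        System.out.println(permission.check("cluster:admin/xpack/security/api_key/get", new Object(), "user1")); // true
        System.out.println(permission.check("cluster:monitor/health", new Object(), "user1"));                   // false
    }
}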
* * @param action action name * @param request {@link TransportRequest} + * @param authentication {@link Authentication} * @return {@code true} if the specified action for given request is allowed else returns {@code false} */ - boolean check(String action, TransportRequest request); + boolean check(String action, TransportRequest request, Authentication authentication); /** * Checks whether specified {@link PermissionCheck} is implied by this {@link PermissionCheck}.
    @@ -145,52 +158,80 @@ public interface PermissionCheck { boolean implies(PermissionCheck otherPermissionCheck); } - // Automaton based permission check - private static class AutomatonPermissionCheck implements PermissionCheck { + /** + * Base for implementing cluster action based {@link PermissionCheck}. + * It enforces the checks at cluster action level and then hands it off to the implementations + * to enforce checks based on {@link TransportRequest} and/or {@link Authentication}. + */ + public abstract static class ActionBasedPermissionCheck implements PermissionCheck { private final Automaton automaton; private final Predicate actionPredicate; - AutomatonPermissionCheck(final Automaton automaton) { + public ActionBasedPermissionCheck(final Automaton automaton) { this.automaton = automaton; this.actionPredicate = Automatons.predicate(automaton); } @Override - public boolean check(final String action, final TransportRequest request) { - return actionPredicate.test(action); + public final boolean check(final String action, final TransportRequest request, final Authentication authentication) { + return actionPredicate.test(action) && extendedCheck(action, request, authentication); } + protected abstract boolean extendedCheck(String action, TransportRequest request, Authentication authentication); + @Override - public boolean implies(final PermissionCheck permissionCheck) { - if (permissionCheck instanceof AutomatonPermissionCheck) { - return Operations.subsetOf(((AutomatonPermissionCheck) permissionCheck).automaton, this.automaton); + public final boolean implies(final PermissionCheck permissionCheck) { + if (permissionCheck instanceof ActionBasedPermissionCheck) { + return Operations.subsetOf(((ActionBasedPermissionCheck) permissionCheck).automaton, this.automaton) && + doImplies((ActionBasedPermissionCheck) permissionCheck); } return false; } + + protected abstract boolean doImplies(ActionBasedPermissionCheck permissionCheck); } - // action and request based permission check - private static class ActionRequestPredicatePermissionCheck implements PermissionCheck { + // Automaton based permission check + private static class AutomatonPermissionCheck extends ActionBasedPermissionCheck { + + AutomatonPermissionCheck(final Automaton automaton) { + super(automaton); + } + + @Override + protected boolean extendedCheck(String action, TransportRequest request, Authentication authentication) { + return true; + } + + @Override + protected boolean doImplies(ActionBasedPermissionCheck permissionCheck) { + return permissionCheck instanceof AutomatonPermissionCheck; + } + + } + + // action, request based permission check + private static class ActionRequestBasedPermissionCheck extends ActionBasedPermissionCheck { private final ClusterPrivilege clusterPrivilege; - final Predicate actionPredicate; - final Predicate requestPredicate; + private final Predicate requestPredicate; - ActionRequestPredicatePermissionCheck(final ClusterPrivilege clusterPrivilege, final Predicate actionPredicate, - final Predicate requestPredicate) { - this.clusterPrivilege = clusterPrivilege; - this.actionPredicate = actionPredicate; + ActionRequestBasedPermissionCheck(ClusterPrivilege clusterPrivilege, final Automaton automaton, + final Predicate requestPredicate) { + super(automaton); this.requestPredicate = requestPredicate; + this.clusterPrivilege = clusterPrivilege; } @Override - public boolean check(final String action, final TransportRequest request) { - return actionPredicate.test(action) && 
requestPredicate.test(request); + protected boolean extendedCheck(String action, TransportRequest request, Authentication authentication) { + return requestPredicate.test(request); } @Override - public boolean implies(final PermissionCheck permissionCheck) { - if (permissionCheck instanceof ActionRequestPredicatePermissionCheck) { - final ActionRequestPredicatePermissionCheck otherCheck = (ActionRequestPredicatePermissionCheck) permissionCheck; + protected boolean doImplies(final ActionBasedPermissionCheck permissionCheck) { + if (permissionCheck instanceof ActionRequestBasedPermissionCheck) { + final ActionRequestBasedPermissionCheck otherCheck = + (ActionRequestBasedPermissionCheck) permissionCheck; return this.clusterPrivilege.equals(otherCheck.clusterPrivilege); } return false; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRole.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRole.java index 8c7491d0a9a3d..871be8cbc6569 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRole.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRole.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.automaton.Automaton; import org.elasticsearch.cluster.metadata.AliasOrIndex; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; @@ -122,15 +123,18 @@ public ResourcePrivilegesMap checkIndicesPrivileges(Set checkForIndexPat } /** - * Check if cluster permissions allow for the given action, also checks whether the limited by role allows the given actions + * Check if cluster permissions allow for the given action, + * also checks whether the limited by role allows the given actions in the context of given + * authentication. 
* * @param action cluster action * @param request {@link TransportRequest} + * @param authentication {@link Authentication} * @return {@code true} if action is allowed else returns {@code false} */ @Override - public boolean checkClusterAction(String action, TransportRequest request) { - return super.checkClusterAction(action, request) && limitedBy.checkClusterAction(action, request); + public boolean checkClusterAction(String action, TransportRequest request, Authentication authentication) { + return super.checkClusterAction(action, request, authentication) && limitedBy.checkClusterAction(action, request, authentication); } /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java index ef898a0876dda..94d583f616787 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilege; @@ -121,14 +122,16 @@ public ResourcePrivilegesMap checkIndicesPrivileges(Set checkForIndexPat } /** - * Check if cluster permissions allow for the given action + * Check if cluster permissions allow for the given action in the context of given + * authentication. 
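LimitedRole.checkClusterAction above threads the Authentication through to both the wrapped role and the role it is limited by; access is granted only when both agree. A stripped-down model of that intersection, with placeholder Object parameters standing in for the request and authentication types:

import java.util.function.Predicate;

interface ClusterCheck {
    boolean checkClusterAction(String action, Object request, Object authentication);
}

final class LimitedCheck implements ClusterCheck {
    private final ClusterCheck base;
    private final ClusterCheck limitedBy;

    LimitedCheck(ClusterCheck base, ClusterCheck limitedBy) {
        this.base = base;
        this.limitedBy = limitedBy;
    }

    @Override
    public boolean checkClusterAction(String action, Object request, Object authentication) {
        // Both the role itself and the limiting role must allow the action.
        return base.checkClusterAction(action, request, authentication)
            && limitedBy.checkClusterAction(action, request, authentication);
    }

    public static void main(String[] args) {
        ClusterCheck all = (a, r, auth) -> true;
        ClusterCheck monitorOnly = (a, r, auth) -> a.startsWith("cluster:monitor/");
        ClusterCheck effective = new LimitedCheck(all, monitorOnly);
        System.out.println(effective.checkClusterAction("cluster:monitor/health", null, "user"));                   // true
        System.out.println(effective.checkClusterAction("cluster:admin/xpack/security/api_key/get", null, "user")); // false
    }
}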
* * @param action cluster action * @param request {@link TransportRequest} + * @param authentication {@link Authentication} * @return {@code true} if action is allowed else returns {@code false} */ - public boolean checkClusterAction(String action, TransportRequest request) { - return cluster.check(action, request); + public boolean checkClusterAction(String action, TransportRequest request, Authentication authentication) { + return cluster.check(action, request, authentication); } /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java index 755d76e76aa03..88df7a2e685d6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java @@ -18,6 +18,7 @@ import org.elasticsearch.xpack.core.ilm.action.GetStatusAction; import org.elasticsearch.xpack.core.ilm.action.StartILMAction; import org.elasticsearch.xpack.core.ilm.action.StopILMAction; +import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationAction; import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenAction; import org.elasticsearch.xpack.core.security.action.token.RefreshTokenAction; import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesAction; @@ -37,7 +38,7 @@ */ public class ClusterPrivilegeResolver { // shared automatons - private static final Set MANAGE_SECURITY_PATTERN = Set.of("cluster:admin/xpack/security/*"); + private static final Set ALL_SECURITY_PATTERN = Set.of("cluster:admin/xpack/security/*"); private static final Set MANAGE_SAML_PATTERN = Set.of("cluster:admin/xpack/security/saml/*", InvalidateTokenAction.NAME, RefreshTokenAction.NAME); private static final Set MANAGE_OIDC_PATTERN = Set.of("cluster:admin/xpack/security/oidc/*"); @@ -75,8 +76,7 @@ public class ClusterPrivilegeResolver { new ActionClusterPrivilege("monitor_data_frame_transforms", MONITOR_DATA_FRAME_PATTERN); public static final NamedClusterPrivilege MONITOR_WATCHER = new ActionClusterPrivilege("monitor_watcher", MONITOR_WATCHER_PATTERN); public static final NamedClusterPrivilege MONITOR_ROLLUP = new ActionClusterPrivilege("monitor_rollup", MONITOR_ROLLUP_PATTERN); - public static final NamedClusterPrivilege MANAGE = new ActionClusterPrivilege("manage", - ALL_CLUSTER_PATTERN, MANAGE_SECURITY_PATTERN); + public static final NamedClusterPrivilege MANAGE = new ActionClusterPrivilege("manage", ALL_CLUSTER_PATTERN, ALL_SECURITY_PATTERN); public static final NamedClusterPrivilege MANAGE_ML = new ActionClusterPrivilege("manage_ml", MANAGE_ML_PATTERN); public static final NamedClusterPrivilege MANAGE_DATA_FRAME = new ActionClusterPrivilege("manage_data_frame_transforms", MANAGE_DATA_FRAME_PATTERN); @@ -89,7 +89,8 @@ public class ClusterPrivilegeResolver { new ActionClusterPrivilege("manage_ingest_pipelines", MANAGE_INGEST_PIPELINE_PATTERN); public static final NamedClusterPrivilege TRANSPORT_CLIENT = new ActionClusterPrivilege("transport_client", TRANSPORT_CLIENT_PATTERN); - public static final NamedClusterPrivilege MANAGE_SECURITY = new ActionClusterPrivilege("manage_security", MANAGE_SECURITY_PATTERN); + public static final NamedClusterPrivilege MANAGE_SECURITY = new ActionClusterPrivilege("manage_security", 
ALL_SECURITY_PATTERN, + Set.of(DelegatePkiAuthenticationAction.NAME)); public static final NamedClusterPrivilege MANAGE_SAML = new ActionClusterPrivilege("manage_saml", MANAGE_SAML_PATTERN); public static final NamedClusterPrivilege MANAGE_OIDC = new ActionClusterPrivilege("manage_oidc", MANAGE_OIDC_PATTERN); public static final NamedClusterPrivilege MANAGE_API_KEY = new ActionClusterPrivilege("manage_api_key", MANAGE_API_KEY_PATTERN); @@ -102,6 +103,10 @@ public class ClusterPrivilegeResolver { public static final NamedClusterPrivilege READ_ILM = new ActionClusterPrivilege("read_ilm", READ_ILM_PATTERN); public static final NamedClusterPrivilege MANAGE_SLM = new ActionClusterPrivilege("manage_slm", MANAGE_SLM_PATTERN); public static final NamedClusterPrivilege READ_SLM = new ActionClusterPrivilege("read_slm", READ_SLM_PATTERN); + public static final NamedClusterPrivilege DELEGATE_PKI = new ActionClusterPrivilege("delegate_pki", + Set.of(DelegatePkiAuthenticationAction.NAME, InvalidateTokenAction.NAME)); + + public static final NamedClusterPrivilege MANAGE_OWN_API_KEY = ManageOwnApiKeyClusterPrivilege.INSTANCE; private static final Map VALUES = Stream.of( NONE, @@ -131,7 +136,9 @@ public class ClusterPrivilegeResolver { MANAGE_ILM, READ_ILM, MANAGE_SLM, - READ_SLM).collect(Collectors.toUnmodifiableMap(NamedClusterPrivilege::name, Function.identity())); + READ_SLM, + DELEGATE_PKI, + MANAGE_OWN_API_KEY).collect(Collectors.toUnmodifiableMap(NamedClusterPrivilege::name, Function.identity())); /** * Resolves a {@link NamedClusterPrivilege} from a given name if it exists. diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java index 22ba4c1f2e33a..a5ec573d5c988 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java @@ -125,8 +125,6 @@ private static void expectFieldName(XContentParser parser, ParseField... fields) * of applications (identified by a wildcard-aware application-name). 
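In the ClusterPrivilegeResolver hunk above, manage_security keeps the broad security action pattern but, judging from how MANAGE excludes the security namespace, the second constructor argument of ActionClusterPrivilege appears to be an exclusion set, so PKI delegation is carved out of manage_security and granted only by the new delegate_pki privilege (together with token invalidation). The sketch below illustrates that assumed allowed-minus-excluded semantics with a toy glob matcher; the action name for DelegatePkiAuthenticationAction.NAME is an assumption here, not taken from the diff.

```java
import java.util.Set;

public final class AllowExceptSketch {

    /** Very small glob matcher: a trailing '*' matches any suffix; everything else is literal. */
    static boolean matches(String pattern, String action) {
        return pattern.endsWith("*")
            ? action.startsWith(pattern.substring(0, pattern.length() - 1))
            : pattern.equals(action);
    }

    /** Assumed allowed-minus-excluded semantics of ActionClusterPrivilege(name, allowed, excluded). */
    static boolean permits(Set<String> allowed, Set<String> excluded, String action) {
        return allowed.stream().anyMatch(p -> matches(p, action))
            && excluded.stream().noneMatch(p -> matches(p, action));
    }

    public static void main(String[] args) {
        Set<String> allSecurity = Set.of("cluster:admin/xpack/security/*");
        Set<String> delegatePkiOnly = Set.of("cluster:admin/xpack/security/delegate_pki"); // assumed action name

        // manage_security still covers the rest of the security namespace...
        System.out.println(permits(allSecurity, delegatePkiOnly, "cluster:admin/xpack/security/api_key/create")); // true
        // ...but no longer implies PKI delegation, which now needs the dedicated delegate_pki privilege.
        System.out.println(permits(allSecurity, delegatePkiOnly, "cluster:admin/xpack/security/delegate_pki"));   // false
    }
}
```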
*/ public static class ManageApplicationPrivileges implements ConfigurableClusterPrivilege { - - private static final Predicate ACTION_PREDICATE = Automatons.predicate("cluster:admin/xpack/security/privilege/*"); public static final String WRITEABLE_NAME = "manage-application-privileges"; private final Set applicationNames; @@ -145,6 +143,7 @@ public ManageApplicationPrivileges(Set applicationNames) { } return false; }; + } @Override @@ -215,7 +214,7 @@ public int hashCode() { @Override public ClusterPermission.Builder buildPermission(final ClusterPermission.Builder builder) { - return builder.add(this, ACTION_PREDICATE, requestPredicate); + return builder.add(this, Set.of("cluster:admin/xpack/security/privilege/*"), requestPredicate); } private interface Fields { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilege.java new file mode 100644 index 0000000000000..bea9b16ebfc1d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilege.java @@ -0,0 +1,106 @@ +/* + * + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + * + */ + +package org.elasticsearch.xpack.core.security.authz.privilege; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.GetApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; +import org.elasticsearch.xpack.core.security.support.Automatons; + +/** + * Named cluster privilege for managing API keys owned by the current authenticated user. 
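The new ManageOwnApiKeyClusterPrivilege added below restricts API-key actions to keys owned by the caller: creation is always allowed, get/invalidate requests are allowed only when they resolve to the authenticated user's own keys, and a caller that is itself an API key may only act on its own key id. The following is a simplified, standalone restatement of that ownership decision under stand-in types (Caller is not an x-pack class, and blank-string handling is omitted), intended only to make the rules easier to follow.

```java
public final class OwnApiKeySketch {

    /** Stand-in for the fields the ownership check reads from Authentication; apiKeyId is set only for API-key auth. */
    static final class Caller {
        final String principal, realmName, realmType, apiKeyId;
        Caller(String principal, String realmName, String realmType, String apiKeyId) {
            this.principal = principal; this.realmName = realmName; this.realmType = realmType; this.apiKeyId = apiKeyId;
        }
    }

    /** Simplified restatement of checkIfUserIsOwnerOfApiKeys from the new privilege class. */
    static boolean ownsRequestedKeys(Caller caller, String requestedKeyId, String requestedUser,
                                     String requestedRealm, boolean ownedByAuthenticatedUser) {
        if ("_es_api_key".equals(caller.realmType)) {
            // An API key can only act on itself, identified by the key id carried in its authentication metadata.
            return requestedKeyId != null && requestedKeyId.equals(caller.apiKeyId);
        }
        if (ownedByAuthenticatedUser) {
            return true; // request explicitly scoped to the caller's own keys
        }
        if (requestedUser != null && requestedRealm != null) {
            return requestedUser.equals(caller.principal) && requestedRealm.equals(caller.realmName);
        }
        return false;
    }

    public static void main(String[] args) {
        Caller user = new Caller("jdoe", "native1", "native", null);
        System.out.println(ownsRequestedKeys(user, null, "jdoe", "native1", false));  // true
        System.out.println(ownsRequestedKeys(user, null, "admin", "native1", false)); // false
    }
}
```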
+ */ +public class ManageOwnApiKeyClusterPrivilege implements NamedClusterPrivilege { + public static final ManageOwnApiKeyClusterPrivilege INSTANCE = new ManageOwnApiKeyClusterPrivilege(); + private static final String PRIVILEGE_NAME = "manage_own_api_key"; + private static final String API_KEY_REALM_TYPE = "_es_api_key"; + private static final String API_KEY_ID_KEY = "_security_api_key_id"; + + private ManageOwnApiKeyClusterPrivilege() { + } + + @Override + public String name() { + return PRIVILEGE_NAME; + } + + @Override + public ClusterPermission.Builder buildPermission(ClusterPermission.Builder builder) { + return builder.add(this, ManageOwnClusterPermissionCheck.INSTANCE); + } + + private static final class ManageOwnClusterPermissionCheck extends ClusterPermission.ActionBasedPermissionCheck { + public static final ManageOwnClusterPermissionCheck INSTANCE = new ManageOwnClusterPermissionCheck(); + + private ManageOwnClusterPermissionCheck() { + super(Automatons.patterns("cluster:admin/xpack/security/api_key/*")); + } + + @Override + protected boolean extendedCheck(String action, TransportRequest request, Authentication authentication) { + if (request instanceof CreateApiKeyRequest) { + return true; + } else if (request instanceof GetApiKeyRequest) { + final GetApiKeyRequest getApiKeyRequest = (GetApiKeyRequest) request; + return checkIfUserIsOwnerOfApiKeys(authentication, getApiKeyRequest.getApiKeyId(), getApiKeyRequest.getUserName(), + getApiKeyRequest.getRealmName(), getApiKeyRequest.ownedByAuthenticatedUser()); + } else if (request instanceof InvalidateApiKeyRequest) { + final InvalidateApiKeyRequest invalidateApiKeyRequest = (InvalidateApiKeyRequest) request; + return checkIfUserIsOwnerOfApiKeys(authentication, invalidateApiKeyRequest.getId(), + invalidateApiKeyRequest.getUserName(), invalidateApiKeyRequest.getRealmName(), + invalidateApiKeyRequest.ownedByAuthenticatedUser()); + } + throw new IllegalArgumentException( + "manage own api key privilege only supports API key requests (not " + request.getClass().getName() + ")"); + } + + @Override + protected boolean doImplies(ClusterPermission.ActionBasedPermissionCheck permissionCheck) { + return permissionCheck instanceof ManageOwnClusterPermissionCheck; + } + + private boolean checkIfUserIsOwnerOfApiKeys(Authentication authentication, String apiKeyId, String username, String realmName, + boolean ownedByAuthenticatedUser) { + if (isCurrentAuthenticationUsingSameApiKeyIdFromRequest(authentication, apiKeyId)) { + return true; + } else { + /* + * TODO bizybot we need to think on how we can propagate appropriate error message to the end user when username, realm name + * is missing. This is similar to the problem of propagating right error messages in case of access denied. 
+ */ + if (authentication.getAuthenticatedBy().getType().equals(API_KEY_REALM_TYPE)) { + // API key cannot own any other API key so deny access + return false; + } else if (ownedByAuthenticatedUser) { + return true; + } else if (Strings.hasText(username) && Strings.hasText(realmName)) { + final String authenticatedUserPrincipal = authentication.getUser().principal(); + final String authenticatedUserRealm = authentication.getAuthenticatedBy().getName(); + return username.equals(authenticatedUserPrincipal) && realmName.equals(authenticatedUserRealm); + } + } + return false; + } + + private boolean isCurrentAuthenticationUsingSameApiKeyIdFromRequest(Authentication authentication, String apiKeyId) { + if (authentication.getAuthenticatedBy().getType().equals(API_KEY_REALM_TYPE)) { + // API key id from authentication must match the id from request + final String authenticatedApiKeyId = (String) authentication.getMetadata().get(API_KEY_ID_KEY); + if (Strings.hasText(apiKeyId)) { + return apiKeyId.equals(authenticatedApiKeyId); + } + } + return false; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index 983ac56a226f7..209686033534b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -111,7 +111,7 @@ private static Map initializeReservedRoles() { .put(KibanaUser.ROLE_NAME, new RoleDescriptor(KibanaUser.ROLE_NAME, new String[] { "monitor", "manage_index_templates", MonitoringBulkAction.NAME, "manage_saml", "manage_token", "manage_oidc", - GetBuiltinPrivilegesAction.NAME + GetBuiltinPrivilegesAction.NAME, "delegate_pki" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/SSLExceptionHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/SSLExceptionHelper.java index ddc02ac55f7c2..c954671cc96c4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/SSLExceptionHelper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/SSLExceptionHelper.java @@ -17,7 +17,8 @@ private SSLExceptionHelper() { } public static boolean isNotSslRecordException(Throwable e) { - return e instanceof NotSslRecordException && e.getCause() == null; + return e instanceof DecoderException && + e.getCause() instanceof NotSslRecordException; } public static boolean isCloseDuringHandshakeException(Throwable e) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/SecurityTransportExceptionHandler.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/SecurityTransportExceptionHandler.java index 37616d0ad76bb..ecc55fb47523f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/SecurityTransportExceptionHandler.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/SecurityTransportExceptionHandler.java @@ -30,12 +30,7 @@ public void accept(TcpChannel channel, Exception e) { // just close and ignore - we are already stopped and just need to make sure we release all resources 
CloseableChannel.closeChannel(channel); } else if (SSLExceptionHelper.isNotSslRecordException(e)) { - if (logger.isTraceEnabled()) { - logger.trace( - new ParameterizedMessage("received plaintext traffic on an encrypted channel, closing connection {}", channel), e); - } else { - logger.warn("received plaintext traffic on an encrypted channel, closing connection {}", channel); - } + logger.warn("received plaintext traffic on an encrypted channel, closing connection {}", channel); CloseableChannel.closeChannel(channel); } else if (SSLExceptionHelper.isCloseDuringHandshakeException(e)) { if (logger.isTraceEnabled()) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java index 24e319c9b5611..ddb05ad1df142 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java @@ -128,11 +128,9 @@ public ActionRequestValidationException validate() { ActionRequestValidationException err = new ActionRequestValidationException(); // ID validation - if (id.contains(",")) { - err.addValidationError("invalid policy id [" + id + "]: must not contain ','"); - } - if (id.contains(" ")) { - err.addValidationError("invalid policy id [" + id + "]: must not contain spaces"); + if (Strings.validFileName(id) == false) { + err.addValidationError("invalid policy id [" + id + "]: must not contain the following characters " + + Strings.INVALID_FILENAME_CHARS); } if (id.charAt(0) == '_') { err.addValidationError("invalid policy id [" + id + "]: must not start with '_'"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertParsingUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertParsingUtils.java index 364a774c6ac98..39111ceb610b4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertParsingUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertParsingUtils.java @@ -17,6 +17,7 @@ import javax.net.ssl.TrustManagerFactory; import javax.net.ssl.X509ExtendedKeyManager; import javax.net.ssl.X509ExtendedTrustManager; + import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; @@ -279,4 +280,20 @@ public static X509ExtendedTrustManager trustManager(KeyStore keyStore, String al } throw new IllegalStateException("failed to find a X509ExtendedTrustManager"); } + + /** + * Checks that the {@code X509Certificate} array is ordered, such that the end-entity certificate is first and it is followed by any + * certificate authorities'. The check validates that the {@code issuer} of every certificate is the {@code subject} of the certificate + * in the next array position. No other certificate attributes are checked. 
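The CertParsingUtils helper added above only validates subject/issuer ordering; the DelegatePkiAuthenticationRequest tests later in this diff rely on it to reject unordered chains. As a reference, here is the same logic as a standalone sketch over plain JDK types, outside the x-pack class.

```java
import java.security.cert.X509Certificate;
import java.util.List;

public final class ChainOrderSketch {

    /**
     * Standalone equivalent of the new CertParsingUtils#isOrderedCertificateChain: the end-entity
     * certificate comes first and each certificate's issuer is the subject of the next entry.
     * No signatures, validity periods, or key usages are checked.
     */
    static boolean isOrderedCertificateChain(List<X509Certificate> chain) {
        for (int i = 1; i < chain.size(); i++) {
            X509Certificate cert = chain.get(i - 1);
            X509Certificate issuer = chain.get(i);
            if (cert.getIssuerX500Principal().equals(issuer.getSubjectX500Principal()) == false) {
                return false;
            }
        }
        return true;
    }
}
```

Note that an empty or single-element list trivially passes this check, so callers that require a CA-anchored chain still need their own length validation.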
+ */ + public static boolean isOrderedCertificateChain(List chain) { + for (int i = 1; i < chain.size(); i++) { + X509Certificate cert = chain.get(i - 1); + X509Certificate issuer = chain.get(i); + if (false == cert.getIssuerX500Principal().equals(issuer.getSubjectX500Principal())) { + return false; + } + } + return true; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionWrapper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionWrapper.java index 951f305edf5bc..7337e1e417763 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionWrapper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionWrapper.java @@ -47,8 +47,6 @@ public class ActionWrapper implements ToXContentObject { - private final int MAXIMUM_FOREACH_RUNS = 100; - private String id; @Nullable private final ExecutableCondition condition; @@ -58,18 +56,21 @@ public class ActionWrapper implements ToXContentObject { private final ExecutableAction action; @Nullable private String path; + private final Integer maxIterations; public ActionWrapper(String id, ActionThrottler throttler, @Nullable ExecutableCondition condition, @Nullable ExecutableTransform transform, ExecutableAction action, - @Nullable String path) { + @Nullable String path, + @Nullable Integer maxIterations) { this.id = id; this.condition = condition; this.throttler = throttler; this.transform = transform; this.action = action; this.path = path; + this.maxIterations = (maxIterations != null) ? maxIterations : 100; } public String id() { @@ -177,7 +178,7 @@ public ActionWrapperResult execute(WatchExecutionContext ctx) { throw new ElasticsearchException("foreach object [{}] was an empty list, could not run any action", path); } else { for (Object o : collection) { - if (runs >= MAXIMUM_FOREACH_RUNS) { + if (runs >= maxIterations) { break; } if (o instanceof Map) { @@ -216,6 +217,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); } builder.endArray(); + builder.field(WatchField.MAX_ITERATIONS.getPreferredName(), maxIterations); return builder; } }); @@ -279,7 +281,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } if (Strings.isEmpty(path) == false) { builder.field(WatchField.FOREACH.getPreferredName(), path); + builder.field(WatchField.MAX_ITERATIONS.getPreferredName(), maxIterations); } + builder.field(action.type(), action, params); return builder.endObject(); } @@ -294,6 +298,7 @@ static ActionWrapper parse(String watchId, String actionId, XContentParser parse TimeValue throttlePeriod = null; String path = null; ExecutableAction action = null; + Integer maxIterations = null; String currentFieldName = null; XContentParser.Token token; @@ -316,6 +321,8 @@ static ActionWrapper parse(String watchId, String actionId, XContentParser parse throw new ElasticsearchParseException("could not parse action [{}/{}]. 
failed to parse field [{}] as time value", pe, watchId, actionId, currentFieldName); } + } else if (WatchField.MAX_ITERATIONS.match(currentFieldName, parser.getDeprecationHandler())) { + maxIterations = parser.intValue(); } else { // it's the type of the action ActionFactory actionFactory = actionRegistry.factory(currentFieldName); @@ -332,7 +339,7 @@ static ActionWrapper parse(String watchId, String actionId, XContentParser parse } ActionThrottler throttler = new ActionThrottler(clock, throttlePeriod, licenseState); - return new ActionWrapper(actionId, throttler, condition, transform, action, path); + return new ActionWrapper(actionId, throttler, condition, transform, action, path, maxIterations); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchField.java index 1bcb62447bf76..069794816c91e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchField.java @@ -14,6 +14,7 @@ public final class WatchField { public static final ParseField ACTIONS = new ParseField("actions"); public static final ParseField TRANSFORM = new ParseField("transform"); public static final ParseField FOREACH = new ParseField("foreach"); + public static final ParseField MAX_ITERATIONS = new ParseField("max_iterations"); public static final ParseField THROTTLE_PERIOD = new ParseField("throttle_period_in_millis"); public static final ParseField THROTTLE_PERIOD_HUMAN = new ParseField("throttle_period"); public static final ParseField METADATA = new ParseField("metadata"); diff --git a/x-pack/plugin/core/src/main/resources/slm-history.json b/x-pack/plugin/core/src/main/resources/slm-history.json index 762c398b2d9a2..76631602a3fec 100644 --- a/x-pack/plugin/core/src/main/resources/slm-history.json +++ b/x-pack/plugin/core/src/main/resources/slm-history.json @@ -8,6 +8,7 @@ "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1", "index.lifecycle.name": "slm-history-ilm-policy", + "index.lifecycle.rollover_alias": ".slm-history-${xpack.slm.template.version}", "index.format": 1 }, "mappings": { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java index 14aae50b3b1cd..34acf179c3fbf 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java @@ -65,7 +65,7 @@ import java.io.IOException; import java.nio.file.Path; -import java.util.Arrays; +import java.util.Collections; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; @@ -96,12 +96,13 @@ public void testSourceIncomplete() throws IOException { repository.start(); try (Engine.IndexCommitRef snapshotRef = shard.acquireLastIndexCommit(true)) { IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); - IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, () -> - runAsSnapshot(shard.getThreadPool(), - () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, - snapshotRef.getIndexCommit(), indexShardSnapshotStatus))); - assertEquals("Can't snapshot _source only 
on an index that has incomplete source ie. has _source disabled or filters the source" - , illegalStateException.getMessage()); + final PlainActionFuture future = PlainActionFuture.newFuture(); + runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, + snapshotRef.getIndexCommit(), indexShardSnapshotStatus, future)); + IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, future::actionGet); + assertEquals( + "Can't snapshot _source only on an index that has incomplete source ie. has _source disabled or filters the source", + illegalStateException.getMessage()); } closeShards(shard); } @@ -120,8 +121,10 @@ public void testIncrementalSnapshot() throws IOException { try (Engine.IndexCommitRef snapshotRef = shard.acquireLastIndexCommit(true)) { IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); SnapshotId snapshotId = new SnapshotId("test", "test"); + final PlainActionFuture future = PlainActionFuture.newFuture(); runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, - snapshotRef.getIndexCommit(), indexShardSnapshotStatus)); + snapshotRef.getIndexCommit(), indexShardSnapshotStatus, future)); + future.actionGet(); IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); assertEquals(copy.getTotalFileCount(), copy.getIncrementalFileCount()); totalFileCount = copy.getTotalFileCount(); @@ -134,8 +137,10 @@ public void testIncrementalSnapshot() throws IOException { SnapshotId snapshotId = new SnapshotId("test_1", "test_1"); IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); + final PlainActionFuture future = PlainActionFuture.newFuture(); runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, - snapshotRef.getIndexCommit(), indexShardSnapshotStatus)); + snapshotRef.getIndexCommit(), indexShardSnapshotStatus, future)); + future.actionGet(); IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); // we processed the segments_N file plus _1.si, _1.fdx, _1.fnm, _1.fdt assertEquals(5, copy.getIncrementalFileCount()); @@ -148,8 +153,10 @@ public void testIncrementalSnapshot() throws IOException { SnapshotId snapshotId = new SnapshotId("test_2", "test_2"); IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); + final PlainActionFuture future = PlainActionFuture.newFuture(); runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, - snapshotRef.getIndexCommit(), indexShardSnapshotStatus)); + snapshotRef.getIndexCommit(), indexShardSnapshotStatus, future)); + future.actionGet(); IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); // we processed the segments_N file plus _1_1.liv assertEquals(2, copy.getIncrementalFileCount()); @@ -193,12 +200,15 @@ public void testRestoreMinmal() throws IOException { repository.start(); try (Engine.IndexCommitRef snapshotRef = shard.acquireLastIndexCommit(true)) { IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); + final PlainActionFuture future = PlainActionFuture.newFuture(); runAsSnapshot(shard.getThreadPool(), () -> { - repository.initializeSnapshot(snapshotId, Arrays.asList(indexId), - 
MetaData.builder().put(shard.indexSettings() - .getIndexMetaData(), false).build()); repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, snapshotRef.getIndexCommit(), - indexShardSnapshotStatus); + indexShardSnapshotStatus, future); + future.actionGet(); + repository.finalizeSnapshot(snapshotId, Collections.singletonList(indexId), + indexShardSnapshotStatus.asCopy().getStartTime(), null, 1, Collections.emptyList(), + repository.getRepositoryData().getGenId(), true, + MetaData.builder().put(shard.indexSettings().getIndexMetaData(), false).build(), Collections.emptyMap()); }); IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); assertEquals(copy.getTotalFileCount(), copy.getIncrementalFileCount()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/DelegatePkiAuthenticationRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/DelegatePkiAuthenticationRequestTests.java new file mode 100644 index 0000000000000..a89f5d3705ca4 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/DelegatePkiAuthenticationRequestTests.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationRequest; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.cert.CertificateFactory; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import javax.security.auth.x500.X500Principal; + +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class DelegatePkiAuthenticationRequestTests extends AbstractXContentTestCase { + + public void testRequestValidation() { + expectThrows(NullPointerException.class, () -> new DelegatePkiAuthenticationRequest((List) null)); + + DelegatePkiAuthenticationRequest request = new DelegatePkiAuthenticationRequest(Arrays.asList(new X509Certificate[0])); + ActionRequestValidationException ve = request.validate(); + assertNotNull(ve); + assertEquals(1, ve.validationErrors().size()); + assertThat(ve.validationErrors().get(0), is("certificates chain must not be empty")); + + List mockCertChain = new ArrayList<>(2); + mockCertChain.add(mock(X509Certificate.class)); + when(mockCertChain.get(0).getIssuerX500Principal()).thenReturn(new X500Principal("CN=Test, OU=elasticsearch, O=org")); + mockCertChain.add(mock(X509Certificate.class)); + when(mockCertChain.get(1).getSubjectX500Principal()).thenReturn(new X500Principal("CN=Not Test, OU=elasticsearch, O=org")); + request = new DelegatePkiAuthenticationRequest(mockCertChain); + ve = request.validate(); + assertNotNull(ve); + assertEquals(1, ve.validationErrors().size()); + assertThat(ve.validationErrors().get(0), is("certificates chain must be an 
ordered chain")); + + request = new DelegatePkiAuthenticationRequest(Arrays.asList(randomArray(1, 3, X509Certificate[]::new, () -> { + X509Certificate mockX509Certificate = mock(X509Certificate.class); + when(mockX509Certificate.getSubjectX500Principal()).thenReturn(new X500Principal("CN=Test, OU=elasticsearch, O=org")); + when(mockX509Certificate.getIssuerX500Principal()).thenReturn(new X500Principal("CN=Test, OU=elasticsearch, O=org")); + return mockX509Certificate; + }))); + ve = request.validate(); + assertNull(ve); + } + + public void testSerialization() throws Exception { + List certificates = randomCertificateList(); + DelegatePkiAuthenticationRequest request = new DelegatePkiAuthenticationRequest(certificates); + try (BytesStreamOutput out = new BytesStreamOutput()) { + request.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + final DelegatePkiAuthenticationRequest serialized = new DelegatePkiAuthenticationRequest(in); + assertThat(request.getCertificateChain(), is(certificates)); + assertThat(request, is(serialized)); + assertThat(request.hashCode(), is(serialized.hashCode())); + } + } + } + + private List randomCertificateList() { + List certificates = Arrays.asList(randomArray(1, 3, X509Certificate[]::new, () -> { + try { + return readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/" + + randomFrom("testclient.crt", "testnode.crt", "testnode-ip-only.crt", "openldap.crt", "samba4.crt"))); + } catch (Exception e) { + throw new RuntimeException(e); + } + })); + return certificates; + } + + private X509Certificate readCert(Path path) throws Exception { + try (InputStream in = Files.newInputStream(path)) { + CertificateFactory factory = CertificateFactory.getInstance("X.509"); + return (X509Certificate) factory.generateCertificate(in); + } + } + + @Override + protected DelegatePkiAuthenticationRequest createTestInstance() { + List certificates = randomCertificateList(); + return new DelegatePkiAuthenticationRequest(certificates); + } + + @Override + protected DelegatePkiAuthenticationRequest doParseInstance(XContentParser parser) throws IOException { + return DelegatePkiAuthenticationRequest.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/DelegatePkiAuthenticationResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/DelegatePkiAuthenticationResponseTests.java new file mode 100644 index 0000000000000..362068053b71c --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/DelegatePkiAuthenticationResponseTests.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.action; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationResponse; + +import java.io.IOException; + +import static org.hamcrest.Matchers.is; + +public class DelegatePkiAuthenticationResponseTests extends AbstractXContentTestCase { + + public void testSerialization() throws Exception { + DelegatePkiAuthenticationResponse response = createTestInstance(); + try (BytesStreamOutput output = new BytesStreamOutput()) { + response.writeTo(output); + try (StreamInput input = output.bytes().streamInput()) { + DelegatePkiAuthenticationResponse serialized = new DelegatePkiAuthenticationResponse(input); + assertThat(response.getAccessToken(), is(serialized.getAccessToken())); + assertThat(response.getExpiresIn(), is(serialized.getExpiresIn())); + assertThat(response, is(serialized)); + } + } + } + + @Override + protected DelegatePkiAuthenticationResponse createTestInstance() { + return new DelegatePkiAuthenticationResponse(randomAlphaOfLengthBetween(0, 10), + TimeValue.parseTimeValue(randomTimeValue(), getClass().getSimpleName() + ".expiresIn")); + } + + @Override + protected DelegatePkiAuthenticationResponse doParseInstance(XContentParser parser) throws IOException { + return DelegatePkiAuthenticationResponse.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessageTests.java index e87e2cb0d93dc..3704d56b819c9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessageTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessageTests.java @@ -7,51 +7,20 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; -import org.elasticsearch.xpack.core.common.time.TimeUtils; -import org.junit.Before; import java.util.Date; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.hamcrest.Matchers.equalTo; public class AbstractAuditMessageTests extends AbstractXContentTestCase { - private long startMillis; static class TestAuditMessage extends AbstractAuditMessage { - private static final ParseField ID = new ParseField("test_id"); - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - AbstractAuditMessage.TYPE.getPreferredName(), - true, - a -> new TestAuditMessage((String)a[0], (String)a[1], (Level)a[2], (Date)a[3], (String)a[4])); - - static { - PARSER.declareString(optionalConstructorArg(), ID); - PARSER.declareString(constructorArg(), MESSAGE); - PARSER.declareField(constructorArg(), p -> { - if (p.currentToken() == XContentParser.Token.VALUE_STRING) { - 
return Level.fromString(p.text()); - } - throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); - }, LEVEL, ObjectParser.ValueType.STRING); - PARSER.declareField(constructorArg(), parser -> { - if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) { - return new Date(parser.longValue()); - } else if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { - return new Date(TimeUtils.dateStringToEpoch(parser.text())); - } - throw new IllegalArgumentException( - "unexpected token [" + parser.currentToken() + "] for [" + TIMESTAMP.getPreferredName() + "]"); - }, TIMESTAMP, ObjectParser.ValueType.VALUE); - PARSER.declareString(optionalConstructorArg(), NODE_NAME); - } - TestAuditMessage(String resourceId, String message, Level level, String nodeName) { - super(resourceId, message, level, nodeName); - } + private static final ParseField TEST_ID = new ParseField("test_id"); + public static final ConstructingObjectParser PARSER = + createParser("test_audit_message", TestAuditMessage::new, TEST_ID); TestAuditMessage(String resourceId, String message, Level level, Date timestamp, String nodeName) { super(resourceId, message, level, timestamp, nodeName); @@ -59,53 +28,45 @@ static class TestAuditMessage extends AbstractAuditMessage { @Override protected String getResourceField() { - return "test_id"; - } - - static AbstractAuditMessage.AbstractBuilder newBuilder() { - return new AbstractBuilder() { - @Override - protected TestAuditMessage newMessage(Level level, String resourceId, String message, String nodeName) { - return new TestAuditMessage(resourceId, message, level, nodeName); - } - }; + return TEST_ID.getPreferredName(); } } - @Before - public void setStartTime() { - startMillis = System.currentTimeMillis(); + private static final String RESOURCE_ID = "foo"; + private static final String MESSAGE = "some message"; + private static final Date TIMESTAMP = new Date(123456789); + private static final String NODE_NAME = "some_node"; + + public void testGetResourceField() { + TestAuditMessage message = new TestAuditMessage(RESOURCE_ID, MESSAGE, Level.INFO, TIMESTAMP, NODE_NAME); + assertThat(message.getResourceField(), equalTo(TestAuditMessage.TEST_ID.getPreferredName())); } public void testNewInfo() { - TestAuditMessage info = TestAuditMessage.newBuilder().info("foo", "some info", "some_node"); - assertEquals("foo", info.getResourceId()); - assertEquals("some info", info.getMessage()); - assertEquals(Level.INFO, info.getLevel()); - assertDateBetweenStartAndNow(info.getTimestamp()); + TestAuditMessage message = new TestAuditMessage(RESOURCE_ID, MESSAGE, Level.INFO, TIMESTAMP, NODE_NAME); + assertThat(message.getResourceId(), equalTo(RESOURCE_ID)); + assertThat(message.getMessage(), equalTo(MESSAGE)); + assertThat(message.getLevel(), equalTo(Level.INFO)); + assertThat(message.getTimestamp(), equalTo(TIMESTAMP)); + assertThat(message.getNodeName(), equalTo(NODE_NAME)); } public void testNewWarning() { - TestAuditMessage warning = TestAuditMessage.newBuilder().warning("bar", "some warning", "some_node"); - assertEquals("bar", warning.getResourceId()); - assertEquals("some warning", warning.getMessage()); - assertEquals(Level.WARNING, warning.getLevel()); - assertDateBetweenStartAndNow(warning.getTimestamp()); + TestAuditMessage message = new TestAuditMessage(RESOURCE_ID, MESSAGE, Level.WARNING, TIMESTAMP, NODE_NAME); + assertThat(message.getResourceId(), equalTo(RESOURCE_ID)); + assertThat(message.getMessage(), equalTo(MESSAGE)); + 
assertThat(message.getLevel(), equalTo(Level.WARNING)); + assertThat(message.getTimestamp(), equalTo(TIMESTAMP)); + assertThat(message.getNodeName(), equalTo(NODE_NAME)); } - public void testNewError() { - TestAuditMessage error = TestAuditMessage.newBuilder().error("foo", "some error", "some_node"); - assertEquals("foo", error.getResourceId()); - assertEquals("some error", error.getMessage()); - assertEquals(Level.ERROR, error.getLevel()); - assertDateBetweenStartAndNow(error.getTimestamp()); - } - - private void assertDateBetweenStartAndNow(Date timestamp) { - long timestampMillis = timestamp.getTime(); - assertTrue(timestampMillis >= startMillis); - assertTrue(timestampMillis <= System.currentTimeMillis()); + TestAuditMessage message = new TestAuditMessage(RESOURCE_ID, MESSAGE, Level.ERROR, TIMESTAMP, NODE_NAME); + assertThat(message.getResourceId(), equalTo(RESOURCE_ID)); + assertThat(message.getMessage(), equalTo(MESSAGE)); + assertThat(message.getLevel(), equalTo(Level.ERROR)); + assertThat(message.getTimestamp(), equalTo(TIMESTAMP)); + assertThat(message.getNodeName(), equalTo(NODE_NAME)); } @Override @@ -120,7 +81,12 @@ protected boolean supportsUnknownFields() { @Override protected TestAuditMessage createTestInstance() { - return new TestAuditMessage(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 200), - randomFrom(Level.values()), randomAlphaOfLengthBetween(1, 20)); + return new TestAuditMessage( + randomBoolean() ? null : randomAlphaOfLength(10), + randomAlphaOfLengthBetween(1, 20), + randomFrom(Level.values()), + new Date(), + randomBoolean() ? null : randomAlphaOfLengthBetween(1, 20) + ); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditorTests.java index a3c168d391d3f..e81e2cbfc1d34 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditorTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditorTests.java @@ -23,6 +23,11 @@ import java.io.IOException; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; @@ -36,6 +41,7 @@ public class AbstractAuditorTests extends ESTestCase { private Client client; private ArgumentCaptor indexRequestCaptor; + private long startMillis; @Before public void setUpMocks() { @@ -45,6 +51,8 @@ public void setUpMocks() { when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); indexRequestCaptor = ArgumentCaptor.forClass(IndexRequest.class); + + startMillis = System.currentTimeMillis(); } public void testInfo() throws IOException { @@ -53,12 +61,15 @@ public void testInfo() throws IOException { verify(client).index(indexRequestCaptor.capture(), any()); IndexRequest indexRequest = indexRequestCaptor.getValue(); - assertArrayEquals(new String[] {TEST_INDEX}, indexRequest.indices()); - assertEquals(TimeValue.timeValueSeconds(5), indexRequest.timeout()); + assertThat(indexRequest.indices(), arrayContaining(TEST_INDEX)); + assertThat(indexRequest.timeout(), equalTo(TimeValue.timeValueSeconds(5))); 
AbstractAuditMessageTests.TestAuditMessage auditMessage = parseAuditMessage(indexRequest.source()); - assertEquals("foo", auditMessage.getResourceId()); - assertEquals("Here is my info", auditMessage.getMessage()); - assertEquals(Level.INFO, auditMessage.getLevel()); + assertThat(auditMessage.getResourceId(), equalTo("foo")); + assertThat(auditMessage.getMessage(), equalTo("Here is my info")); + assertThat(auditMessage.getLevel(), equalTo(Level.INFO)); + assertThat(auditMessage.getTimestamp().getTime(), + allOf(greaterThanOrEqualTo(startMillis), lessThanOrEqualTo(System.currentTimeMillis()))); + assertThat(auditMessage.getNodeName(), equalTo(TEST_NODE_NAME)); } public void testWarning() throws IOException { @@ -67,12 +78,15 @@ public void testWarning() throws IOException { verify(client).index(indexRequestCaptor.capture(), any()); IndexRequest indexRequest = indexRequestCaptor.getValue(); - assertArrayEquals(new String[] {TEST_INDEX}, indexRequest.indices()); - assertEquals(TimeValue.timeValueSeconds(5), indexRequest.timeout()); + assertThat(indexRequest.indices(), arrayContaining(TEST_INDEX)); + assertThat(indexRequest.timeout(), equalTo(TimeValue.timeValueSeconds(5))); AbstractAuditMessageTests.TestAuditMessage auditMessage = parseAuditMessage(indexRequest.source()); - assertEquals("bar", auditMessage.getResourceId()); - assertEquals("Here is my warning", auditMessage.getMessage()); - assertEquals(Level.WARNING, auditMessage.getLevel()); + assertThat(auditMessage.getResourceId(), equalTo("bar")); + assertThat(auditMessage.getMessage(), equalTo("Here is my warning")); + assertThat(auditMessage.getLevel(), equalTo(Level.WARNING)); + assertThat(auditMessage.getTimestamp().getTime(), + allOf(greaterThanOrEqualTo(startMillis), lessThanOrEqualTo(System.currentTimeMillis()))); + assertThat(auditMessage.getNodeName(), equalTo(TEST_NODE_NAME)); } public void testError() throws IOException { @@ -81,23 +95,27 @@ public void testError() throws IOException { verify(client).index(indexRequestCaptor.capture(), any()); IndexRequest indexRequest = indexRequestCaptor.getValue(); - assertArrayEquals(new String[] {TEST_INDEX}, indexRequest.indices()); - assertEquals(TimeValue.timeValueSeconds(5), indexRequest.timeout()); + assertThat(indexRequest.indices(), arrayContaining(TEST_INDEX)); + assertThat(indexRequest.timeout(), equalTo(TimeValue.timeValueSeconds(5))); AbstractAuditMessageTests.TestAuditMessage auditMessage = parseAuditMessage(indexRequest.source()); - assertEquals("foobar", auditMessage.getResourceId()); - assertEquals("Here is my error", auditMessage.getMessage()); - assertEquals(Level.ERROR, auditMessage.getLevel()); + assertThat(auditMessage.getResourceId(), equalTo("foobar")); + assertThat(auditMessage.getMessage(), equalTo("Here is my error")); + assertThat(auditMessage.getLevel(), equalTo(Level.ERROR)); + assertThat(auditMessage.getTimestamp().getTime(), + allOf(greaterThanOrEqualTo(startMillis), lessThanOrEqualTo(System.currentTimeMillis()))); + assertThat(auditMessage.getNodeName(), equalTo(TEST_NODE_NAME)); } - private AbstractAuditMessageTests.TestAuditMessage parseAuditMessage(BytesReference msg) throws IOException { + private static AbstractAuditMessageTests.TestAuditMessage parseAuditMessage(BytesReference msg) throws IOException { XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(msg)) .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, msg.streamInput()); return 
AbstractAuditMessageTests.TestAuditMessage.PARSER.apply(parser, null); } - static class TestAuditor extends AbstractAuditor { + private static class TestAuditor extends AbstractAuditor { + TestAuditor(Client client) { - super(client, TEST_NODE_NAME, TEST_INDEX, TEST_ORIGIN, AbstractAuditMessageTests.TestAuditMessage.newBuilder()); + super(client, TEST_NODE_NAME, TEST_INDEX, TEST_ORIGIN, AbstractAuditMessageTests.TestAuditMessage::new); } } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessageTests.java index e845dd76fc679..c39d61ab9d9d5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessageTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessageTests.java @@ -8,48 +8,10 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; import org.elasticsearch.xpack.core.common.notifications.Level; -import org.junit.Before; import java.util.Date; public class DataFrameAuditMessageTests extends AbstractXContentTestCase { - private long startMillis; - - @Before - public void setStartTime() { - startMillis = System.currentTimeMillis(); - } - - public void testNewInfo() { - DataFrameAuditMessage info = DataFrameAuditMessage.builder().info("foo", "some info", "some_node"); - assertEquals("foo", info.getResourceId()); - assertEquals("some info", info.getMessage()); - assertEquals(Level.INFO, info.getLevel()); - assertDateBetweenStartAndNow(info.getTimestamp()); - } - - public void testNewWarning() { - DataFrameAuditMessage warning = DataFrameAuditMessage.builder().warning("bar", "some warning", "some_node"); - assertEquals("bar", warning.getResourceId()); - assertEquals("some warning", warning.getMessage()); - assertEquals(Level.WARNING, warning.getLevel()); - assertDateBetweenStartAndNow(warning.getTimestamp()); - } - - - public void testNewError() { - DataFrameAuditMessage error = DataFrameAuditMessage.builder().error("foo", "some error", "some_node"); - assertEquals("foo", error.getResourceId()); - assertEquals("some error", error.getMessage()); - assertEquals(Level.ERROR, error.getLevel()); - assertDateBetweenStartAndNow(error.getTimestamp()); - } - - private void assertDateBetweenStartAndNow(Date timestamp) { - long timestampMillis = timestamp.getTime(); - assertTrue(timestampMillis >= startMillis); - assertTrue(timestampMillis <= System.currentTimeMillis()); - } @Override protected DataFrameAuditMessage doParseInstance(XContentParser parser) { @@ -67,6 +29,7 @@ protected DataFrameAuditMessage createTestInstance() { randomBoolean() ? null : randomAlphaOfLength(10), randomAlphaOfLengthBetween(1, 20), randomFrom(Level.values()), + new Date(), randomBoolean() ? 
null : randomAlphaOfLengthBetween(1, 20) ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/RegressionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/RegressionTests.java index e6a3dbbe8c212..77429141ed271 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/RegressionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/RegressionTests.java @@ -33,8 +33,9 @@ public static Regression createRandom() { Integer maximumNumberTrees = randomBoolean() ? null : randomIntBetween(1, 2000); Double featureBagFraction = randomBoolean() ? null : randomDoubleBetween(0.0, 1.0, false); String predictionFieldName = randomBoolean() ? null : randomAlphaOfLength(10); + Double trainingPercent = randomBoolean() ? null : randomDoubleBetween(1.0, 100.0, true); return new Regression(randomAlphaOfLength(10), lambda, gamma, eta, maximumNumberTrees, featureBagFraction, - predictionFieldName); + predictionFieldName, trainingPercent); } @Override @@ -44,57 +45,83 @@ protected Writeable.Reader instanceReader() { public void testRegression_GivenNegativeLambda() { ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> new Regression("foo", -0.00001, 0.0, 0.5, 500, 0.3, "result")); + () -> new Regression("foo", -0.00001, 0.0, 0.5, 500, 0.3, "result", 100.0)); assertThat(e.getMessage(), equalTo("[lambda] must be a non-negative double")); } public void testRegression_GivenNegativeGamma() { ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> new Regression("foo", 0.0, -0.00001, 0.5, 500, 0.3, "result")); + () -> new Regression("foo", 0.0, -0.00001, 0.5, 500, 0.3, "result", 100.0)); assertThat(e.getMessage(), equalTo("[gamma] must be a non-negative double")); } public void testRegression_GivenEtaIsZero() { ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> new Regression("foo", 0.0, 0.0, 0.0, 500, 0.3, "result")); + () -> new Regression("foo", 0.0, 0.0, 0.0, 500, 0.3, "result", 100.0)); assertThat(e.getMessage(), equalTo("[eta] must be a double in [0.001, 1]")); } public void testRegression_GivenEtaIsGreaterThanOne() { ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> new Regression("foo", 0.0, 0.0, 1.00001, 500, 0.3, "result")); + () -> new Regression("foo", 0.0, 0.0, 1.00001, 500, 0.3, "result", 100.0)); assertThat(e.getMessage(), equalTo("[eta] must be a double in [0.001, 1]")); } public void testRegression_GivenMaximumNumberTreesIsZero() { ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> new Regression("foo", 0.0, 0.0, 0.5, 0, 0.3, "result")); + () -> new Regression("foo", 0.0, 0.0, 0.5, 0, 0.3, "result", 100.0)); assertThat(e.getMessage(), equalTo("[maximum_number_trees] must be an integer in [1, 2000]")); } public void testRegression_GivenMaximumNumberTreesIsGreaterThan2k() { ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> new Regression("foo", 0.0, 0.0, 0.5, 2001, 0.3, "result")); + () -> new Regression("foo", 0.0, 0.0, 0.5, 2001, 0.3, "result", 100.0)); assertThat(e.getMessage(), equalTo("[maximum_number_trees] must be an integer in [1, 2000]")); } public void testRegression_GivenFeatureBagFractionIsLessThanZero() { ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () 
-> new Regression("foo", 0.0, 0.0, 0.5, 500, -0.00001, "result")); + () -> new Regression("foo", 0.0, 0.0, 0.5, 500, -0.00001, "result", 100.0)); assertThat(e.getMessage(), equalTo("[feature_bag_fraction] must be a double in (0, 1]")); } public void testRegression_GivenFeatureBagFractionIsGreaterThanOne() { ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> new Regression("foo", 0.0, 0.0, 0.5, 500, 1.00001, "result")); + () -> new Regression("foo", 0.0, 0.0, 0.5, 500, 1.00001, "result", 100.0)); assertThat(e.getMessage(), equalTo("[feature_bag_fraction] must be a double in (0, 1]")); } + + public void testRegression_GivenTrainingPercentIsNull() { + Regression regression = new Regression("foo", 0.0, 0.0, 0.5, 500, 1.0, "result", null); + assertThat(regression.getTrainingPercent(), equalTo(100.0)); + } + + public void testRegression_GivenTrainingPercentIsBoundary() { + Regression regression = new Regression("foo", 0.0, 0.0, 0.5, 500, 1.0, "result", 1.0); + assertThat(regression.getTrainingPercent(), equalTo(1.0)); + regression = new Regression("foo", 0.0, 0.0, 0.5, 500, 1.0, "result", 100.0); + assertThat(regression.getTrainingPercent(), equalTo(100.0)); + } + + public void testRegression_GivenTrainingPercentIsLessThanOne() { + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> new Regression("foo", 0.0, 0.0, 0.5, 500, 1.0, "result", 0.999)); + + assertThat(e.getMessage(), equalTo("[training_percent] must be a double in [1, 100]")); + } + + public void testRegression_GivenTrainingPercentIsGreaterThan100() { + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> new Regression("foo", 0.0, 0.0, 0.5, 500, 1.0, "result", 100.0001)); + + assertThat(e.getMessage(), equalTo("[training_percent] must be a double in [1, 100]")); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/notifications/AnomalyDetectionAuditMessageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/notifications/AnomalyDetectionAuditMessageTests.java new file mode 100644 index 0000000000000..f3a12b8a75b9a --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/notifications/AnomalyDetectionAuditMessageTests.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.notifications; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.xpack.core.common.notifications.Level; + +import java.util.Date; + +public class AnomalyDetectionAuditMessageTests extends AbstractXContentTestCase { + + @Override + protected AnomalyDetectionAuditMessage doParseInstance(XContentParser parser) { + return AnomalyDetectionAuditMessage.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected AnomalyDetectionAuditMessage createTestInstance() { + return new AnomalyDetectionAuditMessage( + randomBoolean() ? null : randomAlphaOfLength(10), + randomAlphaOfLengthBetween(1, 20), + randomFrom(Level.values()), + new Date(), + randomBoolean() ? 
null : randomAlphaOfLengthBetween(1, 20) + ); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequestTests.java index 27be0d88eb82c..1c5548af70a81 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequestTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.security.action; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.io.stream.InputStreamStreamInput; @@ -17,15 +18,18 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; +import static org.elasticsearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; public class GetApiKeyRequestTests extends ESTestCase { public void testRequestValidation() { - GetApiKeyRequest request = GetApiKeyRequest.usingApiKeyId(randomAlphaOfLength(5)); + GetApiKeyRequest request = GetApiKeyRequest.usingApiKeyId(randomAlphaOfLength(5), randomBoolean()); ActionRequestValidationException ve = request.validate(); assertNull(ve); - request = GetApiKeyRequest.usingApiKeyName(randomAlphaOfLength(5)); + request = GetApiKeyRequest.usingApiKeyName(randomAlphaOfLength(5), randomBoolean()); ve = request.validate(); assertNull(ve); request = GetApiKeyRequest.usingRealmName(randomAlphaOfLength(5)); @@ -45,12 +49,14 @@ class Dummy extends ActionRequest { String user; String apiKeyId; String apiKeyName; + boolean ownedByAuthenticatedUser; Dummy(String[] a) { realm = a[0]; user = a[1]; apiKeyId = a[2]; apiKeyName = a[3]; + ownedByAuthenticatedUser = Boolean.parseBoolean(a[4]); } @Override @@ -65,23 +71,31 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(user); out.writeOptionalString(apiKeyId); out.writeOptionalString(apiKeyName); + out.writeOptionalBoolean(ownedByAuthenticatedUser); } } - String[][] inputs = new String[][] { - { randomFrom(new String[] { null, "" }), randomFrom(new String[] { null, "" }), randomFrom(new String[] { null, "" }), - randomFrom(new String[] { null, "" }) }, - { randomFrom(new String[] { null, "" }), "user", "api-kid", "api-kname" }, - { "realm", randomFrom(new String[] { null, "" }), "api-kid", "api-kname" }, - { "realm", "user", "api-kid", randomFrom(new String[] { null, "" }) }, - { randomFrom(new String[] { null, "" }), randomFrom(new String[] { null, "" }), "api-kid", "api-kname" } }; - String[][] expectedErrorMessages = new String[][] { { "One of [api key id, api key name, username, realm name] must be specified" }, - { "username or realm name must not be specified when the api key id or api key name is specified", - "only one of [api key id, api key name] can be specified" }, - { "username or realm name must not be specified when the api key id or api key name is specified", - "only one of [api key id, api key name] can be specified" }, - { "username or realm name must not be specified when the api key id or api key name is specified" }, - { "only one of [api key id, api key name] can be specified" } }; + String[][] inputs = new String[][]{ + {randomNullOrEmptyString(), randomNullOrEmptyString(), 
randomNullOrEmptyString(), + randomNullOrEmptyString(), "false"}, + {randomNullOrEmptyString(), "user", "api-kid", "api-kname", "false"}, + {"realm", randomNullOrEmptyString(), "api-kid", "api-kname", "false"}, + {"realm", "user", "api-kid", randomNullOrEmptyString(), "false"}, + {randomNullOrEmptyString(), randomNullOrEmptyString(), "api-kid", "api-kname", "false"}, + {"realm", randomNullOrEmptyString(), randomNullOrEmptyString(), randomNullOrEmptyString(), "true"}, + {randomNullOrEmptyString(), "user", randomNullOrEmptyString(), randomNullOrEmptyString(), "true"} + }; + String[][] expectedErrorMessages = new String[][]{ + {"One of [api key id, api key name, username, realm name] must be specified if [owner] flag is false"}, + {"username or realm name must not be specified when the api key id or api key name is specified", + "only one of [api key id, api key name] can be specified"}, + {"username or realm name must not be specified when the api key id or api key name is specified", + "only one of [api key id, api key name] can be specified"}, + {"username or realm name must not be specified when the api key id or api key name is specified"}, + {"only one of [api key id, api key name] can be specified"}, + {"neither username nor realm-name may be specified when retrieving owned API keys"}, + {"neither username nor realm-name may be specified when retrieving owned API keys"} + }; for (int caseNo = 0; caseNo < inputs.length; caseNo++) { try (ByteArrayOutputStream bos = new ByteArrayOutputStream(); @@ -100,4 +114,40 @@ public void writeTo(StreamOutput out) throws IOException { } } } + + public void testSerialization() throws IOException { + final String apiKeyId = randomAlphaOfLength(5); + final boolean ownedByAuthenticatedUser = true; + GetApiKeyRequest getApiKeyRequest = GetApiKeyRequest.usingApiKeyId(apiKeyId, ownedByAuthenticatedUser); + { + ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); + OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); + out.setVersion(randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_3_0)); + getApiKeyRequest.writeTo(out); + + InputStreamStreamInput inputStreamStreamInput = new InputStreamStreamInput(new ByteArrayInputStream(outBuffer.toByteArray())); + inputStreamStreamInput.setVersion(randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_3_0)); + GetApiKeyRequest requestFromInputStream = new GetApiKeyRequest(inputStreamStreamInput); + + assertThat(requestFromInputStream.getApiKeyId(), equalTo(getApiKeyRequest.getApiKeyId())); + // old version so the default for `ownedByAuthenticatedUser` is false + assertThat(requestFromInputStream.ownedByAuthenticatedUser(), is(false)); + } + { + ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); + OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); + out.setVersion(randomVersionBetween(random(), Version.V_7_4_0, Version.CURRENT)); + getApiKeyRequest.writeTo(out); + + InputStreamStreamInput inputStreamStreamInput = new InputStreamStreamInput(new ByteArrayInputStream(outBuffer.toByteArray())); + inputStreamStreamInput.setVersion(randomVersionBetween(random(), Version.V_7_4_0, Version.CURRENT)); + GetApiKeyRequest requestFromInputStream = new GetApiKeyRequest(inputStreamStreamInput); + + assertThat(requestFromInputStream, equalTo(getApiKeyRequest)); + } + } + + private static String randomNullOrEmptyString() { + return randomBoolean() ? 
"" : null; + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequestTests.java index 3d7fd90234286..2f959c4841761 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequestTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.security.action; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.io.stream.InputStreamStreamInput; @@ -17,15 +18,18 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; +import static org.elasticsearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; public class InvalidateApiKeyRequestTests extends ESTestCase { public void testRequestValidation() { - InvalidateApiKeyRequest request = InvalidateApiKeyRequest.usingApiKeyId(randomAlphaOfLength(5)); + InvalidateApiKeyRequest request = InvalidateApiKeyRequest.usingApiKeyId(randomAlphaOfLength(5), randomBoolean()); ActionRequestValidationException ve = request.validate(); assertNull(ve); - request = InvalidateApiKeyRequest.usingApiKeyName(randomAlphaOfLength(5)); + request = InvalidateApiKeyRequest.usingApiKeyName(randomAlphaOfLength(5), randomBoolean()); ve = request.validate(); assertNull(ve); request = InvalidateApiKeyRequest.usingRealmName(randomAlphaOfLength(5)); @@ -45,12 +49,14 @@ class Dummy extends ActionRequest { String user; String apiKeyId; String apiKeyName; + boolean ownedByAuthenticatedUser; Dummy(String[] a) { realm = a[0]; user = a[1]; apiKeyId = a[2]; apiKeyName = a[3]; + ownedByAuthenticatedUser = Boolean.parseBoolean(a[4]); } @Override @@ -65,24 +71,31 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(user); out.writeOptionalString(apiKeyId); out.writeOptionalString(apiKeyName); + out.writeOptionalBoolean(ownedByAuthenticatedUser); } } - String[][] inputs = new String[][] { - { randomFrom(new String[] { null, "" }), randomFrom(new String[] { null, "" }), randomFrom(new String[] { null, "" }), - randomFrom(new String[] { null, "" }) }, - { randomFrom(new String[] { null, "" }), "user", "api-kid", "api-kname" }, - { "realm", randomFrom(new String[] { null, "" }), "api-kid", "api-kname" }, - { "realm", "user", "api-kid", randomFrom(new String[] { null, "" }) }, - { randomFrom(new String[] { null, "" }), randomFrom(new String[] { null, "" }), "api-kid", "api-kname" } }; - String[][] expectedErrorMessages = new String[][] { { "One of [api key id, api key name, username, realm name] must be specified" }, - { "username or realm name must not be specified when the api key id or api key name is specified", - "only one of [api key id, api key name] can be specified" }, - { "username or realm name must not be specified when the api key id or api key name is specified", - "only one of [api key id, api key name] can be specified" }, - { "username or realm name must not be specified when the api key id or api key name is specified" }, - { "only one of [api key id, api key name] can be specified" } }; - + String[][] inputs = new String[][]{ + 
{randomNullOrEmptyString(), randomNullOrEmptyString(), randomNullOrEmptyString(), + randomNullOrEmptyString(), "false"}, + {randomNullOrEmptyString(), "user", "api-kid", "api-kname", "false"}, + {"realm", randomNullOrEmptyString(), "api-kid", "api-kname", "false"}, + {"realm", "user", "api-kid", randomNullOrEmptyString(), "false"}, + {randomNullOrEmptyString(), randomNullOrEmptyString(), "api-kid", "api-kname", "false"}, + {"realm", randomNullOrEmptyString(), randomNullOrEmptyString(), randomNullOrEmptyString(), "true"}, + {randomNullOrEmptyString(), "user", randomNullOrEmptyString(), randomNullOrEmptyString(), "true"}, + }; + String[][] expectedErrorMessages = new String[][]{ + {"One of [api key id, api key name, username, realm name] must be specified if [owner] flag is false"}, + {"username or realm name must not be specified when the api key id or api key name is specified", + "only one of [api key id, api key name] can be specified"}, + {"username or realm name must not be specified when the api key id or api key name is specified", + "only one of [api key id, api key name] can be specified"}, + {"username or realm name must not be specified when the api key id or api key name is specified"}, + {"only one of [api key id, api key name] can be specified"}, + {"neither username nor realm-name may be specified when invalidating owned API keys"}, + {"neither username nor realm-name may be specified when invalidating owned API keys"} + }; for (int caseNo = 0; caseNo < inputs.length; caseNo++) { try (ByteArrayOutputStream bos = new ByteArrayOutputStream(); @@ -101,4 +114,41 @@ public void writeTo(StreamOutput out) throws IOException { } } } + + public void testSerialization() throws IOException { + final String apiKeyId = randomAlphaOfLength(5); + final boolean ownedByAuthenticatedUser = true; + InvalidateApiKeyRequest invalidateApiKeyRequest = InvalidateApiKeyRequest.usingApiKeyId(apiKeyId, ownedByAuthenticatedUser); + { + ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); + OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); + out.setVersion(randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_3_0)); + invalidateApiKeyRequest.writeTo(out); + + InputStreamStreamInput inputStreamStreamInput = new InputStreamStreamInput(new ByteArrayInputStream(outBuffer.toByteArray())); + inputStreamStreamInput.setVersion(randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_3_0)); + InvalidateApiKeyRequest requestFromInputStream = new InvalidateApiKeyRequest(inputStreamStreamInput); + + assertThat(requestFromInputStream.getId(), equalTo(invalidateApiKeyRequest.getId())); + // old version so the default for `ownedByAuthenticatedUser` is false + assertThat(requestFromInputStream.ownedByAuthenticatedUser(), is(false)); + } + { + ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); + OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); + out.setVersion(randomVersionBetween(random(), Version.V_7_4_0, Version.CURRENT)); + invalidateApiKeyRequest.writeTo(out); + + InputStreamStreamInput inputStreamStreamInput = new InputStreamStreamInput(new ByteArrayInputStream(outBuffer.toByteArray())); + inputStreamStreamInput.setVersion(randomVersionBetween(random(), Version.V_7_4_0, Version.CURRENT)); + InvalidateApiKeyRequest requestFromInputStream = new InvalidateApiKeyRequest(inputStreamStreamInput); + + assertThat(requestFromInputStream, equalTo(invalidateApiKeyRequest)); + } + } + + private static String randomNullOrEmptyString() { + return 
randomFrom(new String[]{"", null}); + } + } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermissionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermissionTests.java index 18eb99e97f2f7..5a52519c7b72e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermissionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermissionTests.java @@ -12,12 +12,11 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; -import org.elasticsearch.xpack.core.security.support.Automatons; import org.junit.Before; -import org.mockito.Mockito; import java.io.IOException; import java.util.Objects; @@ -26,9 +25,11 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; public class ClusterPermissionTests extends ESTestCase { - private TransportRequest mockTransportRequest = Mockito.mock(TransportRequest.class); + private TransportRequest mockTransportRequest; + private Authentication mockAuthentication; private ClusterPrivilege cpThatDoesNothing = new ClusterPrivilege() { @Override public ClusterPermission.Builder buildPermission(ClusterPermission.Builder builder) { @@ -38,7 +39,8 @@ public ClusterPermission.Builder buildPermission(ClusterPermission.Builder build @Before public void setup() { - mockTransportRequest = Mockito.mock(TransportRequest.class); + mockTransportRequest = mock(TransportRequest.class); + mockAuthentication = mock(Authentication.class); } public void testClusterPermissionBuilder() { @@ -78,10 +80,12 @@ public void testClusterPermissionCheck() { builder = mockConfigurableClusterPrivilege2.buildPermission(builder); final ClusterPermission clusterPermission = builder.build(); - assertThat(clusterPermission.check("cluster:admin/xpack/security/token/invalidate", mockTransportRequest), is(true)); - assertThat(clusterPermission.check("cluster:admin/ilm/stop", mockTransportRequest), is(true)); - assertThat(clusterPermission.check("cluster:admin/xpack/security/privilege/get", mockTransportRequest), is(true)); - assertThat(clusterPermission.check("cluster:admin/snapshot/status", mockTransportRequest), is(false)); + assertThat(clusterPermission.check("cluster:admin/xpack/security/token/invalidate", mockTransportRequest, mockAuthentication), + is(true)); + assertThat(clusterPermission.check("cluster:admin/ilm/stop", mockTransportRequest, mockAuthentication), is(true)); + assertThat(clusterPermission.check("cluster:admin/xpack/security/privilege/get", mockTransportRequest, mockAuthentication), + is(true)); + assertThat(clusterPermission.check("cluster:admin/snapshot/status", mockTransportRequest, mockAuthentication), is(false)); } public void testClusterPermissionCheckWithEmptyActionPatterns() { @@ -89,8 +93,9 @@ public void testClusterPermissionCheckWithEmptyActionPatterns() { builder.add(cpThatDoesNothing, Set.of(), Set.of()); final ClusterPermission 
clusterPermission = builder.build(); - assertThat(clusterPermission.check("cluster:admin/ilm/start", mockTransportRequest), is(false)); - assertThat(clusterPermission.check("cluster:admin/xpack/security/token/invalidate", mockTransportRequest), is(false)); + assertThat(clusterPermission.check("cluster:admin/ilm/start", mockTransportRequest, mockAuthentication), is(false)); + assertThat(clusterPermission.check("cluster:admin/xpack/security/token/invalidate", mockTransportRequest, mockAuthentication), + is(false)); } public void testClusterPermissionCheckWithExcludeOnlyActionPatterns() { @@ -98,8 +103,9 @@ public void testClusterPermissionCheckWithExcludeOnlyActionPatterns() { builder.add(cpThatDoesNothing, Set.of(), Set.of("cluster:some/thing/to/exclude")); final ClusterPermission clusterPermission = builder.build(); - assertThat(clusterPermission.check("cluster:admin/ilm/start", mockTransportRequest), is(false)); - assertThat(clusterPermission.check("cluster:admin/xpack/security/token/invalidate", mockTransportRequest), is(false)); + assertThat(clusterPermission.check("cluster:admin/ilm/start", mockTransportRequest, mockAuthentication), is(false)); + assertThat(clusterPermission.check("cluster:admin/xpack/security/token/invalidate", mockTransportRequest, mockAuthentication), + is(false)); } public void testClusterPermissionCheckWithActionPatterns() { @@ -107,8 +113,9 @@ public void testClusterPermissionCheckWithActionPatterns() { builder.add(cpThatDoesNothing, Set.of("cluster:admin/*"), Set.of("cluster:admin/ilm/*")); final ClusterPermission clusterPermission = builder.build(); - assertThat(clusterPermission.check("cluster:admin/ilm/start", mockTransportRequest), is(false)); - assertThat(clusterPermission.check("cluster:admin/xpack/security/token/invalidate", mockTransportRequest), is(true)); + assertThat(clusterPermission.check("cluster:admin/ilm/start", mockTransportRequest, mockAuthentication), is(false)); + assertThat(clusterPermission.check("cluster:admin/xpack/security/token/invalidate", mockTransportRequest, mockAuthentication), + is(true)); } public void testClusterPermissionCheckWithActionPatternsAndNoExludePatterns() { @@ -116,8 +123,9 @@ public void testClusterPermissionCheckWithActionPatternsAndNoExludePatterns() { builder.add(cpThatDoesNothing, Set.of("cluster:admin/*"), Set.of()); final ClusterPermission clusterPermission = builder.build(); - assertThat(clusterPermission.check("cluster:admin/ilm/start", mockTransportRequest), is(true)); - assertThat(clusterPermission.check("cluster:admin/xpack/security/token/invalidate", mockTransportRequest), is(true)); + assertThat(clusterPermission.check("cluster:admin/ilm/start", mockTransportRequest, mockAuthentication), is(true)); + assertThat(clusterPermission.check("cluster:admin/xpack/security/token/invalidate", mockTransportRequest, mockAuthentication), + is(true)); } public void testNoneClusterPermissionIsImpliedByNone() { @@ -223,7 +231,6 @@ public void testClusterPermissionSubsetIsImpliedByAllClusterPermission() { } private static class MockConfigurableClusterPrivilege implements ConfigurableClusterPrivilege { - static final Predicate<String> ACTION_PREDICATE = Automatons.predicate("cluster:admin/xpack/security/privilege/*"); private Predicate<TransportRequest> requestPredicate; MockConfigurableClusterPrivilege(Predicate<TransportRequest> requestPredicate) { @@ -275,7 +282,7 @@ public String toString() { @Override public ClusterPermission.Builder buildPermission(ClusterPermission.Builder builder) { - return builder.add(this, ACTION_PREDICATE, requestPredicate); + return 
builder.add(this, Set.of("cluster:admin/xpack/security/privilege/*"), requestPredicate); } } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRoleTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRoleTests.java index 4bcc581d072b1..74e06d1cbce25 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRoleTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRoleTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor; @@ -128,24 +129,26 @@ public void testAuthorize() { public void testCheckClusterAction() { Role fromRole = Role.builder("a-role").cluster(Collections.singleton("manage_security"), Collections.emptyList()) - .build(); - assertThat(fromRole.checkClusterAction("cluster:admin/xpack/security/x", mock(TransportRequest.class)), is(true)); + .build(); + Authentication authentication = mock(Authentication.class); + assertThat(fromRole.checkClusterAction("cluster:admin/xpack/security/x", mock(TransportRequest.class), authentication), is(true)); { Role limitedByRole = Role.builder("limited-role") - .cluster(Collections.singleton("all"), Collections.emptyList()).build(); - assertThat(limitedByRole.checkClusterAction("cluster:admin/xpack/security/x", mock(TransportRequest.class)), is(true)); - assertThat(limitedByRole.checkClusterAction("cluster:other-action", mock(TransportRequest.class)), is(true)); + .cluster(Collections.singleton("all"), Collections.emptyList()).build(); + assertThat(limitedByRole.checkClusterAction("cluster:admin/xpack/security/x", mock(TransportRequest.class), authentication), + is(true)); + assertThat(limitedByRole.checkClusterAction("cluster:other-action", mock(TransportRequest.class), authentication), is(true)); Role role = LimitedRole.createLimitedRole(fromRole, limitedByRole); - assertThat(role.checkClusterAction("cluster:admin/xpack/security/x", mock(TransportRequest.class)), is(true)); - assertThat(role.checkClusterAction("cluster:other-action", mock(TransportRequest.class)), is(false)); + assertThat(role.checkClusterAction("cluster:admin/xpack/security/x", mock(TransportRequest.class), authentication), is(true)); + assertThat(role.checkClusterAction("cluster:other-action", mock(TransportRequest.class), authentication), is(false)); } { Role limitedByRole = Role.builder("limited-role") - .cluster(Collections.singleton("monitor"), Collections.emptyList()).build(); - assertThat(limitedByRole.checkClusterAction("cluster:monitor/me", mock(TransportRequest.class)), is(true)); + .cluster(Collections.singleton("monitor"), Collections.emptyList()).build(); + assertThat(limitedByRole.checkClusterAction("cluster:monitor/me", mock(TransportRequest.class), authentication), is(true)); Role role = LimitedRole.createLimitedRole(fromRole, limitedByRole); - assertThat(role.checkClusterAction("cluster:monitor/me", mock(TransportRequest.class)), is(false)); - 
assertThat(role.checkClusterAction("cluster:admin/xpack/security/x", mock(TransportRequest.class)), is(false)); + assertThat(role.checkClusterAction("cluster:monitor/me", mock(TransportRequest.class), authentication), is(false)); + assertThat(role.checkClusterAction("cluster:admin/xpack/security/x", mock(TransportRequest.class), authentication), is(false)); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageApplicationPrivilegesTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageApplicationPrivilegesTests.java index dfe1147fb2c43..10eea045aadab 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageApplicationPrivilegesTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageApplicationPrivilegesTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.xpack.core.security.action.privilege.DeletePrivilegesRequest; import org.elasticsearch.xpack.core.security.action.privilege.GetPrivilegesRequest; import org.elasticsearch.xpack.core.security.action.privilege.PutPrivilegesRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges.ManageApplicationPrivileges; @@ -40,6 +41,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; public class ManageApplicationPrivilegesTests extends ESTestCase { @@ -97,14 +99,15 @@ public void testActionAndRequestPredicate() { assertThat(kibanaAndLogstashPermission, notNullValue()); assertThat(cloudAndSwiftypePermission, notNullValue()); + final Authentication authentication = mock(Authentication.class); final GetPrivilegesRequest getKibana1 = new GetPrivilegesRequest(); getKibana1.application("kibana-1"); - assertTrue(kibanaAndLogstashPermission.check("cluster:admin/xpack/security/privilege/get", getKibana1)); - assertFalse(cloudAndSwiftypePermission.check("cluster:admin/xpack/security/privilege/get", getKibana1)); + assertTrue(kibanaAndLogstashPermission.check("cluster:admin/xpack/security/privilege/get", getKibana1, authentication)); + assertFalse(cloudAndSwiftypePermission.check("cluster:admin/xpack/security/privilege/get", getKibana1, authentication)); final DeletePrivilegesRequest deleteLogstash = new DeletePrivilegesRequest("logstash", new String[]{"all"}); - assertTrue(kibanaAndLogstashPermission.check("cluster:admin/xpack/security/privilege/get", deleteLogstash)); - assertFalse(cloudAndSwiftypePermission.check("cluster:admin/xpack/security/privilege/get", deleteLogstash)); + assertTrue(kibanaAndLogstashPermission.check("cluster:admin/xpack/security/privilege/get", deleteLogstash, authentication)); + assertFalse(cloudAndSwiftypePermission.check("cluster:admin/xpack/security/privilege/get", deleteLogstash, authentication)); final PutPrivilegesRequest putKibana = new PutPrivilegesRequest(); @@ -114,11 +117,12 @@ public void testActionAndRequestPredicate() { randomAlphaOfLengthBetween(3, 6).toLowerCase(Locale.ROOT), Collections.emptySet(), Collections.emptyMap())); } putKibana.setPrivileges(kibanaPrivileges); - assertTrue(kibanaAndLogstashPermission.check("cluster:admin/xpack/security/privilege/get", putKibana)); - 
assertFalse(cloudAndSwiftypePermission.check("cluster:admin/xpack/security/privilege/get", putKibana)); + assertTrue(kibanaAndLogstashPermission.check("cluster:admin/xpack/security/privilege/get", putKibana, authentication)); + assertFalse(cloudAndSwiftypePermission.check("cluster:admin/xpack/security/privilege/get", putKibana, authentication)); } public void testSecurityForGetAllApplicationPrivileges() { + final Authentication authentication = mock(Authentication.class); final GetPrivilegesRequest getAll = new GetPrivilegesRequest(); getAll.application(null); getAll.privileges(new String[0]); @@ -130,8 +134,8 @@ public void testSecurityForGetAllApplicationPrivileges() { final ClusterPermission kibanaOnlyPermission = kibanaOnly.buildPermission(ClusterPermission.builder()).build(); final ClusterPermission allAppsPermission = allApps.buildPermission(ClusterPermission.builder()).build(); - assertFalse(kibanaOnlyPermission.check("cluster:admin/xpack/security/privilege/get", getAll)); - assertTrue(allAppsPermission.check("cluster:admin/xpack/security/privilege/get", getAll)); + assertFalse(kibanaOnlyPermission.check("cluster:admin/xpack/security/privilege/get", getAll, authentication)); + assertTrue(allAppsPermission.check("cluster:admin/xpack/security/privilege/get", getAll, authentication)); } private ManageApplicationPrivileges clone(ManageApplicationPrivileges original) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilegeTests.java new file mode 100644 index 0000000000000..c6d67b9e00b58 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilegeTests.java @@ -0,0 +1,110 @@ +/* + * + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ * + */ + +package org.elasticsearch.xpack.core.security.authz.privilege; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.security.action.GetApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; +import org.elasticsearch.xpack.core.security.user.User; + +import java.util.Map; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ManageOwnApiKeyClusterPrivilegeTests extends ESTestCase { + + public void testAuthenticationWithApiKeyAllowsAccessToApiKeyActionsWhenItIsOwner() { + final ClusterPermission clusterPermission = + ManageOwnApiKeyClusterPrivilege.INSTANCE.buildPermission(ClusterPermission.builder()).build(); + + final String apiKeyId = randomAlphaOfLengthBetween(4, 7); + final Authentication authentication = createMockAuthentication("joe","_es_api_key", "_es_api_key", + Map.of("_security_api_key_id", apiKeyId)); + final TransportRequest getApiKeyRequest = GetApiKeyRequest.usingApiKeyId(apiKeyId, randomBoolean()); + final TransportRequest invalidateApiKeyRequest = InvalidateApiKeyRequest.usingApiKeyId(apiKeyId, randomBoolean()); + + assertTrue(clusterPermission.check("cluster:admin/xpack/security/api_key/get", getApiKeyRequest, authentication)); + assertTrue(clusterPermission.check("cluster:admin/xpack/security/api_key/invalidate", invalidateApiKeyRequest, authentication)); + assertFalse(clusterPermission.check("cluster:admin/something", mock(TransportRequest.class), authentication)); + } + + public void testAuthenticationWithApiKeyDeniesAccessToApiKeyActionsWhenItIsNotOwner() { + final ClusterPermission clusterPermission = + ManageOwnApiKeyClusterPrivilege.INSTANCE.buildPermission(ClusterPermission.builder()).build(); + + final String apiKeyId = randomAlphaOfLengthBetween(4, 7); + final Authentication authentication = createMockAuthentication("joe","_es_api_key", "_es_api_key", + Map.of("_security_api_key_id", randomAlphaOfLength(7))); + final TransportRequest getApiKeyRequest = GetApiKeyRequest.usingApiKeyId(apiKeyId, randomBoolean()); + final TransportRequest invalidateApiKeyRequest = InvalidateApiKeyRequest.usingApiKeyId(apiKeyId, randomBoolean()); + + assertFalse(clusterPermission.check("cluster:admin/xpack/security/api_key/get", getApiKeyRequest, authentication)); + assertFalse(clusterPermission.check("cluster:admin/xpack/security/api_key/invalidate", invalidateApiKeyRequest, authentication)); + } + + public void testAuthenticationWithUserAllowsAccessToApiKeyActionsWhenItIsOwner() { + final ClusterPermission clusterPermission = + ManageOwnApiKeyClusterPrivilege.INSTANCE.buildPermission(ClusterPermission.builder()).build(); + + final Authentication authentication = createMockAuthentication("joe","realm1", "native", Map.of()); + final TransportRequest getApiKeyRequest = GetApiKeyRequest.usingRealmAndUserName("realm1", "joe"); + final TransportRequest invalidateApiKeyRequest = InvalidateApiKeyRequest.usingRealmAndUserName("realm1", "joe"); + + assertTrue(clusterPermission.check("cluster:admin/xpack/security/api_key/get", getApiKeyRequest, authentication)); + assertTrue(clusterPermission.check("cluster:admin/xpack/security/api_key/invalidate", invalidateApiKeyRequest, authentication)); + assertFalse(clusterPermission.check("cluster:admin/something", 
mock(TransportRequest.class), authentication)); + } + + public void testAuthenticationWithUserAllowsAccessToApiKeyActionsWhenItIsOwner_WithOwnerFlagOnly() { + final ClusterPermission clusterPermission = + ManageOwnApiKeyClusterPrivilege.INSTANCE.buildPermission(ClusterPermission.builder()).build(); + + final Authentication authentication = createMockAuthentication("joe","realm1", "native", Map.of()); + final TransportRequest getApiKeyRequest = GetApiKeyRequest.forOwnedApiKeys(); + final TransportRequest invalidateApiKeyRequest = InvalidateApiKeyRequest.forOwnedApiKeys(); + + assertTrue(clusterPermission.check("cluster:admin/xpack/security/api_key/get", getApiKeyRequest, authentication)); + assertTrue(clusterPermission.check("cluster:admin/xpack/security/api_key/invalidate", invalidateApiKeyRequest, authentication)); + assertFalse(clusterPermission.check("cluster:admin/something", mock(TransportRequest.class), authentication)); + } + + public void testAuthenticationWithUserDeniesAccessToApiKeyActionsWhenItIsNotOwner() { + final ClusterPermission clusterPermission = + ManageOwnApiKeyClusterPrivilege.INSTANCE.buildPermission(ClusterPermission.builder()).build(); + + final Authentication authentication = createMockAuthentication("joe", "realm1", "native", Map.of()); + final TransportRequest getApiKeyRequest = randomFrom( + GetApiKeyRequest.usingRealmAndUserName("realm1", randomAlphaOfLength(7)), + GetApiKeyRequest.usingRealmAndUserName(randomAlphaOfLength(5), "joe"), + new GetApiKeyRequest(randomAlphaOfLength(5), randomAlphaOfLength(7), null, null, false)); + final TransportRequest invalidateApiKeyRequest = randomFrom( + InvalidateApiKeyRequest.usingRealmAndUserName("realm1", randomAlphaOfLength(7)), + InvalidateApiKeyRequest.usingRealmAndUserName(randomAlphaOfLength(5), "joe"), + new InvalidateApiKeyRequest(randomAlphaOfLength(5), randomAlphaOfLength(7), null, null, false)); + + assertFalse(clusterPermission.check("cluster:admin/xpack/security/api_key/get", getApiKeyRequest, authentication)); + assertFalse(clusterPermission.check("cluster:admin/xpack/security/api_key/invalidate", invalidateApiKeyRequest, authentication)); + } + + private Authentication createMockAuthentication(String username, String realmName, String realmType, Map<String, Object> metadata) { + final User user = new User(username); + final Authentication authentication = mock(Authentication.class); + final Authentication.RealmRef authenticatedBy = mock(Authentication.RealmRef.class); + when(authentication.getUser()).thenReturn(user); + when(authentication.getAuthenticatedBy()).thenReturn(authenticatedBy); + when(authenticatedBy.getName()).thenReturn(realmName); + when(authenticatedBy.getType()).thenReturn(realmType); + when(authentication.getMetadata()).thenReturn(metadata); + return authentication; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java index bd64d2112287f..e02c930101694 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java @@ -9,17 +9,18 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; import 
org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; import org.elasticsearch.xpack.core.security.support.Automatons; import org.junit.Rule; import org.junit.rules.ExpectedException; -import org.mockito.Mockito; import java.util.Set; import java.util.function.Predicate; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Mockito.mock; public class PrivilegeTests extends ESTestCase { @Rule @@ -35,13 +36,13 @@ public void testSubActionPattern() throws Exception { private void verifyClusterActionAllowed(ClusterPrivilege clusterPrivilege, String... actions) { ClusterPermission clusterPermission = clusterPrivilege.buildPermission(ClusterPermission.builder()).build(); for (String action: actions) { - assertTrue(clusterPermission.check(action, Mockito.mock(TransportRequest.class))); + assertTrue(clusterPermission.check(action, mock(TransportRequest.class), mock(Authentication.class))); } } private void verifyClusterActionDenied(ClusterPrivilege clusterPrivilege, String... actions) { ClusterPermission clusterPermission = clusterPrivilege.buildPermission(ClusterPermission.builder()).build(); for (String action: actions) { - assertFalse(clusterPermission.check(action, Mockito.mock(TransportRequest.class))); + assertFalse(clusterPermission.check(action, mock(TransportRequest.class), mock(Authentication.class))); } } public void testCluster() throws Exception { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 4433b9d3750e7..ecedfc0c0e95e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -111,6 +111,7 @@ import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; import org.elasticsearch.xpack.core.ml.notifications.AuditorField; import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkAction; +import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationAction; import org.elasticsearch.xpack.core.security.action.privilege.DeletePrivilegesAction; import org.elasticsearch.xpack.core.security.action.privilege.DeletePrivilegesRequest; import org.elasticsearch.xpack.core.security.action.privilege.GetBuiltinPrivilegesAction; @@ -124,6 +125,7 @@ import org.elasticsearch.xpack.core.security.action.token.CreateTokenAction; import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenAction; import org.elasticsearch.xpack.core.security.action.user.PutUserAction; +import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl.IndexAccessControl; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; @@ -200,33 +202,35 @@ public void testIsReserved() { public void testSnapshotUserRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("snapshot_user"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", 
true)); Role snapshotUserRole = Role.builder(roleDescriptor, null).build(); - assertThat(snapshotUserRole.cluster().check(GetRepositoriesAction.NAME, request), is(true)); - assertThat(snapshotUserRole.cluster().check(CreateSnapshotAction.NAME, request), is(true)); - assertThat(snapshotUserRole.cluster().check(SnapshotsStatusAction.NAME, request), is(true)); - assertThat(snapshotUserRole.cluster().check(GetSnapshotsAction.NAME, request), is(true)); - - assertThat(snapshotUserRole.cluster().check(PutRepositoryAction.NAME, request), is(false)); - assertThat(snapshotUserRole.cluster().check(GetIndexTemplatesAction.NAME, request), is(false)); - assertThat(snapshotUserRole.cluster().check(DeleteIndexTemplateAction.NAME, request), is(false)); - assertThat(snapshotUserRole.cluster().check(PutPipelineAction.NAME, request), is(false)); - assertThat(snapshotUserRole.cluster().check(GetPipelineAction.NAME, request), is(false)); - assertThat(snapshotUserRole.cluster().check(DeletePipelineAction.NAME, request), is(false)); - assertThat(snapshotUserRole.cluster().check(ClusterRerouteAction.NAME, request), is(false)); - assertThat(snapshotUserRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(false)); - assertThat(snapshotUserRole.cluster().check(MonitoringBulkAction.NAME, request), is(false)); - assertThat(snapshotUserRole.cluster().check(GetWatchAction.NAME, request), is(false)); - assertThat(snapshotUserRole.cluster().check(PutWatchAction.NAME, request), is(false)); - assertThat(snapshotUserRole.cluster().check(DeleteWatchAction.NAME, request), is(false)); - assertThat(snapshotUserRole.cluster().check(ExecuteWatchAction.NAME, request), is(false)); - assertThat(snapshotUserRole.cluster().check(AckWatchAction.NAME, request), is(false)); - assertThat(snapshotUserRole.cluster().check(ActivateWatchAction.NAME, request), is(false)); - assertThat(snapshotUserRole.cluster().check(WatcherServiceAction.NAME, request), is(false)); + assertThat(snapshotUserRole.cluster().check(GetRepositoriesAction.NAME, request, authentication), is(true)); + assertThat(snapshotUserRole.cluster().check(CreateSnapshotAction.NAME, request, authentication), is(true)); + assertThat(snapshotUserRole.cluster().check(SnapshotsStatusAction.NAME, request, authentication), is(true)); + assertThat(snapshotUserRole.cluster().check(GetSnapshotsAction.NAME, request, authentication), is(true)); + + assertThat(snapshotUserRole.cluster().check(PutRepositoryAction.NAME, request, authentication), is(false)); + assertThat(snapshotUserRole.cluster().check(GetIndexTemplatesAction.NAME, request, authentication), is(false)); + assertThat(snapshotUserRole.cluster().check(DeleteIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(snapshotUserRole.cluster().check(PutPipelineAction.NAME, request, authentication), is(false)); + assertThat(snapshotUserRole.cluster().check(GetPipelineAction.NAME, request, authentication), is(false)); + assertThat(snapshotUserRole.cluster().check(DeletePipelineAction.NAME, request, authentication), is(false)); + assertThat(snapshotUserRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); + assertThat(snapshotUserRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); + assertThat(snapshotUserRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); + assertThat(snapshotUserRole.cluster().check(GetWatchAction.NAME, request, authentication), is(false)); + 
assertThat(snapshotUserRole.cluster().check(PutWatchAction.NAME, request, authentication), is(false)); + assertThat(snapshotUserRole.cluster().check(DeleteWatchAction.NAME, request, authentication), is(false)); + assertThat(snapshotUserRole.cluster().check(ExecuteWatchAction.NAME, request, authentication), is(false)); + assertThat(snapshotUserRole.cluster().check(AckWatchAction.NAME, request, authentication), is(false)); + assertThat(snapshotUserRole.cluster().check(ActivateWatchAction.NAME, request, authentication), is(false)); + assertThat(snapshotUserRole.cluster().check(WatcherServiceAction.NAME, request, authentication), is(false)); + assertThat(snapshotUserRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); assertThat(snapshotUserRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(randomAlphaOfLengthBetween(8, 24)), is(false)); assertThat(snapshotUserRole.indices().allowedIndicesMatcher("indices:foo").test(randomAlphaOfLengthBetween(8, 24)), is(false)); @@ -247,22 +251,23 @@ public void testSnapshotUserRole() { public void testIngestAdminRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("ingest_admin"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role ingestAdminRole = Role.builder(roleDescriptor, null).build(); - assertThat(ingestAdminRole.cluster().check(PutIndexTemplateAction.NAME, request), is(true)); - assertThat(ingestAdminRole.cluster().check(GetIndexTemplatesAction.NAME, request), is(true)); - assertThat(ingestAdminRole.cluster().check(DeleteIndexTemplateAction.NAME, request), is(true)); - assertThat(ingestAdminRole.cluster().check(PutPipelineAction.NAME, request), is(true)); - assertThat(ingestAdminRole.cluster().check(GetPipelineAction.NAME, request), is(true)); - assertThat(ingestAdminRole.cluster().check(DeletePipelineAction.NAME, request), is(true)); - - assertThat(ingestAdminRole.cluster().check(ClusterRerouteAction.NAME, request), is(false)); - assertThat(ingestAdminRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(false)); - assertThat(ingestAdminRole.cluster().check(MonitoringBulkAction.NAME, request), is(false)); + assertThat(ingestAdminRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(true)); + assertThat(ingestAdminRole.cluster().check(GetIndexTemplatesAction.NAME, request, authentication), is(true)); + assertThat(ingestAdminRole.cluster().check(DeleteIndexTemplateAction.NAME, request, authentication), is(true)); + assertThat(ingestAdminRole.cluster().check(PutPipelineAction.NAME, request, authentication), is(true)); + assertThat(ingestAdminRole.cluster().check(GetPipelineAction.NAME, request, authentication), is(true)); + assertThat(ingestAdminRole.cluster().check(DeletePipelineAction.NAME, request, authentication), is(true)); + assertThat(ingestAdminRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); + assertThat(ingestAdminRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); + assertThat(ingestAdminRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); + assertThat(ingestAdminRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); 
assertThat(ingestAdminRole.indices().allowedIndicesMatcher(IndexAction.NAME).test("foo"), is(false)); assertThat(ingestAdminRole.indices().allowedIndicesMatcher("indices:foo").test(randomAlphaOfLengthBetween(8, 24)), @@ -275,39 +280,40 @@ public void testIngestAdminRole() { public void testKibanaSystemRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("kibana_system"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role kibanaRole = Role.builder(roleDescriptor, null).build(); - assertThat(kibanaRole.cluster().check(ClusterHealthAction.NAME, request), is(true)); - assertThat(kibanaRole.cluster().check(ClusterStateAction.NAME, request), is(true)); - assertThat(kibanaRole.cluster().check(ClusterStatsAction.NAME, request), is(true)); - assertThat(kibanaRole.cluster().check(PutIndexTemplateAction.NAME, request), is(true)); - assertThat(kibanaRole.cluster().check(GetIndexTemplatesAction.NAME, request), is(true)); - assertThat(kibanaRole.cluster().check(ClusterRerouteAction.NAME, request), is(false)); - assertThat(kibanaRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(false)); - assertThat(kibanaRole.cluster().check(MonitoringBulkAction.NAME, request), is(true)); + assertThat(kibanaRole.cluster().check(ClusterHealthAction.NAME, request, authentication), is(true)); + assertThat(kibanaRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(true)); + assertThat(kibanaRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(true)); + assertThat(kibanaRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(true)); + assertThat(kibanaRole.cluster().check(GetIndexTemplatesAction.NAME, request, authentication), is(true)); + assertThat(kibanaRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); + assertThat(kibanaRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); + assertThat(kibanaRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(true)); // SAML and token - assertThat(kibanaRole.cluster().check(SamlPrepareAuthenticationAction.NAME, request), is(true)); - assertThat(kibanaRole.cluster().check(SamlAuthenticateAction.NAME, request), is(true)); - assertThat(kibanaRole.cluster().check(InvalidateTokenAction.NAME, request), is(true)); - assertThat(kibanaRole.cluster().check(CreateTokenAction.NAME, request), is(true)); + assertThat(kibanaRole.cluster().check(SamlPrepareAuthenticationAction.NAME, request, authentication), is(true)); + assertThat(kibanaRole.cluster().check(SamlAuthenticateAction.NAME, request, authentication), is(true)); + assertThat(kibanaRole.cluster().check(InvalidateTokenAction.NAME, request, authentication), is(true)); + assertThat(kibanaRole.cluster().check(CreateTokenAction.NAME, request, authentication), is(true)); // Application Privileges DeletePrivilegesRequest deleteKibanaPrivileges = new DeletePrivilegesRequest("kibana-.kibana", new String[]{ "all", "read" }); DeletePrivilegesRequest deleteLogstashPrivileges = new DeletePrivilegesRequest("logstash", new String[]{ "all", "read" }); - assertThat(kibanaRole.cluster().check(DeletePrivilegesAction.NAME, deleteKibanaPrivileges), is(true)); - assertThat(kibanaRole.cluster().check(DeletePrivilegesAction.NAME, deleteLogstashPrivileges), is(false)); + 
assertThat(kibanaRole.cluster().check(DeletePrivilegesAction.NAME, deleteKibanaPrivileges, authentication), is(true)); + assertThat(kibanaRole.cluster().check(DeletePrivilegesAction.NAME, deleteLogstashPrivileges, authentication), is(false)); GetPrivilegesRequest getKibanaPrivileges = new GetPrivilegesRequest(); getKibanaPrivileges.application("kibana-.kibana-sales"); GetPrivilegesRequest getApmPrivileges = new GetPrivilegesRequest(); getApmPrivileges.application("apm"); - assertThat(kibanaRole.cluster().check(GetPrivilegesAction.NAME, getKibanaPrivileges), is(true)); - assertThat(kibanaRole.cluster().check(GetPrivilegesAction.NAME, getApmPrivileges), is(false)); + assertThat(kibanaRole.cluster().check(GetPrivilegesAction.NAME, getKibanaPrivileges, authentication), is(true)); + assertThat(kibanaRole.cluster().check(GetPrivilegesAction.NAME, getApmPrivileges, authentication), is(false)); PutPrivilegesRequest putKibanaPrivileges = new PutPrivilegesRequest(); putKibanaPrivileges.setPrivileges(Collections.singletonList(new ApplicationPrivilegeDescriptor( @@ -315,13 +321,14 @@ public void testKibanaSystemRole() { PutPrivilegesRequest putSwiftypePrivileges = new PutPrivilegesRequest(); putSwiftypePrivileges.setPrivileges(Collections.singletonList(new ApplicationPrivilegeDescriptor( "swiftype-kibana" , "all", Collections.emptySet(), Collections.emptyMap()))); - assertThat(kibanaRole.cluster().check(PutPrivilegesAction.NAME, putKibanaPrivileges), is(true)); - assertThat(kibanaRole.cluster().check(PutPrivilegesAction.NAME, putSwiftypePrivileges), is(false)); + assertThat(kibanaRole.cluster().check(PutPrivilegesAction.NAME, putKibanaPrivileges, authentication), is(true)); + assertThat(kibanaRole.cluster().check(PutPrivilegesAction.NAME, putSwiftypePrivileges, authentication), is(false)); - assertThat(kibanaRole.cluster().check(GetBuiltinPrivilegesAction.NAME, request), is(true)); + assertThat(kibanaRole.cluster().check(GetBuiltinPrivilegesAction.NAME, request, authentication), is(true)); // Everything else assertThat(kibanaRole.runAs().check(randomAlphaOfLengthBetween(1, 12)), is(false)); + assertThat(kibanaRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test("foo"), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(".reporting"), is(false)); @@ -378,19 +385,21 @@ public void testKibanaSystemRole() { public void testKibanaUserRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("kibana_user"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role kibanaUserRole = Role.builder(roleDescriptor, null).build(); - assertThat(kibanaUserRole.cluster().check(ClusterHealthAction.NAME, request), is(false)); - assertThat(kibanaUserRole.cluster().check(ClusterStateAction.NAME, request), is(false)); - assertThat(kibanaUserRole.cluster().check(ClusterStatsAction.NAME, request), is(false)); - assertThat(kibanaUserRole.cluster().check(PutIndexTemplateAction.NAME, request), is(false)); - assertThat(kibanaUserRole.cluster().check(ClusterRerouteAction.NAME, request), is(false)); - assertThat(kibanaUserRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(false)); - assertThat(kibanaUserRole.cluster().check(MonitoringBulkAction.NAME, 
request), is(false)); + assertThat(kibanaUserRole.cluster().check(ClusterHealthAction.NAME, request, authentication), is(false)); + assertThat(kibanaUserRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(false)); + assertThat(kibanaUserRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(false)); + assertThat(kibanaUserRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(kibanaUserRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); + assertThat(kibanaUserRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); + assertThat(kibanaUserRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); + assertThat(kibanaUserRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); assertThat(kibanaUserRole.runAs().check(randomAlphaOfLengthBetween(1, 12)), is(false)); @@ -415,21 +424,23 @@ public void testKibanaUserRole() { public void testMonitoringUserRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("monitoring_user"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role monitoringUserRole = Role.builder(roleDescriptor, null).build(); - assertThat(monitoringUserRole.cluster().check(MainAction.NAME, request), is(true)); - assertThat(monitoringUserRole.cluster().check(XPackInfoAction.NAME, request), is(true)); - assertThat(monitoringUserRole.cluster().check(ClusterHealthAction.NAME, request), is(false)); - assertThat(monitoringUserRole.cluster().check(ClusterStateAction.NAME, request), is(false)); - assertThat(monitoringUserRole.cluster().check(ClusterStatsAction.NAME, request), is(false)); - assertThat(monitoringUserRole.cluster().check(PutIndexTemplateAction.NAME, request), is(false)); - assertThat(monitoringUserRole.cluster().check(ClusterRerouteAction.NAME, request), is(false)); - assertThat(monitoringUserRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(false)); - assertThat(monitoringUserRole.cluster().check(MonitoringBulkAction.NAME, request), is(false)); + assertThat(monitoringUserRole.cluster().check(MainAction.NAME, request, authentication), is(true)); + assertThat(monitoringUserRole.cluster().check(XPackInfoAction.NAME, request, authentication), is(true)); + assertThat(monitoringUserRole.cluster().check(ClusterHealthAction.NAME, request, authentication), is(false)); + assertThat(monitoringUserRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(false)); + assertThat(monitoringUserRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(false)); + assertThat(monitoringUserRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(monitoringUserRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); + assertThat(monitoringUserRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); + assertThat(monitoringUserRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); + assertThat(monitoringUserRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); assertThat(monitoringUserRole.runAs().check(randomAlphaOfLengthBetween(1, 
12)), is(false)); @@ -471,28 +482,31 @@ public void testMonitoringUserRole() { public void testRemoteMonitoringAgentRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("remote_monitoring_agent"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role remoteMonitoringAgentRole = Role.builder(roleDescriptor, null).build(); - assertThat(remoteMonitoringAgentRole.cluster().check(ClusterHealthAction.NAME, request), is(true)); - assertThat(remoteMonitoringAgentRole.cluster().check(ClusterStateAction.NAME, request), is(true)); - assertThat(remoteMonitoringAgentRole.cluster().check(ClusterStatsAction.NAME, request), is(true)); - assertThat(remoteMonitoringAgentRole.cluster().check(PutIndexTemplateAction.NAME, request), is(true)); - assertThat(remoteMonitoringAgentRole.cluster().check(ClusterRerouteAction.NAME, request), is(false)); - assertThat(remoteMonitoringAgentRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(false)); - assertThat(remoteMonitoringAgentRole.cluster().check(MonitoringBulkAction.NAME, request), is(false)); - assertThat(remoteMonitoringAgentRole.cluster().check(GetWatchAction.NAME, request), is(true)); - assertThat(remoteMonitoringAgentRole.cluster().check(PutWatchAction.NAME, request), is(true)); - assertThat(remoteMonitoringAgentRole.cluster().check(DeleteWatchAction.NAME, request), is(true)); - assertThat(remoteMonitoringAgentRole.cluster().check(ExecuteWatchAction.NAME, request), is(false)); - assertThat(remoteMonitoringAgentRole.cluster().check(AckWatchAction.NAME, request), is(false)); - assertThat(remoteMonitoringAgentRole.cluster().check(ActivateWatchAction.NAME, request), is(false)); - assertThat(remoteMonitoringAgentRole.cluster().check(WatcherServiceAction.NAME, request), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(ClusterHealthAction.NAME, request, authentication), is(true)); + assertThat(remoteMonitoringAgentRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(true)); + assertThat(remoteMonitoringAgentRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(true)); + assertThat(remoteMonitoringAgentRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(true)); + assertThat(remoteMonitoringAgentRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(GetWatchAction.NAME, request, authentication), is(true)); + assertThat(remoteMonitoringAgentRole.cluster().check(PutWatchAction.NAME, request, authentication), is(true)); + assertThat(remoteMonitoringAgentRole.cluster().check(DeleteWatchAction.NAME, request, authentication), is(true)); + assertThat(remoteMonitoringAgentRole.cluster().check(ExecuteWatchAction.NAME, request, authentication), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(AckWatchAction.NAME, request, authentication), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(ActivateWatchAction.NAME, request, authentication), is(false)); + 
assertThat(remoteMonitoringAgentRole.cluster().check(WatcherServiceAction.NAME, request, authentication), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); + // we get this from the cluster:monitor privilege - assertThat(remoteMonitoringAgentRole.cluster().check(WatcherStatsAction.NAME, request), is(true)); + assertThat(remoteMonitoringAgentRole.cluster().check(WatcherStatsAction.NAME, request, authentication), is(true)); assertThat(remoteMonitoringAgentRole.runAs().check(randomAlphaOfLengthBetween(1, 12)), is(false)); @@ -530,21 +544,23 @@ public void testRemoteMonitoringAgentRole() { public void testRemoteMonitoringCollectorRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("remote_monitoring_collector"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role remoteMonitoringAgentRole = Role.builder(roleDescriptor, null).build(); - assertThat(remoteMonitoringAgentRole.cluster().check(ClusterHealthAction.NAME, request), is(true)); - assertThat(remoteMonitoringAgentRole.cluster().check(ClusterStateAction.NAME, request), is(true)); - assertThat(remoteMonitoringAgentRole.cluster().check(ClusterStatsAction.NAME, request), is(true)); - assertThat(remoteMonitoringAgentRole.cluster().check(GetIndexTemplatesAction.NAME, request), is(false)); - assertThat(remoteMonitoringAgentRole.cluster().check(PutIndexTemplateAction.NAME, request), is(false)); - assertThat(remoteMonitoringAgentRole.cluster().check(DeleteIndexTemplateAction.NAME, request), is(false)); - assertThat(remoteMonitoringAgentRole.cluster().check(ClusterRerouteAction.NAME, request), is(false)); - assertThat(remoteMonitoringAgentRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(false)); - assertThat(remoteMonitoringAgentRole.cluster().check(MonitoringBulkAction.NAME, request), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(ClusterHealthAction.NAME, request, authentication), is(true)); + assertThat(remoteMonitoringAgentRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(true)); + assertThat(remoteMonitoringAgentRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(true)); + assertThat(remoteMonitoringAgentRole.cluster().check(GetIndexTemplatesAction.NAME, request, authentication), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(DeleteIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); assertThat(remoteMonitoringAgentRole.runAs().check(randomAlphaOfLengthBetween(1, 12)), is(false)); @@ -628,19 +644,21 @@ private void assertMonitoringOnRestrictedIndices(Role role) { public void testReportingUserRole() { final 
TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("reporting_user"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role reportingUserRole = Role.builder(roleDescriptor, null).build(); - assertThat(reportingUserRole.cluster().check(ClusterHealthAction.NAME, request), is(false)); - assertThat(reportingUserRole.cluster().check(ClusterStateAction.NAME, request), is(false)); - assertThat(reportingUserRole.cluster().check(ClusterStatsAction.NAME, request), is(false)); - assertThat(reportingUserRole.cluster().check(PutIndexTemplateAction.NAME, request), is(false)); - assertThat(reportingUserRole.cluster().check(ClusterRerouteAction.NAME, request), is(false)); - assertThat(reportingUserRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(false)); - assertThat(reportingUserRole.cluster().check(MonitoringBulkAction.NAME, request), is(false)); + assertThat(reportingUserRole.cluster().check(ClusterHealthAction.NAME, request, authentication), is(false)); + assertThat(reportingUserRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(false)); + assertThat(reportingUserRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(false)); + assertThat(reportingUserRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(reportingUserRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); + assertThat(reportingUserRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); + assertThat(reportingUserRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); + assertThat(reportingUserRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); assertThat(reportingUserRole.runAs().check(randomAlphaOfLengthBetween(1, 12)), is(false)); @@ -668,19 +686,21 @@ public void testReportingUserRole() { public void testKibanaDashboardOnlyUserRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("kibana_dashboard_only_user"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role dashboardsOnlyUserRole = Role.builder(roleDescriptor, null).build(); - assertThat(dashboardsOnlyUserRole.cluster().check(ClusterHealthAction.NAME, request), is(false)); - assertThat(dashboardsOnlyUserRole.cluster().check(ClusterStateAction.NAME, request), is(false)); - assertThat(dashboardsOnlyUserRole.cluster().check(ClusterStatsAction.NAME, request), is(false)); - assertThat(dashboardsOnlyUserRole.cluster().check(PutIndexTemplateAction.NAME, request), is(false)); - assertThat(dashboardsOnlyUserRole.cluster().check(ClusterRerouteAction.NAME, request), is(false)); - assertThat(dashboardsOnlyUserRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(false)); - assertThat(dashboardsOnlyUserRole.cluster().check(MonitoringBulkAction.NAME, request), is(false)); + assertThat(dashboardsOnlyUserRole.cluster().check(ClusterHealthAction.NAME, request, authentication), is(false)); + assertThat(dashboardsOnlyUserRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(false)); + 
assertThat(dashboardsOnlyUserRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(false)); + assertThat(dashboardsOnlyUserRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(dashboardsOnlyUserRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); + assertThat(dashboardsOnlyUserRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); + assertThat(dashboardsOnlyUserRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); + assertThat(dashboardsOnlyUserRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); assertThat(dashboardsOnlyUserRole.runAs().check(randomAlphaOfLengthBetween(1, 12)), is(false)); @@ -702,18 +722,20 @@ public void testKibanaDashboardOnlyUserRole() { public void testSuperuserRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("superuser"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role superuserRole = Role.builder(roleDescriptor, null).build(); - assertThat(superuserRole.cluster().check(ClusterHealthAction.NAME, request), is(true)); - assertThat(superuserRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(true)); - assertThat(superuserRole.cluster().check(PutUserAction.NAME, request), is(true)); - assertThat(superuserRole.cluster().check(PutRoleAction.NAME, request), is(true)); - assertThat(superuserRole.cluster().check(PutIndexTemplateAction.NAME, request), is(true)); - assertThat(superuserRole.cluster().check("internal:admin/foo", request), is(false)); + assertThat(superuserRole.cluster().check(ClusterHealthAction.NAME, request, authentication), is(true)); + assertThat(superuserRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(true)); + assertThat(superuserRole.cluster().check(PutUserAction.NAME, request, authentication), is(true)); + assertThat(superuserRole.cluster().check(PutRoleAction.NAME, request, authentication), is(true)); + assertThat(superuserRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(true)); + assertThat(superuserRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(true)); + assertThat(superuserRole.cluster().check("internal:admin/foo", request, authentication), is(false)); final Settings indexSettings = Settings.builder().put("index.version.created", Version.CURRENT).build(); final String internalSecurityIndex = randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_MAIN_INDEX_6, @@ -772,19 +794,21 @@ public void testSuperuserRole() { public void testLogstashSystemRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("logstash_system"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role logstashSystemRole = Role.builder(roleDescriptor, null).build(); - assertThat(logstashSystemRole.cluster().check(ClusterHealthAction.NAME, request), is(true)); - assertThat(logstashSystemRole.cluster().check(ClusterStateAction.NAME, request), is(true)); - assertThat(logstashSystemRole.cluster().check(ClusterStatsAction.NAME, 
request), is(true)); - assertThat(logstashSystemRole.cluster().check(PutIndexTemplateAction.NAME, request), is(false)); - assertThat(logstashSystemRole.cluster().check(ClusterRerouteAction.NAME, request), is(false)); - assertThat(logstashSystemRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(false)); - assertThat(logstashSystemRole.cluster().check(MonitoringBulkAction.NAME, request), is(true)); + assertThat(logstashSystemRole.cluster().check(ClusterHealthAction.NAME, request, authentication), is(true)); + assertThat(logstashSystemRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(true)); + assertThat(logstashSystemRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(true)); + assertThat(logstashSystemRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(logstashSystemRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); + assertThat(logstashSystemRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); + assertThat(logstashSystemRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); + assertThat(logstashSystemRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(true)); assertThat(logstashSystemRole.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); @@ -798,6 +822,7 @@ public void testLogstashSystemRole() { public void testBeatsAdminRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); final RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("beats_admin"); assertNotNull(roleDescriptor); @@ -805,13 +830,14 @@ public void testBeatsAdminRole() { final Role beatsAdminRole = Role.builder(roleDescriptor, null).build(); - assertThat(beatsAdminRole.cluster().check(ClusterHealthAction.NAME, request), is(false)); - assertThat(beatsAdminRole.cluster().check(ClusterStateAction.NAME, request), is(false)); - assertThat(beatsAdminRole.cluster().check(ClusterStatsAction.NAME, request), is(false)); - assertThat(beatsAdminRole.cluster().check(PutIndexTemplateAction.NAME, request), is(false)); - assertThat(beatsAdminRole.cluster().check(ClusterRerouteAction.NAME, request), is(false)); - assertThat(beatsAdminRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(false)); - assertThat(beatsAdminRole.cluster().check(MonitoringBulkAction.NAME, request), is(false)); + assertThat(beatsAdminRole.cluster().check(ClusterHealthAction.NAME, request, authentication), is(false)); + assertThat(beatsAdminRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(false)); + assertThat(beatsAdminRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(false)); + assertThat(beatsAdminRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(beatsAdminRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); + assertThat(beatsAdminRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); + assertThat(beatsAdminRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); + assertThat(beatsAdminRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); assertThat(beatsAdminRole.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); @@ -836,19 +862,21 @@ 
public void testBeatsAdminRole() { public void testBeatsSystemRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor(BeatsSystemUser.ROLE_NAME); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role beatsSystemRole = Role.builder(roleDescriptor, null).build(); - assertThat(beatsSystemRole.cluster().check(ClusterHealthAction.NAME, request), is(true)); - assertThat(beatsSystemRole.cluster().check(ClusterStateAction.NAME, request), is(true)); - assertThat(beatsSystemRole.cluster().check(ClusterStatsAction.NAME, request), is(true)); - assertThat(beatsSystemRole.cluster().check(PutIndexTemplateAction.NAME, request), is(false)); - assertThat(beatsSystemRole.cluster().check(ClusterRerouteAction.NAME, request), is(false)); - assertThat(beatsSystemRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(false)); - assertThat(beatsSystemRole.cluster().check(MonitoringBulkAction.NAME, request), is(true)); + assertThat(beatsSystemRole.cluster().check(ClusterHealthAction.NAME, request, authentication), is(true)); + assertThat(beatsSystemRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(true)); + assertThat(beatsSystemRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(true)); + assertThat(beatsSystemRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(beatsSystemRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); + assertThat(beatsSystemRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); + assertThat(beatsSystemRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); + assertThat(beatsSystemRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(true)); assertThat(beatsSystemRole.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); @@ -869,19 +897,21 @@ public void testBeatsSystemRole() { public void testAPMSystemRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor(APMSystemUser.ROLE_NAME); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role APMSystemRole = Role.builder(roleDescriptor, null).build(); - assertThat(APMSystemRole.cluster().check(ClusterHealthAction.NAME, request), is(true)); - assertThat(APMSystemRole.cluster().check(ClusterStateAction.NAME, request), is(true)); - assertThat(APMSystemRole.cluster().check(ClusterStatsAction.NAME, request), is(true)); - assertThat(APMSystemRole.cluster().check(PutIndexTemplateAction.NAME, request), is(false)); - assertThat(APMSystemRole.cluster().check(ClusterRerouteAction.NAME, request), is(false)); - assertThat(APMSystemRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(false)); - assertThat(APMSystemRole.cluster().check(MonitoringBulkAction.NAME, request), is(true)); + assertThat(APMSystemRole.cluster().check(ClusterHealthAction.NAME, request, authentication), is(true)); + assertThat(APMSystemRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(true)); + assertThat(APMSystemRole.cluster().check(ClusterStatsAction.NAME, request, authentication), 
is(true)); + assertThat(APMSystemRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(APMSystemRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); + assertThat(APMSystemRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); + assertThat(APMSystemRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); + assertThat(APMSystemRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(true)); assertThat(APMSystemRole.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); @@ -895,6 +925,7 @@ public void testAPMSystemRole() { public void testAPMUserRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); final RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("apm_user"); assertNotNull(roleDescriptor); @@ -902,6 +933,7 @@ public void testAPMUserRole() { Role role = Role.builder(roleDescriptor, null).build(); + assertThat(role.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); assertThat(role.runAs().check(randomAlphaOfLengthBetween(1, 12)), is(false)); assertNoAccessAllowed(role, "foo"); @@ -912,62 +944,65 @@ public void testAPMUserRole() { public void testMachineLearningAdminRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("machine_learning_admin"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role role = Role.builder(roleDescriptor, null).build(); - assertThat(role.cluster().check(CloseJobAction.NAME, request), is(true)); - assertThat(role.cluster().check(DeleteCalendarAction.NAME, request), is(true)); - assertThat(role.cluster().check(DeleteCalendarEventAction.NAME, request), is(true)); - assertThat(role.cluster().check(DeleteDatafeedAction.NAME, request), is(true)); - assertThat(role.cluster().check(DeleteExpiredDataAction.NAME, request), is(true)); - assertThat(role.cluster().check(DeleteFilterAction.NAME, request), is(true)); - assertThat(role.cluster().check(DeleteForecastAction.NAME, request), is(true)); - assertThat(role.cluster().check(DeleteJobAction.NAME, request), is(true)); - assertThat(role.cluster().check(DeleteModelSnapshotAction.NAME, request), is(true)); - assertThat(role.cluster().check(FinalizeJobExecutionAction.NAME, request), is(false)); // internal use only - assertThat(role.cluster().check(FindFileStructureAction.NAME, request), is(true)); - assertThat(role.cluster().check(FlushJobAction.NAME, request), is(true)); - assertThat(role.cluster().check(ForecastJobAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetBucketsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetCalendarEventsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetCalendarsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetCategoriesAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetDatafeedsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetDatafeedsStatsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetFiltersAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetInfluencersAction.NAME, request), is(true)); - 
assertThat(role.cluster().check(GetJobsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetJobsStatsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetModelSnapshotsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetOverallBucketsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetRecordsAction.NAME, request), is(true)); - assertThat(role.cluster().check(IsolateDatafeedAction.NAME, request), is(false)); // internal use only - assertThat(role.cluster().check(KillProcessAction.NAME, request), is(false)); // internal use only - assertThat(role.cluster().check(MlInfoAction.NAME, request), is(true)); - assertThat(role.cluster().check(OpenJobAction.NAME, request), is(true)); - assertThat(role.cluster().check(PersistJobAction.NAME, request), is(true)); - assertThat(role.cluster().check(PostCalendarEventsAction.NAME, request), is(true)); - assertThat(role.cluster().check(PostDataAction.NAME, request), is(true)); - assertThat(role.cluster().check(PreviewDatafeedAction.NAME, request), is(true)); - assertThat(role.cluster().check(PutCalendarAction.NAME, request), is(true)); - assertThat(role.cluster().check(PutDatafeedAction.NAME, request), is(true)); - assertThat(role.cluster().check(PutFilterAction.NAME, request), is(true)); - assertThat(role.cluster().check(PutJobAction.NAME, request), is(true)); - assertThat(role.cluster().check(RevertModelSnapshotAction.NAME, request), is(true)); - assertThat(role.cluster().check(SetUpgradeModeAction.NAME, request), is(true)); - assertThat(role.cluster().check(StartDatafeedAction.NAME, request), is(true)); - assertThat(role.cluster().check(StopDatafeedAction.NAME, request), is(true)); - assertThat(role.cluster().check(UpdateCalendarJobAction.NAME, request), is(true)); - assertThat(role.cluster().check(UpdateDatafeedAction.NAME, request), is(true)); - assertThat(role.cluster().check(UpdateFilterAction.NAME, request), is(true)); - assertThat(role.cluster().check(UpdateJobAction.NAME, request), is(true)); - assertThat(role.cluster().check(UpdateModelSnapshotAction.NAME, request), is(true)); - assertThat(role.cluster().check(UpdateProcessAction.NAME, request), is(false)); // internal use only - assertThat(role.cluster().check(ValidateDetectorAction.NAME, request), is(true)); - assertThat(role.cluster().check(ValidateJobConfigAction.NAME, request), is(true)); + assertThat(role.cluster().check(CloseJobAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(DeleteCalendarAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(DeleteCalendarEventAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(DeleteDatafeedAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(DeleteExpiredDataAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(DeleteFilterAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(DeleteForecastAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(DeleteJobAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(DeleteModelSnapshotAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(FinalizeJobExecutionAction.NAME, request, authentication), is(false)); // internal use only + assertThat(role.cluster().check(FindFileStructureAction.NAME, request, authentication), is(true)); + 
assertThat(role.cluster().check(FlushJobAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(ForecastJobAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetBucketsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetCalendarEventsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetCalendarsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetCategoriesAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetDatafeedsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetDatafeedsStatsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetFiltersAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetInfluencersAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetJobsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetJobsStatsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetModelSnapshotsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetOverallBucketsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetRecordsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(IsolateDatafeedAction.NAME, request, authentication), is(false)); // internal use only + assertThat(role.cluster().check(KillProcessAction.NAME, request, authentication), is(false)); // internal use only + assertThat(role.cluster().check(MlInfoAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(OpenJobAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(PersistJobAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(PostCalendarEventsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(PostDataAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(PreviewDatafeedAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(PutCalendarAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(PutDatafeedAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(PutFilterAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(PutJobAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(RevertModelSnapshotAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(SetUpgradeModeAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(StartDatafeedAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(StopDatafeedAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(UpdateCalendarJobAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(UpdateDatafeedAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(UpdateFilterAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(UpdateJobAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(UpdateModelSnapshotAction.NAME, request, authentication), is(true)); + 
assertThat(role.cluster().check(UpdateProcessAction.NAME, request, authentication), is(false)); // internal use only + assertThat(role.cluster().check(ValidateDetectorAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(ValidateJobConfigAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); + assertThat(role.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); assertNoAccessAllowed(role, "foo"); @@ -995,62 +1030,65 @@ public void testMachineLearningAdminRole() { public void testMachineLearningUserRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("machine_learning_user"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role role = Role.builder(roleDescriptor, null).build(); - assertThat(role.cluster().check(CloseJobAction.NAME, request), is(false)); - assertThat(role.cluster().check(DeleteCalendarAction.NAME, request), is(false)); - assertThat(role.cluster().check(DeleteCalendarEventAction.NAME, request), is(false)); - assertThat(role.cluster().check(DeleteDatafeedAction.NAME, request), is(false)); - assertThat(role.cluster().check(DeleteExpiredDataAction.NAME, request), is(false)); - assertThat(role.cluster().check(DeleteFilterAction.NAME, request), is(false)); - assertThat(role.cluster().check(DeleteForecastAction.NAME, request), is(false)); - assertThat(role.cluster().check(DeleteJobAction.NAME, request), is(false)); - assertThat(role.cluster().check(DeleteModelSnapshotAction.NAME, request), is(false)); - assertThat(role.cluster().check(FinalizeJobExecutionAction.NAME, request), is(false)); - assertThat(role.cluster().check(FindFileStructureAction.NAME, request), is(true)); - assertThat(role.cluster().check(FlushJobAction.NAME, request), is(false)); - assertThat(role.cluster().check(ForecastJobAction.NAME, request), is(false)); - assertThat(role.cluster().check(GetBucketsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetCalendarEventsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetCalendarsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetCategoriesAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetDatafeedsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetDatafeedsStatsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetFiltersAction.NAME, request), is(false)); - assertThat(role.cluster().check(GetInfluencersAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetJobsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetJobsStatsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetModelSnapshotsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetOverallBucketsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetRecordsAction.NAME, request), is(true)); - assertThat(role.cluster().check(IsolateDatafeedAction.NAME, request), is(false)); - assertThat(role.cluster().check(KillProcessAction.NAME, request), is(false)); - assertThat(role.cluster().check(MlInfoAction.NAME, request), is(true)); - assertThat(role.cluster().check(OpenJobAction.NAME, request), is(false)); - assertThat(role.cluster().check(PersistJobAction.NAME, request), 
is(false)); - assertThat(role.cluster().check(PostCalendarEventsAction.NAME, request), is(false)); - assertThat(role.cluster().check(PostDataAction.NAME, request), is(false)); - assertThat(role.cluster().check(PreviewDatafeedAction.NAME, request), is(false)); - assertThat(role.cluster().check(PutCalendarAction.NAME, request), is(false)); - assertThat(role.cluster().check(PutDatafeedAction.NAME, request), is(false)); - assertThat(role.cluster().check(PutFilterAction.NAME, request), is(false)); - assertThat(role.cluster().check(PutJobAction.NAME, request), is(false)); - assertThat(role.cluster().check(RevertModelSnapshotAction.NAME, request), is(false)); - assertThat(role.cluster().check(SetUpgradeModeAction.NAME, request), is(false)); - assertThat(role.cluster().check(StartDatafeedAction.NAME, request), is(false)); - assertThat(role.cluster().check(StopDatafeedAction.NAME, request), is(false)); - assertThat(role.cluster().check(UpdateCalendarJobAction.NAME, request), is(false)); - assertThat(role.cluster().check(UpdateDatafeedAction.NAME, request), is(false)); - assertThat(role.cluster().check(UpdateFilterAction.NAME, request), is(false)); - assertThat(role.cluster().check(UpdateJobAction.NAME, request), is(false)); - assertThat(role.cluster().check(UpdateModelSnapshotAction.NAME, request), is(false)); - assertThat(role.cluster().check(UpdateProcessAction.NAME, request), is(false)); - assertThat(role.cluster().check(ValidateDetectorAction.NAME, request), is(false)); - assertThat(role.cluster().check(ValidateJobConfigAction.NAME, request), is(false)); + assertThat(role.cluster().check(CloseJobAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(DeleteCalendarAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(DeleteCalendarEventAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(DeleteDatafeedAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(DeleteExpiredDataAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(DeleteFilterAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(DeleteForecastAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(DeleteJobAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(DeleteModelSnapshotAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(FinalizeJobExecutionAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(FindFileStructureAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(FlushJobAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(ForecastJobAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(GetBucketsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetCalendarEventsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetCalendarsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetCategoriesAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetDatafeedsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetDatafeedsStatsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetFiltersAction.NAME, request, authentication), is(false)); + 
assertThat(role.cluster().check(GetInfluencersAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetJobsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetJobsStatsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetModelSnapshotsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetOverallBucketsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetRecordsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(IsolateDatafeedAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(KillProcessAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(MlInfoAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(OpenJobAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(PersistJobAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(PostCalendarEventsAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(PostDataAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(PreviewDatafeedAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(PutCalendarAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(PutDatafeedAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(PutFilterAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(PutJobAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(RevertModelSnapshotAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(SetUpgradeModeAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(StartDatafeedAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(StopDatafeedAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(UpdateCalendarJobAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(UpdateDatafeedAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(UpdateFilterAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(UpdateJobAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(UpdateModelSnapshotAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(UpdateProcessAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(ValidateDetectorAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(ValidateJobConfigAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); + assertThat(role.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); assertNoAccessAllowed(role, "foo"); @@ -1079,19 +1117,22 @@ public void testMachineLearningUserRole() { public void testDataFrameTransformsAdminRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("data_frame_transforms_admin"); assertNotNull(roleDescriptor); 
assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role role = Role.builder(roleDescriptor, null).build(); - assertThat(role.cluster().check(DeleteDataFrameTransformAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetDataFrameTransformsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetDataFrameTransformsStatsAction.NAME, request), is(true)); - assertThat(role.cluster().check(PreviewDataFrameTransformAction.NAME, request), is(true)); - assertThat(role.cluster().check(PutDataFrameTransformAction.NAME, request), is(true)); - assertThat(role.cluster().check(StartDataFrameTransformAction.NAME, request), is(true)); - assertThat(role.cluster().check(StopDataFrameTransformAction.NAME, request), is(true)); + assertThat(role.cluster().check(DeleteDataFrameTransformAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetDataFrameTransformsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetDataFrameTransformsStatsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(PreviewDataFrameTransformAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(PutDataFrameTransformAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(StartDataFrameTransformAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(StopDataFrameTransformAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); + assertThat(role.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); assertOnlyReadAllowed(role, ".data-frame-notifications-1"); @@ -1115,19 +1156,22 @@ public void testDataFrameTransformsAdminRole() { public void testDataFrameTransformsUserRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("data_frame_transforms_user"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role role = Role.builder(roleDescriptor, null).build(); - assertThat(role.cluster().check(DeleteDataFrameTransformAction.NAME, request), is(false)); - assertThat(role.cluster().check(GetDataFrameTransformsAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetDataFrameTransformsStatsAction.NAME, request), is(true)); - assertThat(role.cluster().check(PreviewDataFrameTransformAction.NAME, request), is(false)); - assertThat(role.cluster().check(PutDataFrameTransformAction.NAME, request), is(false)); - assertThat(role.cluster().check(StartDataFrameTransformAction.NAME, request), is(false)); - assertThat(role.cluster().check(StopDataFrameTransformAction.NAME, request), is(false)); + assertThat(role.cluster().check(DeleteDataFrameTransformAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(GetDataFrameTransformsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetDataFrameTransformsStatsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(PreviewDataFrameTransformAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(PutDataFrameTransformAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(StartDataFrameTransformAction.NAME, request, 
authentication), is(false)); + assertThat(role.cluster().check(StopDataFrameTransformAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); + assertThat(role.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); assertOnlyReadAllowed(role, ".data-frame-notifications-1"); @@ -1151,20 +1195,23 @@ public void testDataFrameTransformsUserRole() { public void testWatcherAdminRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("watcher_admin"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role role = Role.builder(roleDescriptor, null).build(); - assertThat(role.cluster().check(PutWatchAction.NAME, request), is(true)); - assertThat(role.cluster().check(GetWatchAction.NAME, request), is(true)); - assertThat(role.cluster().check(DeleteWatchAction.NAME, request), is(true)); - assertThat(role.cluster().check(ExecuteWatchAction.NAME, request), is(true)); - assertThat(role.cluster().check(AckWatchAction.NAME, request), is(true)); - assertThat(role.cluster().check(ActivateWatchAction.NAME, request), is(true)); - assertThat(role.cluster().check(WatcherServiceAction.NAME, request), is(true)); - assertThat(role.cluster().check(WatcherStatsAction.NAME, request), is(true)); + assertThat(role.cluster().check(PutWatchAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetWatchAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(DeleteWatchAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(ExecuteWatchAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(AckWatchAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(ActivateWatchAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(WatcherServiceAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(WatcherStatsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); + assertThat(role.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); assertThat(role.indices().allowedIndicesMatcher(IndexAction.NAME).test("foo"), is(false)); @@ -1180,20 +1227,23 @@ public void testWatcherAdminRole() { public void testWatcherUserRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("watcher_user"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role role = Role.builder(roleDescriptor, null).build(); - assertThat(role.cluster().check(PutWatchAction.NAME, request), is(false)); - assertThat(role.cluster().check(GetWatchAction.NAME, request), is(true)); - assertThat(role.cluster().check(DeleteWatchAction.NAME, request), is(false)); - assertThat(role.cluster().check(ExecuteWatchAction.NAME, request), is(false)); - assertThat(role.cluster().check(AckWatchAction.NAME, request), is(false)); - assertThat(role.cluster().check(ActivateWatchAction.NAME, request), is(false)); - assertThat(role.cluster().check(WatcherServiceAction.NAME, 
request), is(false)); - assertThat(role.cluster().check(WatcherStatsAction.NAME, request), is(true)); + assertThat(role.cluster().check(PutWatchAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(GetWatchAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(DeleteWatchAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(ExecuteWatchAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(AckWatchAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(ActivateWatchAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(WatcherServiceAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(WatcherStatsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); + assertThat(role.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); assertThat(role.indices().allowedIndicesMatcher(IndexAction.NAME).test("foo"), is(false)); @@ -1252,16 +1302,18 @@ private void assertNoAccessAllowed(Role role, String index) { public void testLogstashAdminRole() { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("logstash_admin"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role logstashAdminRole = Role.builder(roleDescriptor, null).build(); - assertThat(logstashAdminRole.cluster().check(ClusterHealthAction.NAME, request), is(false)); - assertThat(logstashAdminRole.cluster().check(PutIndexTemplateAction.NAME, request), is(false)); - assertThat(logstashAdminRole.cluster().check(ClusterRerouteAction.NAME, request), is(false)); - assertThat(logstashAdminRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(false)); + assertThat(logstashAdminRole.cluster().check(ClusterHealthAction.NAME, request, authentication), is(false)); + assertThat(logstashAdminRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(logstashAdminRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); + assertThat(logstashAdminRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); + assertThat(logstashAdminRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); assertThat(logstashAdminRole.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); @@ -1290,6 +1342,8 @@ public void testCodeAdminRole() { Role codeAdminRole = Role.builder(roleDescriptor, null).build(); + assertThat(codeAdminRole.cluster().check(DelegatePkiAuthenticationAction.NAME, mock(TransportRequest.class), + mock(Authentication.class)), is(false)); assertThat(codeAdminRole.indices().allowedIndicesMatcher(IndexAction.NAME).test("foo"), is(false)); assertThat(codeAdminRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(".reporting"), is(false)); @@ -1316,6 +1370,8 @@ public void testCodeUserRole() { Role codeUserRole = Role.builder(roleDescriptor, null).build(); + assertThat(codeUserRole.cluster().check(DelegatePkiAuthenticationAction.NAME, mock(TransportRequest.class), + mock(Authentication.class)), is(false)); 
assertThat(codeUserRole.indices().allowedIndicesMatcher(SearchAction.NAME).test("foo"), is(false)); assertThat(codeUserRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(".reporting"), is(false)); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/notifications/DataFrameAuditor.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/notifications/DataFrameAuditor.java index e756182f9c5c7..9d3e7f51c2e74 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/notifications/DataFrameAuditor.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/notifications/DataFrameAuditor.java @@ -16,7 +16,8 @@ * DataFrameAuditor class that abstracts away generic templating for easier injection */ public class DataFrameAuditor extends AbstractAuditor { + public DataFrameAuditor(Client client, String nodeName) { - super(client, nodeName, DataFrameInternalIndex.AUDIT_INDEX, DATA_FRAME_ORIGIN, DataFrameAuditMessage.builder()); + super(client, nodeName, DataFrameInternalIndex.AUDIT_INDEX, DATA_FRAME_ORIGIN, DataFrameAuditMessage::new); } } diff --git a/x-pack/plugin/ilm/qa/multi-node/build.gradle b/x-pack/plugin/ilm/qa/multi-node/build.gradle index 77464b031aa15..1746cc8f840ce 100644 --- a/x-pack/plugin/ilm/qa/multi-node/build.gradle +++ b/x-pack/plugin/ilm/qa/multi-node/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.test.RestIntegTestTask - apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' @@ -27,4 +25,8 @@ testClusters.integTest { setting 'xpack.ml.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' setting 'indices.lifecycle.poll_interval', '1000ms' + // TODO: Find a way to run these tests with more than one snapshot pool thread. Currently we need to limit to one thread so that the + // rate limiting settings in SnapshotLifecycleIT doesn't result in blocked snapshot threads because multiple threads overshoot + // the limit simultaneously and the rate limiter then moves to wait minutes to make up for this. 
+ setting 'thread_pool.snapshot.max', '1' } diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java index 575d75aa10336..9b258471d61a0 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java @@ -8,6 +8,8 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -66,6 +68,8 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase { private String index; private String policy; + private static final Logger logger = LogManager.getLogger(TimeSeriesLifecycleActionsIT.class); + @Before public void refreshIndex() { index = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); @@ -961,7 +965,7 @@ private Map<String, Object> getOnlyIndexSettings(String index) throws IOExceptio return (Map<String, Object>) response.get("settings"); } - private StepKey getStepKeyForIndex(String indexName) throws IOException { + public static StepKey getStepKeyForIndex(String indexName) throws IOException { Map<String, Object> indexResponse = explainIndex(indexName); if (indexResponse == null) { return new StepKey(null, null, null); } @@ -988,11 +992,12 @@ private String getReasonForIndex(String indexName) throws IOException { return ((Map<String, String>) indexResponse.get("step_info")).get("reason"); } - private Map<String, Object> explainIndex(String indexName) throws IOException { + private static Map<String, Object> explainIndex(String indexName) throws IOException { return explain(indexName, false, false).get(indexName); } - private Map<String, Map<String, Object>> explain(String indexPattern, boolean onlyErrors, boolean onlyManaged) throws IOException { + private static Map<String, Map<String, Object>> explain(String indexPattern, boolean onlyErrors, + boolean onlyManaged) throws IOException { Request explainRequest = new Request("GET", indexPattern + "/_ilm/explain"); explainRequest.addParameter("only_errors", Boolean.toString(onlyErrors)); explainRequest.addParameter("only_managed", Boolean.toString(onlyManaged)); diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleIT.java index 8af48cf4d98ce..f7f6b14991658 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleIT.java @@ -12,6 +12,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.ilm.RolloverAction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -22,6 +23,8 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.ilm.Step; +import org.elasticsearch.xpack.core.ilm.WaitForRolloverReadyStep; import
org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; import java.io.IOException; @@ -34,6 +37,8 @@ import java.util.Optional; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.slm.history.SnapshotHistoryStore.SLM_HISTORY_INDEX_PREFIX; +import static org.elasticsearch.xpack.ilm.TimeSeriesLifecycleActionsIT.getStepKeyForIndex; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -48,8 +53,10 @@ protected boolean waitForAllSnapshotsWiped() { } public void testMissingRepo() throws Exception { - SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy("test-policy", "snap", - "*/1 * * * * ?", "missing-repo", Collections.emptyMap()); + final String policyId = "test-policy"; + final String missingRepoName = "missing-repo"; + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy(policyId, "snap", + "*/1 * * * * ?", missingRepoName, Collections.emptyMap()); Request putLifecycle = new Request("PUT", "/_slm/policy/test-policy"); XContentBuilder lifecycleBuilder = JsonXContent.contentBuilder(); @@ -319,6 +326,16 @@ private void assertHistoryIsPresent(String policyName, boolean success, String r logger.error(e); fail("failed to perform search:" + e.getMessage()); } + + // Finally, check that the history index is in a good state + assertHistoryIndexWaitingForRollover(); + } + + private void assertHistoryIndexWaitingForRollover() throws IOException { + Step.StepKey stepKey = getStepKeyForIndex(SLM_HISTORY_INDEX_PREFIX + "000001"); + assertEquals("hot", stepKey.getPhase()); + assertEquals(RolloverAction.NAME, stepKey.getAction()); + assertEquals(WaitForRolloverReadyStep.NAME, stepKey.getName()); } private void createSnapshotPolicy(String policyName, String snapshotNamePattern, String schedule, String repoId, diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java index 6857d186d92be..9370cad7f8771 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java @@ -56,7 +56,8 @@ public void testValidation() { ValidationException e = policy.validate(); assertThat(e.validationErrors(), - containsInAnyOrder("invalid policy id [a,b]: must not contain ','", + containsInAnyOrder( + "invalid policy id [a,b]: must not contain the following characters [ , \", *, \\, <, |, ,, >, /, ?]", "invalid snapshot name []: must not contain contain" + " the following characters [ , \", *, \\, <, |, ,, >, /, ?]", "invalid repository name [ ]: cannot be empty", diff --git a/x-pack/plugin/ml/qa/ml-with-security/build.gradle b/x-pack/plugin/ml/qa/ml-with-security/build.gradle index f0ddcfbfcbc4b..3793cffbf0e1e 100644 --- a/x-pack/plugin/ml/qa/ml-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/ml-with-security/build.gradle @@ -74,6 +74,8 @@ integTest.runner { 'ml/data_frame_analytics_crud/Test put regression given maximum_number_trees is greater than 2k', 'ml/data_frame_analytics_crud/Test put regression given feature_bag_fraction is negative', 'ml/data_frame_analytics_crud/Test put regression given feature_bag_fraction is greater than one', + 'ml/data_frame_analytics_crud/Test put regression given training_percent is less than one', + 
'ml/data_frame_analytics_crud/Test put regression given training_percent is greater than hundred', 'ml/evaluate_data_frame/Test given missing index', 'ml/evaluate_data_frame/Test given index does not exist', 'ml/evaluate_data_frame/Test given missing evaluation', @@ -146,6 +148,7 @@ integTest.runner { 'ml/start_data_frame_analytics/Test start given source index has no compatible fields', 'ml/start_data_frame_analytics/Test start with inconsistent body/param ids', 'ml/start_data_frame_analytics/Test start given dest index is not empty', + 'ml/start_data_frame_analytics/Test start with compatible fields but no data', 'ml/start_stop_datafeed/Test start datafeed job, but not open', 'ml/start_stop_datafeed/Test start non existing datafeed', 'ml/start_stop_datafeed/Test stop non existing datafeed', diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java index 333811bcdb711..4491fac98c386 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java @@ -143,12 +143,12 @@ protected SearchResponse searchStoredProgress(String id) { } protected static DataFrameAnalyticsConfig buildRegressionAnalytics(String id, String[] sourceIndex, String destIndex, - @Nullable String resultsField, String dependentVariable) { + @Nullable String resultsField, Regression regression) { DataFrameAnalyticsConfig.Builder configBuilder = new DataFrameAnalyticsConfig.Builder(); configBuilder.setId(id); configBuilder.setSource(new DataFrameAnalyticsSource(sourceIndex, null)); configBuilder.setDest(new DataFrameAnalyticsDest(destIndex, resultsField)); - configBuilder.setAnalysis(new Regression(dependentVariable)); + configBuilder.setAnalysis(regression); return configBuilder.build(); } } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java new file mode 100644 index 0000000000000..d6bc2aeaee1eb --- /dev/null +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java @@ -0,0 +1,234 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; +import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; +import org.elasticsearch.xpack.core.ml.dataframe.analyses.Regression; +import org.junit.After; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; + +public class RegressionIT extends MlNativeDataFrameAnalyticsIntegTestCase { + + @After + public void cleanup() { + cleanUp(); + } + + public void testSingleNumericFeatureAndMixedTrainingAndNonTrainingRows() throws Exception { + String jobId = "regression_single_numeric_feature_and_mixed_data_set"; + String sourceIndex = jobId + "_source_index"; + + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + + List featureValues = Arrays.asList(1.0, 2.0, 3.0); + List dependentVariableValues = Arrays.asList(10.0, 20.0, 30.0); + + for (int i = 0; i < 350; i++) { + Double field = featureValues.get(i % 3); + Double value = dependentVariableValues.get(i % 3); + + IndexRequest indexRequest = new IndexRequest(sourceIndex); + if (i < 300) { + indexRequest.source("feature", field, "variable", value); + } else { + indexRequest.source("feature", field); + } + bulkRequestBuilder.add(indexRequest); + } + BulkResponse bulkResponse = bulkRequestBuilder.get(); + if (bulkResponse.hasFailures()) { + fail("Failed to index data: " + bulkResponse.buildFailureMessage()); + } + + String destIndex = sourceIndex + "_results"; + DataFrameAnalyticsConfig config = buildRegressionAnalytics(jobId, new String[] {sourceIndex}, destIndex, null, + new Regression("variable")); + registerAnalytics(config); + putAnalytics(config); + + assertState(jobId, DataFrameAnalyticsState.STOPPED); + assertProgress(jobId, 0, 0, 0, 0); + + startAnalytics(jobId); + waitUntilAnalyticsIsStopped(jobId); + + SearchResponse sourceData = client().prepareSearch(sourceIndex).setTrackTotalHits(true).setSize(1000).get(); + for (SearchHit hit : sourceData.getHits()) { + GetResponse destDocGetResponse = client().prepareGet().setIndex(config.getDest().getIndex()).setId(hit.getId()).get(); + assertThat(destDocGetResponse.isExists(), is(true)); + Map sourceDoc = hit.getSourceAsMap(); + Map destDoc = destDocGetResponse.getSource(); + for (String field : sourceDoc.keySet()) { + assertThat(destDoc.containsKey(field), is(true)); + assertThat(destDoc.get(field), equalTo(sourceDoc.get(field))); + } + assertThat(destDoc.containsKey("ml"), is(true)); + + @SuppressWarnings("unchecked") + Map resultsObject = (Map) destDoc.get("ml"); + + assertThat(resultsObject.containsKey("variable_prediction"), is(true)); + + // TODO reenable this assertion when the backend is stable + // it seems for this case values can be as far off as 2.0 + + // double featureValue = (double) destDoc.get("feature"); + // double predictionValue = (double) resultsObject.get("variable_prediction"); + // assertThat(predictionValue, closeTo(10 * 
featureValue, 2.0)); + + boolean expectedIsTraining = destDoc.containsKey("variable"); + assertThat(resultsObject.containsKey("is_training"), is(true)); + assertThat(resultsObject.get("is_training"), is(expectedIsTraining)); + } + + assertProgress(jobId, 100, 100, 100, 100); + assertThat(searchStoredProgress(jobId).getHits().getTotalHits().value, equalTo(1L)); + } + + public void testWithOnlyTrainingRowsAndTrainingPercentIsHundred() throws Exception { + String jobId = "regression_only_training_data_and_training_percent_is_hundred"; + String sourceIndex = jobId + "_source_index"; + + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + + List featureValues = Arrays.asList(1.0, 2.0, 3.0); + List dependentVariableValues = Arrays.asList(10.0, 20.0, 30.0); + + for (int i = 0; i < 350; i++) { + Double field = featureValues.get(i % 3); + Double value = dependentVariableValues.get(i % 3); + + IndexRequest indexRequest = new IndexRequest(sourceIndex); + indexRequest.source("feature", field, "variable", value); + bulkRequestBuilder.add(indexRequest); + } + BulkResponse bulkResponse = bulkRequestBuilder.get(); + if (bulkResponse.hasFailures()) { + fail("Failed to index data: " + bulkResponse.buildFailureMessage()); + } + + String destIndex = sourceIndex + "_results"; + DataFrameAnalyticsConfig config = buildRegressionAnalytics(jobId, new String[] {sourceIndex}, destIndex, null, + new Regression("variable")); + registerAnalytics(config); + putAnalytics(config); + + assertState(jobId, DataFrameAnalyticsState.STOPPED); + assertProgress(jobId, 0, 0, 0, 0); + + startAnalytics(jobId); + waitUntilAnalyticsIsStopped(jobId); + + SearchResponse sourceData = client().prepareSearch(sourceIndex).setTrackTotalHits(true).setSize(1000).get(); + for (SearchHit hit : sourceData.getHits()) { + GetResponse destDocGetResponse = client().prepareGet().setIndex(config.getDest().getIndex()).setId(hit.getId()).get(); + assertThat(destDocGetResponse.isExists(), is(true)); + Map sourceDoc = hit.getSourceAsMap(); + Map destDoc = destDocGetResponse.getSource(); + for (String field : sourceDoc.keySet()) { + assertThat(destDoc.containsKey(field), is(true)); + assertThat(destDoc.get(field), equalTo(sourceDoc.get(field))); + } + assertThat(destDoc.containsKey("ml"), is(true)); + + @SuppressWarnings("unchecked") + Map resultsObject = (Map) destDoc.get("ml"); + + assertThat(resultsObject.containsKey("variable_prediction"), is(true)); + assertThat(resultsObject.containsKey("is_training"), is(true)); + assertThat(resultsObject.get("is_training"), is(true)); + } + + assertProgress(jobId, 100, 100, 100, 100); + assertThat(searchStoredProgress(jobId).getHits().getTotalHits().value, equalTo(1L)); + } + + public void testWithOnlyTrainingRowsAndTrainingPercentIsFifty() throws Exception { + String jobId = "regression_only_training_data_and_training_percent_is_fifty"; + String sourceIndex = jobId + "_source_index"; + + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + + List featureValues = Arrays.asList(1.0, 2.0, 3.0); + List dependentVariableValues = Arrays.asList(10.0, 20.0, 30.0); + + for (int i = 0; i < 350; i++) { + Double field = featureValues.get(i % 3); + Double value = dependentVariableValues.get(i % 3); + + IndexRequest indexRequest = new IndexRequest(sourceIndex); + indexRequest.source("feature", field, "variable", value); + 
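+            // Every indexed document carries the dependent variable, so all 350 rows are eligible for
+            // training; the training_percent of 50 configured below decides which of them are actually used.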
bulkRequestBuilder.add(indexRequest); + } + BulkResponse bulkResponse = bulkRequestBuilder.get(); + if (bulkResponse.hasFailures()) { + fail("Failed to index data: " + bulkResponse.buildFailureMessage()); + } + + String destIndex = sourceIndex + "_results"; + DataFrameAnalyticsConfig config = buildRegressionAnalytics(jobId, new String[] {sourceIndex}, destIndex, null, + new Regression("variable", null, null, null, null, null, null, 50.0)); + registerAnalytics(config); + putAnalytics(config); + + assertState(jobId, DataFrameAnalyticsState.STOPPED); + assertProgress(jobId, 0, 0, 0, 0); + + startAnalytics(jobId); + waitUntilAnalyticsIsStopped(jobId); + + int trainingRowsCount = 0; + int nonTrainingRowsCount = 0; + SearchResponse sourceData = client().prepareSearch(sourceIndex).setTrackTotalHits(true).setSize(1000).get(); + for (SearchHit hit : sourceData.getHits()) { + GetResponse destDocGetResponse = client().prepareGet().setIndex(config.getDest().getIndex()).setId(hit.getId()).get(); + assertThat(destDocGetResponse.isExists(), is(true)); + Map sourceDoc = hit.getSourceAsMap(); + Map destDoc = destDocGetResponse.getSource(); + for (String field : sourceDoc.keySet()) { + assertThat(destDoc.containsKey(field), is(true)); + assertThat(destDoc.get(field), equalTo(sourceDoc.get(field))); + } + assertThat(destDoc.containsKey("ml"), is(true)); + + @SuppressWarnings("unchecked") + Map resultsObject = (Map) destDoc.get("ml"); + + assertThat(resultsObject.containsKey("variable_prediction"), is(true)); + + assertThat(resultsObject.containsKey("is_training"), is(true)); + // Let's just assert there's both training and non-training results + if ((boolean) resultsObject.get("is_training")) { + trainingRowsCount++; + } else { + nonTrainingRowsCount++; + } + } + assertThat(trainingRowsCount, greaterThan(0)); + assertThat(nonTrainingRowsCount, greaterThan(0)); + + assertProgress(jobId, 100, 100, 100, 100); + assertThat(searchStoredProgress(jobId).getHits().getTotalHits().value, equalTo(1L)); + } +} diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java index 3dfa83470f507..6920949bb9a14 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java @@ -28,8 +28,6 @@ import org.elasticsearch.xpack.core.ml.dataframe.analyses.OutlierDetection; import org.junit.After; -import java.util.Arrays; -import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.allOf; @@ -393,77 +391,6 @@ public void testOutlierDetectionWithPreExistingDestIndex() throws Exception { assertThat(searchStoredProgress(id).getHits().getTotalHits().value, equalTo(1L)); } - public void testRegressionWithNumericFeatureAndFewDocuments() throws Exception { - String sourceIndex = "test-regression-with-numeric-feature-and-few-docs"; - - BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); - bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - - List featureValues = Arrays.asList(1.0, 2.0, 3.0); - List dependentVariableValues = Arrays.asList(10.0, 20.0, 30.0); - - for (int i = 0; i < 350; i++) { - Double field = featureValues.get(i % 3); - Double value = dependentVariableValues.get(i 
% 3); - - IndexRequest indexRequest = new IndexRequest(sourceIndex); - if (i < 300) { - indexRequest.source("feature", field, "variable", value); - } else { - indexRequest.source("feature", field); - } - bulkRequestBuilder.add(indexRequest); - } - BulkResponse bulkResponse = bulkRequestBuilder.get(); - if (bulkResponse.hasFailures()) { - fail("Failed to index data: " + bulkResponse.buildFailureMessage()); - } - - String id = "test_regression_with_numeric_feature_and_few_docs"; - DataFrameAnalyticsConfig config = buildRegressionAnalytics(id, new String[] {sourceIndex}, - sourceIndex + "-results", null, "variable"); - registerAnalytics(config); - putAnalytics(config); - - assertState(id, DataFrameAnalyticsState.STOPPED); - assertProgress(id, 0, 0, 0, 0); - - startAnalytics(id); - waitUntilAnalyticsIsStopped(id); - - int resultsWithPrediction = 0; - SearchResponse sourceData = client().prepareSearch(sourceIndex).setTrackTotalHits(true).setSize(1000).get(); - assertThat(sourceData.getHits().getTotalHits().value, equalTo(350L)); - for (SearchHit hit : sourceData.getHits()) { - GetResponse destDocGetResponse = client().prepareGet().setIndex(config.getDest().getIndex()).setId(hit.getId()).get(); - assertThat(destDocGetResponse.isExists(), is(true)); - Map sourceDoc = hit.getSourceAsMap(); - Map destDoc = destDocGetResponse.getSource(); - for (String field : sourceDoc.keySet()) { - assertThat(destDoc.containsKey(field), is(true)); - assertThat(destDoc.get(field), equalTo(sourceDoc.get(field))); - } - assertThat(destDoc.containsKey("ml"), is(true)); - - @SuppressWarnings("unchecked") - Map resultsObject = (Map) destDoc.get("ml"); - - assertThat(resultsObject.containsKey("variable_prediction"), is(true)); - if (resultsObject.containsKey("variable_prediction")) { - resultsWithPrediction++; - double featureValue = (double) destDoc.get("feature"); - double predictionValue = (double) resultsObject.get("variable_prediction"); - // TODO reenable this assertion when the backend is stable - // it seems for this case values can be as far off as 2.0 - // assertThat(predictionValue, closeTo(10 * featureValue, 2.0)); - } - } - assertThat(resultsWithPrediction, greaterThan(0)); - - assertProgress(id, 100, 100, 100, 100); - assertThat(searchStoredProgress(id).getHits().getTotalHits().value, equalTo(1L)); - } - public void testModelMemoryLimitLowerThanEstimatedMemoryUsage() { String sourceIndex = "test-model-memory-limit"; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 4af18d7517899..a0b26c416de02 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -470,7 +470,7 @@ public Collection createComponents(Client client, ClusterService cluster return Collections.singletonList(new JobManagerHolder()); } - AnomalyDetectionAuditor auditor = new AnomalyDetectionAuditor(client, clusterService.getNodeName()); + AnomalyDetectionAuditor anomalyDetectionAuditor = new AnomalyDetectionAuditor(client, clusterService.getNodeName()); JobResultsProvider jobResultsProvider = new JobResultsProvider(client, settings); JobResultsPersister jobResultsPersister = new JobResultsPersister(client); JobDataCountsPersister jobDataCountsPersister = new JobDataCountsPersister(client); @@ -482,7 +482,7 @@ public Collection createComponents(Client client, ClusterService cluster 
jobResultsProvider, jobResultsPersister, clusterService, - auditor, + anomalyDetectionAuditor, threadPool, client, notifier, @@ -534,21 +534,23 @@ public Collection createComponents(Client client, ClusterService cluster NormalizerFactory normalizerFactory = new NormalizerFactory(normalizerProcessFactory, threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME)); AutodetectProcessManager autodetectProcessManager = new AutodetectProcessManager(env, settings, client, threadPool, - xContentRegistry, auditor, clusterService, jobManager, jobResultsProvider, jobResultsPersister, jobDataCountsPersister, - autodetectProcessFactory, normalizerFactory, nativeStorageProvider); + xContentRegistry, anomalyDetectionAuditor, clusterService, jobManager, jobResultsProvider, jobResultsPersister, + jobDataCountsPersister, autodetectProcessFactory, normalizerFactory, nativeStorageProvider); this.autodetectProcessManager.set(autodetectProcessManager); DatafeedJobBuilder datafeedJobBuilder = new DatafeedJobBuilder( client, xContentRegistry, - auditor, + anomalyDetectionAuditor, System::currentTimeMillis, jobConfigProvider, jobResultsProvider, datafeedConfigProvider, - jobResultsPersister); + jobResultsPersister, + settings, + clusterService.getNodeName()); DatafeedManager datafeedManager = new DatafeedManager(threadPool, client, clusterService, datafeedJobBuilder, - System::currentTimeMillis, auditor, autodetectProcessManager); + System::currentTimeMillis, anomalyDetectionAuditor, autodetectProcessManager); this.datafeedManager.set(datafeedManager); // Data frame analytics components @@ -590,8 +592,8 @@ public Collection createComponents(Client client, ClusterService cluster new MlInitializationService(settings, threadPool, clusterService, client), jobDataCountsPersister, datafeedManager, - auditor, - new MlAssignmentNotifier(settings, auditor, threadPool, client, clusterService), + anomalyDetectionAuditor, + new MlAssignmentNotifier(settings, anomalyDetectionAuditor, threadPool, client, clusterService), memoryTracker, analyticsProcessManager, memoryEstimationProcessManager, @@ -797,7 +799,8 @@ public UnaryOperator> getIndexTemplateMetaDat } try (XContentBuilder auditMapping = ElasticsearchMappings.auditMessageMapping()) { - IndexTemplateMetaData notificationMessageTemplate = IndexTemplateMetaData.builder(AuditorField.NOTIFICATIONS_INDEX) + IndexTemplateMetaData notificationMessageTemplate = + IndexTemplateMetaData.builder(AuditorField.NOTIFICATIONS_INDEX) .putMapping(SINGLE_MAPPING_NAME, Strings.toString(auditMapping)) .patterns(Collections.singletonList(AuditorField.NOTIFICATIONS_INDEX)) .version(Version.CURRENT.id) @@ -814,7 +817,8 @@ public UnaryOperator> getIndexTemplateMetaDat } try (XContentBuilder docMapping = MlMetaIndex.docMapping()) { - IndexTemplateMetaData metaTemplate = IndexTemplateMetaData.builder(MlMetaIndex.INDEX_NAME) + IndexTemplateMetaData metaTemplate = + IndexTemplateMetaData.builder(MlMetaIndex.INDEX_NAME) .patterns(Collections.singletonList(MlMetaIndex.INDEX_NAME)) .settings(Settings.builder() // Our indexes are small and one shard puts the @@ -831,7 +835,8 @@ public UnaryOperator> getIndexTemplateMetaDat } try (XContentBuilder configMapping = ElasticsearchMappings.configMapping()) { - IndexTemplateMetaData configTemplate = IndexTemplateMetaData.builder(AnomalyDetectorsIndex.configIndexName()) + IndexTemplateMetaData configTemplate = + IndexTemplateMetaData.builder(AnomalyDetectorsIndex.configIndexName()) .patterns(Collections.singletonList(AnomalyDetectorsIndex.configIndexName())) 
.settings(Settings.builder() // Our indexes are small and one shard puts the @@ -850,7 +855,8 @@ public UnaryOperator> getIndexTemplateMetaDat } try (XContentBuilder stateMapping = ElasticsearchMappings.stateMapping()) { - IndexTemplateMetaData stateTemplate = IndexTemplateMetaData.builder(AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX) + IndexTemplateMetaData stateTemplate = + IndexTemplateMetaData.builder(AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX) .patterns(Collections.singletonList(AnomalyDetectorsIndex.jobStateIndexPattern())) // TODO review these settings .settings(Settings.builder() @@ -866,7 +872,8 @@ public UnaryOperator> getIndexTemplateMetaDat } try (XContentBuilder docMapping = ElasticsearchMappings.resultsMapping(SINGLE_MAPPING_NAME)) { - IndexTemplateMetaData jobResultsTemplate = IndexTemplateMetaData.builder(AnomalyDetectorsIndex.jobResultsIndexPrefix()) + IndexTemplateMetaData jobResultsTemplate = + IndexTemplateMetaData.builder(AnomalyDetectorsIndex.jobResultsIndexPrefix()) .patterns(Collections.singletonList(AnomalyDetectorsIndex.jobResultsIndexPrefix() + "*")) .settings(Settings.builder() .put(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "0-1") diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java index 16b2298f52306..e7fa989447b87 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java @@ -34,6 +34,7 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; @@ -226,10 +227,41 @@ public void onFailure(Exception e) { } private void getConfigAndValidate(String id, ActionListener finalListener) { + + // Step 5. Validate that there are analyzable data in the source index + ActionListener validateMappingsMergeListener = ActionListener.wrap( + config -> DataFrameDataExtractorFactory.createForSourceIndices(client, + "validate_source_index_has_rows-" + id, + config, + ActionListener.wrap( + dataFrameDataExtractorFactory -> + dataFrameDataExtractorFactory + .newExtractor(false) + .collectDataSummaryAsync(ActionListener.wrap( + dataSummary -> { + if (dataSummary.rows == 0) { + finalListener.onFailure(new ElasticsearchStatusException( + "Unable to start {} as there are no analyzable data in source indices [{}].", + RestStatus.BAD_REQUEST, + id, + Strings.arrayToCommaDelimitedString(config.getSource().getIndex()) + )); + } else { + finalListener.onResponse(config); + } + }, + finalListener::onFailure + )), + finalListener::onFailure + )) + , + finalListener::onFailure + ); + // Step 4. 
Validate mappings can be merged ActionListener toValidateMappingsListener = ActionListener.wrap( config -> MappingsMerger.mergeMappings(client, config.getHeaders(), config.getSource().getIndex(), ActionListener.wrap( - mappings -> finalListener.onResponse(config), finalListener::onFailure)), + mappings -> validateMappingsMergeListener.onResponse(config), finalListener::onFailure)), finalListener::onFailure ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index 782e4d3c4ca96..a65f7cc615df0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -37,6 +37,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -47,6 +48,7 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedTimingStats; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.MlConfigMigrationEligibilityCheck; @@ -85,6 +87,7 @@ public class TransportStartDatafeedAction extends TransportMasterNodeAction createDataExtrator = job -> { + Consumer createDataExtractor = job -> { if (RemoteClusterLicenseChecker.containsRemoteIndex(params.getDatafeedIndices())) { final RemoteClusterLicenseChecker remoteClusterLicenseChecker = new RemoteClusterLicenseChecker(client, XPackLicenseState::isMachineLearningAllowedForOperationMode); @@ -193,6 +197,13 @@ public void onFailure(Exception e) { response -> { if (response.isSuccess() == false) { listener.onFailure(createUnlicensedError(params.getDatafeedId(), response)); + } else if (remoteClusterSearchSupported == false) { + listener.onFailure( + ExceptionsHelper.badRequestException(Messages.getMessage( + Messages.DATAFEED_NEEDS_REMOTE_CLUSTER_SEARCH, + datafeedConfigHolder.get().getId(), + RemoteClusterLicenseChecker.remoteIndices(datafeedConfigHolder.get().getIndices()), + clusterService.getNodeName()))); } else { createDataExtractor(job, datafeedConfigHolder.get(), params, waitForTaskListener); } @@ -214,7 +225,7 @@ public void onFailure(Exception e) { Job job = jobBuilder.build(); validate(job, datafeedConfigHolder.get(), tasks, xContentRegistry); auditDeprecations(datafeedConfigHolder.get(), job, auditor, xContentRegistry); - createDataExtrator.accept(job); + createDataExtractor.accept(job); } catch (Exception e) { listener.onFailure(e); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java index 728e14f6f4fed..53ac89cd0b376 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java @@ -8,17 +8,22 @@ import 
org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.license.RemoteClusterLicenseChecker; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedJobValidator; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedTimingStats; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.elasticsearch.xpack.core.ml.job.results.Result; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetector; import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetectorFactory; import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; @@ -30,6 +35,7 @@ import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import java.util.Collections; +import java.util.List; import java.util.Objects; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; @@ -45,11 +51,13 @@ public class DatafeedJobBuilder { private final JobResultsProvider jobResultsProvider; private final DatafeedConfigProvider datafeedConfigProvider; private final JobResultsPersister jobResultsPersister; + private final boolean remoteClusterSearchSupported; + private final String nodeName; public DatafeedJobBuilder(Client client, NamedXContentRegistry xContentRegistry, AnomalyDetectionAuditor auditor, Supplier currentTimeSupplier, JobConfigProvider jobConfigProvider, JobResultsProvider jobResultsProvider, DatafeedConfigProvider datafeedConfigProvider, - JobResultsPersister jobResultsPersister) { + JobResultsPersister jobResultsPersister, Settings settings, String nodeName) { this.client = client; this.xContentRegistry = Objects.requireNonNull(xContentRegistry); this.auditor = Objects.requireNonNull(auditor); @@ -58,6 +66,8 @@ public DatafeedJobBuilder(Client client, NamedXContentRegistry xContentRegistry, this.jobResultsProvider = Objects.requireNonNull(jobResultsProvider); this.datafeedConfigProvider = Objects.requireNonNull(datafeedConfigProvider); this.jobResultsPersister = Objects.requireNonNull(jobResultsPersister); + this.remoteClusterSearchSupported = RemoteClusterService.ENABLE_REMOTE_CLUSTERS.get(settings); + this.nodeName = nodeName; } void build(String datafeedId, ActionListener listener) { @@ -168,6 +178,18 @@ void build(String datafeedId, ActionListener listener) { configBuilder -> { try { datafeedConfigHolder.set(configBuilder.build()); + if (remoteClusterSearchSupported == false) { + List remoteIndices = RemoteClusterLicenseChecker.remoteIndices(datafeedConfigHolder.get().getIndices()); + if (remoteIndices.isEmpty() == false) { + listener.onFailure( + ExceptionsHelper.badRequestException(Messages.getMessage( + Messages.DATAFEED_NEEDS_REMOTE_CLUSTER_SEARCH, + configBuilder.getId(), + remoteIndices, + nodeName))); + return; + } + } 
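+                        // Remote indices are only allowed when this node supports cross-cluster search;
+                        // otherwise the datafeed build fails fast here, before the job config is even looked up.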
jobConfigProvider.getJob(datafeedConfigHolder.get().getJobId(), jobConfigListener); } catch (Exception e) { listener.onFailure(e); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java index 75b5ad950cb30..657608d08bb9b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java @@ -8,6 +8,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.SearchAction; @@ -22,6 +23,7 @@ import org.elasticsearch.search.fetch.StoredFieldsContext; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.ml.dataframe.analyses.Types; import org.elasticsearch.xpack.ml.datafeed.extractor.fields.ExtractedField; import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsIndex; @@ -234,21 +236,40 @@ public List getFieldNames() { } public DataSummary collectDataSummary() { - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client, SearchAction.INSTANCE) + SearchRequestBuilder searchRequestBuilder = buildDataSummarySearchRequestBuilder(); + SearchResponse searchResponse = executeSearchRequest(searchRequestBuilder); + return new DataSummary(searchResponse.getHits().getTotalHits().value, context.extractedFields.getAllFields().size()); + } + + public void collectDataSummaryAsync(ActionListener dataSummaryActionListener) { + SearchRequestBuilder searchRequestBuilder = buildDataSummarySearchRequestBuilder(); + final int numberOfFields = context.extractedFields.getAllFields().size(); + + ClientHelper.executeWithHeadersAsync(context.headers, + ClientHelper.ML_ORIGIN, + client, + SearchAction.INSTANCE, + searchRequestBuilder.request(), + ActionListener.wrap( + searchResponse -> dataSummaryActionListener.onResponse( + new DataSummary(searchResponse.getHits().getTotalHits().value, numberOfFields)), + dataSummaryActionListener::onFailure + )); + } + + private SearchRequestBuilder buildDataSummarySearchRequestBuilder() { + return new SearchRequestBuilder(client, SearchAction.INSTANCE) .setIndices(context.indices) .setSize(0) .setQuery(context.query) .setTrackTotalHits(true); - - SearchResponse searchResponse = executeSearchRequest(searchRequestBuilder); - return new DataSummary(searchResponse.getHits().getTotalHits().value, context.extractedFields.getAllFields().size()); } public Set getCategoricalFields() { Set categoricalFields = new HashSet<>(); for (ExtractedField extractedField : context.extractedFields.getAllFields()) { String fieldName = extractedField.getName(); - if (ExtractedFieldsDetector.CATEGORICAL_TYPES.containsAll(extractedField.getTypes())) { + if (Types.categorical().containsAll(extractedField.getTypes())) { categoricalFields.add(fieldName); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetector.java index 
017b7070fcda2..dc173f4d8ffcb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetector.java @@ -11,12 +11,16 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.BooleanFieldMapper; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsDest; +import org.elasticsearch.xpack.core.ml.dataframe.analyses.RequiredField; +import org.elasticsearch.xpack.core.ml.dataframe.analyses.Types; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.NameResolver; @@ -32,9 +36,10 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; +import java.util.TreeSet; import java.util.stream.Collectors; -import java.util.stream.Stream; public class ExtractedFieldsDetector { @@ -46,18 +51,6 @@ public class ExtractedFieldsDetector { private static final List IGNORE_FIELDS = Arrays.asList("_id", "_field_names", "_index", "_parent", "_routing", "_seq_no", "_source", "_type", "_uid", "_version", "_feature", "_ignored", DataFrameAnalyticsIndex.ID_COPY); - public static final Set CATEGORICAL_TYPES = Collections.unmodifiableSet(new HashSet<>(Arrays.asList("text", "keyword", "ip"))); - - private static final Set NUMERICAL_TYPES; - - static { - Set numericalTypes = Stream.of(NumberFieldMapper.NumberType.values()) - .map(NumberFieldMapper.NumberType::typeName) - .collect(Collectors.toSet()); - numericalTypes.add("scaled_float"); - NUMERICAL_TYPES = Collections.unmodifiableSet(numericalTypes); - } - private final String[] index; private final DataFrameAnalyticsConfig config; private final String resultsField; @@ -76,30 +69,32 @@ public class ExtractedFieldsDetector { } public ExtractedFields detect() { - Set fields = new HashSet<>(fieldCapabilitiesResponse.get().keySet()); - fields.removeAll(IGNORE_FIELDS); - removeFieldsUnderResultsField(fields); - includeAndExcludeFields(fields); - removeFieldsWithIncompatibleTypes(fields); - checkRequiredFieldsArePresent(fields); + Set fields = getIncludedFields(); if (fields.isEmpty()) { - throw ExceptionsHelper.badRequestException("No compatible fields could be detected in index {}", Arrays.toString(index)); + throw ExceptionsHelper.badRequestException("No compatible fields could be detected in index {}. 
Supported types are {}.", + Arrays.toString(index), + getSupportedTypes()); } - List sortedFields = new ArrayList<>(fields); - // We sort the fields to ensure the checksum for each document is deterministic - Collections.sort(sortedFields); - ExtractedFields extractedFields = ExtractedFields.build(sortedFields, Collections.emptySet(), fieldCapabilitiesResponse); - if (extractedFields.getDocValueFields().size() > docValueFieldsLimit) { - extractedFields = fetchFromSourceIfSupported(extractedFields); - if (extractedFields.getDocValueFields().size() > docValueFieldsLimit) { - throw ExceptionsHelper.badRequestException("[{}] fields must be retrieved from doc_values but the limit is [{}]; " + - "please adjust the index level setting [{}]", extractedFields.getDocValueFields().size(), docValueFieldsLimit, - IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING.getKey()); - } + checkNoIgnoredFields(fields); + checkFieldsHaveCompatibleTypes(fields); + checkRequiredFields(fields); + return detectExtractedFields(fields); + } + + private Set getIncludedFields() { + Set fields = new HashSet<>(fieldCapabilitiesResponse.get().keySet()); + removeFieldsUnderResultsField(fields); + FetchSourceContext analyzedFields = config.getAnalyzedFields(); + + // If the user has not explicitly included fields we'll include all compatible fields + if (analyzedFields == null || analyzedFields.includes().length == 0) { + fields.removeAll(IGNORE_FIELDS); + removeFieldsWithIncompatibleTypes(fields); } - return extractedFields; + includeAndExcludeFields(fields); + return fields; } private void removeFieldsUnderResultsField(Set fields) { @@ -132,25 +127,43 @@ private void removeFieldsWithIncompatibleTypes(Set fields) { Iterator fieldsIterator = fields.iterator(); while (fieldsIterator.hasNext()) { String field = fieldsIterator.next(); - Map fieldCaps = fieldCapabilitiesResponse.getField(field); - if (fieldCaps == null) { - LOGGER.debug("[{}] Removing field [{}] because it is missing from mappings", config.getId(), field); + if (hasCompatibleType(field) == false) { fieldsIterator.remove(); - } else { - Set fieldTypes = fieldCaps.keySet(); - if (NUMERICAL_TYPES.containsAll(fieldTypes)) { - LOGGER.debug("[{}] field [{}] is compatible as it is numerical", config.getId(), field); - } else if (config.getAnalysis().supportsCategoricalFields() && CATEGORICAL_TYPES.containsAll(fieldTypes)) { - LOGGER.debug("[{}] field [{}] is compatible as it is categorical", config.getId(), field); - } else { - LOGGER.debug("[{}] Removing field [{}] because its types are not supported; types {}", - config.getId(), field, fieldTypes); - fieldsIterator.remove(); - } } } } + private boolean hasCompatibleType(String field) { + Map fieldCaps = fieldCapabilitiesResponse.getField(field); + if (fieldCaps == null) { + LOGGER.debug("[{}] incompatible field [{}] because it is missing from mappings", config.getId(), field); + return false; + } + Set fieldTypes = fieldCaps.keySet(); + if (Types.numerical().containsAll(fieldTypes)) { + LOGGER.debug("[{}] field [{}] is compatible as it is numerical", config.getId(), field); + return true; + } else if (config.getAnalysis().supportsCategoricalFields() && Types.categorical().containsAll(fieldTypes)) { + LOGGER.debug("[{}] field [{}] is compatible as it is categorical", config.getId(), field); + return true; + } else if (isBoolean(fieldTypes)) { + LOGGER.debug("[{}] field [{}] is compatible as it is boolean", config.getId(), field); + return true; + } else { + LOGGER.debug("[{}] incompatible field [{}]; types {}; supported 
{}", config.getId(), field, fieldTypes, getSupportedTypes()); + return false; + } + } + + private Set getSupportedTypes() { + Set supportedTypes = new TreeSet<>(Types.numerical()); + if (config.getAnalysis().supportsCategoricalFields()) { + supportedTypes.addAll(Types.categorical()); + } + supportedTypes.add(BooleanFieldMapper.CONTENT_TYPE); + return supportedTypes; + } + private void includeAndExcludeFields(Set fields) { FetchSourceContext analyzedFields = config.getAnalyzedFields(); if (analyzedFields == null) { @@ -184,16 +197,61 @@ private void includeAndExcludeFields(Set fields) { } } - private void checkRequiredFieldsArePresent(Set fields) { - List missingFields = config.getAnalysis().getRequiredFields() - .stream() - .filter(f -> fields.contains(f) == false) - .collect(Collectors.toList()); - if (missingFields.isEmpty() == false) { - throw ExceptionsHelper.badRequestException("required fields {} are missing", missingFields); + private void checkNoIgnoredFields(Set fields) { + Optional ignoreField = IGNORE_FIELDS.stream().filter(fields::contains).findFirst(); + if (ignoreField.isPresent()) { + throw ExceptionsHelper.badRequestException("field [{}] cannot be analyzed", ignoreField.get()); + } + } + + private void checkFieldsHaveCompatibleTypes(Set fields) { + for (String field : fields) { + Map fieldCaps = fieldCapabilitiesResponse.getField(field); + if (fieldCaps == null) { + throw ExceptionsHelper.badRequestException("no mappings could be found for field [{}]", field); + } + + if (hasCompatibleType(field) == false) { + throw ExceptionsHelper.badRequestException("field [{}] has unsupported type {}. Supported types are {}.", field, + fieldCaps.keySet(), getSupportedTypes()); + } + } + } + + private void checkRequiredFields(Set fields) { + List requiredFields = config.getAnalysis().getRequiredFields(); + for (RequiredField requiredField : requiredFields) { + Map fieldCaps = fieldCapabilitiesResponse.getField(requiredField.getName()); + if (fields.contains(requiredField.getName()) == false || fieldCaps == null || fieldCaps.isEmpty()) { + List requiredFieldNames = requiredFields.stream().map(RequiredField::getName).collect(Collectors.toList()); + throw ExceptionsHelper.badRequestException("required field [{}] is missing; analysis requires fields {}", + requiredField.getName(), requiredFieldNames); + } + Set fieldTypes = fieldCaps.keySet(); + if (requiredField.getTypes().containsAll(fieldTypes) == false) { + throw ExceptionsHelper.badRequestException("invalid types {} for required field [{}]; expected types are {}", + fieldTypes, requiredField.getName(), requiredField.getTypes()); + } } } + private ExtractedFields detectExtractedFields(Set fields) { + List sortedFields = new ArrayList<>(fields); + // We sort the fields to ensure the checksum for each document is deterministic + Collections.sort(sortedFields); + ExtractedFields extractedFields = ExtractedFields.build(sortedFields, Collections.emptySet(), fieldCapabilitiesResponse); + if (extractedFields.getDocValueFields().size() > docValueFieldsLimit) { + extractedFields = fetchFromSourceIfSupported(extractedFields); + if (extractedFields.getDocValueFields().size() > docValueFieldsLimit) { + throw ExceptionsHelper.badRequestException("[{}] fields must be retrieved from doc_values but the limit is [{}]; " + + "please adjust the index level setting [{}]", extractedFields.getDocValueFields().size(), docValueFieldsLimit, + IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING.getKey()); + } + } + extractedFields = 
fetchBooleanFieldsAsIntegers(extractedFields); + return extractedFields; + } + private ExtractedFields fetchFromSourceIfSupported(ExtractedFields extractedFields) { List adjusted = new ArrayList<>(extractedFields.getAllFields().size()); for (ExtractedField field : extractedFields.getDocValueFields()) { @@ -201,4 +259,46 @@ private ExtractedFields fetchFromSourceIfSupported(ExtractedFields extractedFiel } return new ExtractedFields(adjusted); } + + private ExtractedFields fetchBooleanFieldsAsIntegers(ExtractedFields extractedFields) { + List adjusted = new ArrayList<>(extractedFields.getAllFields().size()); + for (ExtractedField field : extractedFields.getAllFields()) { + if (isBoolean(field.getTypes())) { + adjusted.add(new BooleanAsInteger(field)); + } else { + adjusted.add(field); + } + } + return new ExtractedFields(adjusted); + } + + private static boolean isBoolean(Set types) { + return types.size() == 1 && types.contains(BooleanFieldMapper.CONTENT_TYPE); + } + + /** + * We convert boolean fields to integers with values 0, 1 as this is the preferred + * way to consume such features in the analytics process. + */ + private static class BooleanAsInteger extends ExtractedField { + + protected BooleanAsInteger(ExtractedField field) { + super(field.getAlias(), field.getName(), Collections.singleton(BooleanFieldMapper.CONTENT_TYPE), ExtractionMethod.DOC_VALUE); + } + + @Override + public Object[] value(SearchHit hit) { + DocumentField keyValue = hit.field(name); + if (keyValue != null) { + List values = keyValue.getValues().stream().map(v -> Boolean.TRUE.equals(v) ? 1 : 0).collect(Collectors.toList()); + return values.toArray(new Object[0]); + } + return new Object[0]; + } + + @Override + public boolean supportsFromSource() { + return false; + } + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java index e94dbf4747b2a..6a6462c2a9681 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java @@ -11,15 +11,19 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.client.Client; +import org.elasticsearch.common.Strings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; +import org.elasticsearch.xpack.core.ml.dataframe.analyses.DataFrameAnalysis; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.action.TransportStartDataFrameAnalyticsAction.DataFrameAnalyticsTask; import org.elasticsearch.xpack.ml.dataframe.extractor.DataFrameDataExtractor; import org.elasticsearch.xpack.ml.dataframe.extractor.DataFrameDataExtractorFactory; +import org.elasticsearch.xpack.ml.dataframe.process.customprocessing.CustomProcessor; +import org.elasticsearch.xpack.ml.dataframe.process.customprocessing.CustomProcessorFactory; import org.elasticsearch.xpack.ml.dataframe.process.results.AnalyticsResult; import java.io.IOException; @@ -90,7 +94,7 @@ private void 
processData(DataFrameAnalyticsTask task, DataFrameAnalyticsConfig c try { ProcessContext processContext = processContextByAllocation.get(task.getAllocationId()); writeHeaderRecord(dataExtractor, process); - writeDataRows(dataExtractor, process, task.getProgressTracker()); + writeDataRows(dataExtractor, process, config.getAnalysis(), task.getProgressTracker()); process.writeEndOfDataMessage(); process.flushStream(); @@ -122,7 +126,10 @@ private void processData(DataFrameAnalyticsTask task, DataFrameAnalyticsConfig c } private void writeDataRows(DataFrameDataExtractor dataExtractor, AnalyticsProcess process, - DataFrameAnalyticsTask.ProgressTracker progressTracker) throws IOException { + DataFrameAnalysis analysis, DataFrameAnalyticsTask.ProgressTracker progressTracker) throws IOException { + + CustomProcessor customProcessor = new CustomProcessorFactory(dataExtractor.getFieldNames()).create(analysis); + // The extra fields are for the doc hash and the control field (should be an empty string) String[] record = new String[dataExtractor.getFieldNames().size() + 2]; // The value of the control field should be an empty string for data frame rows @@ -139,6 +146,7 @@ private void writeDataRows(DataFrameDataExtractor dataExtractor, AnalyticsProces String[] rowValues = row.getValues(); System.arraycopy(rowValues, 0, record, 0, rowValues.length); record[record.length - 2] = String.valueOf(row.getChecksum()); + customProcessor.process(record); process.writeRecord(record); } } @@ -273,7 +281,15 @@ private synchronized boolean startProcess(DataFrameDataExtractorFactory dataExtr } dataExtractor = dataExtractorFactory.newExtractor(false); - process = createProcess(task, createProcessConfig(config, dataExtractor)); + AnalyticsProcessConfig analyticsProcessConfig = createProcessConfig(config, dataExtractor); + LOGGER.trace("[{}] creating analytics process with config [{}]", config.getId(), Strings.toString(analyticsProcessConfig)); + // If we have no rows, that means there is no data so no point in starting the native process + // just finish the task + if (analyticsProcessConfig.rows() == 0) { + LOGGER.info("[{}] no data found to analyze. 
Will not start analytics native process.", config.getId()); + return false; + } + process = createProcess(task, analyticsProcessConfig); DataFrameRowsJoiner dataFrameRowsJoiner = new DataFrameRowsJoiner(config.getId(), client, dataExtractorFactory.newExtractor(true)); resultProcessor = new AnalyticsResultProcessor(id, dataFrameRowsJoiner, this::isProcessKilled, task.getProgressTracker()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessor.java index 30c063324b15a..666817c0acf89 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessor.java @@ -53,7 +53,6 @@ public void awaitForCompletion() { public void process(AnalyticsProcess process) { long totalRows = process.getConfig().rows(); - LOGGER.info("Total rows = {}", totalRows); long processedRows = 0; // TODO When java 9 features can be used, we will not need the local variable here diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManager.java index 00d8c15e41876..6512dc075d701 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManager.java @@ -17,12 +17,10 @@ import org.elasticsearch.xpack.ml.dataframe.extractor.DataFrameDataExtractorFactory; import org.elasticsearch.xpack.ml.dataframe.process.results.MemoryUsageEstimationResult; -import java.io.IOException; import java.util.Iterator; import java.util.Objects; import java.util.Set; import java.util.concurrent.ExecutorService; -import java.util.function.Consumer; public class MemoryUsageEstimationProcessManager { @@ -74,24 +72,21 @@ private MemoryUsageEstimationResult runJob(String jobId, "", categoricalFields, config.getAnalysis()); - ProcessHolder processHolder = new ProcessHolder(); AnalyticsProcess process = processFactory.createAnalyticsProcess( jobId, processConfig, executorServiceForProcess, - onProcessCrash(jobId, processHolder)); - processHolder.process = process; - if (process.isProcessAlive() == false) { - String errorMsg = - new ParameterizedMessage("[{}] Error while starting process: {}", jobId, process.readError()).getFormattedMessage(); - throw ExceptionsHelper.serverError(errorMsg); - } + // The handler passed here will never be called as AbstractNativeProcess.detectCrash method returns early when + // (processInStream == null) which is the case for MemoryUsageEstimationProcess. 
+ reason -> {}); try { return readResult(jobId, process); } catch (Exception e) { String errorMsg = - new ParameterizedMessage("[{}] Error while processing result [{}]", jobId, e.getMessage()).getFormattedMessage(); + new ParameterizedMessage( + "[{}] Error while processing process output [{}], process errors: [{}]", + jobId, e.getMessage(), process.readError()).getFormattedMessage(); throw ExceptionsHelper.serverError(errorMsg, e); } finally { process.consumeAndCloseOutputStream(); @@ -101,31 +96,14 @@ private MemoryUsageEstimationResult runJob(String jobId, LOGGER.info("[{}] Closed process", jobId); } catch (Exception e) { String errorMsg = - new ParameterizedMessage("[{}] Error while closing process [{}]", jobId, e.getMessage()).getFormattedMessage(); + new ParameterizedMessage( + "[{}] Error while closing process [{}], process errors: [{}]", + jobId, e.getMessage(), process.readError()).getFormattedMessage(); throw ExceptionsHelper.serverError(errorMsg, e); } } } - private static class ProcessHolder { - volatile AnalyticsProcess process; - } - - private static Consumer onProcessCrash(String jobId, ProcessHolder processHolder) { - return reason -> { - AnalyticsProcess process = processHolder.process; - if (process == null) { - LOGGER.error(new ParameterizedMessage("[{}] Process does not exist", jobId)); - return; - } - try { - process.kill(); - } catch (IOException e) { - LOGGER.error(new ParameterizedMessage("[{}] Failed to kill process", jobId), e); - } - }; - } - /** * Extracts {@link MemoryUsageEstimationResult} from process' output. */ diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/customprocessing/CustomProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/customprocessing/CustomProcessor.java new file mode 100644 index 0000000000000..518aee13b8c57 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/customprocessing/CustomProcessor.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.dataframe.process.customprocessing; + +/** + * A processor to manipulate rows before writing them to the process + */ +public interface CustomProcessor { + + void process(String[] row); +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/customprocessing/CustomProcessorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/customprocessing/CustomProcessorFactory.java new file mode 100644 index 0000000000000..baf7e06346944 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/customprocessing/CustomProcessorFactory.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.dataframe.process.customprocessing; + +import org.elasticsearch.xpack.core.ml.dataframe.analyses.DataFrameAnalysis; +import org.elasticsearch.xpack.core.ml.dataframe.analyses.Regression; + +import java.util.List; +import java.util.Objects; + +public class CustomProcessorFactory { + + private final List fieldNames; + + public CustomProcessorFactory(List fieldNames) { + this.fieldNames = Objects.requireNonNull(fieldNames); + } + + public CustomProcessor create(DataFrameAnalysis analysis) { + if (analysis instanceof Regression) { + return new RegressionCustomProcessor(fieldNames, (Regression) analysis); + } + return row -> {}; + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/customprocessing/RegressionCustomProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/customprocessing/RegressionCustomProcessor.java new file mode 100644 index 0000000000000..4b814d3504a83 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/customprocessing/RegressionCustomProcessor.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.dataframe.process.customprocessing; + +import org.elasticsearch.common.Randomness; +import org.elasticsearch.xpack.core.ml.dataframe.analyses.Regression; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.util.List; +import java.util.Random; + +/** + * A processor that randomly clears the dependent variable value + * in order to split the dataset in training and validation data. + * This relies on the fact that when the dependent variable field + * is empty, then the row is not used for training but only to make predictions. 
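+ * For example, with a training_percent of 50, roughly half of the rows that carry a dependent
+ * variable value keep it for training, while the rest are cleared and used for prediction only.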
+ */ +class RegressionCustomProcessor implements CustomProcessor { + + private static final String EMPTY = ""; + + private final int dependentVariableIndex; + private final double trainingPercent; + private final Random random = Randomness.get(); + private boolean isFirstRow = true; + + RegressionCustomProcessor(List fieldNames, Regression regression) { + this.dependentVariableIndex = findDependentVariableIndex(fieldNames, regression.getDependentVariable()); + this.trainingPercent = regression.getTrainingPercent(); + + } + + private static int findDependentVariableIndex(List fieldNames, String dependentVariable) { + for (int i = 0; i < fieldNames.size(); i++) { + if (fieldNames.get(i).equals(dependentVariable)) { + return i; + } + } + throw ExceptionsHelper.serverError("Could not find dependent variable [" + dependentVariable + "] in fields " + fieldNames); + } + + @Override + public void process(String[] row) { + if (canBeUsedForTraining(row)) { + if (isFirstRow) { + // Let's make sure we have at least one training row + isFirstRow = false; + } else if (isRandomlyExcludedFromTraining()) { + row[dependentVariableIndex] = EMPTY; + } + } + } + + private boolean canBeUsedForTraining(String[] row) { + return row[dependentVariableIndex].length() > 0; + } + + private boolean isRandomlyExcludedFromTraining() { + return random.nextDouble() * 100 > trainingPercent; + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/AnomalyDetectionAuditor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/AnomalyDetectionAuditor.java index 64397893048a3..48c5872b057ed 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/AnomalyDetectionAuditor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/AnomalyDetectionAuditor.java @@ -15,6 +15,6 @@ public class AnomalyDetectionAuditor extends AbstractAuditor { public AnomalyDetectionAuditor(Client client, String nodeName) { - super(client, nodeName, AuditorField.NOTIFICATIONS_INDEX, ML_ORIGIN, AnomalyDetectionAuditMessage.builder()); + super(client, nodeName, AuditorField.NOTIFICATIONS_INDEX, ML_ORIGIN, AnomalyDetectionAuditMessage::new); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilderTests.java index 11335236bfb8b..ac8657575e2dd 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilderTests.java @@ -13,10 +13,12 @@ import org.elasticsearch.mock.orig.Mockito; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; @@ -92,7 +94,9 @@ public void init() { jobConfigProvider, jobResultsProvider, datafeedConfigProvider, - jobResultsPersister); + 
jobResultsPersister, + Settings.EMPTY, + "test_node"); } public void testBuild_GivenScrollDatafeedAndNewJob() throws Exception { @@ -202,6 +206,46 @@ public void testBuild_GivenBucketsRequestFails() { verify(taskHandler).accept(error); } + public void testBuildGivenRemoteIndicesButNoRemoteSearching() throws Exception { + Settings settings = Settings.builder().put(RemoteClusterService.ENABLE_REMOTE_CLUSTERS.getKey(), false).build(); + datafeedJobBuilder = + new DatafeedJobBuilder( + client, + xContentRegistry(), + auditor, + System::currentTimeMillis, + jobConfigProvider, + jobResultsProvider, + datafeedConfigProvider, + jobResultsPersister, + settings, + "test_node"); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeField("time"); + Job.Builder jobBuilder = DatafeedManagerTests.createDatafeedJob(); + jobBuilder.setDataDescription(dataDescription); + jobBuilder.setCreateTime(new Date()); + DatafeedConfig.Builder datafeed = DatafeedManagerTests.createDatafeedConfig("datafeed1", jobBuilder.getId()); + datafeed.setIndices(Collections.singletonList("remotecluster:index-*")); + + AtomicBoolean wasHandlerCalled = new AtomicBoolean(false); + ActionListener datafeedJobHandler = ActionListener.wrap( + datafeedJob -> fail("datafeed builder did not fail when remote index was given and remote clusters were not enabled"), + e -> { + assertThat(e.getMessage(), equalTo(Messages.getMessage(Messages.DATAFEED_NEEDS_REMOTE_CLUSTER_SEARCH, + "datafeed1", + "[remotecluster:index-*]", + "test_node"))); + wasHandlerCalled.compareAndSet(false, true); + } + ); + + givenJob(jobBuilder); + givenDatafeed(datafeed); + datafeedJobBuilder.build("datafeed1", datafeedJobHandler); + assertBusy(() -> wasHandlerCalled.get()); + } + private void givenJob(Job.Builder job) { Mockito.doAnswer(invocationOnMock -> { @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorTests.java index 6d51923f68c75..db381373709c7 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorTests.java @@ -8,6 +8,7 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; @@ -17,6 +18,7 @@ import org.elasticsearch.xpack.core.ml.dataframe.analyses.Regression; import org.elasticsearch.xpack.ml.datafeed.extractor.fields.ExtractedField; import org.elasticsearch.xpack.ml.datafeed.extractor.fields.ExtractedFields; +import org.elasticsearch.xpack.ml.test.SearchHitBuilder; import java.util.ArrayList; import java.util.Arrays; @@ -26,6 +28,7 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -67,7 +70,7 @@ public void testDetect_GivenNumericFieldWithMultipleTypes() { 
assertThat(allFields.get(0).getExtractionMethod(), equalTo(ExtractedField.ExtractionMethod.DOC_VALUE)); } - public void testDetect_GivenNonNumericField() { + public void testDetect_GivenOutlierDetectionAndNonNumericField() { FieldCapabilitiesResponse fieldCapabilities = new MockFieldCapsResponseBuilder() .addAggregatableField("some_keyword", "keyword").build(); @@ -75,7 +78,8 @@ public void testDetect_GivenNonNumericField() { SOURCE_INDEX, buildOutlierDetectionConfig(), RESULTS_FIELD, false, 100, fieldCapabilities); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> extractedFieldsDetector.detect()); - assertThat(e.getMessage(), equalTo("No compatible fields could be detected in index [source_index]")); + assertThat(e.getMessage(), equalTo("No compatible fields could be detected in index [source_index]." + + " Supported types are [boolean, byte, double, float, half_float, integer, long, scaled_float, short].")); } public void testDetect_GivenOutlierDetectionAndFieldWithNumericAndNonNumericTypes() { @@ -86,7 +90,8 @@ public void testDetect_GivenOutlierDetectionAndFieldWithNumericAndNonNumericType SOURCE_INDEX, buildOutlierDetectionConfig(), RESULTS_FIELD, false, 100, fieldCapabilities); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> extractedFieldsDetector.detect()); - assertThat(e.getMessage(), equalTo("No compatible fields could be detected in index [source_index]")); + assertThat(e.getMessage(), equalTo("No compatible fields could be detected in index [source_index]. " + + "Supported types are [boolean, byte, double, float, half_float, integer, long, scaled_float, short].")); } public void testDetect_GivenOutlierDetectionAndMultipleFields() { @@ -94,6 +99,7 @@ public void testDetect_GivenOutlierDetectionAndMultipleFields() { .addAggregatableField("some_float", "float") .addAggregatableField("some_long", "long") .addAggregatableField("some_keyword", "keyword") + .addAggregatableField("some_boolean", "boolean") .build(); ExtractedFieldsDetector extractedFieldsDetector = new ExtractedFieldsDetector( @@ -101,9 +107,9 @@ public void testDetect_GivenOutlierDetectionAndMultipleFields() { ExtractedFields extractedFields = extractedFieldsDetector.detect(); List allFields = extractedFields.getAllFields(); - assertThat(allFields.size(), equalTo(2)); + assertThat(allFields.size(), equalTo(3)); assertThat(allFields.stream().map(ExtractedField::getName).collect(Collectors.toSet()), - containsInAnyOrder("some_float", "some_long")); + containsInAnyOrder("some_float", "some_long", "some_boolean")); assertThat(allFields.stream().map(ExtractedField::getExtractionMethod).collect(Collectors.toSet()), contains(equalTo(ExtractedField.ExtractionMethod.DOC_VALUE))); } @@ -113,7 +119,8 @@ public void testDetect_GivenRegressionAndMultipleFields() { .addAggregatableField("some_float", "float") .addAggregatableField("some_long", "long") .addAggregatableField("some_keyword", "keyword") - .addAggregatableField("foo", "keyword") + .addAggregatableField("some_boolean", "boolean") + .addAggregatableField("foo", "double") .build(); ExtractedFieldsDetector extractedFieldsDetector = new ExtractedFieldsDetector( @@ -121,9 +128,9 @@ public void testDetect_GivenRegressionAndMultipleFields() { ExtractedFields extractedFields = extractedFieldsDetector.detect(); List allFields = extractedFields.getAllFields(); - assertThat(allFields.size(), equalTo(4)); + assertThat(allFields.size(), equalTo(5)); 
assertThat(allFields.stream().map(ExtractedField::getName).collect(Collectors.toList()), - contains("foo", "some_float", "some_keyword", "some_long")); + containsInAnyOrder("foo", "some_float", "some_keyword", "some_long", "some_boolean")); assertThat(allFields.stream().map(ExtractedField::getExtractionMethod).collect(Collectors.toSet()), contains(equalTo(ExtractedField.ExtractionMethod.DOC_VALUE))); } @@ -139,7 +146,71 @@ public void testDetect_GivenRegressionAndRequiredFieldMissing() { SOURCE_INDEX, buildRegressionConfig("foo"), RESULTS_FIELD, false, 100, fieldCapabilities); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> extractedFieldsDetector.detect()); - assertThat(e.getMessage(), equalTo("required fields [foo] are missing")); + assertThat(e.getMessage(), equalTo("required field [foo] is missing; analysis requires fields [foo]")); + } + + public void testDetect_GivenRegressionAndRequiredFieldExcluded() { + FieldCapabilitiesResponse fieldCapabilities = new MockFieldCapsResponseBuilder() + .addAggregatableField("some_float", "float") + .addAggregatableField("some_long", "long") + .addAggregatableField("some_keyword", "keyword") + .addAggregatableField("foo", "float") + .build(); + FetchSourceContext analyzedFields = new FetchSourceContext(true, new String[0], new String[] {"foo"}); + + ExtractedFieldsDetector extractedFieldsDetector = new ExtractedFieldsDetector( + SOURCE_INDEX, buildRegressionConfig("foo", analyzedFields), RESULTS_FIELD, false, 100, fieldCapabilities); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> extractedFieldsDetector.detect()); + + assertThat(e.getMessage(), equalTo("required field [foo] is missing; analysis requires fields [foo]")); + } + + public void testDetect_GivenRegressionAndRequiredFieldNotIncluded() { + FieldCapabilitiesResponse fieldCapabilities = new MockFieldCapsResponseBuilder() + .addAggregatableField("some_float", "float") + .addAggregatableField("some_long", "long") + .addAggregatableField("some_keyword", "keyword") + .addAggregatableField("foo", "float") + .build(); + FetchSourceContext analyzedFields = new FetchSourceContext(true, new String[] {"some_float", "some_keyword"}, new String[0]); + + ExtractedFieldsDetector extractedFieldsDetector = new ExtractedFieldsDetector( + SOURCE_INDEX, buildRegressionConfig("foo", analyzedFields), RESULTS_FIELD, false, 100, fieldCapabilities); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> extractedFieldsDetector.detect()); + + assertThat(e.getMessage(), equalTo("required field [foo] is missing; analysis requires fields [foo]")); + } + + public void testDetect_GivenFieldIsBothIncludedAndExcluded() { + FieldCapabilitiesResponse fieldCapabilities = new MockFieldCapsResponseBuilder() + .addAggregatableField("foo", "float") + .addAggregatableField("bar", "float") + .build(); + FetchSourceContext analyzedFields = new FetchSourceContext(true, new String[] {"foo", "bar"}, new String[] {"foo"}); + + ExtractedFieldsDetector extractedFieldsDetector = new ExtractedFieldsDetector( + SOURCE_INDEX, buildOutlierDetectionConfig(analyzedFields), RESULTS_FIELD, false, 100, fieldCapabilities); + ExtractedFields extractedFields = extractedFieldsDetector.detect(); + + List allFields = extractedFields.getAllFields(); + assertThat(allFields.size(), equalTo(1)); + assertThat(allFields.stream().map(ExtractedField::getName).collect(Collectors.toList()), contains("bar")); + } + + public void 
testDetect_GivenRegressionAndRequiredFieldHasInvalidType() { + FieldCapabilitiesResponse fieldCapabilities = new MockFieldCapsResponseBuilder() + .addAggregatableField("some_float", "float") + .addAggregatableField("some_long", "long") + .addAggregatableField("some_keyword", "keyword") + .addAggregatableField("foo", "keyword") + .build(); + + ExtractedFieldsDetector extractedFieldsDetector = new ExtractedFieldsDetector( + SOURCE_INDEX, buildRegressionConfig("foo"), RESULTS_FIELD, false, 100, fieldCapabilities); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> extractedFieldsDetector.detect()); + + assertThat(e.getMessage(), equalTo("invalid types [keyword] for required field [foo]; " + + "expected types are [byte, double, float, half_float, integer, long, scaled_float, short]")); } public void testDetect_GivenIgnoredField() { @@ -150,7 +221,20 @@ public void testDetect_GivenIgnoredField() { SOURCE_INDEX, buildOutlierDetectionConfig(), RESULTS_FIELD, false, 100, fieldCapabilities); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> extractedFieldsDetector.detect()); - assertThat(e.getMessage(), equalTo("No compatible fields could be detected in index [source_index]")); + assertThat(e.getMessage(), equalTo("No compatible fields could be detected in index [source_index]. " + + "Supported types are [boolean, byte, double, float, half_float, integer, long, scaled_float, short].")); + } + + public void testDetect_GivenIncludedIgnoredField() { + FieldCapabilitiesResponse fieldCapabilities = new MockFieldCapsResponseBuilder() + .addAggregatableField("_id", "float").build(); + FetchSourceContext analyzedFields = new FetchSourceContext(true, new String[]{"_id"}, new String[0]); + + ExtractedFieldsDetector extractedFieldsDetector = new ExtractedFieldsDetector( + SOURCE_INDEX, buildOutlierDetectionConfig(analyzedFields), RESULTS_FIELD, false, 100, fieldCapabilities); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> extractedFieldsDetector.detect()); + + assertThat(e.getMessage(), equalTo("field [_id] cannot be analyzed")); } public void testDetect_ShouldSortFieldsAlphabetically() { @@ -177,7 +261,7 @@ public void testDetect_ShouldSortFieldsAlphabetically() { assertThat(extractedFieldNames, equalTo(sortedFields)); } - public void testDetectedExtractedFields_GivenIncludeWithMissingField() { + public void testDetect_GivenIncludeWithMissingField() { FieldCapabilitiesResponse fieldCapabilities = new MockFieldCapsResponseBuilder() .addAggregatableField("my_field1", "float") .addAggregatableField("my_field2", "float") @@ -192,7 +276,7 @@ public void testDetectedExtractedFields_GivenIncludeWithMissingField() { assertThat(e.getMessage(), equalTo("No field [your_field1] could be detected")); } - public void testDetectedExtractedFields_GivenExcludeAllValidFields() { + public void testDetect_GivenExcludeAllValidFields() { FieldCapabilitiesResponse fieldCapabilities = new MockFieldCapsResponseBuilder() .addAggregatableField("my_field1", "float") .addAggregatableField("my_field2", "float") @@ -203,15 +287,15 @@ public void testDetectedExtractedFields_GivenExcludeAllValidFields() { ExtractedFieldsDetector extractedFieldsDetector = new ExtractedFieldsDetector( SOURCE_INDEX, buildOutlierDetectionConfig(desiredFields), RESULTS_FIELD, false, 100, fieldCapabilities); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> extractedFieldsDetector.detect()); - 
assertThat(e.getMessage(), equalTo("No compatible fields could be detected in index [source_index]")); + assertThat(e.getMessage(), equalTo("No compatible fields could be detected in index [source_index]. " + + "Supported types are [boolean, byte, double, float, half_float, integer, long, scaled_float, short].")); } - public void testDetectedExtractedFields_GivenInclusionsAndExclusions() { + public void testDetect_GivenInclusionsAndExclusions() { FieldCapabilitiesResponse fieldCapabilities = new MockFieldCapsResponseBuilder() .addAggregatableField("my_field1_nope", "float") .addAggregatableField("my_field1", "float") .addAggregatableField("your_field2", "float") - .addAggregatableField("your_keyword", "keyword") .build(); FetchSourceContext desiredFields = new FetchSourceContext(true, new String[]{"your*", "my_*"}, new String[]{"*nope"}); @@ -225,7 +309,25 @@ public void testDetectedExtractedFields_GivenInclusionsAndExclusions() { assertThat(extractedFieldNames, equalTo(Arrays.asList("my_field1", "your_field2"))); } - public void testDetectedExtractedFields_GivenIndexContainsResultsField() { + public void testDetect_GivenIncludedFieldHasUnsupportedType() { + FieldCapabilitiesResponse fieldCapabilities = new MockFieldCapsResponseBuilder() + .addAggregatableField("my_field1_nope", "float") + .addAggregatableField("my_field1", "float") + .addAggregatableField("your_field2", "float") + .addAggregatableField("your_keyword", "keyword") + .build(); + + FetchSourceContext desiredFields = new FetchSourceContext(true, new String[]{"your*", "my_*"}, new String[]{"*nope"}); + + ExtractedFieldsDetector extractedFieldsDetector = new ExtractedFieldsDetector( + SOURCE_INDEX, buildOutlierDetectionConfig(desiredFields), RESULTS_FIELD, false, 100, fieldCapabilities); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> extractedFieldsDetector.detect()); + + assertThat(e.getMessage(), equalTo("field [your_keyword] has unsupported type [keyword]. 
" + + "Supported types are [boolean, byte, double, float, half_float, integer, long, scaled_float, short].")); + } + + public void testDetect_GivenIndexContainsResultsField() { FieldCapabilitiesResponse fieldCapabilities = new MockFieldCapsResponseBuilder() .addAggregatableField(RESULTS_FIELD, "float") .addAggregatableField("my_field1", "float") @@ -241,7 +343,7 @@ public void testDetectedExtractedFields_GivenIndexContainsResultsField() { "please set a different results_field")); } - public void testDetectedExtractedFields_GivenIndexContainsResultsFieldAndTaskIsRestarting() { + public void testDetect_GivenIndexContainsResultsFieldAndTaskIsRestarting() { FieldCapabilitiesResponse fieldCapabilities = new MockFieldCapsResponseBuilder() .addAggregatableField(RESULTS_FIELD + ".outlier_score", "float") .addAggregatableField("my_field1", "float") @@ -258,7 +360,40 @@ public void testDetectedExtractedFields_GivenIndexContainsResultsFieldAndTaskIsR assertThat(extractedFieldNames, equalTo(Arrays.asList("my_field1", "your_field2"))); } - public void testDetectedExtractedFields_NullResultsField() { + public void testDetect_GivenIncludedResultsField() { + FieldCapabilitiesResponse fieldCapabilities = new MockFieldCapsResponseBuilder() + .addAggregatableField(RESULTS_FIELD, "float") + .addAggregatableField("my_field1", "float") + .addAggregatableField("your_field2", "float") + .addAggregatableField("your_keyword", "keyword") + .build(); + FetchSourceContext analyzedFields = new FetchSourceContext(true, new String[]{RESULTS_FIELD}, new String[0]); + + ExtractedFieldsDetector extractedFieldsDetector = new ExtractedFieldsDetector( + SOURCE_INDEX, buildOutlierDetectionConfig(analyzedFields), RESULTS_FIELD, false, 100, fieldCapabilities); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> extractedFieldsDetector.detect()); + + assertThat(e.getMessage(), equalTo("A field that matches the dest.results_field [ml] already exists; " + + "please set a different results_field")); + } + + public void testDetect_GivenIncludedResultsFieldAndTaskIsRestarting() { + FieldCapabilitiesResponse fieldCapabilities = new MockFieldCapsResponseBuilder() + .addAggregatableField(RESULTS_FIELD + ".outlier_score", "float") + .addAggregatableField("my_field1", "float") + .addAggregatableField("your_field2", "float") + .addAggregatableField("your_keyword", "keyword") + .build(); + FetchSourceContext analyzedFields = new FetchSourceContext(true, new String[]{RESULTS_FIELD}, new String[0]); + + ExtractedFieldsDetector extractedFieldsDetector = new ExtractedFieldsDetector( + SOURCE_INDEX, buildOutlierDetectionConfig(analyzedFields), RESULTS_FIELD, true, 100, fieldCapabilities); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> extractedFieldsDetector.detect()); + + assertThat(e.getMessage(), equalTo("No field [ml] could be detected")); + } + + public void testDetect_NullResultsField() { FieldCapabilitiesResponse fieldCapabilities = new MockFieldCapsResponseBuilder() .addAggregatableField(RESULTS_FIELD, "float") .addAggregatableField("my_field1", "float") @@ -275,7 +410,7 @@ public void testDetectedExtractedFields_NullResultsField() { assertThat(extractedFieldNames, equalTo(Arrays.asList(RESULTS_FIELD, "my_field1", "your_field2"))); } - public void testDetectedExtractedFields_GivenLessFieldsThanDocValuesLimit() { + public void testDetect_GivenLessFieldsThanDocValuesLimit() { FieldCapabilitiesResponse fieldCapabilities = new MockFieldCapsResponseBuilder() 
.addAggregatableField("field_1", "float") .addAggregatableField("field_2", "float") @@ -294,7 +429,7 @@ public void testDetectedExtractedFields_GivenLessFieldsThanDocValuesLimit() { contains(equalTo(ExtractedField.ExtractionMethod.DOC_VALUE))); } - public void testDetectedExtractedFields_GivenEqualFieldsToDocValuesLimit() { + public void testDetect_GivenEqualFieldsToDocValuesLimit() { FieldCapabilitiesResponse fieldCapabilities = new MockFieldCapsResponseBuilder() .addAggregatableField("field_1", "float") .addAggregatableField("field_2", "float") @@ -313,7 +448,7 @@ public void testDetectedExtractedFields_GivenEqualFieldsToDocValuesLimit() { contains(equalTo(ExtractedField.ExtractionMethod.DOC_VALUE))); } - public void testDetectedExtractedFields_GivenMoreFieldsThanDocValuesLimit() { + public void testDetect_GivenMoreFieldsThanDocValuesLimit() { FieldCapabilitiesResponse fieldCapabilities = new MockFieldCapsResponseBuilder() .addAggregatableField("field_1", "float") .addAggregatableField("field_2", "float") @@ -332,6 +467,37 @@ public void testDetectedExtractedFields_GivenMoreFieldsThanDocValuesLimit() { contains(equalTo(ExtractedField.ExtractionMethod.SOURCE))); } + public void testDetect_GivenBooleanField() { + FieldCapabilitiesResponse fieldCapabilities = new MockFieldCapsResponseBuilder() + .addAggregatableField("some_boolean", "boolean") + .build(); + + ExtractedFieldsDetector extractedFieldsDetector = new ExtractedFieldsDetector( + SOURCE_INDEX, buildOutlierDetectionConfig(), RESULTS_FIELD, false, 100, fieldCapabilities); + ExtractedFields extractedFields = extractedFieldsDetector.detect(); + + List allFields = extractedFields.getAllFields(); + assertThat(allFields.size(), equalTo(1)); + ExtractedField booleanField = allFields.get(0); + assertThat(booleanField.getTypes(), contains("boolean")); + assertThat(booleanField.getExtractionMethod(), equalTo(ExtractedField.ExtractionMethod.DOC_VALUE)); + + SearchHit hit = new SearchHitBuilder(42).addField("some_boolean", true).build(); + Object[] values = booleanField.value(hit); + assertThat(values.length, equalTo(1)); + assertThat(values[0], equalTo(1)); + + hit = new SearchHitBuilder(42).addField("some_boolean", false).build(); + values = booleanField.value(hit); + assertThat(values.length, equalTo(1)); + assertThat(values[0], equalTo(0)); + + hit = new SearchHitBuilder(42).addField("some_boolean", Arrays.asList(false, true, false)).build(); + values = booleanField.value(hit); + assertThat(values.length, equalTo(3)); + assertThat(values, arrayContaining(0, 1, 0)); + } + private static DataFrameAnalyticsConfig buildOutlierDetectionConfig() { return buildOutlierDetectionConfig(null); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManagerTests.java index 5141709b17e4f..a6ed542204326 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManagerTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ml.dataframe.process; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.unit.ByteSizeValue; import 
org.elasticsearch.common.util.concurrent.EsExecutors; @@ -65,7 +66,6 @@ public void setUpMocks() { executorServiceForJob = EsExecutors.newDirectExecutorService(); executorServiceForProcess = mock(ExecutorService.class); process = mock(AnalyticsProcess.class); - when(process.isProcessAlive()).thenReturn(true); when(process.readAnalyticsResults()).thenReturn(List.of(PROCESS_RESULT).iterator()); processFactory = mock(AnalyticsProcessFactory.class); when(processFactory.createAnalyticsProcess(anyString(), any(), any(), any())).thenReturn(process); @@ -93,9 +93,8 @@ public void testRunJob_EmptyDataFrame() { verifyNoMoreInteractions(process, listener); } - public void testRunJob_ProcessNotAlive() { - when(process.isProcessAlive()).thenReturn(false); - when(process.readError()).thenReturn("Error from inside the process"); + public void testRunJob_NoResults() throws Exception { + when(process.readAnalyticsResults()).thenReturn(List.of().iterator()); processManager.runJobAsync(TASK_ID, dataFrameAnalyticsConfig, dataExtractorFactory, listener); @@ -103,16 +102,18 @@ public void testRunJob_ProcessNotAlive() { ElasticsearchException exception = (ElasticsearchException) exceptionCaptor.getValue(); assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); assertThat(exception.getMessage(), containsString(TASK_ID)); - assertThat(exception.getMessage(), containsString("Error while starting process")); - assertThat(exception.getMessage(), containsString("Error from inside the process")); + assertThat(exception.getMessage(), containsString("no results")); - verify(process).isProcessAlive(); - verify(process).readError(); + InOrder inOrder = inOrder(process); + inOrder.verify(process).readAnalyticsResults(); + inOrder.verify(process).readError(); + inOrder.verify(process).consumeAndCloseOutputStream(); + inOrder.verify(process).close(); verifyNoMoreInteractions(process, listener); } - public void testRunJob_NoResults() throws Exception { - when(process.readAnalyticsResults()).thenReturn(List.of().iterator()); + public void testRunJob_MultipleResults() throws Exception { + when(process.readAnalyticsResults()).thenReturn(List.of(PROCESS_RESULT, PROCESS_RESULT).iterator()); processManager.runJobAsync(TASK_ID, dataFrameAnalyticsConfig, dataExtractorFactory, listener); @@ -120,18 +121,18 @@ public void testRunJob_NoResults() throws Exception { ElasticsearchException exception = (ElasticsearchException) exceptionCaptor.getValue(); assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); assertThat(exception.getMessage(), containsString(TASK_ID)); - assertThat(exception.getMessage(), containsString("no results")); + assertThat(exception.getMessage(), containsString("more than one result")); InOrder inOrder = inOrder(process); - inOrder.verify(process).isProcessAlive(); inOrder.verify(process).readAnalyticsResults(); + inOrder.verify(process).readError(); inOrder.verify(process).consumeAndCloseOutputStream(); inOrder.verify(process).close(); verifyNoMoreInteractions(process, listener); } - public void testRunJob_MultipleResults() throws Exception { - when(process.readAnalyticsResults()).thenReturn(List.of(PROCESS_RESULT, PROCESS_RESULT).iterator()); + public void testRunJob_OneResult_ParseException() throws Exception { + when(process.readAnalyticsResults()).thenThrow(new ElasticsearchParseException("cannot parse result")); processManager.runJobAsync(TASK_ID, dataFrameAnalyticsConfig, dataExtractorFactory, listener); @@ -139,15 +140,14 @@ public void testRunJob_MultipleResults() 
throws Exception { ElasticsearchException exception = (ElasticsearchException) exceptionCaptor.getValue(); assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); assertThat(exception.getMessage(), containsString(TASK_ID)); - assertThat(exception.getMessage(), containsString("more than one result")); + assertThat(exception.getMessage(), containsString("cannot parse result")); InOrder inOrder = inOrder(process); - inOrder.verify(process).isProcessAlive(); inOrder.verify(process).readAnalyticsResults(); + inOrder.verify(process).readError(); inOrder.verify(process).consumeAndCloseOutputStream(); inOrder.verify(process).close(); verifyNoMoreInteractions(process, listener); - } public void testRunJob_FailsOnClose() throws Exception { @@ -162,10 +162,32 @@ public void testRunJob_FailsOnClose() throws Exception { assertThat(exception.getMessage(), containsString("Error while closing process")); InOrder inOrder = inOrder(process); - inOrder.verify(process).isProcessAlive(); inOrder.verify(process).readAnalyticsResults(); inOrder.verify(process).consumeAndCloseOutputStream(); inOrder.verify(process).close(); + inOrder.verify(process).readError(); + verifyNoMoreInteractions(process, listener); + } + + public void testRunJob_FailsOnClose_ProcessReportsError() throws Exception { + doThrow(ExceptionsHelper.serverError("some LOG(ERROR) lines coming from cpp process")).when(process).close(); + when(process.readError()).thenReturn("Error from inside the process"); + + processManager.runJobAsync(TASK_ID, dataFrameAnalyticsConfig, dataExtractorFactory, listener); + + verify(listener).onFailure(exceptionCaptor.capture()); + ElasticsearchException exception = (ElasticsearchException) exceptionCaptor.getValue(); + assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); + assertThat(exception.getMessage(), containsString(TASK_ID)); + assertThat(exception.getMessage(), containsString("Error while closing process")); + assertThat(exception.getMessage(), containsString("some LOG(ERROR) lines coming from cpp process")); + assertThat(exception.getMessage(), containsString("Error from inside the process")); + + InOrder inOrder = inOrder(process); + inOrder.verify(process).readAnalyticsResults(); + inOrder.verify(process).consumeAndCloseOutputStream(); + inOrder.verify(process).close(); + inOrder.verify(process).readError(); verifyNoMoreInteractions(process, listener); } @@ -177,7 +199,6 @@ public void testRunJob_Ok() throws Exception { assertThat(result, equalTo(PROCESS_RESULT)); InOrder inOrder = inOrder(process); - inOrder.verify(process).isProcessAlive(); inOrder.verify(process).readAnalyticsResults(); inOrder.verify(process).consumeAndCloseOutputStream(); inOrder.verify(process).close(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/customprocessing/RegressionCustomProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/customprocessing/RegressionCustomProcessorTests.java new file mode 100644 index 0000000000000..adcd845059dac --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/customprocessing/RegressionCustomProcessorTests.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
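A side note on the verification style used in the tests above: Mockito's InOrder asserts the relative order of calls on a mock, which is what lets these tests pin down the sequence of readAnalyticsResults(), readError(), consumeAndCloseOutputStream() and close(). A minimal stand-alone reminder of the mechanism; the NativeProcess interface here is a made-up stand-in, not the plugin's AnalyticsProcess mock.

import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;

import org.mockito.InOrder;

public class InOrderSketch {

    // A made-up collaborator, standing in for the analytics process mock.
    interface NativeProcess {
        void readResults();
        void close();
    }

    public static void main(String[] args) {
        NativeProcess process = mock(NativeProcess.class);

        // Code under test would normally make these calls; invoke them directly for the demo.
        process.readResults();
        process.close();

        // Verifies the calls happened in exactly this relative order.
        InOrder inOrder = inOrder(process);
        inOrder.verify(process).readResults();
        inOrder.verify(process).close();
    }
}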
+ */ +package org.elasticsearch.xpack.ml.dataframe.process.customprocessing; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.dataframe.analyses.Regression; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.stream.IntStream; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.both; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThan; + +public class RegressionCustomProcessorTests extends ESTestCase { + + private List fields; + private int dependentVariableIndex; + private String dependentVariable; + + @Before + public void setUpTests() { + int fieldCount = randomIntBetween(1, 5); + fields = new ArrayList<>(fieldCount); + for (int i = 0; i < fieldCount; i++) { + fields.add(randomAlphaOfLength(10)); + } + dependentVariableIndex = randomIntBetween(0, fieldCount - 1); + dependentVariable = fields.get(dependentVariableIndex); + } + + public void testProcess_GivenRowsWithoutDependentVariableValue() { + CustomProcessor customProcessor = new RegressionCustomProcessor(fields, regression(dependentVariable, 50.0)); + + for (int i = 0; i < 100; i++) { + String[] row = new String[fields.size()]; + for (int fieldIndex = 0; fieldIndex < fields.size(); fieldIndex++) { + String value = fieldIndex == dependentVariableIndex ? "" : randomAlphaOfLength(10); + row[fieldIndex] = value; + } + + String[] processedRow = Arrays.copyOf(row, row.length); + customProcessor.process(processedRow); + + // As all these rows have no dependent variable value, they're not for training and should be unaffected + assertThat(Arrays.equals(processedRow, row), is(true)); + } + } + + public void testProcess_GivenRowsWithDependentVariableValue_AndTrainingPercentIsHundred() { + CustomProcessor customProcessor = new RegressionCustomProcessor(fields, regression(dependentVariable, 100.0)); + + for (int i = 0; i < 100; i++) { + String[] row = new String[fields.size()]; + for (int fieldIndex = 0; fieldIndex < fields.size(); fieldIndex++) { + String value = fieldIndex == dependentVariableIndex ? 
"" : randomAlphaOfLength(10); + row[fieldIndex] = value; + } + + String[] processedRow = Arrays.copyOf(row, row.length); + customProcessor.process(processedRow); + + // We should pick them all as training percent is 100 + assertThat(Arrays.equals(processedRow, row), is(true)); + } + } + + public void testProcess_GivenRowsWithDependentVariableValue_AndTrainingPercentIsRandom() { + double trainingPercent = randomDoubleBetween(1.0, 100.0, true); + double trainingFraction = trainingPercent / 100; + CustomProcessor customProcessor = new RegressionCustomProcessor(fields, regression(dependentVariable, trainingPercent)); + + int runCount = 20; + int rowsCount = 1000; + int[] trainingRowsPerRun = new int[runCount]; + for (int testIndex = 0; testIndex < runCount; testIndex++) { + int trainingRows = 0; + for (int i = 0; i < rowsCount; i++) { + String[] row = new String[fields.size()]; + for (int fieldIndex = 0; fieldIndex < fields.size(); fieldIndex++) { + row[fieldIndex] = randomAlphaOfLength(10); + } + + String[] processedRow = Arrays.copyOf(row, row.length); + customProcessor.process(processedRow); + + for (int fieldIndex = 0; fieldIndex < fields.size(); fieldIndex++) { + if (fieldIndex != dependentVariableIndex) { + assertThat(processedRow[fieldIndex], equalTo(row[fieldIndex])); + } + } + if (processedRow[dependentVariableIndex].length() > 0) { + assertThat(processedRow[dependentVariableIndex], equalTo(row[dependentVariableIndex])); + trainingRows++; + } + } + trainingRowsPerRun[testIndex] = trainingRows; + } + + double meanTrainingRows = IntStream.of(trainingRowsPerRun).average().getAsDouble(); + + // Now we need to calculate sensible bounds to assert against. + // We'll use 5 variances which should mean the test only fails once in 7M + // And, because we're doing multiple runs, we'll divide the variance with the number of runs to narrow the bounds + double expectedTrainingRows = trainingFraction * rowsCount; + double variance = rowsCount * (Math.pow(1 - trainingFraction, 2) * trainingFraction + + Math.pow(trainingFraction, 2) * (1 - trainingFraction)); + double lowerBound = expectedTrainingRows - 5 * Math.sqrt(variance / runCount); + double upperBound = expectedTrainingRows + 5 * Math.sqrt(variance / runCount); + + assertThat("Mean training rows [" + meanTrainingRows + "] was not within expected bounds of [" + lowerBound + ", " + + upperBound + "] given training fraction was [" + trainingFraction + "]", + meanTrainingRows, is(both(greaterThan(lowerBound)).and(lessThan(upperBound)))); + } + + public void testProcess_ShouldHaveAtLeastOneTrainingRow() { + CustomProcessor customProcessor = new RegressionCustomProcessor(fields, regression(dependentVariable, 1.0)); + + // We have some non-training rows and then a training row to check + // we maintain the first training row and not just the first row + for (int i = 0; i < 10; i++) { + String[] row = new String[fields.size()]; + for (int fieldIndex = 0; fieldIndex < fields.size(); fieldIndex++) { + if (i < 9 && fieldIndex == dependentVariableIndex) { + row[fieldIndex] = ""; + } else { + row[fieldIndex] = randomAlphaOfLength(10); + } + } + + String[] processedRow = Arrays.copyOf(row, row.length); + customProcessor.process(processedRow); + + assertThat(Arrays.equals(processedRow, row), is(true)); + } + } + + private static Regression regression(String dependentVariable, double trainingPercent) { + return new Regression(dependentVariable, null, null, null, null, null, null, trainingPercent); + } +} diff --git 
a/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java b/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java index 6ef9436c81579..061f6f9968feb 100644 --- a/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java +++ b/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java @@ -56,6 +56,7 @@ protected Collection> nodePlugins() { return plugins; } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/46174") public void testPinnedPromotions() throws Exception { assertAcked(prepareCreate("test") .addMapping("type1", diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheck.java index 8f5012e1ecaf1..eb12d9ef68855 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheck.java @@ -31,18 +31,18 @@ class PkiRealmBootstrapCheck implements BootstrapCheck { } /** - * If a PKI realm is enabled, checks to see if SSL and Client authentication are enabled on at + * If a PKI realm is enabled, and does not support delegation(default), checks to see if SSL and Client authentication are enabled on at * least one network communication layer. */ @Override public BootstrapCheckResult check(BootstrapContext context) { final Settings settings = context.settings(); final Map realms = RealmSettings.getRealmSettings(settings); - final boolean pkiRealmEnabled = realms.entrySet().stream() + final boolean pkiRealmEnabledWithoutDelegation = realms.entrySet().stream() .filter(e -> PkiRealmSettings.TYPE.equals(e.getKey().getType())) .map(Map.Entry::getValue) - .anyMatch(s -> s.getAsBoolean("enabled", true)); - if (pkiRealmEnabled) { + .anyMatch(s -> s.getAsBoolean("enabled", true) && (false == s.getAsBoolean("delegation.enabled", false))); + if (pkiRealmEnabledWithoutDelegation) { for (String contextName : getSslContextNames(settings)) { final SSLConfiguration configuration = sslService.getSSLConfiguration(contextName); if (sslService.isSSLClientAuthEnabled(configuration)) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 07c435c4311ec..408288a3f8dec 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -77,6 +77,7 @@ import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.security.SecuritySettings; import org.elasticsearch.xpack.core.security.action.CreateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationAction; import org.elasticsearch.xpack.core.security.action.GetApiKeyAction; import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyAction; import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectAuthenticateAction; @@ -136,6 +137,7 @@ import org.elasticsearch.xpack.core.ssl.action.TransportGetCertificateInfoAction; import 
org.elasticsearch.xpack.core.ssl.rest.RestGetCertificateInfoAction; import org.elasticsearch.xpack.security.action.TransportCreateApiKeyAction; +import org.elasticsearch.xpack.security.action.TransportDelegatePkiAuthenticationAction; import org.elasticsearch.xpack.security.action.TransportGetApiKeyAction; import org.elasticsearch.xpack.security.action.TransportInvalidateApiKeyAction; import org.elasticsearch.xpack.security.action.filter.SecurityActionFilter; @@ -197,6 +199,7 @@ import org.elasticsearch.xpack.security.ingest.SetSecurityUserProcessor; import org.elasticsearch.xpack.security.rest.SecurityRestFilter; import org.elasticsearch.xpack.security.rest.action.RestAuthenticateAction; +import org.elasticsearch.xpack.security.rest.action.RestDelegatePkiAuthenticationAction; import org.elasticsearch.xpack.security.rest.action.apikey.RestCreateApiKeyAction; import org.elasticsearch.xpack.security.rest.action.apikey.RestGetApiKeyAction; import org.elasticsearch.xpack.security.rest.action.apikey.RestInvalidateApiKeyAction; @@ -729,6 +732,7 @@ public void onIndexModule(IndexModule module) { new ActionHandler<>(CreateApiKeyAction.INSTANCE, TransportCreateApiKeyAction.class), new ActionHandler<>(InvalidateApiKeyAction.INSTANCE, TransportInvalidateApiKeyAction.class), new ActionHandler<>(GetApiKeyAction.INSTANCE, TransportGetApiKeyAction.class), + new ActionHandler<>(DelegatePkiAuthenticationAction.INSTANCE, TransportDelegatePkiAuthenticationAction.class), usageAction, infoAction); } @@ -782,7 +786,8 @@ public List getRestHandlers(Settings settings, RestController restC new RestDeletePrivilegesAction(settings, restController, getLicenseState()), new RestCreateApiKeyAction(settings, restController, getLicenseState()), new RestInvalidateApiKeyAction(settings, restController, getLicenseState()), - new RestGetApiKeyAction(settings, restController, getLicenseState()) + new RestGetApiKeyAction(settings, restController, getLicenseState()), + new RestDelegatePkiAuthenticationAction(settings, restController, getLicenseState()) ); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportDelegatePkiAuthenticationAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportDelegatePkiAuthenticationAction.java new file mode 100644 index 0000000000000..abfacc6cc2aa2 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportDelegatePkiAuthenticationAction.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
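Returning briefly to the PkiRealmBootstrapCheck change earlier in this patch: the check now only trips for a PKI realm that is both enabled and not delegating. A compact, stand-alone sketch of that predicate, with plain Map/String types standing in for RealmConfig and Settings (the setting keys and defaults mirror the diff; everything else is illustrative):

import java.util.Map;

public class PkiCheckSketch {

    // True if any realm of type "pki" is enabled (default true) and does not
    // have delegation.enabled set (default false), mirroring the new check.
    static boolean pkiEnabledWithoutDelegation(Map<String, Map<String, String>> realmSettingsByType) {
        return realmSettingsByType.entrySet().stream()
            .filter(e -> "pki".equals(e.getKey()))
            .map(Map.Entry::getValue)
            .anyMatch(s -> Boolean.parseBoolean(s.getOrDefault("enabled", "true"))
                && Boolean.parseBoolean(s.getOrDefault("delegation.enabled", "false")) == false);
    }

    public static void main(String[] args) {
        System.out.println(pkiEnabledWithoutDelegation(Map.of("pki", Map.of("enabled", "true"))));            // true
        System.out.println(pkiEnabledWithoutDelegation(Map.of("pki", Map.of("delegation.enabled", "true")))); // false
    }
}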
+ */ + +package org.elasticsearch.xpack.security.action; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationAction; +import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationRequest; +import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationResponse; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.security.authc.AuthenticationService; +import org.elasticsearch.xpack.security.authc.TokenService; +import org.elasticsearch.xpack.security.authc.pki.X509AuthenticationToken; + +import java.security.cert.X509Certificate; +import java.util.Map; + +/** + * Implements the exchange of an {@code X509Certificate} chain into an access token. The certificate chain is represented as an array where + * the first element is the target certificate containing the subject distinguished name that is requesting access. This may be followed by + * additional certificates, with each subsequent certificate being the one used to certify the previous one. The certificate chain is + * validated according to RFC 5280, by sequentially considering the trust configuration of every installed {@code PkiRealm} that has + * {@code PkiRealmSettings#DELEGATION_ENABLED_SETTING} set to {@code true} (default is {@code false}). A successfully trusted target + * certificate is also subject to the validation of the subject distinguished name according to that respective's realm + * {@code PkiRealmSettings#USERNAME_PATTERN_SETTING}. + * + * IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not + * validated. This is part of the TLS authentication process and it is delegated to the proxy calling this API. The proxy is trusted + * to have performed the TLS authentication, and this API translates that authentication into an Elasticsearch access token. 
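The javadoc above leaves the chain ordering (end-entity certificate first, each later certificate certifying the previous one) to the caller. The sketch below assembles such a chain with the JDK's CertificateFactory; the file names are placeholders and the actual request plumbing is omitted.

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.List;

public class DelegatedChainSketch {

    public static void main(String[] args) throws Exception {
        CertificateFactory factory = CertificateFactory.getInstance("X.509");
        List<X509Certificate> chain = new ArrayList<>();
        // Order matters: the target (end-entity) certificate comes first,
        // followed by the certificate that signed it, and so on up the chain.
        for (String file : new String[] {"client.crt", "intermediate-ca.crt", "root-ca.crt"}) {
            try (InputStream in = Files.newInputStream(Path.of(file))) {
                chain.add((X509Certificate) factory.generateCertificate(in));
            }
        }
        // This ordered chain would be the payload handed to the delegate PKI request.
        chain.forEach(cert -> System.out.println(cert.getSubjectX500Principal()));
    }
}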
+ */ +public final class TransportDelegatePkiAuthenticationAction + extends HandledTransportAction { + + private static final Logger logger = LogManager.getLogger(TransportDelegatePkiAuthenticationAction.class); + + private final ThreadPool threadPool; + private final AuthenticationService authenticationService; + private final TokenService tokenService; + + @Inject + public TransportDelegatePkiAuthenticationAction(ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, + AuthenticationService authenticationService, TokenService tokenService) { + super(DelegatePkiAuthenticationAction.NAME, transportService, actionFilters, DelegatePkiAuthenticationRequest::new); + this.threadPool = threadPool; + this.authenticationService = authenticationService; + this.tokenService = tokenService; + } + + @Override + protected void doExecute(Task task, DelegatePkiAuthenticationRequest request, + ActionListener listener) { + final ThreadContext threadContext = threadPool.getThreadContext(); + Authentication delegateeAuthentication = Authentication.getAuthentication(threadContext); + if (delegateeAuthentication == null) { + listener.onFailure(new IllegalStateException("Delegatee authentication cannot be null")); + return; + } + final X509AuthenticationToken x509DelegatedToken = X509AuthenticationToken + .delegated(request.getCertificateChain().toArray(new X509Certificate[0]), delegateeAuthentication); + logger.trace("Attempting to authenticate delegated x509Token [{}]", x509DelegatedToken); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + authenticationService.authenticate(DelegatePkiAuthenticationAction.NAME, request, x509DelegatedToken, + ActionListener.wrap(authentication -> { + assert authentication != null : "authentication should never be null at this point"; + tokenService.createOAuth2Tokens(authentication, delegateeAuthentication, Map.of(), false, + ActionListener.wrap(tuple -> { + final TimeValue expiresIn = tokenService.getExpirationDelay(); + listener.onResponse(new DelegatePkiAuthenticationResponse(tuple.v1(), expiresIn)); + }, listener::onFailure)); + }, e -> { + logger.debug((Supplier) () -> new ParameterizedMessage("Delegated x509Token [{}] could not be authenticated", + x509DelegatedToken), e); + listener.onFailure(e); + })); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGetApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGetApiKeyAction.java index 403ce482805a2..994cb90b5f2b6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGetApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGetApiKeyAction.java @@ -9,38 +9,51 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.action.GetApiKeyAction; import org.elasticsearch.xpack.core.security.action.GetApiKeyRequest; import org.elasticsearch.xpack.core.security.action.GetApiKeyResponse; +import 
org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.security.authc.ApiKeyService; public final class TransportGetApiKeyAction extends HandledTransportAction { private final ApiKeyService apiKeyService; + private final SecurityContext securityContext; @Inject - public TransportGetApiKeyAction(TransportService transportService, ActionFilters actionFilters, ApiKeyService apiKeyService) { + public TransportGetApiKeyAction(TransportService transportService, ActionFilters actionFilters, ApiKeyService apiKeyService, + SecurityContext context) { super(GetApiKeyAction.NAME, transportService, actionFilters, (Writeable.Reader) GetApiKeyRequest::new); this.apiKeyService = apiKeyService; + this.securityContext = context; } @Override protected void doExecute(Task task, GetApiKeyRequest request, ActionListener listener) { - if (Strings.hasText(request.getRealmName()) || Strings.hasText(request.getUserName())) { - apiKeyService.getApiKeysForRealmAndUser(request.getRealmName(), request.getUserName(), listener); - } else if (Strings.hasText(request.getApiKeyId())) { - apiKeyService.getApiKeyForApiKeyId(request.getApiKeyId(), listener); - } else if (Strings.hasText(request.getApiKeyName())) { - apiKeyService.getApiKeyForApiKeyName(request.getApiKeyName(), listener); - } else { - listener.onFailure(new IllegalArgumentException("One of [api key id, api key name, username, realm name] must be specified")); + String apiKeyId = request.getApiKeyId(); + String apiKeyName = request.getApiKeyName(); + String username = request.getUserName(); + String realm = request.getRealmName(); + + final Authentication authentication = securityContext.getAuthentication(); + if (authentication == null) { + listener.onFailure(new IllegalStateException("authentication is required")); + } + if (request.ownedByAuthenticatedUser()) { + assert username == null; + assert realm == null; + // restrict username and realm to current authenticated user. 
+ username = authentication.getUser().principal(); + realm = ApiKeyService.getCreatorRealmName(authentication); } + + apiKeyService.getApiKeys(realm, username, apiKeyName, apiKeyId, listener); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportInvalidateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportInvalidateApiKeyAction.java index 886d15b1f257d..e175ae4b33a05 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportInvalidateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportInvalidateApiKeyAction.java @@ -9,36 +9,51 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyAction; import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyResponse; +import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.security.authc.ApiKeyService; public final class TransportInvalidateApiKeyAction extends HandledTransportAction { private final ApiKeyService apiKeyService; + private final SecurityContext securityContext; @Inject - public TransportInvalidateApiKeyAction(TransportService transportService, ActionFilters actionFilters, ApiKeyService apiKeyService) { + public TransportInvalidateApiKeyAction(TransportService transportService, ActionFilters actionFilters, ApiKeyService apiKeyService, + SecurityContext context) { super(InvalidateApiKeyAction.NAME, transportService, actionFilters, - (Writeable.Reader) InvalidateApiKeyRequest::new); + (Writeable.Reader) InvalidateApiKeyRequest::new); this.apiKeyService = apiKeyService; + this.securityContext = context; } @Override protected void doExecute(Task task, InvalidateApiKeyRequest request, ActionListener listener) { - if (Strings.hasText(request.getRealmName()) || Strings.hasText(request.getUserName())) { - apiKeyService.invalidateApiKeysForRealmAndUser(request.getRealmName(), request.getUserName(), listener); - } else if (Strings.hasText(request.getId())) { - apiKeyService.invalidateApiKeyForApiKeyId(request.getId(), listener); - } else { - apiKeyService.invalidateApiKeyForApiKeyName(request.getName(), listener); + String apiKeyId = request.getId(); + String apiKeyName = request.getName(); + String username = request.getUserName(); + String realm = request.getRealmName(); + + final Authentication authentication = securityContext.getAuthentication(); + if (authentication == null) { + listener.onFailure(new IllegalStateException("authentication is required")); + } + if (request.ownedByAuthenticatedUser()) { + assert username == null; + assert realm == null; + // restrict username and realm to current authenticated user. 
+ username = authentication.getUser().principal(); + realm = ApiKeyService.getCreatorRealmName(authentication); } + + apiKeyService.invalidateApiKeys(realm, username, apiKeyName, apiKeyId, listener); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index a660fe641456a..72b5217692a3a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -71,7 +71,6 @@ import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.support.SecurityIndexManager; -import javax.crypto.SecretKeyFactory; import java.io.Closeable; import java.io.IOException; import java.io.UncheckedIOException; @@ -95,6 +94,8 @@ import java.util.function.Function; import java.util.stream.Collectors; +import javax.crypto.SecretKeyFactory; + import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.search.SearchService.DEFAULT_KEEPALIVE_SETTING; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; @@ -105,10 +106,14 @@ public class ApiKeyService { private static final Logger logger = LogManager.getLogger(ApiKeyService.class); private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); - static final String API_KEY_ID_KEY = "_security_api_key_id"; + public static final String API_KEY_ID_KEY = "_security_api_key_id"; + public static final String API_KEY_REALM_NAME = "_es_api_key"; + public static final String API_KEY_REALM_TYPE = "_es_api_key"; + public static final String API_KEY_CREATOR_REALM = "_security_api_key_creator_realm"; static final String API_KEY_ROLE_DESCRIPTORS_KEY = "_security_api_key_role_descriptors"; static final String API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY = "_security_api_key_limited_by_role_descriptors"; + public static final Setting PASSWORD_HASHING_ALGORITHM = new Setting<>( "xpack.security.authc.api_key.hashing.algorithm", "pbkdf2", Function.identity(), v -> { if (Hasher.getAvailableAlgoStoredHash().contains(v.toLowerCase(Locale.ROOT)) == false) { @@ -517,6 +522,7 @@ private void validateApiKeyExpiration(Map source, ApiKeyCredenti : limitedByRoleDescriptors.keySet().toArray(Strings.EMPTY_ARRAY); final User apiKeyUser = new User(principal, roleNames, null, null, metadata, true); final Map authResultMetadata = new HashMap<>(); + authResultMetadata.put(API_KEY_CREATOR_REALM, creator.get("realm")); authResultMetadata.put(API_KEY_ROLE_DESCRIPTORS_KEY, roleDescriptors); authResultMetadata.put(API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY, limitedByRoleDescriptors); authResultMetadata.put(API_KEY_ID_KEY, credentials.getId()); @@ -639,26 +645,34 @@ public void usedDeprecatedField(String usedName, String replacedWith) { } /** - * Invalidate API keys for given realm and user name. + * Invalidate API keys for given realm, user name, API key name and id. 
* @param realmName realm name - * @param userName user name + * @param username user name + * @param apiKeyName API key name + * @param apiKeyId API key id * @param invalidateListener listener for {@link InvalidateApiKeyResponse} */ - public void invalidateApiKeysForRealmAndUser(String realmName, String userName, - ActionListener invalidateListener) { + public void invalidateApiKeys(String realmName, String username, String apiKeyName, String apiKeyId, + ActionListener invalidateListener) { ensureEnabled(); - if (Strings.hasText(realmName) == false && Strings.hasText(userName) == false) { - logger.trace("No realm name or username provided"); - invalidateListener.onFailure(new IllegalArgumentException("realm name or username must be provided")); + if (Strings.hasText(realmName) == false && Strings.hasText(username) == false && Strings.hasText(apiKeyName) == false + && Strings.hasText(apiKeyId) == false) { + logger.trace("none of the parameters [api key id, api key name, username, realm name] were specified for invalidation"); + invalidateListener + .onFailure(new IllegalArgumentException("One of [api key id, api key name, username, realm name] must be specified")); } else { - findApiKeysForUserAndRealm(userName, realmName, true, false, ActionListener.wrap(apiKeyIds -> { - if (apiKeyIds.isEmpty()) { - logger.warn("No active api keys to invalidate for realm [{}] and username [{}]", realmName, userName); - invalidateListener.onResponse(InvalidateApiKeyResponse.emptyResponse()); - } else { - invalidateAllApiKeys(apiKeyIds.stream().map(apiKey -> apiKey.getId()).collect(Collectors.toSet()), invalidateListener); - } - }, invalidateListener::onFailure)); + findApiKeysForUserRealmApiKeyIdAndNameCombination(realmName, username, apiKeyName, apiKeyId, true, false, + ActionListener.wrap(apiKeys -> { + if (apiKeys.isEmpty()) { + logger.debug( + "No active api keys to invalidate for realm [{}], username [{}], api key name [{}] and api key id [{}]", + realmName, username, apiKeyName, apiKeyId); + invalidateListener.onResponse(InvalidateApiKeyResponse.emptyResponse()); + } else { + invalidateAllApiKeys(apiKeys.stream().map(apiKey -> apiKey.getId()).collect(Collectors.toSet()), + invalidateListener); + } + }, invalidateListener::onFailure)); } } @@ -666,71 +680,6 @@ private void invalidateAllApiKeys(Collection apiKeyIds, ActionListener invalidateListener) { - ensureEnabled(); - if (Strings.hasText(apiKeyId) == false) { - logger.trace("No api key id provided"); - invalidateListener.onFailure(new IllegalArgumentException("api key id must be provided")); - } else { - findApiKeysForApiKeyId(apiKeyId, true, false, ActionListener.wrap(apiKeyIds -> { - if (apiKeyIds.isEmpty()) { - logger.warn("No api key to invalidate for api key id [{}]", apiKeyId); - invalidateListener.onResponse(InvalidateApiKeyResponse.emptyResponse()); - } else { - invalidateAllApiKeys(apiKeyIds.stream().map(apiKey -> apiKey.getId()).collect(Collectors.toSet()), invalidateListener); - } - }, invalidateListener::onFailure)); - } - } - - /** - * Invalidate API key for given API key name - * @param apiKeyName API key name - * @param invalidateListener listener for {@link InvalidateApiKeyResponse} - */ - public void invalidateApiKeyForApiKeyName(String apiKeyName, ActionListener invalidateListener) { - ensureEnabled(); - if (Strings.hasText(apiKeyName) == false) { - logger.trace("No api key name provided"); - invalidateListener.onFailure(new IllegalArgumentException("api key name must be provided")); - } else { - 
findApiKeyForApiKeyName(apiKeyName, true, false, ActionListener.wrap(apiKeyIds -> { - if (apiKeyIds.isEmpty()) { - logger.warn("No api key to invalidate for api key name [{}]", apiKeyName); - invalidateListener.onResponse(InvalidateApiKeyResponse.emptyResponse()); - } else { - invalidateAllApiKeys(apiKeyIds.stream().map(apiKey -> apiKey.getId()).collect(Collectors.toSet()), invalidateListener); - } - }, invalidateListener::onFailure)); - } - } - - private void findApiKeysForUserAndRealm(String userName, String realmName, boolean filterOutInvalidatedKeys, - boolean filterOutExpiredKeys, ActionListener> listener) { - final SecurityIndexManager frozenSecurityIndex = securityIndex.freeze(); - if (frozenSecurityIndex.indexExists() == false) { - listener.onResponse(Collections.emptyList()); - } else if (frozenSecurityIndex.isAvailable() == false) { - listener.onFailure(frozenSecurityIndex.getUnavailableReason()); - } else { - final BoolQueryBuilder boolQuery = QueryBuilders.boolQuery() - .filter(QueryBuilders.termQuery("doc_type", "api_key")); - if (Strings.hasText(userName)) { - boolQuery.filter(QueryBuilders.termQuery("creator.principal", userName)); - } - if (Strings.hasText(realmName)) { - boolQuery.filter(QueryBuilders.termQuery("creator.realm", realmName)); - } - - findApiKeys(boolQuery, filterOutInvalidatedKeys, filterOutExpiredKeys, listener); - } - } - private void findApiKeys(final BoolQueryBuilder boolQuery, boolean filterOutInvalidatedKeys, boolean filterOutExpiredKeys, ActionListener> listener) { if (filterOutInvalidatedKeys) { @@ -767,35 +716,28 @@ private void findApiKeys(final BoolQueryBuilder boolQuery, boolean filterOutInva } } - private void findApiKeyForApiKeyName(String apiKeyName, boolean filterOutInvalidatedKeys, boolean filterOutExpiredKeys, - ActionListener> listener) { + private void findApiKeysForUserRealmApiKeyIdAndNameCombination(String realmName, String userName, String apiKeyName, String apiKeyId, + boolean filterOutInvalidatedKeys, boolean filterOutExpiredKeys, + ActionListener> listener) { final SecurityIndexManager frozenSecurityIndex = securityIndex.freeze(); if (frozenSecurityIndex.indexExists() == false) { listener.onResponse(Collections.emptyList()); } else if (frozenSecurityIndex.isAvailable() == false) { listener.onFailure(frozenSecurityIndex.getUnavailableReason()); } else { - final BoolQueryBuilder boolQuery = QueryBuilders.boolQuery() - .filter(QueryBuilders.termQuery("doc_type", "api_key")); + final BoolQueryBuilder boolQuery = QueryBuilders.boolQuery().filter(QueryBuilders.termQuery("doc_type", "api_key")); + if (Strings.hasText(realmName)) { + boolQuery.filter(QueryBuilders.termQuery("creator.realm", realmName)); + } + if (Strings.hasText(userName)) { + boolQuery.filter(QueryBuilders.termQuery("creator.principal", userName)); + } if (Strings.hasText(apiKeyName)) { boolQuery.filter(QueryBuilders.termQuery("name", apiKeyName)); } - - findApiKeys(boolQuery, filterOutInvalidatedKeys, filterOutExpiredKeys, listener); - } - } - - private void findApiKeysForApiKeyId(String apiKeyId, boolean filterOutInvalidatedKeys, boolean filterOutExpiredKeys, - ActionListener> listener) { - final SecurityIndexManager frozenSecurityIndex = securityIndex.freeze(); - if (frozenSecurityIndex.indexExists() == false) { - listener.onResponse(Collections.emptyList()); - } else if (frozenSecurityIndex.isAvailable() == false) { - listener.onFailure(frozenSecurityIndex.getUnavailableReason()); - } else { - final BoolQueryBuilder boolQuery = QueryBuilders.boolQuery() - 
.filter(QueryBuilders.termQuery("doc_type", "api_key")) - .filter(QueryBuilders.termQuery("_id", apiKeyId)); + if (Strings.hasText(apiKeyId)) { + boolQuery.filter(QueryBuilders.termQuery("_id", apiKeyId)); + } findApiKeys(boolQuery, filterOutInvalidatedKeys, filterOutExpiredKeys, listener); } @@ -818,9 +760,9 @@ private void indexInvalidation(Collection apiKeyIds, ActionListener listener) { - ensureEnabled(); - if (Strings.hasText(realmName) == false && Strings.hasText(userName) == false) { - logger.trace("No realm name or username provided"); - listener.onFailure(new IllegalArgumentException("realm name or username must be provided")); - } else { - findApiKeysForUserAndRealm(userName, realmName, false, false, ActionListener.wrap(apiKeyInfos -> { - if (apiKeyInfos.isEmpty()) { - logger.warn("No active api keys found for realm [{}] and username [{}]", realmName, userName); - listener.onResponse(GetApiKeyResponse.emptyResponse()); - } else { - listener.onResponse(new GetApiKeyResponse(apiKeyInfos)); - } - }, listener::onFailure)); - } - } - - /** - * Get API key for given API key id + * @param username user name + * @param apiKeyName API key name * @param apiKeyId API key id * @param listener listener for {@link GetApiKeyResponse} */ - public void getApiKeyForApiKeyId(String apiKeyId, ActionListener listener) { + public void getApiKeys(String realmName, String username, String apiKeyName, String apiKeyId, + ActionListener listener) { ensureEnabled(); - if (Strings.hasText(apiKeyId) == false) { - logger.trace("No api key id provided"); - listener.onFailure(new IllegalArgumentException("api key id must be provided")); + if (Strings.hasText(realmName) == false && Strings.hasText(username) == false && Strings.hasText(apiKeyName) == false + && Strings.hasText(apiKeyId) == false) { + logger.trace("none of the parameters [api key id, api key name, username, realm name] were specified for retrieval"); + listener.onFailure(new IllegalArgumentException("One of [api key id, api key name, username, realm name] must be specified")); } else { - findApiKeysForApiKeyId(apiKeyId, false, false, ActionListener.wrap(apiKeyInfos -> { + findApiKeysForUserRealmApiKeyIdAndNameCombination(realmName, username, apiKeyName, apiKeyId, false, false, + ActionListener.wrap(apiKeyInfos -> { if (apiKeyInfos.isEmpty()) { - logger.warn("No api key found for api key id [{}]", apiKeyId); + logger.debug("No active api keys found for realm [{}], user [{}], api key name [{}] and api key id [{}]", + realmName, username, apiKeyName, apiKeyId); listener.onResponse(GetApiKeyResponse.emptyResponse()); } else { listener.onResponse(new GetApiKeyResponse(apiKeyInfos)); @@ -969,24 +895,17 @@ public void getApiKeyForApiKeyId(String apiKeyId, ActionListener listener) { - ensureEnabled(); - if (Strings.hasText(apiKeyName) == false) { - logger.trace("No api key name provided"); - listener.onFailure(new IllegalArgumentException("api key name must be provided")); + public static String getCreatorRealmName(final Authentication authentication) { + if (authentication.getAuthenticatedBy().getType().equals(API_KEY_REALM_TYPE)) { + return (String) authentication.getMetadata().get(API_KEY_CREATOR_REALM); } else { - findApiKeyForApiKeyName(apiKeyName, false, false, ActionListener.wrap(apiKeyInfos -> { - if (apiKeyInfos.isEmpty()) { - logger.warn("No api key found for api key name [{}]", apiKeyName); - listener.onResponse(GetApiKeyResponse.emptyResponse()); - } else { - listener.onResponse(new GetApiKeyResponse(apiKeyInfos)); - } - }, 
listener::onFailure)); + return authentication.getAuthenticatedBy().getName(); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java index c140b2c397824..f5175b526be12 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java @@ -269,7 +269,7 @@ private void checkForApiKey() { apiKeyService.authenticateWithApiKeyIfPresent(threadContext, ActionListener.wrap(authResult -> { if (authResult.isAuthenticated()) { final User user = authResult.getUser(); - authenticatedBy = new RealmRef("_es_api_key", "_es_api_key", nodeName); + authenticatedBy = new RealmRef(ApiKeyService.API_KEY_REALM_NAME, ApiKeyService.API_KEY_REALM_TYPE, nodeName); writeAuthToContext(new Authentication(user, authenticatedBy, null, Version.CURRENT, Authentication.AuthenticationType.API_KEY, authResult.getMetadata())); } else if (authResult.getStatus() == AuthenticationResult.Status.TERMINATE) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java index 40d44503aef22..ca6e4e09c2d28 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java @@ -20,6 +20,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.authc.Realm; @@ -30,6 +31,7 @@ import org.elasticsearch.xpack.core.ssl.CertParsingUtils; import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import org.elasticsearch.xpack.security.authc.BytesKey; +import org.elasticsearch.xpack.security.authc.TokenService; import org.elasticsearch.xpack.security.authc.support.CachingRealm; import org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; @@ -42,6 +44,7 @@ import java.security.cert.CertificateEncodingException; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; +import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -76,6 +79,7 @@ public class PkiRealm extends Realm implements CachingRealm { private final UserRoleMapper roleMapper; private final Cache cache; private DelegatedAuthorizationSupport delegatedRealms; + private final boolean delegationEnabled; public PkiRealm(RealmConfig config, ResourceWatcherService watcherService, NativeRoleMappingStore nativeRoleMappingStore) { this(config, new CompositeRoleMapper(config, watcherService, nativeRoleMappingStore)); @@ -84,6 +88,7 @@ public PkiRealm(RealmConfig config, ResourceWatcherService watcherService, Nativ // pkg private for testing PkiRealm(RealmConfig config, UserRoleMapper roleMapper) { super(config); + this.delegationEnabled = 
config.getSetting(PkiRealmSettings.DELEGATION_ENABLED_SETTING); this.trustManager = trustManagers(config); this.principalPattern = config.getSetting(PkiRealmSettings.USERNAME_PATTERN_SETTING); this.roleMapper = roleMapper; @@ -93,6 +98,7 @@ public PkiRealm(RealmConfig config, ResourceWatcherService watcherService, Nativ .setMaximumWeight(config.getSetting(PkiRealmSettings.CACHE_MAX_USERS_SETTING)) .build(); this.delegatedRealms = null; + validateAuthenticationDelegationConfiguration(config); } @Override @@ -141,15 +147,19 @@ public void authenticate(AuthenticationToken authToken, ActionListener) () -> new ParameterizedMessage("Using cached authentication for DN [{}], as principal [{}]", + token.dn(), user.principal())); if (delegatedRealms.hasDelegation()) { delegatedRealms.resolve(user.principal(), listener); } else { listener.onResponse(AuthenticationResult.success(user)); } - } else if (isCertificateChainTrusted(trustManager, token, logger) == false) { + } else if (false == delegationEnabled && token.isDelegated()) { + listener.onResponse(AuthenticationResult.unsuccessful("Realm does not permit delegation for " + token.dn(), null)); + } else if (false == isCertificateChainTrusted(token)) { listener.onResponse(AuthenticationResult.unsuccessful("Certificate for " + token.dn() + " is not trusted", null)); } else { // parse the principal again after validating the cert chain, and do not rely on the token.principal one, because that could @@ -188,7 +198,14 @@ public void authenticate(AuthenticationToken authToken, ActionListener listener) { - final Map metadata = Map.of("pki_dn", token.dn()); + final Map metadata; + if (token.isDelegated()) { + metadata = Map.of("pki_dn", token.dn(), + "pki_delegated_by_user", token.getDelegateeAuthentication().getUser().principal(), + "pki_delegated_by_realm", token.getDelegateeAuthentication().getAuthenticatedBy().getName()); + } else { + metadata = Map.of("pki_dn", token.dn()); + } final UserRoleMapper.UserData userData = new UserRoleMapper.UserData(principal, token.dn(), Set.of(), metadata, config); roleMapper.resolveRoles(userData, ActionListener.wrap(roles -> { final User computedUser = new User(principal, roles.toArray(new String[roles.size()]), null, null, metadata, true); @@ -218,8 +235,13 @@ static String getPrincipalFromSubjectDN(Pattern principalPattern, X509Authentica return principal; } - private static boolean isCertificateChainTrusted(X509TrustManager trustManager, X509AuthenticationToken token, Logger logger) { - if (trustManager != null) { + private boolean isCertificateChainTrusted(X509AuthenticationToken token) { + if (trustManager == null) { + // No extra trust managers specified + // If the token is NOT delegated then it is authenticated, because the certificate chain has been validated by the TLS channel. + // Otherwise, if the token is delegated, then it cannot be authenticated without a trustManager + return token.isDelegated() == false; + } else { try { trustManager.checkClientTrusted(token.credentials(), AUTH_TYPE); return true; @@ -232,9 +254,6 @@ private static boolean isCertificateChainTrusted(X509TrustManager trustManager, } return false; } - - // No extra trust managers specified, so at this point we can be considered authenticated. 
- return true; } private X509TrustManager trustManagers(RealmConfig realmConfig) { @@ -313,9 +332,44 @@ public void expireAll() { } } - private static BytesKey computeFingerprint(X509Certificate certificate) throws CertificateEncodingException { + @Override + public void usageStats(ActionListener> listener) { + super.usageStats(ActionListener.wrap(stats -> { + stats.put("has_truststore", trustManager != null); + stats.put("has_authorization_realms", delegatedRealms != null && delegatedRealms.hasDelegation()); + stats.put("has_default_username_pattern", PkiRealmSettings.DEFAULT_USERNAME_PATTERN.equals(principalPattern.pattern())); + stats.put("is_authentication_delegated", delegationEnabled); + listener.onResponse(stats); + }, listener::onFailure)); + } + + private void validateAuthenticationDelegationConfiguration(RealmConfig config) { + if (delegationEnabled) { + List exceptionMessages = new ArrayList<>(2); + if (this.trustManager == null) { + exceptionMessages.add("a trust configuration (" + + config.getConcreteSetting(PkiRealmSettings.CAPATH_SETTING).getKey() + " or " + + config.getConcreteSetting(PkiRealmSettings.TRUST_STORE_PATH).getKey() + ")"); + } + if (false == TokenService.isTokenServiceEnabled(config.settings())) { + exceptionMessages.add("that the token service be also enabled (" + + XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey() + ")"); + } + if (false == exceptionMessages.isEmpty()) { + String message = "PKI realms with delegation enabled require " + exceptionMessages.get(0); + if (exceptionMessages.size() == 2) { + message = message + " and " + exceptionMessages.get(1); + } + throw new IllegalStateException(message); + } + } + } + + static BytesKey computeTokenFingerprint(X509AuthenticationToken token) throws CertificateEncodingException { MessageDigest digest = MessageDigests.sha256(); - digest.update(certificate.getEncoded()); + for (X509Certificate certificate : token.credentials()) { + digest.update(certificate.getEncoded()); + } return new BytesKey(digest.digest()); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/X509AuthenticationToken.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/X509AuthenticationToken.java index 30722dbb8a446..57e05c4314229 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/X509AuthenticationToken.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/X509AuthenticationToken.java @@ -5,21 +5,38 @@ */ package org.elasticsearch.xpack.security.authc.pki; +import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.ssl.CertParsingUtils; import java.security.cert.X509Certificate; +import java.util.Arrays; import java.util.Objects; public class X509AuthenticationToken implements AuthenticationToken { private final String dn; private final X509Certificate[] credentials; + private final Authentication delegateeAuthentication; private String principal; public X509AuthenticationToken(X509Certificate[] certificates) { + this(certificates, null); + } + + private X509AuthenticationToken(X509Certificate[] certificates, Authentication delegateeAuthentication) { this.credentials = Objects.requireNonNull(certificates); + if (false == CertParsingUtils.isOrderedCertificateChain(Arrays.asList(certificates))) { + throw new IllegalArgumentException("certificates chain array is not 
ordered"); + } this.dn = certificates.length == 0 ? "" : certificates[0].getSubjectX500Principal().toString(); this.principal = this.dn; + this.delegateeAuthentication = delegateeAuthentication; + } + + public static X509AuthenticationToken delegated(X509Certificate[] certificates, Authentication delegateeAuthentication) { + Objects.requireNonNull(delegateeAuthentication); + return new X509AuthenticationToken(certificates, delegateeAuthentication); } @Override @@ -44,4 +61,12 @@ public String dn() { public void clearCredentials() { // noop } + + public boolean isDelegated() { + return delegateeAuthentication != null; + } + + public Authentication getDelegateeAuthentication() { + return delegateeAuthentication; + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java index 69153379f3b15..bd81d6db4743a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java @@ -64,6 +64,7 @@ import org.elasticsearch.xpack.security.audit.AuditLevel; import org.elasticsearch.xpack.security.audit.AuditTrailService; import org.elasticsearch.xpack.security.audit.AuditUtil; +import org.elasticsearch.xpack.security.authc.ApiKeyService; import org.elasticsearch.xpack.security.authz.interceptor.RequestInterceptor; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; @@ -572,6 +573,14 @@ private ElasticsearchSecurityException denialException(Authentication authentica return authorizationError("action [{}] is unauthorized for user [{}] run as [{}]", cause, action, authUser.principal(), authentication.getUser().principal()); } + // check for authentication by API key + if (authentication.getAuthenticatedBy().getType().equals(ApiKeyService.API_KEY_REALM_TYPE)) { + final String apiKeyId = (String) authentication.getMetadata().get(ApiKeyService.API_KEY_ID_KEY); + assert apiKeyId != null : "api key id must be present in the metadata"; + logger.debug("action [{}] is unauthorized for API key id [{}] of user [{}]", action, apiKeyId, authUser.principal()); + return authorizationError("action [{}] is unauthorized for API key id [{}] of user [{}]", cause, action, apiKeyId, + authUser.principal()); + } logger.debug("action [{}] is unauthorized for user [{}]", action, authUser.principal()); return authorizationError("action [{}] is unauthorized for user [{}]", cause, action, authUser.principal()); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java index df00474f6d69d..4b0e99d7290fd 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java @@ -32,6 +32,8 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.transport.TransportActionProxy; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.security.action.GetApiKeyAction; +import org.elasticsearch.xpack.core.security.action.GetApiKeyRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; import 
org.elasticsearch.xpack.core.security.action.user.ChangePasswordAction; import org.elasticsearch.xpack.core.security.action.user.GetUserPrivilegesAction; @@ -62,6 +64,7 @@ import org.elasticsearch.xpack.core.security.authz.privilege.Privilege; import org.elasticsearch.xpack.core.security.support.Automatons; import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.authc.ApiKeyService; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; @@ -86,7 +89,7 @@ public class RBACEngine implements AuthorizationEngine { private static final Predicate SAME_USER_PRIVILEGE = Automatons.predicate( - ChangePasswordAction.NAME, AuthenticateAction.NAME, HasPrivilegesAction.NAME, GetUserPrivilegesAction.NAME); + ChangePasswordAction.NAME, AuthenticateAction.NAME, HasPrivilegesAction.NAME, GetUserPrivilegesAction.NAME, GetApiKeyAction.NAME); private static final String INDEX_SUB_REQUEST_PRIMARY = IndexAction.NAME + "[p]"; private static final String INDEX_SUB_REQUEST_REPLICA = IndexAction.NAME + "[r]"; private static final String DELETE_SUB_REQUEST_PRIMARY = DeleteAction.NAME + "[p]"; @@ -137,7 +140,7 @@ public void authorizeClusterAction(RequestInfo requestInfo, AuthorizationInfo au ActionListener listener) { if (authorizationInfo instanceof RBACAuthorizationInfo) { final Role role = ((RBACAuthorizationInfo) authorizationInfo).getRole(); - if (role.checkClusterAction(requestInfo.getAction(), requestInfo.getRequest())) { + if (role.checkClusterAction(requestInfo.getAction(), requestInfo.getRequest(), requestInfo.getAuthentication())) { listener.onResponse(AuthorizationResult.granted()); } else if (checkSameUserPermissions(requestInfo.getAction(), requestInfo.getRequest(), requestInfo.getAuthentication())) { listener.onResponse(AuthorizationResult.granted()); @@ -154,26 +157,39 @@ public void authorizeClusterAction(RequestInfo requestInfo, AuthorizationInfo au boolean checkSameUserPermissions(String action, TransportRequest request, Authentication authentication) { final boolean actionAllowed = SAME_USER_PRIVILEGE.test(action); if (actionAllowed) { - if (request instanceof UserRequest == false) { - assert false : "right now only a user request should be allowed"; - return false; - } - UserRequest userRequest = (UserRequest) request; - String[] usernames = userRequest.usernames(); - if (usernames == null || usernames.length != 1 || usernames[0] == null) { - assert false : "this role should only be used for actions to apply to a single user"; + if (request instanceof UserRequest) { + UserRequest userRequest = (UserRequest) request; + String[] usernames = userRequest.usernames(); + if (usernames == null || usernames.length != 1 || usernames[0] == null) { + assert false : "this role should only be used for actions to apply to a single user"; + return false; + } + final String username = usernames[0]; + final boolean sameUsername = authentication.getUser().principal().equals(username); + if (sameUsername && ChangePasswordAction.NAME.equals(action)) { + return checkChangePasswordAction(authentication); + } + + assert AuthenticateAction.NAME.equals(action) || HasPrivilegesAction.NAME.equals(action) + || GetUserPrivilegesAction.NAME.equals(action) || sameUsername == false + : "Action '" + action + "' should not be possible when sameUsername=" + sameUsername; + return sameUsername; + } else if (request instanceof GetApiKeyRequest) { + GetApiKeyRequest getApiKeyRequest = (GetApiKeyRequest) 
request; + if (authentication.getAuthenticatedBy().getType().equals(ApiKeyService.API_KEY_REALM_TYPE)) { + assert authentication.getLookedUpBy() == null : "runAs not supported for api key authentication"; + // if authenticated by API key then the request must also contain same API key id + String authenticatedApiKeyId = (String) authentication.getMetadata().get(ApiKeyService.API_KEY_ID_KEY); + if (Strings.hasText(getApiKeyRequest.getApiKeyId())) { + return getApiKeyRequest.getApiKeyId().equals(authenticatedApiKeyId); + } else { + return false; + } + } + } else { + assert false : "right now only a user request or get api key request should be allowed"; return false; } - final String username = usernames[0]; - final boolean sameUsername = authentication.getUser().principal().equals(username); - if (sameUsername && ChangePasswordAction.NAME.equals(action)) { - return checkChangePasswordAction(authentication); - } - - assert AuthenticateAction.NAME.equals(action) || HasPrivilegesAction.NAME.equals(action) - || GetUserPrivilegesAction.NAME.equals(action) || sameUsername == false - : "Action '" + action + "' should not be possible when sameUsername=" + sameUsername; - return sameUsername; } return false; } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestDelegatePkiAuthenticationAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestDelegatePkiAuthenticationAction.java new file mode 100644 index 0000000000000..c63d965a3b7c6 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestDelegatePkiAuthenticationAction.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.security.rest.action; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationAction; +import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationRequest; +import org.elasticsearch.xpack.security.action.TransportDelegatePkiAuthenticationAction; +import org.elasticsearch.xpack.security.authc.Realms; +import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationResponse; +import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +/** + * Implements the exchange of an {@code X509Certificate} chain into an access token. The chain is represented as an ordered string array. + * Each string in the array is a base64-encoded (Section 4 of RFC4648 - not base64url-encoded) DER PKIX certificate value. 
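As an aside, a hypothetical client-side call to this new endpoint might look like the sketch below, written against the low-level REST client. Only the `POST /_security/delegate_pki` route and the base64 DER encoding of each chain element come from this diff; the JSON field name `x509_certificate_chain` and the sample values are assumptions for illustration only.

```java
import java.io.IOException;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public final class DelegatePkiCallExample {

    // Exchanges a client certificate chain for an access token via the delegate_pki endpoint.
    // Each array element is the base64-encoded DER bytes of one certificate, leaf certificate first.
    public static Response delegatePki(RestClient restClient, String leafCertB64, String caCertB64) throws IOException {
        Request request = new Request("POST", "/_security/delegate_pki");
        // NOTE: the field name below is an assumption for illustration; it is not shown in this diff.
        request.setJsonEntity("{\"x509_certificate_chain\": [\"" + leafCertB64 + "\", \"" + caCertB64 + "\"]}");
        return restClient.performRequest(request);
    }
}
```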
+ * See also {@link TransportDelegatePkiAuthenticationAction}. + */ +public final class RestDelegatePkiAuthenticationAction extends SecurityBaseRestHandler { + + protected Logger logger = LogManager.getLogger(RestDelegatePkiAuthenticationAction.class); + + public RestDelegatePkiAuthenticationAction(Settings settings, RestController controller, XPackLicenseState xPackLicenseState) { + super(settings, xPackLicenseState); + controller.registerHandler(POST, "/_security/delegate_pki", this); + } + + @Override + protected Exception checkFeatureAvailable(RestRequest request) { + Exception failedFeature = super.checkFeatureAvailable(request); + if (failedFeature != null) { + return failedFeature; + } else if (Realms.isRealmTypeAvailable(licenseState.allowedRealmType(), PkiRealmSettings.TYPE)) { + return null; + } else { + logger.info("The '{}' realm is not available under the current license", PkiRealmSettings.TYPE); + return LicenseUtils.newComplianceException(PkiRealmSettings.TYPE); + } + } + + @Override + protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + try (XContentParser parser = request.contentParser()) { + final DelegatePkiAuthenticationRequest delegatePkiRequest = DelegatePkiAuthenticationRequest.fromXContent(parser); + return channel -> client.execute(DelegatePkiAuthenticationAction.INSTANCE, delegatePkiRequest, + new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(DelegatePkiAuthenticationResponse delegatePkiResponse, XContentBuilder builder) + throws Exception { + delegatePkiResponse.toXContent(builder, channel.request()); + return new BytesRestResponse(RestStatus.OK, builder); + } + }); + } + } + + @Override + public String getName() { + return "delegate_pki_action"; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyAction.java index 71ed5a06efb65..ca07952478444 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyAction.java @@ -39,7 +39,8 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClien final String apiKeyName = request.param("name"); final String userName = request.param("username"); final String realmName = request.param("realm_name"); - final GetApiKeyRequest getApiKeyRequest = new GetApiKeyRequest(realmName, userName, apiKeyId, apiKeyName); + final boolean myApiKeysOnly = request.paramAsBoolean("owner", false); + final GetApiKeyRequest getApiKeyRequest = new GetApiKeyRequest(realmName, userName, apiKeyId, apiKeyName, myApiKeysOnly); return channel -> client.execute(GetApiKeyAction.INSTANCE, getApiKeyRequest, new RestBuilderListener(channel) { @Override diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyAction.java index b11a0edde42f8..0579932887677 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyAction.java +++ 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyAction.java @@ -31,7 +31,8 @@ public final class RestInvalidateApiKeyAction extends ApiKeyBaseRestHandler { static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("invalidate_api_key", a -> { - return new InvalidateApiKeyRequest((String) a[0], (String) a[1], (String) a[2], (String) a[3]); + return new InvalidateApiKeyRequest((String) a[0], (String) a[1], (String) a[2], (String) a[3], (a[4] == null) ? false : + (Boolean) a[4]); }); static { @@ -39,6 +40,7 @@ public final class RestInvalidateApiKeyAction extends ApiKeyBaseRestHandler { PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("username")); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("id")); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("name")); + PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), new ParseField("owner")); } public RestInvalidateApiKeyAction(Settings settings, RestController controller, XPackLicenseState licenseState) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheckTests.java index ec3e0a5132dfd..10f9c7891f0c7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheckTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheckTests.java @@ -89,6 +89,17 @@ public void testBootstrapCheckWithDisabledRealm() throws Exception { assertFalse(runCheck(settings, env).isFailure()); } + public void testBootstrapCheckWithDelegationEnabled() throws Exception { + Settings settings = Settings.builder() + .put("xpack.security.authc.realms.pki.test_pki.enabled", true) + .put("xpack.security.authc.realms.pki.test_pki.delegation.enabled", true) + .put("xpack.security.transport.ssl.client_authentication", "none") + .put("path.home", createTempDir()) + .build(); + Environment env = TestEnvironment.newEnvironment(settings); + assertFalse(runCheck(settings, env).isFailure()); + } + public void testBootstrapCheckWithClosedSecuredSetting() throws Exception { final boolean expectFail = randomBoolean(); final MockSecureSettings secureSettings = new MockSecureSettings(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index 5c6c04b6ad491..4c9e944c14f0f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -47,6 +47,7 @@ import java.util.Base64; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -88,6 +89,31 @@ public void wipeSecurityIndex() throws InterruptedException { deleteSecurityIndex(); } + @Override + public String configRoles() { + return super.configRoles() + "\n" + + "manage_api_key_role:\n" + + " cluster: [\"manage_api_key\"]\n" + + "manage_own_api_key_role:\n" + + " cluster: [\"manage_own_api_key\"]\n"; + } + + @Override + public String configUsers() 
{ + final String usersPasswdHashed = new String( + getFastStoredHashAlgoForTests().hash(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)); + return super.configUsers() + + "user_with_manage_api_key_role:" + usersPasswdHashed + "\n" + + "user_with_manage_own_api_key_role:" + usersPasswdHashed + "\n"; + } + + @Override + public String configUsersRoles() { + return super.configUsersRoles() + + "manage_api_key_role:user_with_manage_api_key_role\n" + + "manage_own_api_key_role:user_with_manage_own_api_key_role\n"; + } + private void awaitApiKeysRemoverCompletion() throws InterruptedException { for (ApiKeyService apiKeyService : internalCluster().getInstances(ApiKeyService.class)) { final boolean done = awaitBusy(() -> apiKeyService.isExpirationInProgress() == false); @@ -171,7 +197,7 @@ public void testCreateApiKeyFailsWhenApiKeyWithSameNameAlreadyExists() throws In // Now invalidate the API key PlainActionFuture listener = new PlainActionFuture<>(); - client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyName(keyName), listener); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyName(keyName, false), listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); verifyInvalidateResponse(1, responses, invalidateResponse); @@ -222,7 +248,7 @@ public void testInvalidateApiKeysForApiKeyId() throws InterruptedException, Exec Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); PlainActionFuture listener = new PlainActionFuture<>(); - client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(responses.get(0).getId()), listener); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(responses.get(0).getId(), false), listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); verifyInvalidateResponse(1, responses, invalidateResponse); } @@ -232,7 +258,8 @@ public void testInvalidateApiKeysForApiKeyName() throws InterruptedException, Ex Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); PlainActionFuture listener = new PlainActionFuture<>(); - client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyName(responses.get(0).getName()), listener); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyName(responses.get(0).getName(), false), + listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); verifyInvalidateResponse(1, responses, invalidateResponse); } @@ -254,7 +281,8 @@ public void testInvalidatedApiKeysDeletedByRemover() throws Exception { List createdApiKeys = createApiKeys(2, null); PlainActionFuture listener = new PlainActionFuture<>(); - client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(createdApiKeys.get(0).getId()), listener); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(createdApiKeys.get(0).getId(), false), + listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); assertThat(invalidateResponse.getInvalidatedApiKeys().size(), equalTo(1)); assertThat(invalidateResponse.getPreviouslyInvalidatedApiKeys().size(), equalTo(0)); @@ 
-270,7 +298,8 @@ public void testInvalidatedApiKeysDeletedByRemover() throws Exception { // invalidate API key to trigger remover listener = new PlainActionFuture<>(); - client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(createdApiKeys.get(1).getId()), listener); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(createdApiKeys.get(1).getId(), false), + listener); assertThat(listener.get().getInvalidatedApiKeys().size(), is(1)); awaitApiKeysRemoverCompletion(); @@ -343,7 +372,8 @@ public void testExpiredApiKeysBehaviorWhenKeysExpired1WeekBeforeAnd1DayBefore() // Invalidate to trigger the remover PlainActionFuture listener = new PlainActionFuture<>(); - client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(createdApiKeys.get(2).getId()), listener); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(createdApiKeys.get(2).getId(), false), + listener); assertThat(listener.get().getInvalidatedApiKeys().size(), is(1)); awaitApiKeysRemoverCompletion(); @@ -391,7 +421,7 @@ public void testActiveApiKeysWithNoExpirationNeverGetDeletedByRemover() throws E .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); PlainActionFuture listener = new PlainActionFuture<>(); // trigger expired keys remover - client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(responses.get(1).getId()), listener); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(responses.get(1).getId(), false), listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); assertThat(invalidateResponse.getInvalidatedApiKeys().size(), equalTo(1)); assertThat(invalidateResponse.getPreviouslyInvalidatedApiKeys().size(), equalTo(0)); @@ -414,7 +444,8 @@ public void testGetApiKeysForRealm() throws InterruptedException, ExecutionExcep Set expectedValidKeyIds = null; if (invalidate) { PlainActionFuture listener = new PlainActionFuture<>(); - client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(responses.get(0).getId()), listener); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(responses.get(0).getId(), false), + listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); invalidatedApiKeyIds = invalidateResponse.getInvalidatedApiKeys(); expectedValidKeyIds = responses.stream().filter(o -> !o.getId().equals(responses.get(0).getId())).map(o -> o.getId()) @@ -459,7 +490,7 @@ public void testGetApiKeysForApiKeyId() throws InterruptedException, ExecutionEx Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); PlainActionFuture listener = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyId(responses.get(0).getId()), listener); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyId(responses.get(0).getId(), false), listener); GetApiKeyResponse response = listener.get(); verifyGetResponse(1, responses, response, Collections.singleton(responses.get(0).getId()), null); } @@ -469,15 +500,112 @@ public void testGetApiKeysForApiKeyName() throws InterruptedException, Execution Client client = client().filterWithHeader(Collections.singletonMap("Authorization", 
UsernamePasswordToken .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); PlainActionFuture listener = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyName(responses.get(0).getName()), listener); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyName(responses.get(0).getName(), false), listener); + GetApiKeyResponse response = listener.get(); + verifyGetResponse(1, responses, response, Collections.singleton(responses.get(0).getId()), null); + } + + public void testGetApiKeysOwnedByCurrentAuthenticatedUser() throws InterruptedException, ExecutionException { + int noOfSuperuserApiKeys = randomIntBetween(3, 5); + int noOfApiKeysForUserWithManageApiKeyRole = randomIntBetween(3, 5); + List defaultUserCreatedKeys = createApiKeys(noOfSuperuserApiKeys, null); + String userWithManageApiKeyRole = randomFrom("user_with_manage_api_key_role", "user_with_manage_own_api_key_role"); + List userWithManageApiKeyRoleApiKeys = createApiKeys(userWithManageApiKeyRole, + noOfApiKeysForUserWithManageApiKeyRole, null, "monitor"); + final Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken + .basicAuthHeaderValue(userWithManageApiKeyRole, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); + + PlainActionFuture listener = new PlainActionFuture<>(); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.forOwnedApiKeys(), listener); + GetApiKeyResponse response = listener.get(); + verifyGetResponse(userWithManageApiKeyRole, noOfApiKeysForUserWithManageApiKeyRole, userWithManageApiKeyRoleApiKeys, + response, userWithManageApiKeyRoleApiKeys.stream().map(o -> o.getId()).collect(Collectors.toSet()), null); + } + + public void testInvalidateApiKeysOwnedByCurrentAuthenticatedUser() throws InterruptedException, ExecutionException { + int noOfSuperuserApiKeys = randomIntBetween(3, 5); + int noOfApiKeysForUserWithManageApiKeyRole = randomIntBetween(3, 5); + List defaultUserCreatedKeys = createApiKeys(noOfSuperuserApiKeys, null); + String userWithManageApiKeyRole = randomFrom("user_with_manage_api_key_role", "user_with_manage_own_api_key_role"); + List userWithManageApiKeyRoleApiKeys = createApiKeys(userWithManageApiKeyRole, + noOfApiKeysForUserWithManageApiKeyRole, null, "monitor"); + final Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken + .basicAuthHeaderValue(userWithManageApiKeyRole, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); + + PlainActionFuture listener = new PlainActionFuture<>(); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.forOwnedApiKeys(), listener); + InvalidateApiKeyResponse invalidateResponse = listener.get(); + + verifyInvalidateResponse(noOfApiKeysForUserWithManageApiKeyRole, userWithManageApiKeyRoleApiKeys, invalidateResponse); + } + + public void testApiKeyAuthorizationApiKeyMustBeAbleToRetrieveItsOwnInformationButNotAnyOtherKeysCreatedBySameOwner() + throws InterruptedException, ExecutionException { + List responses = createApiKeys(SecuritySettingsSource.TEST_SUPERUSER,2, null, (String[]) null); + final String base64ApiKeyKeyValue = Base64.getEncoder().encodeToString( + (responses.get(0).getId() + ":" + responses.get(0).getKey().toString()).getBytes(StandardCharsets.UTF_8)); + Client client = client().filterWithHeader(Map.of("Authorization", "ApiKey " + base64ApiKeyKeyValue)); + PlainActionFuture 
listener = new PlainActionFuture<>(); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyId(responses.get(0).getId(), randomBoolean()), listener); GetApiKeyResponse response = listener.get(); verifyGetResponse(1, responses, response, Collections.singleton(responses.get(0).getId()), null); + + final PlainActionFuture<GetApiKeyResponse> failureListener = new PlainActionFuture<>(); + // for any other API key id, it must deny access + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyId(responses.get(1).getId(), randomBoolean()), + failureListener); + ElasticsearchSecurityException ese = expectThrows(ElasticsearchSecurityException.class, () -> failureListener.actionGet()); + assertErrorMessage(ese, "cluster:admin/xpack/security/api_key/get", SecuritySettingsSource.TEST_SUPERUSER, + responses.get(0).getId()); + + final PlainActionFuture<GetApiKeyResponse> failureListener1 = new PlainActionFuture<>(); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.forOwnedApiKeys(), failureListener1); + ese = expectThrows(ElasticsearchSecurityException.class, () -> failureListener1.actionGet()); + assertErrorMessage(ese, "cluster:admin/xpack/security/api_key/get", SecuritySettingsSource.TEST_SUPERUSER, + responses.get(0).getId()); } - private void verifyGetResponse(int noOfApiKeys, List<CreateApiKeyResponse> responses, GetApiKeyResponse response, - Set<String> validApiKeyIds, - List<String> invalidatedApiKeyIds) { - assertThat(response.getApiKeyInfos().length, equalTo(noOfApiKeys)); + public void testApiKeyWithManageOwnPrivilegeIsAbleToInvalidateItselfButNotAnyOtherKeysCreatedBySameOwner() + throws InterruptedException, ExecutionException { + List<CreateApiKeyResponse> responses = createApiKeys(SecuritySettingsSource.TEST_SUPERUSER, 2, null, "manage_own_api_key"); + final String base64ApiKeyKeyValue = Base64.getEncoder().encodeToString( + (responses.get(0).getId() + ":" + responses.get(0).getKey().toString()).getBytes(StandardCharsets.UTF_8)); + Client client = client().filterWithHeader(Map.of("Authorization", "ApiKey " + base64ApiKeyKeyValue)); + + final PlainActionFuture<InvalidateApiKeyResponse> failureListener = new PlainActionFuture<>(); + // for any other API key id, it must deny access + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(responses.get(1).getId(), randomBoolean()), + failureListener); + ElasticsearchSecurityException ese = expectThrows(ElasticsearchSecurityException.class, () -> failureListener.actionGet()); + assertErrorMessage(ese, "cluster:admin/xpack/security/api_key/invalidate", SecuritySettingsSource.TEST_SUPERUSER, + responses.get(0).getId()); + + final PlainActionFuture<InvalidateApiKeyResponse> failureListener1 = new PlainActionFuture<>(); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.forOwnedApiKeys(), failureListener1); + ese = expectThrows(ElasticsearchSecurityException.class, () -> failureListener1.actionGet()); + assertErrorMessage(ese, "cluster:admin/xpack/security/api_key/invalidate", SecuritySettingsSource.TEST_SUPERUSER, + responses.get(0).getId()); + + PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>(); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(responses.get(0).getId(), randomBoolean()), + listener); + InvalidateApiKeyResponse invalidateResponse = listener.get(); + + assertThat(invalidateResponse.getInvalidatedApiKeys().size(), equalTo(1)); + assertThat(invalidateResponse.getInvalidatedApiKeys(), containsInAnyOrder(responses.get(0).getId())); + assertThat(invalidateResponse.getPreviouslyInvalidatedApiKeys().size(), equalTo(0)); +
assertThat(invalidateResponse.getErrors().size(), equalTo(0)); + } + + private void verifyGetResponse(int expectedNumberOfApiKeys, List<CreateApiKeyResponse> responses, + GetApiKeyResponse response, Set<String> validApiKeyIds, List<String> invalidatedApiKeyIds) { + verifyGetResponse(SecuritySettingsSource.TEST_SUPERUSER, expectedNumberOfApiKeys, responses, response, validApiKeyIds, + invalidatedApiKeyIds); + } + + private void verifyGetResponse(String user, int expectedNumberOfApiKeys, List<CreateApiKeyResponse> responses, + GetApiKeyResponse response, Set<String> validApiKeyIds, List<String> invalidatedApiKeyIds) { + assertThat(response.getApiKeyInfos().length, equalTo(expectedNumberOfApiKeys)); List<String> expectedIds = responses.stream().filter(o -> validApiKeyIds.contains(o.getId())).map(o -> o.getId()) .collect(Collectors.toList()); List<String> actualIds = Arrays.stream(response.getApiKeyInfos()).filter(o -> o.isInvalidated() == false).map(o -> o.getId()) @@ -489,7 +617,7 @@ private void verifyGetResponse(int noOfApiKeys, List respo .collect(Collectors.toList()); assertThat(actualNames, containsInAnyOrder(expectedNames.toArray(Strings.EMPTY_ARRAY))); Set<String> expectedUsernames = (validApiKeyIds.isEmpty()) ? Collections.emptySet() - : Collections.singleton(SecuritySettingsSource.TEST_SUPERUSER); + : Set.of(user); Set<String> actualUsernames = Arrays.stream(response.getApiKeyInfos()).filter(o -> o.isInvalidated() == false) .map(o -> o.getUsername()).collect(Collectors.toSet()); assertThat(actualUsernames, containsInAnyOrder(expectedUsernames.toArray(Strings.EMPTY_ARRAY))); @@ -498,15 +626,18 @@ private void verifyGetResponse(int noOfApiKeys, List respo .map(o -> o.getId()).collect(Collectors.toList()); assertThat(invalidatedApiKeyIds, containsInAnyOrder(actualInvalidatedApiKeyIds.toArray(Strings.EMPTY_ARRAY))); } - } private List<CreateApiKeyResponse> createApiKeys(int noOfApiKeys, TimeValue expiration) { + return createApiKeys(SecuritySettingsSource.TEST_SUPERUSER, noOfApiKeys, expiration, "monitor"); + } + + private List<CreateApiKeyResponse> createApiKeys(String user, int noOfApiKeys, TimeValue expiration, String...
clusterPrivileges) { List<CreateApiKeyResponse> responses = new ArrayList<>(); for (int i = 0; i < noOfApiKeys; i++) { - final RoleDescriptor descriptor = new RoleDescriptor("role", new String[] { "monitor" }, null, null); + final RoleDescriptor descriptor = new RoleDescriptor("role", clusterPrivileges, null, null); Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken - .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); + .basicAuthHeaderValue(user, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); final CreateApiKeyResponse response = new CreateApiKeyRequestBuilder(client) .setName("test-key-" + randomAlphaOfLengthBetween(5, 9) + i).setExpiration(expiration) .setRoleDescriptors(Collections.singletonList(descriptor)).get(); @@ -517,4 +648,9 @@ private List createApiKeys(int noOfApiKeys, TimeValue expi assertThat(responses.size(), is(noOfApiKeys)); return responses; } + + private void assertErrorMessage(final ElasticsearchSecurityException ese, String action, String userName, String apiKeyId) { + assertThat(ese.getMessage(), + is("action [" + action + "] is unauthorized for API key id [" + apiKeyId + "] of user [" + userName + "]")); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index 0491d20d74c8a..031f5ccec0696 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -191,6 +191,7 @@ public void testValidateApiKey() throws Exception { sourceMap.put("limited_by_role_descriptors", Collections.singletonMap("limited role", Collections.singletonMap("cluster", "all"))); Map<String, Object> creatorMap = new HashMap<>(); creatorMap.put("principal", "test_user"); + creatorMap.put("realm", "realm1"); creatorMap.put("metadata", Collections.emptyMap()); sourceMap.put("creator", creatorMap); sourceMap.put("api_key_invalidated", false); @@ -209,6 +210,7 @@ public void testValidateApiKey() throws Exception { assertThat(result.getMetadata().get(ApiKeyService.API_KEY_ROLE_DESCRIPTORS_KEY), equalTo(sourceMap.get("role_descriptors"))); assertThat(result.getMetadata().get(ApiKeyService.API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY), equalTo(sourceMap.get("limited_by_role_descriptors"))); + assertThat(result.getMetadata().get(ApiKeyService.API_KEY_CREATOR_REALM), is("realm1")); sourceMap.put("expiration_time", Clock.systemUTC().instant().plus(1L, ChronoUnit.HOURS).toEpochMilli()); future = new PlainActionFuture<>(); @@ -222,6 +224,7 @@ public void testValidateApiKey() throws Exception { assertThat(result.getMetadata().get(ApiKeyService.API_KEY_ROLE_DESCRIPTORS_KEY), equalTo(sourceMap.get("role_descriptors"))); assertThat(result.getMetadata().get(ApiKeyService.API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY), equalTo(sourceMap.get("limited_by_role_descriptors"))); + assertThat(result.getMetadata().get(ApiKeyService.API_KEY_CREATOR_REALM), is("realm1")); sourceMap.put("expiration_time", Clock.systemUTC().instant().minus(1L, ChronoUnit.HOURS).toEpochMilli()); future = new PlainActionFuture<>(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java
index 6061469d7003e..d5e123c2313bb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java @@ -210,6 +210,9 @@ private Settings.Builder pkiSettings(boolean useTrustStore) { } else { builder.putList("certificate_authorities", generateRandomStringArray(5, 32, false, false)); } + if (randomBoolean()) { + builder.put("delegation.enabled", randomBoolean()); + } return builder; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java index 36c9b79538272..64d6cfd938f81 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -49,6 +49,7 @@ import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse; import org.elasticsearch.xpack.core.security.action.user.PutUserRequestBuilder; import org.elasticsearch.xpack.core.security.action.user.SetEnabledRequestBuilder; +import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.permission.Role; @@ -369,10 +370,11 @@ public void testCreateAndUpdateRole() { } } else { final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); GetRolesResponse getRolesResponse = new GetRolesRequestBuilder(client()).names("test_role").get(); assertTrue("test_role does not exist!", getRolesResponse.hasRoles()); assertTrue("any cluster permission should be authorized", - Role.builder(getRolesResponse.roles()[0], null).build().cluster().check("cluster:admin/foo", request)); + Role.builder(getRolesResponse.roles()[0], null).build().cluster().check("cluster:admin/foo", request, authentication)); preparePutRole("test_role") .cluster("none") @@ -383,7 +385,7 @@ public void testCreateAndUpdateRole() { assertTrue("test_role does not exist!", getRolesResponse.hasRoles()); assertFalse("no cluster permission should be authorized", - Role.builder(getRolesResponse.roles()[0], null).build().cluster().check("cluster:admin/bar", request)); + Role.builder(getRolesResponse.roles()[0], null).build().cluster().check("cluster:admin/bar", request, authentication)); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthDelegationIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthDelegationIntegTests.java new file mode 100644 index 0000000000000..fd84e71b91e0a --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthDelegationIntegTests.java @@ -0,0 +1,338 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.security.authc.pki; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.ValidationException; +import org.elasticsearch.client.security.AuthenticateResponse; +import org.elasticsearch.client.security.PutRoleMappingRequest; +import org.elasticsearch.client.security.RefreshPolicy; +import org.elasticsearch.client.security.AuthenticateResponse.RealmInfo; +import org.elasticsearch.client.security.DeleteRoleMappingRequest; +import org.elasticsearch.client.security.support.expressiondsl.fields.FieldRoleMapperExpression; +import org.elasticsearch.client.security.DelegatePkiAuthenticationRequest; +import org.elasticsearch.client.security.DelegatePkiAuthenticationResponse; +import org.elasticsearch.client.security.InvalidateTokenRequest; +import org.elasticsearch.client.security.InvalidateTokenResponse; +import org.elasticsearch.client.security.user.User; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheRequestBuilder; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.junit.Before; +import org.elasticsearch.test.SecuritySettingsSource; + +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.cert.CertificateFactory; +import java.security.cert.X509Certificate; +import java.util.Collections; +import java.util.Arrays; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.emptyCollectionOf; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.startsWith; + +public class PkiAuthDelegationIntegTests extends SecurityIntegTestCase { + + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true) + // pki1 does not allow delegation + .put("xpack.security.authc.realms.pki.pki1.order", "1") + .putList("xpack.security.authc.realms.pki.pki1.certificate_authorities", + getDataPath("/org/elasticsearch/xpack/security/action/pki_delegation/testRootCA.crt").toString()) + .put("xpack.security.authc.realms.pki.pki1.files.role_mapping", getDataPath("role_mapping.yml")) + // pki2 allows delegation but has a non-matching username pattern + .put("xpack.security.authc.realms.pki.pki2.order", "2") + .putList("xpack.security.authc.realms.pki.pki2.certificate_authorities", + getDataPath("/org/elasticsearch/xpack/security/action/pki_delegation/testRootCA.crt").toString()) + .put("xpack.security.authc.realms.pki.pki2.username_pattern", "CN=MISMATCH(.*?)(?:,|$)") + .put("xpack.security.authc.realms.pki.pki2.delegation.enabled", true) + .put("xpack.security.authc.realms.pki.pki2.files.role_mapping", getDataPath("role_mapping.yml")) + // pki3 allows delegation and the username pattern (default) matches + .put("xpack.security.authc.realms.pki.pki3.order", "3") + 
.putList("xpack.security.authc.realms.pki.pki3.certificate_authorities", + getDataPath("/org/elasticsearch/xpack/security/action/pki_delegation/testRootCA.crt").toString()) + .put("xpack.security.authc.realms.pki.pki3.delegation.enabled", true) + .put("xpack.security.authc.realms.pki.pki3.files.role_mapping", getDataPath("role_mapping.yml")) + .build(); + } + + @Override + protected String configUsers() { + final String usersPasswdHashed = new String(Hasher.resolve( + randomFrom("pbkdf2", "pbkdf2_1000", "bcrypt", "bcrypt9")).hash(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)); + return super.configUsers() + + "user_manage:" + usersPasswdHashed + "\n" + + "user_manage_security:" + usersPasswdHashed + "\n" + + "user_delegate_pki:" + usersPasswdHashed + "\n" + + "user_all:" + usersPasswdHashed + "\n" + + "kibana_system:" + usersPasswdHashed + "\n"; + } + + @Override + protected String configRoles() { + return super.configRoles() + "\n" + + "role_manage:\n" + + " cluster: [ manage ]\n" + + "\n" + + "role_manage_security:\n" + + " cluster: [ manage_security ]\n" + + "\n" + + "role_delegate_pki:\n" + + " cluster: [ delegate_pki ]\n" + + "\n" + + "role_all:\n" + + " cluster: [ all ]\n"; + } + + @Override + protected String configUsersRoles() { + return super.configUsersRoles() + "\n" + + "role_manage:user_manage\n" + + "role_manage_security:user_manage_security\n" + + "role_delegate_pki:user_delegate_pki\n" + + "role_all:user_all\n" + + "kibana_system:kibana_system\n"; + } + + @Override + protected boolean transportSSLEnabled() { + return true; + } + + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + + @Before + void clearRealmCache() { + new ClearRealmCacheRequestBuilder(client()).get(); + } + + public void testDelegateThenAuthenticate() throws Exception { + final X509Certificate clientCertificate = readCertForPkiDelegation("testClient.crt"); + final X509Certificate intermediateCA = readCertForPkiDelegation("testIntermediateCA.crt"); + final X509Certificate rootCA = readCertForPkiDelegation("testRootCA.crt"); + DelegatePkiAuthenticationRequest delegatePkiRequest; + // trust root is optional + if (randomBoolean()) { + delegatePkiRequest = new DelegatePkiAuthenticationRequest(Arrays.asList(clientCertificate, intermediateCA)); + } else { + delegatePkiRequest = new DelegatePkiAuthenticationRequest(Arrays.asList(clientCertificate, intermediateCA, rootCA)); + } + + try (RestHighLevelClient restClient = new TestRestHighLevelClient()) { + for (String delegateeUsername : Arrays.asList("user_all", "user_delegate_pki", "kibana_system")) { + // delegate + RequestOptions.Builder optionsBuilder = RequestOptions.DEFAULT.toBuilder(); + optionsBuilder.addHeader("Authorization", + basicAuthHeaderValue(delegateeUsername, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)); + DelegatePkiAuthenticationResponse delegatePkiResponse = restClient.security().delegatePkiAuthentication(delegatePkiRequest, + optionsBuilder.build()); + String token = delegatePkiResponse.getAccessToken(); + assertThat(token, is(notNullValue())); + // authenticate + optionsBuilder = RequestOptions.DEFAULT.toBuilder(); + optionsBuilder.addHeader("Authorization", "Bearer " + token); + AuthenticateResponse resp = restClient.security().authenticate(optionsBuilder.build()); + User user = resp.getUser(); + assertThat(user, is(notNullValue())); + assertThat(user.getUsername(), is("Elasticsearch Test Client")); + RealmInfo authnRealm = resp.getAuthenticationRealm(); + assertThat(authnRealm, 
is(notNullValue())); + assertThat(authnRealm.getName(), is("pki3")); + assertThat(authnRealm.getType(), is("pki")); + } + } + } + + public void testTokenInvalidate() throws Exception { + final X509Certificate clientCertificate = readCertForPkiDelegation("testClient.crt"); + final X509Certificate intermediateCA = readCertForPkiDelegation("testIntermediateCA.crt"); + final X509Certificate rootCA = readCertForPkiDelegation("testRootCA.crt"); + DelegatePkiAuthenticationRequest delegatePkiRequest; + // trust root is optional + if (randomBoolean()) { + delegatePkiRequest = new DelegatePkiAuthenticationRequest(Arrays.asList(clientCertificate, intermediateCA)); + } else { + delegatePkiRequest = new DelegatePkiAuthenticationRequest(Arrays.asList(clientCertificate, intermediateCA, rootCA)); + } + + try (RestHighLevelClient restClient = new TestRestHighLevelClient()) { + String delegateeUsername = randomFrom("user_all", "user_delegate_pki", "kibana_system"); + // delegate + RequestOptions.Builder optionsBuilder = RequestOptions.DEFAULT.toBuilder(); + optionsBuilder.addHeader("Authorization", + basicAuthHeaderValue(delegateeUsername, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)); + DelegatePkiAuthenticationResponse delegatePkiResponse = restClient.security().delegatePkiAuthentication(delegatePkiRequest, + optionsBuilder.build()); + String token = delegatePkiResponse.getAccessToken(); + assertThat(token, is(notNullValue())); + // authenticate + optionsBuilder = RequestOptions.DEFAULT.toBuilder(); + optionsBuilder.addHeader("Authorization", "Bearer " + token); + AuthenticateResponse resp = restClient.security().authenticate(optionsBuilder.build()); + User user = resp.getUser(); + assertThat(user, is(notNullValue())); + assertThat(user.getUsername(), is("Elasticsearch Test Client")); + assertThat(user.getMetadata().get("pki_dn"), is(notNullValue())); + assertThat(user.getMetadata().get("pki_dn"), is("O=org, OU=Elasticsearch, CN=Elasticsearch Test Client")); + assertThat(user.getMetadata().get("pki_delegated_by_user"), is(notNullValue())); + assertThat(user.getMetadata().get("pki_delegated_by_user"), is(delegateeUsername)); + assertThat(user.getMetadata().get("pki_delegated_by_realm"), is(notNullValue())); + assertThat(user.getMetadata().get("pki_delegated_by_realm"), is("file")); + // no roles because no role mappings + assertThat(user.getRoles(), is(emptyCollectionOf(String.class))); + RealmInfo authnRealm = resp.getAuthenticationRealm(); + assertThat(authnRealm, is(notNullValue())); + assertThat(authnRealm.getName(), is("pki3")); + assertThat(authnRealm.getType(), is("pki")); + // invalidate + InvalidateTokenRequest invalidateRequest = new InvalidateTokenRequest(token, null, null, null); + optionsBuilder = RequestOptions.DEFAULT.toBuilder(); + optionsBuilder.addHeader("Authorization", + basicAuthHeaderValue(delegateeUsername, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)); + InvalidateTokenResponse invalidateResponse = restClient.security().invalidateToken(invalidateRequest, optionsBuilder.build()); + assertThat(invalidateResponse.getInvalidatedTokens(), is(1)); + assertThat(invalidateResponse.getErrorsCount(), is(0)); + // failed authenticate + ElasticsearchStatusException e1 = expectThrows(ElasticsearchStatusException.class, () -> restClient.security() + .authenticate(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "Bearer " + token).build())); + assertThat(e1.getMessage(), is("Elasticsearch exception [type=security_exception, reason=token expired]")); + } + } + + 
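+    // negative case: "user_manage" and "user_manage_security" hold only the manage / manage_security cluster privileges (not delegate_pki or all), so the delegate_pki action below is expected to fail with a security exception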
public void testDelegateUnauthorized() throws Exception { + final X509Certificate clientCertificate = readCertForPkiDelegation("testClient.crt"); + final X509Certificate intermediateCA = readCertForPkiDelegation("testIntermediateCA.crt"); + final X509Certificate rootCA = readCertForPkiDelegation("testRootCA.crt"); + DelegatePkiAuthenticationRequest delegatePkiRequest; + // trust root is optional + if (randomBoolean()) { + delegatePkiRequest = new DelegatePkiAuthenticationRequest(Arrays.asList(clientCertificate, intermediateCA)); + } else { + delegatePkiRequest = new DelegatePkiAuthenticationRequest(Arrays.asList(clientCertificate, intermediateCA, rootCA)); + } + try (RestHighLevelClient restClient = new TestRestHighLevelClient()) { + for (String delegateeUsername : Arrays.asList("user_manage", "user_manage_security")) { + RequestOptions.Builder optionsBuilder = RequestOptions.DEFAULT.toBuilder(); + optionsBuilder.addHeader("Authorization", + basicAuthHeaderValue(delegateeUsername, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> { + restClient.security().delegatePkiAuthentication(delegatePkiRequest, optionsBuilder.build()); + }); + assertThat(e.getMessage(), startsWith("Elasticsearch exception [type=security_exception, reason=action" + + " [cluster:admin/xpack/security/delegate_pki] is unauthorized for user")); + } + } + } + + public void testDelegatePkiWithRoleMapping() throws Exception { + X509Certificate clientCertificate = readCertForPkiDelegation("testClient.crt"); + X509Certificate intermediateCA = readCertForPkiDelegation("testIntermediateCA.crt"); + X509Certificate rootCA = readCertForPkiDelegation("testRootCA.crt"); + DelegatePkiAuthenticationRequest delegatePkiRequest; + // trust root is optional + if (randomBoolean()) { + delegatePkiRequest = new DelegatePkiAuthenticationRequest(Arrays.asList(clientCertificate, intermediateCA)); + } else { + delegatePkiRequest = new DelegatePkiAuthenticationRequest(Arrays.asList(clientCertificate, intermediateCA, rootCA)); + } + final RequestOptions testUserOptions = RequestOptions.DEFAULT.toBuilder() + .addHeader("Authorization", basicAuthHeaderValue(SecuritySettingsSource.TEST_USER_NAME, + new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()))) + .build(); + try (RestHighLevelClient restClient = new TestRestHighLevelClient()) { + // put role mappings for delegated PKI + PutRoleMappingRequest request = new PutRoleMappingRequest("role_by_delegated_user", true, + Collections.singletonList("role_by_delegated_user"), Collections.emptyList(), + new FieldRoleMapperExpression("metadata.pki_delegated_by_user", "test_user"), null, RefreshPolicy.IMMEDIATE); + restClient.security().putRoleMapping(request, testUserOptions); + request = new PutRoleMappingRequest("role_by_delegated_realm", true, Collections.singletonList("role_by_delegated_realm"), + Collections.emptyList(), new FieldRoleMapperExpression("metadata.pki_delegated_by_realm", "file"), null, + RefreshPolicy.IMMEDIATE); + restClient.security().putRoleMapping(request, testUserOptions); + // delegate + DelegatePkiAuthenticationResponse delegatePkiResponse = restClient.security().delegatePkiAuthentication(delegatePkiRequest, + testUserOptions); + // authenticate + AuthenticateResponse resp = restClient.security().authenticate(RequestOptions.DEFAULT.toBuilder() + .addHeader("Authorization", "Bearer " + delegatePkiResponse.getAccessToken()).build()); + User user = resp.getUser(); + 
assertThat(user, is(notNullValue())); + assertThat(user.getUsername(), is("Elasticsearch Test Client")); + assertThat(user.getMetadata().get("pki_dn"), is(notNullValue())); + assertThat(user.getMetadata().get("pki_dn"), is("O=org, OU=Elasticsearch, CN=Elasticsearch Test Client")); + assertThat(user.getMetadata().get("pki_delegated_by_user"), is(notNullValue())); + assertThat(user.getMetadata().get("pki_delegated_by_user"), is("test_user")); + assertThat(user.getMetadata().get("pki_delegated_by_realm"), is(notNullValue())); + assertThat(user.getMetadata().get("pki_delegated_by_realm"), is("file")); + // assert roles + assertThat(user.getRoles(), containsInAnyOrder("role_by_delegated_user", "role_by_delegated_realm")); + RealmInfo authnRealm = resp.getAuthenticationRealm(); + assertThat(authnRealm, is(notNullValue())); + assertThat(authnRealm.getName(), is("pki3")); + assertThat(authnRealm.getType(), is("pki")); + // delete role mappings for delegated PKI + restClient.security().deleteRoleMapping(new DeleteRoleMappingRequest("role_by_delegated_user", RefreshPolicy.IMMEDIATE), + testUserOptions); + restClient.security().deleteRoleMapping(new DeleteRoleMappingRequest("role_by_delegated_realm", RefreshPolicy.IMMEDIATE), + testUserOptions); + } + } + + public void testIncorrectCertChain() throws Exception { + X509Certificate clientCertificate = readCertForPkiDelegation("testClient.crt"); + X509Certificate intermediateCA = readCertForPkiDelegation("testIntermediateCA.crt"); + X509Certificate bogusCertificate = readCertForPkiDelegation("bogus.crt"); + RequestOptions.Builder optionsBuilder = RequestOptions.DEFAULT.toBuilder(); + optionsBuilder.addHeader("Authorization", basicAuthHeaderValue(SecuritySettingsSource.TEST_USER_NAME, + new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()))); + try (RestHighLevelClient restClient = new TestRestHighLevelClient()) { + // incomplete cert chain + DelegatePkiAuthenticationRequest delegatePkiRequest1 = new DelegatePkiAuthenticationRequest(Arrays.asList(clientCertificate)); + ElasticsearchStatusException e1 = expectThrows(ElasticsearchStatusException.class, + () -> restClient.security().delegatePkiAuthentication(delegatePkiRequest1, optionsBuilder.build())); + assertThat(e1.getMessage(), is("Elasticsearch exception [type=security_exception, reason=unable to authenticate user" + + " [O=org, OU=Elasticsearch, CN=Elasticsearch Test Client] for action [cluster:admin/xpack/security/delegate_pki]]")); + // swapped order + DelegatePkiAuthenticationRequest delegatePkiRequest2 = new DelegatePkiAuthenticationRequest( + Arrays.asList(intermediateCA, clientCertificate)); + ValidationException e2 = expectThrows(ValidationException.class, + () -> restClient.security().delegatePkiAuthentication(delegatePkiRequest2, optionsBuilder.build())); + assertThat(e2.getMessage(), is("Validation Failed: 1: certificates chain must be an ordered chain;")); + // bogus certificate + DelegatePkiAuthenticationRequest delegatePkiRequest3 = new DelegatePkiAuthenticationRequest(Arrays.asList(bogusCertificate)); + ElasticsearchStatusException e3 = expectThrows(ElasticsearchStatusException.class, + () -> restClient.security().delegatePkiAuthentication(delegatePkiRequest3, optionsBuilder.build())); + assertThat(e3.getMessage(), startsWith("Elasticsearch exception [type=security_exception, reason=unable to authenticate user")); + } + } + + private X509Certificate readCertForPkiDelegation(String certName) throws Exception { + Path path = 
getDataPath("/org/elasticsearch/xpack/security/action/pki_delegation/" + certName); + try (InputStream in = Files.newInputStream(path)) { + CertificateFactory factory = CertificateFactory.getInstance("X.509"); + return (X509Certificate) factory.generateCertificate(in); + } + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java index e5eb265979a87..132c22846cb6f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java @@ -17,6 +17,8 @@ import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.InternalRealmsSettings; import org.elasticsearch.xpack.core.security.authc.Realm; @@ -25,6 +27,7 @@ import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.support.NoOpLogger; import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.authc.BytesKey; import org.elasticsearch.xpack.security.authc.support.MockLookupRealm; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.junit.Before; @@ -40,6 +43,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.regex.Pattern; @@ -47,6 +51,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; @@ -72,14 +77,18 @@ public void setup() throws Exception { when(licenseState.isAuthorizationRealmAllowed()).thenReturn(true); } - public void testTokenSupport() { + public void testTokenSupport() throws Exception { RealmConfig config = new RealmConfig(new RealmConfig.RealmIdentifier("pki", "my_pki"), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); PkiRealm realm = new PkiRealm(config, mock(UserRoleMapper.class)); + assertRealmUsageStats(realm, false, false, true, false); assertThat(realm.supports(null), is(false)); assertThat(realm.supports(new UsernamePasswordToken("", new SecureString(new char[0]))), is(false)); - assertThat(realm.supports(new X509AuthenticationToken(new X509Certificate[0])), is(true)); + X509AuthenticationToken token = randomBoolean() + ? 
X509AuthenticationToken.delegated(new X509Certificate[0], mock(Authentication.class)) + : new X509AuthenticationToken(new X509Certificate[0]); + assertThat(realm.supports(token), is(true)); } public void testExtractToken() throws Exception { @@ -92,6 +101,7 @@ public void testExtractToken() throws Exception { X509AuthenticationToken token = realm.token(threadContext); assertThat(token, is(notNullValue())); assertThat(token.dn(), is("CN=Elasticsearch Test Node, OU=elasticsearch, O=org")); + assertThat(token.isDelegated(), is(false)); } public void testAuthenticateBasedOnCertToken() throws Exception { @@ -114,7 +124,6 @@ private void assertSuccessfulAuthentication(Set roles) throws Exception final String expectedUsername = PkiRealm.getPrincipalFromSubjectDN(Pattern.compile(PkiRealmSettings.DEFAULT_USERNAME_PATTERN), token, NoOpLogger.INSTANCE); final AuthenticationResult result = authenticate(token, realm); - final PlainActionFuture future; assertThat(result.getStatus(), is(AuthenticationResult.Status.SUCCESS)); User user = result.getUser(); assertThat(user, is(notNullValue())); @@ -199,6 +208,7 @@ public void testCustomUsernamePatternMatches() throws Exception { X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); UserRoleMapper roleMapper = buildRoleMapper(); PkiRealm realm = buildRealm(roleMapper, settings); + assertRealmUsageStats(realm, false, false, false, false); threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[] { certificate }); X509AuthenticationToken token = realm.token(threadContext); @@ -218,6 +228,7 @@ public void testCustomUsernamePatternMismatchesAndNullToken() throws Exception { X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); UserRoleMapper roleMapper = buildRoleMapper(); PkiRealm realm = buildRealm(roleMapper, settings); + assertRealmUsageStats(realm, false, false, false, false); threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[] { certificate }); X509AuthenticationToken token = realm.token(threadContext); @@ -238,6 +249,7 @@ public void testVerificationUsingATruststore() throws Exception { .build(); ThreadContext threadContext = new ThreadContext(globalSettings); PkiRealm realm = buildRealm(roleMapper, settings); + assertRealmUsageStats(realm, true, false, true, false); threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[] { certificate }); @@ -249,6 +261,97 @@ public void testVerificationUsingATruststore() throws Exception { assertThat(user.roles().length, is(0)); } + public void testAuthenticationDelegationFailsWithoutTokenServiceAndTruststore() throws Exception { + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + Settings settings = Settings.builder() + .put(globalSettings) + .put("xpack.security.authc.realms.pki.my_pki.delegation.enabled", true) + .build(); + IllegalStateException e = expectThrows(IllegalStateException.class, + () -> new PkiRealm(new RealmConfig(new RealmConfig.RealmIdentifier("pki", "my_pki"), settings, + TestEnvironment.newEnvironment(globalSettings), threadContext), mock(UserRoleMapper.class))); + assertThat(e.getMessage(), + is("PKI realms with delegation enabled require a trust configuration " + + "(xpack.security.authc.realms.pki.my_pki.certificate_authorities or " + + "xpack.security.authc.realms.pki.my_pki.truststore.path)" + + " and that the token service be also enabled 
(xpack.security.authc.token.enabled)")); + } + + public void testAuthenticationDelegationFailsWithoutTruststore() throws Exception { + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + Settings settings = Settings.builder() + .put(globalSettings) + .put("xpack.security.authc.realms.pki.my_pki.delegation.enabled", true) + .put("xpack.security.authc.token.enabled", true) + .build(); + IllegalStateException e = expectThrows(IllegalStateException.class, + () -> new PkiRealm(new RealmConfig(new RealmConfig.RealmIdentifier("pki", "my_pki"), settings, + TestEnvironment.newEnvironment(globalSettings), threadContext), mock(UserRoleMapper.class))); + assertThat(e.getMessage(), + is("PKI realms with delegation enabled require a trust configuration " + + "(xpack.security.authc.realms.pki.my_pki.certificate_authorities " + + "or xpack.security.authc.realms.pki.my_pki.truststore.path)")); + } + + public void testAuthenticationDelegationSuccess() throws Exception { + X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); + Authentication mockAuthentication = mock(Authentication.class); + User mockUser = mock(User.class); + when(mockUser.principal()).thenReturn("mockup_delegate_username"); + RealmRef mockRealmRef = mock(RealmRef.class); + when(mockRealmRef.getName()).thenReturn("mockup_delegate_realm"); + when(mockAuthentication.getUser()).thenReturn(mockUser); + when(mockAuthentication.getAuthenticatedBy()).thenReturn(mockRealmRef); + X509AuthenticationToken delegatedToken = X509AuthenticationToken.delegated(new X509Certificate[] { certificate }, + mockAuthentication); + + UserRoleMapper roleMapper = buildRoleMapper(); + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("xpack.security.authc.realms.pki.my_pki.truststore.secure_password", "testnode"); + Settings settings = Settings.builder() + .put(globalSettings) + .put("xpack.security.authc.realms.pki.my_pki.truststore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks")) + .put("xpack.security.authc.realms.pki.my_pki.delegation.enabled", true) + .put("xpack.security.authc.token.enabled", true) + .setSecureSettings(secureSettings) + .build(); + PkiRealm realmWithDelegation = buildRealm(roleMapper, settings); + assertRealmUsageStats(realmWithDelegation, true, false, true, true); + + AuthenticationResult result = authenticate(delegatedToken, realmWithDelegation); + assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.SUCCESS)); + assertThat(result.getUser(), is(notNullValue())); + assertThat(result.getUser().principal(), is("Elasticsearch Test Node")); + assertThat(result.getUser().roles(), is(notNullValue())); + assertThat(result.getUser().roles().length, is(0)); + assertThat(result.getUser().metadata().get("pki_delegated_by_user"), is("mockup_delegate_username")); + assertThat(result.getUser().metadata().get("pki_delegated_by_realm"), is("mockup_delegate_realm")); + } + + public void testAuthenticationDelegationFailure() throws Exception { + X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); + X509AuthenticationToken delegatedToken = X509AuthenticationToken.delegated(new X509Certificate[] { certificate }, + mock(Authentication.class)); + + UserRoleMapper roleMapper = buildRoleMapper(); + MockSecureSettings secureSettings = new MockSecureSettings(); + 
secureSettings.setString("xpack.security.authc.realms.pki.my_pki.truststore.secure_password", "testnode"); + Settings settings = Settings.builder() + .put(globalSettings) + .put("xpack.security.authc.realms.pki.my_pki.truststore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks")) + .setSecureSettings(secureSettings) + .build(); + PkiRealm realmNoDelegation = buildRealm(roleMapper, settings); + assertRealmUsageStats(realmNoDelegation, true, false, true, false); + + AuthenticationResult result = authenticate(delegatedToken, realmNoDelegation); + assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.CONTINUE)); + assertThat(result.getUser(), is(nullValue())); + assertThat(result.getMessage(), containsString("Realm does not permit delegation for")); + } + public void testVerificationFailsUsingADifferentTruststore() throws Exception { X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); UserRoleMapper roleMapper = buildRoleMapper(); @@ -262,6 +365,7 @@ public void testVerificationFailsUsingADifferentTruststore() throws Exception { .build(); ThreadContext threadContext = new ThreadContext(settings); PkiRealm realm = buildRealm(roleMapper, settings); + assertRealmUsageStats(realm, true, false, true, false); threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[] { certificate }); @@ -374,6 +478,7 @@ public void testDelegatedAuthorization() throws Exception { .build(); final UserRoleMapper roleMapper = buildRoleMapper(Collections.emptySet(), token.dn()); final PkiRealm pkiRealm = buildRealm(roleMapper, realmSettings, otherRealm); + assertRealmUsageStats(pkiRealm, false, true, true, false); AuthenticationResult result = authenticate(token, pkiRealm); assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.SUCCESS)); @@ -388,6 +493,50 @@ public void testDelegatedAuthorization() throws Exception { assertThat(result.getUser(), sameInstance(lookupUser2)); } + public void testX509AuthenticationTokenOrdered() throws Exception { + X509Certificate[] mockCertChain = new X509Certificate[2]; + mockCertChain[0] = mock(X509Certificate.class); + when(mockCertChain[0].getIssuerX500Principal()).thenReturn(new X500Principal("CN=Test, OU=elasticsearch, O=org")); + mockCertChain[1] = mock(X509Certificate.class); + when(mockCertChain[1].getSubjectX500Principal()).thenReturn(new X500Principal("CN=Not Test, OU=elasticsearch, O=org")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new X509AuthenticationToken(mockCertChain)); + assertThat(e.getMessage(), is("certificates chain array is not ordered")); + } + + private void assertRealmUsageStats(Realm realm, Boolean hasTruststore, Boolean hasAuthorizationRealms, + Boolean hasDefaultUsernamePattern, Boolean isAuthenticationDelegated) throws Exception { + final PlainActionFuture<Map<String, Object>> future = new PlainActionFuture<>(); + realm.usageStats(future); + Map<String, Object> usage = future.get(); + assertThat(usage.get("has_truststore"), is(hasTruststore)); + assertThat(usage.get("has_authorization_realms"), is(hasAuthorizationRealms)); + assertThat(usage.get("has_default_username_pattern"), is(hasDefaultUsernamePattern)); + assertThat(usage.get("is_authentication_delegated"), is(isAuthenticationDelegated)); + } + + public void testX509AuthenticationTokenCaching() throws Exception { + X509Certificate[] mockCertChain = new X509Certificate[2]; + mockCertChain[0] = mock(X509Certificate.class); +
when(mockCertChain[0].getSubjectX500Principal()).thenReturn(new X500Principal("CN=Test, OU=elasticsearch, O=org")); + when(mockCertChain[0].getIssuerX500Principal()).thenReturn(new X500Principal("CN=Test CA, OU=elasticsearch, O=org")); + when(mockCertChain[0].getEncoded()).thenReturn(randomByteArrayOfLength(2)); + mockCertChain[1] = mock(X509Certificate.class); + when(mockCertChain[1].getSubjectX500Principal()).thenReturn(new X500Principal("CN=Test CA, OU=elasticsearch, O=org")); + when(mockCertChain[1].getEncoded()).thenReturn(randomByteArrayOfLength(3)); + BytesKey cacheKey = PkiRealm.computeTokenFingerprint(new X509AuthenticationToken(mockCertChain)); + + BytesKey sameCacheKey = PkiRealm + .computeTokenFingerprint(new X509AuthenticationToken(new X509Certificate[] { mockCertChain[0], mockCertChain[1] })); + assertThat(cacheKey, is(sameCacheKey)); + + BytesKey cacheKeyClient = PkiRealm.computeTokenFingerprint(new X509AuthenticationToken(new X509Certificate[] { mockCertChain[0] })); + assertThat(cacheKey, is(not(cacheKeyClient))); + + BytesKey cacheKeyRoot = PkiRealm.computeTokenFingerprint(new X509AuthenticationToken(new X509Certificate[] { mockCertChain[1] })); + assertThat(cacheKey, is(not(cacheKeyRoot))); + assertThat(cacheKeyClient, is(not(cacheKeyRoot))); + } + static X509Certificate readCert(Path path) throws Exception { try (InputStream in = Files.newInputStream(path)) { CertificateFactory factory = CertificateFactory.getInstance("X.509"); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index d7f3252dd95a5..600cc6531fa4e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -119,7 +119,6 @@ import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; -import org.elasticsearch.xpack.core.security.support.Automatons; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; @@ -321,9 +320,8 @@ public void testAuthorizeUsingConditionalPrivileges() throws IOException { @Override public ClusterPermission.Builder buildPermission(ClusterPermission.Builder builder) { final Predicate requestPredicate = r -> r == request; - final Predicate actionPredicate = - Automatons.predicate(((ActionClusterPrivilege) ClusterPrivilegeResolver.MANAGE_SECURITY).getAllowedActionPatterns()); - builder.add(this, actionPredicate, requestPredicate); + builder.add(this, ((ActionClusterPrivilege) ClusterPrivilegeResolver.MANAGE_SECURITY).getAllowedActionPatterns(), + requestPredicate); return builder; } }; @@ -348,9 +346,8 @@ public void testAuthorizationDeniedWhenConditionalPrivilegesDoNotMatch() throws @Override public ClusterPermission.Builder buildPermission(ClusterPermission.Builder builder) { final Predicate requestPredicate = r -> false; - final Predicate actionPredicate = - Automatons.predicate(((ActionClusterPrivilege) ClusterPrivilegeResolver.MANAGE_SECURITY).getAllowedActionPatterns()); - builder.add(this, 
actionPredicate,requestPredicate); + builder.add(this, ((ActionClusterPrivilege) ClusterPrivilegeResolver.MANAGE_SECURITY).getAllowedActionPatterns(), + requestPredicate); return builder; } }; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java index 0a431ec95f5ee..fd84afea365be 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java @@ -21,6 +21,8 @@ import org.elasticsearch.license.GetLicenseAction; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.security.action.GetApiKeyAction; +import org.elasticsearch.xpack.core.security.action.GetApiKeyRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequestBuilder; @@ -51,6 +53,7 @@ import org.elasticsearch.xpack.core.security.authz.privilege.Privilege; import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.authc.ApiKeyService; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; import org.elasticsearch.xpack.security.authz.RBACEngine.RBACAuthorizationInfo; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; @@ -63,6 +66,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.Set; import static java.util.Collections.emptyMap; @@ -232,6 +236,53 @@ public void testSameUserPermissionDoesNotAllowChangePasswordForLookedUpByOtherRe verifyNoMoreInteractions(authentication, lookedUpBy, authenticatedBy); } + public void testSameUserPermissionAllowsSelfApiKeyInfoRetrievalWhenAuthenticatedByApiKey() { + final User user = new User("joe"); + final String apiKeyId = randomAlphaOfLengthBetween(4, 7); + final TransportRequest request = GetApiKeyRequest.usingApiKeyId(apiKeyId, false); + final Authentication authentication = mock(Authentication.class); + final Authentication.RealmRef authenticatedBy = mock(Authentication.RealmRef.class); + when(authentication.getUser()).thenReturn(user); + when(authentication.getAuthenticatedBy()).thenReturn(authenticatedBy); + when(authenticatedBy.getType()).thenReturn(ApiKeyService.API_KEY_REALM_TYPE); + when(authentication.getMetadata()).thenReturn(Map.of(ApiKeyService.API_KEY_ID_KEY, apiKeyId)); + + assertTrue(engine.checkSameUserPermissions(GetApiKeyAction.NAME, request, authentication)); + } + + public void testSameUserPermissionDeniesApiKeyInfoRetrievalWhenAuthenticatedByADifferentApiKey() { + final User user = new User("joe"); + final String apiKeyId = randomAlphaOfLengthBetween(4, 7); + final TransportRequest request = GetApiKeyRequest.usingApiKeyId(apiKeyId, false); + final Authentication authentication = mock(Authentication.class); + final Authentication.RealmRef authenticatedBy = mock(Authentication.RealmRef.class); + when(authentication.getUser()).thenReturn(user); + when(authentication.getAuthenticatedBy()).thenReturn(authenticatedBy); + when(authenticatedBy.getType()).thenReturn(ApiKeyService.API_KEY_REALM_TYPE); + 
when(authentication.getMetadata()).thenReturn(Map.of(ApiKeyService.API_KEY_ID_KEY, randomAlphaOfLengthBetween(4, 7))); + + assertFalse(engine.checkSameUserPermissions(GetApiKeyAction.NAME, request, authentication)); + } + + public void testSameUserPermissionDeniesApiKeyInfoRetrievalWhenLookedupByIsPresent() { + final User user = new User("joe"); + final String apiKeyId = randomAlphaOfLengthBetween(4, 7); + final TransportRequest request = GetApiKeyRequest.usingApiKeyId(apiKeyId, false); + final Authentication authentication = mock(Authentication.class); + final Authentication.RealmRef authenticatedBy = mock(Authentication.RealmRef.class); + final Authentication.RealmRef lookedupBy = mock(Authentication.RealmRef.class); + when(authentication.getUser()).thenReturn(user); + when(authentication.getAuthenticatedBy()).thenReturn(authenticatedBy); + when(authentication.getLookedUpBy()).thenReturn(lookedupBy); + when(authenticatedBy.getType()).thenReturn(ApiKeyService.API_KEY_REALM_TYPE); + when(authentication.getMetadata()).thenReturn(Map.of(ApiKeyService.API_KEY_ID_KEY, randomAlphaOfLengthBetween(4, 7))); + + final AssertionError assertionError = expectThrows(AssertionError.class, () -> engine.checkSameUserPermissions(GetApiKeyAction.NAME, + request, authentication)); + assertNotNull(assertionError); + assertThat(assertionError.getLocalizedMessage(), is("runAs not supported for api key authentication")); + } + /** * This tests that action names in the request are considered "matched" by the relevant named privilege * (in this case that {@link DeleteAction} and {@link IndexAction} are satisfied by {@link IndexPrivilege#WRITE}). diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java index 4ab525a43da2f..58d19ed2c97e1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -51,7 +51,6 @@ import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult; import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; -import org.elasticsearch.xpack.core.security.support.Automatons; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.SystemUser; import org.elasticsearch.xpack.core.security.user.User; @@ -546,13 +545,13 @@ public void testMergingBasicRoles() { final TransportRequest request1 = mock(TransportRequest.class); final TransportRequest request2 = mock(TransportRequest.class); final TransportRequest request3 = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); ConfigurableClusterPrivilege ccp1 = new MockConfigurableClusterPrivilege() { @Override public ClusterPermission.Builder buildPermission(ClusterPermission.Builder builder) { - Predicate predicate1 = - Automatons.predicate(((ActionClusterPrivilege) ClusterPrivilegeResolver.MANAGE_SECURITY).getAllowedActionPatterns()); - builder.add(this, predicate1, req -> req == request1); + builder.add(this, ((ActionClusterPrivilege) ClusterPrivilegeResolver.MANAGE_SECURITY).getAllowedActionPatterns(), + req -> req == request1); return builder; } }; @@ -582,9 +581,8 @@ public 
ClusterPermission.Builder buildPermission(ClusterPermission.Builder build ConfigurableClusterPrivilege ccp2 = new MockConfigurableClusterPrivilege() { @Override public ClusterPermission.Builder buildPermission(ClusterPermission.Builder builder) { - Predicate predicate2 = - Automatons.predicate(((ActionClusterPrivilege) ClusterPrivilegeResolver.MANAGE_SECURITY).getAllowedActionPatterns()); - builder.add(this, predicate2, req -> req == request2); + builder.add(this, ((ActionClusterPrivilege) ClusterPrivilegeResolver.MANAGE_SECURITY).getAllowedActionPatterns(), + req -> req == request2); return builder; } }; @@ -626,12 +624,14 @@ public ClusterPermission.Builder buildPermission(ClusterPermission.Builder build CompositeRolesStore.buildRoleFromDescriptors(Sets.newHashSet(role1, role2), cache, privilegeStore, future); Role role = future.actionGet(); - assertThat(role.cluster().check(ClusterStateAction.NAME, randomFrom(request1, request2, request3)), equalTo(true)); - assertThat(role.cluster().check(SamlAuthenticateAction.NAME, randomFrom(request1, request2, request3)), equalTo(true)); - assertThat(role.cluster().check(ClusterUpdateSettingsAction.NAME, randomFrom(request1, request2, request3)), equalTo(false)); + assertThat(role.cluster().check(ClusterStateAction.NAME, randomFrom(request1, request2, request3), authentication), equalTo(true)); + assertThat(role.cluster().check(SamlAuthenticateAction.NAME, randomFrom(request1, request2, request3), authentication), + equalTo(true)); + assertThat(role.cluster().check(ClusterUpdateSettingsAction.NAME, randomFrom(request1, request2, request3), authentication), + equalTo(false)); - assertThat(role.cluster().check(PutUserAction.NAME, randomFrom(request1, request2)), equalTo(true)); - assertThat(role.cluster().check(PutUserAction.NAME, request3), equalTo(false)); + assertThat(role.cluster().check(PutUserAction.NAME, randomFrom(request1, request2), authentication), equalTo(true)); + assertThat(role.cluster().check(PutUserAction.NAME, request3, authentication), equalTo(false)); final Predicate allowedRead = role.indices().allowedIndicesMatcher(GetAction.NAME); assertThat(allowedRead.test("abc-123"), equalTo(true)); @@ -1076,7 +1076,7 @@ public void testApiKeyAuthUsesApiKeyServiceWithScopedRole() throws IOException { PlainActionFuture roleFuture = new PlainActionFuture<>(); compositeRolesStore.getRoles(authentication.getUser(), authentication, roleFuture); Role role = roleFuture.actionGet(); - assertThat(role.checkClusterAction("cluster:admin/foo", Empty.INSTANCE), is(false)); + assertThat(role.checkClusterAction("cluster:admin/foo", Empty.INSTANCE, mock(Authentication.class)), is(false)); assertThat(effectiveRoleDescriptors.get(), is(nullValue())); verify(apiKeyService).getRoleForApiKey(eq(authentication), any(ActionListener.class)); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java index 6555dbd882377..3a2c30891008e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.audit.logfile.CapturingLogger; +import 
org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; import org.elasticsearch.xpack.core.security.authz.permission.IndicesPermission; @@ -351,14 +352,15 @@ public void testAutoReload() throws Exception { assertEquals(1, modifiedRoles.size()); assertTrue(modifiedRoles.contains("role5")); final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); descriptors = store.roleDescriptors(Collections.singleton("role5")); assertThat(descriptors, notNullValue()); assertEquals(1, descriptors.size()); Role role = Role.builder(descriptors.iterator().next(), null).build(); assertThat(role, notNullValue()); assertThat(role.names(), equalTo(new String[] { "role5" })); - assertThat(role.cluster().check("cluster:monitor/foo/bar", request), is(true)); - assertThat(role.cluster().check("cluster:admin/foo/bar", request), is(false)); + assertThat(role.cluster().check("cluster:monitor/foo/bar", request, authentication), is(true)); + assertThat(role.cluster().check("cluster:admin/foo/bar", request, authentication), is(false)); // truncate to remove some final Set truncatedFileRolesModified = new HashSet<>(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java index c706a251dda35..d1046a175670c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java @@ -8,11 +8,11 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchSecurityException; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.Settings; @@ -36,6 +36,7 @@ import java.time.temporal.ChronoUnit; import java.util.Arrays; import java.util.Collections; +import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.arrayContaining; @@ -133,6 +134,76 @@ void doExecute(ActionType action, Request request, ActionListener param; + if (isGetRequestForOwnedKeysOnly) { + param = mapBuilder().put("owner", Boolean.TRUE.toString()).map(); + } else { + param = mapBuilder().put("owner", Boolean.FALSE.toString()).put("realm_name", "realm-1").map(); + } + + final FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY) + .withParams(param).build(); + + final SetOnce responseSetOnce = new SetOnce<>(); + final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + @Override + public void sendResponse(RestResponse restResponse) { + responseSetOnce.set(restResponse); + } + }; + + final Instant creation = Instant.now(); + final Instant expiration = randomFrom(Arrays.asList(null, Instant.now().plus(10, ChronoUnit.DAYS))); + final ApiKey apiKey1 = new ApiKey("api-key-name-1", 
"api-key-id-1", creation, expiration, false, + "user-x", "realm-1"); + final ApiKey apiKey2 = new ApiKey("api-key-name-2", "api-key-id-2", creation, expiration, false, + "user-y", "realm-1"); + final GetApiKeyResponse getApiKeyResponseExpectedWhenOwnerFlagIsTrue = new GetApiKeyResponse(Collections.singletonList(apiKey1)); + final GetApiKeyResponse getApiKeyResponseExpectedWhenOwnerFlagIsFalse = new GetApiKeyResponse(List.of(apiKey1, apiKey2)); + + try (NodeClient client = new NodeClient(Settings.EMPTY, threadPool) { + @SuppressWarnings("unchecked") + @Override + public + void doExecute(ActionType action, Request request, ActionListener listener) { + GetApiKeyRequest getApiKeyRequest = (GetApiKeyRequest) request; + ActionRequestValidationException validationException = getApiKeyRequest.validate(); + if (validationException != null) { + listener.onFailure(validationException); + return; + } + + if (getApiKeyRequest.ownedByAuthenticatedUser()) { + listener.onResponse((Response) getApiKeyResponseExpectedWhenOwnerFlagIsTrue); + } else if (getApiKeyRequest.getRealmName() != null && getApiKeyRequest.getRealmName().equals("realm-1")) { + listener.onResponse((Response) getApiKeyResponseExpectedWhenOwnerFlagIsFalse); + } + } + }) { + final RestGetApiKeyAction restGetApiKeyAction = new RestGetApiKeyAction(Settings.EMPTY, mockRestController, mockLicenseState); + + restGetApiKeyAction.handleRequest(restRequest, restChannel, client); + + final RestResponse restResponse = responseSetOnce.get(); + assertNotNull(restResponse); + assertThat(restResponse.status(), is(RestStatus.OK)); + final GetApiKeyResponse actual = GetApiKeyResponse + .fromXContent(createParser(XContentType.JSON.xContent(), restResponse.content())); + if (isGetRequestForOwnedKeysOnly) { + assertThat(actual.getApiKeyInfos().length, is(1)); + assertThat(actual.getApiKeyInfos(), + arrayContaining(apiKey1)); + } else { + assertThat(actual.getApiKeyInfos().length, is(2)); + assertThat(actual.getApiKeyInfos(), + arrayContaining(apiKey1, apiKey2)); + } + } + + } + private static MapBuilder mapBuilder() { return MapBuilder.newMapBuilder(); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java index 21e65c485fb2b..51f700ba44000 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java @@ -8,11 +8,11 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchSecurityException; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; @@ -24,6 +24,7 @@ import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import 
org.elasticsearch.threadpool.ThreadPool; @@ -31,8 +32,11 @@ import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyResponse; import java.util.Collections; +import java.util.List; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -119,4 +123,71 @@ void doExecute(ActionType action, Request request, ActionListener responseSetOnce = new SetOnce<>(); + final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + @Override + public void sendResponse(RestResponse restResponse) { + responseSetOnce.set(restResponse); + } + }; + + final InvalidateApiKeyResponse invalidateApiKeyResponseExpectedWhenOwnerFlagIsTrue = new InvalidateApiKeyResponse( + List.of("api-key-id-1"), Collections.emptyList(), null); + final InvalidateApiKeyResponse invalidateApiKeyResponseExpectedWhenOwnerFlagIsFalse = new InvalidateApiKeyResponse( + List.of("api-key-id-1", "api-key-id-2"), Collections.emptyList(), null); + + try (NodeClient client = new NodeClient(Settings.EMPTY, threadPool) { + @SuppressWarnings("unchecked") + @Override + public + void doExecute(ActionType action, Request request, ActionListener listener) { + InvalidateApiKeyRequest invalidateApiKeyRequest = (InvalidateApiKeyRequest) request; + ActionRequestValidationException validationException = invalidateApiKeyRequest.validate(); + if (validationException != null) { + listener.onFailure(validationException); + return; + } + + if (invalidateApiKeyRequest.ownedByAuthenticatedUser()) { + listener.onResponse((Response) invalidateApiKeyResponseExpectedWhenOwnerFlagIsTrue); + } else if (invalidateApiKeyRequest.getRealmName() != null && invalidateApiKeyRequest.getRealmName().equals("realm-1")) { + listener.onResponse((Response) invalidateApiKeyResponseExpectedWhenOwnerFlagIsFalse); + } + } + }) { + final RestInvalidateApiKeyAction restInvalidateApiKeyAction = new RestInvalidateApiKeyAction(Settings.EMPTY, mockRestController, + mockLicenseState); + + restInvalidateApiKeyAction.handleRequest(restRequest, restChannel, client); + + final RestResponse restResponse = responseSetOnce.get(); + assertNotNull(restResponse); + assertThat(restResponse.status(), is(RestStatus.OK)); + final InvalidateApiKeyResponse actual = InvalidateApiKeyResponse + .fromXContent(createParser(XContentType.JSON.xContent(), restResponse.content())); + if (isInvalidateRequestForOwnedKeysOnly) { + assertThat(actual.getInvalidatedApiKeys().size(), is(1)); + assertThat(actual.getInvalidatedApiKeys(), + containsInAnyOrder("api-key-id-1")); + } else { + assertThat(actual.getInvalidatedApiKeys().size(), is(2)); + assertThat(actual.getInvalidatedApiKeys(), + containsInAnyOrder("api-key-id-1", "api-key-id-2")); + } + } + + } } diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/README.asciidoc b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/README.asciidoc new file mode 100644 index 0000000000000..3230bdde7e2ce --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/README.asciidoc @@ -0,0 +1,35 @@ += Certificate Chain details +This document details the steps used to create the certificate chain in this directory. +The chain has a length of 3: the Root CA, the Intermediate CA and the Client Certificate. 
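As an optional sanity check, the resulting chain can also be validated with the JDK's standard PKIX APIs rather than openssl. The sketch below is illustrative only: it assumes it runs from this resource directory with the file names used here, and it pins the validation date inside the certificates' 2019-2023 validity window so the check still works after they expire.

[source,java]
------------------------------------------------------------------
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.cert.CertPath;
import java.security.cert.CertPathValidator;
import java.security.cert.CertificateFactory;
import java.security.cert.PKIXParameters;
import java.security.cert.TrustAnchor;
import java.security.cert.X509Certificate;
import java.time.Instant;
import java.util.Date;
import java.util.List;
import java.util.Set;

public class VerifyTestChain {

    public static void main(String[] args) throws Exception {
        CertificateFactory cf = CertificateFactory.getInstance("X.509");
        X509Certificate root = read(cf, "testRootCA.crt");
        X509Certificate intermediate = read(cf, "testIntermediateCA.crt");
        X509Certificate client = read(cf, "testClient.crt");

        // The path is ordered end-entity first and excludes the trust anchor.
        CertPath path = cf.generateCertPath(List.of(client, intermediate));
        PKIXParameters params = new PKIXParameters(Set.of(new TrustAnchor(root, null)));
        params.setRevocationEnabled(false);                               // no CRLs/OCSP for these test certs
        params.setDate(Date.from(Instant.parse("2020-01-01T00:00:00Z"))); // inside the 2019-2023 validity window

        CertPathValidator.getInstance("PKIX").validate(path, params);
        System.out.println("chain validates up to " + root.getSubjectX500Principal());
    }

    private static X509Certificate read(CertificateFactory cf, String name) throws Exception {
        try (InputStream in = Files.newInputStream(Path.of(name))) {
            return (X509Certificate) cf.generateCertificate(in);
        }
    }
}
------------------------------------------------------------------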
+All openssl commands use the same configuration file, albeit different sections of it. +The OpenSSL Configuration file is located in this directory as `openssl_config.cnf`. + +== Instructions on generating self-signed Root CA +The self-signed Root CA, 'testRootCA.crt', and its associated private key in this directory +have been generated using the following openssl commands. + +[source,shell] +----------------------------------------------------------------------------------------------------------- +openssl genrsa -out testRootCA.key 2048 +openssl req -x509 -new -key testRootCA.key -days 1460 -subj "/CN=Elasticsearch Test Root CA/OU=elasticsearch/O=org" -out testRootCA.crt -config ./openssl_config.cnf +----------------------------------------------------------------------------------------------------------- + +== Instructions on generating the Intermediate CA +The `testIntermediateCA.crt` CA certificate is "issued" by the `testRootCA.crt`. + +[source,shell] +----------------------------------------------------------------------------------------------------------- +openssl genrsa -out testIntermediateCA.key 2048 +openssl req -new -key testIntermediateCA.key -subj "/CN=Elasticsearch Test Intermediate CA/OU=Elasticsearch/O=org" -out testIntermediateCA.csr -config ./openssl_config.cnf +openssl x509 -req -in testIntermediateCA.csr -CA testRootCA.crt -CAkey testRootCA.key -CAcreateserial -out testIntermediateCA.crt -days 1460 -sha256 -extensions v3_ca -extfile ./openssl_config.cnf +----------------------------------------------------------------------------------------------------------- + +== Instructions on generating the Client Certificate +The `testClient.crt` end entity certificate is "issued" by the `testIntermediateCA.crt`. + +[source,shell] +----------------------------------------------------------------------------------------------------------- +openssl genrsa -out testClient.key 2048 +openssl req -new -key testClient.key -subj "/CN=Elasticsearch Test Client/OU=Elasticsearch/O=org" -out testClient.csr -config ./openssl_config.cnf +openssl x509 -req -in testClient.csr -CA testIntermediateCA.crt -CAkey testIntermediateCA.key -CAcreateserial -out testClient.crt -days 1460 -sha256 -extensions usr_cert -extfile ./openssl_config.cnf +----------------------------------------------------------------------------------------------------------- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/bogus.crt b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/bogus.crt new file mode 100644 index 0000000000000..4b1bc66be0f74 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/bogus.crt @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j +ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 +LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug +RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm ++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW +PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM +xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB 
+Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 +hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg +EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA +FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec +nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z +eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF +hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 +Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep ++OkuE6N36B9K +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/openssl_config.cnf b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/openssl_config.cnf new file mode 100644 index 0000000000000..64ff556f35219 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/openssl_config.cnf @@ -0,0 +1,185 @@ +#################################################################### +# CA Definition +[ ca ] +default_ca = CA_default # The default ca section + +#################################################################### +# Per the above, this is where we define CA values +[ CA_default ] + +# By default we use "user certificate" extensions when signing +x509_extensions = usr_cert # The extentions to add to the cert + +# Honor extensions requested of us +copy_extensions = copy + +# Comment out the following two lines for the "traditional" +# (and highly broken) format. +name_opt = ca_default # Subject Name options +cert_opt = ca_default # Certificate field options + +# Extensions to add to a CRL. Note: Netscape communicator chokes on V2 CRLs +# so this is commented out by default to leave a V1 CRL. +# crlnumber must also be commented out to leave a V1 CRL. +#crl_extensions = crl_ext +default_days = 1460 # how long to certify for +default_md = sha256 # which md to use. +preserve = no # keep passed DN ordering + +# A few difference way of specifying how similar the request should look +# For type CA, the listed attributes must be the same, and the optional +# and supplied fields are just that :-) +policy = policy_anything + +#################################################################### +# The default policy for the CA when signing requests, requires some +# resemblence to the CA cert +# +[ policy_match ] +countryName = match # Must be the same as the CA +stateOrProvinceName = match # Must be the same as the CA +organizationName = match # Must be the same as the CA +organizationalUnitName = optional # not required +commonName = supplied # must be there, whatever it is +emailAddress = optional # not required + +#################################################################### +# An alternative policy not referred to anywhere in this file. Can +# be used by specifying '-policy policy_anything' to ca(8). 
+# +[ policy_anything ] +countryName = optional +stateOrProvinceName = optional +localityName = optional +organizationName = optional +organizationalUnitName = optional +commonName = supplied +emailAddress = optional + +#################################################################### +# This is where we define how to generate CSRs +[ req ] +default_bits = 2048 +default_keyfile = privkey.pem +distinguished_name = req_distinguished_name # where to get DN for reqs +attributes = req_attributes # req attributes +x509_extensions = v3_ca # The extentions to add to self signed certs +req_extensions = v3_req # The extensions to add to req's + +# This sets a mask for permitted string types. There are several options. +# default: PrintableString, T61String, BMPString. +# pkix : PrintableString, BMPString. +# utf8only: only UTF8Strings. +# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings). +# MASK:XXXX a literal mask value. +# WARNING: current versions of Netscape crash on BMPStrings or UTF8Strings +# so use this option with caution! +string_mask = nombstr + + +#################################################################### +# Per "req" section, this is where we define DN info +[ req_distinguished_name ] + +0.organizationName = Organization Name (company) +0.organizationName_default = org + +organizationalUnitName = Organizational Unit Name (eg, section) +organizationalUnitName_default = elasticsearch + +commonName = Common Name (hostname, IP, or your name) +commonName_default = Elasticsearch Test Certificate +commonName_max = 64 + +#################################################################### +# We don't want these, but the section must exist +[ req_attributes ] +#challengePassword = A challenge password +#challengePassword_min = 4 +#challengePassword_max = 20 +#unstructuredName = An optional company name + + +#################################################################### +# Extensions for when we sign normal certs (specified as default) +[ usr_cert ] + +# User certs aren't CAs, by definition +basicConstraints=CA:false + +# Here are some examples of the usage of nsCertType. If it is omitted +# the certificate can be used for anything *except* object signing. +# This is OK for an SSL server. +#nsCertType = server +# For an object signing certificate this would be used. +#nsCertType = objsign +# For normal client use this is typical +#nsCertType = client, email +# and for everything including object signing: +#nsCertType = client, email, objsign +# This is typical in keyUsage for a client certificate. +#keyUsage = nonRepudiation, digitalSignature, keyEncipherment + +# PKIX recommendations harmless if included in all certificates. +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid,issuer + +# This stuff is for subjectAltName and issuerAltname. +# Import the email address. +#subjectAltName=email:copy +# An alternative to produce certificates that aren't +# deprecated according to PKIX. +#subjectAltName=email:move + + +#################################################################### +# Extension for requests +[ v3_req ] +basicConstraints = CA:FALSE + +# PKIX recommendation. +subjectKeyIdentifier = hash + +subjectAltName = @alt_names + +#################################################################### +# An alternative section of extensions, not referred to anywhere +# else in the config. We'll use this via '-extensions v3_ca' when +# using ca(8) to sign another CA. +# +[ v3_ca ] + +# PKIX recommendation. 
+subjectKeyIdentifier=hash +authorityKeyIdentifier = keyid,issuer + +# This is what PKIX recommends but some broken software chokes on critical +# extensions. +#basicConstraints = critical,CA:true +# So we do this instead. +basicConstraints = CA:true + +# Key usage: this is typical for a CA certificate. However since it will +# prevent it being used as an test self-signed certificate it is best +# left out by default. +# keyUsage = cRLSign, keyCertSign + +# Some might want this also +# nsCertType = sslCA, emailCA + +# Include email address in subject alt name: another PKIX recommendation +#subjectAltName=email:move +# Copy issuer details +#issuerAltName=issuer:copy + +subjectAltName = @alt_names + +[ alt_names ] +DNS.1 = localhost +DNS.2 = localhost.localdomain +DNS.3 = localhost4 +DNS.4 = localhost4.localdomain4 +DNS.5 = localhost6 +DNS.6 = localhost6.localdomain6 +IP.1 = 127.0.0.1 +IP.2 = ::1 diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testClient.crt b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testClient.crt new file mode 100644 index 0000000000000..45efce91ef33a --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testClient.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIJAIxTS7Qdho9jMA0GCSqGSIb3DQEBCwUAMFMxKzApBgNV +BAMTIkVsYXN0aWNzZWFyY2ggVGVzdCBJbnRlcm1lZGlhdGUgQ0ExFjAUBgNVBAsT +DUVsYXN0aWNzZWFyY2gxDDAKBgNVBAoTA29yZzAeFw0xOTA3MTkxMzMzNDFaFw0y +MzA3MTgxMzMzNDFaMEoxIjAgBgNVBAMTGUVsYXN0aWNzZWFyY2ggVGVzdCBDbGll +bnQxFjAUBgNVBAsTDUVsYXN0aWNzZWFyY2gxDDAKBgNVBAoTA29yZzCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBANHgMX2aX8t0nj4sGLNuKISmmXIYCj9R +wRqS7L03l9Nng7kOKnhHu/nXDt7zMRJyHj+q6FAt5khlavYSVCQyrDybRuA5z31g +OdqXerrjs2OXS5HSHNvoDAnHFsaYX/5geMewVTtc/vqpd7Ph/QtaKfmG2FK0JNQo +0k24tcgCIcyMtBh6BA70yGBM0OT8GdOgd/d/mA7mRhaxIUMNYQzRYRsp4hMnnWoO +TkR5Q8KSO3MKw9dPSpPe8EnwtJE10S3s5aXmgytru/xQqrFycPBNj4KbKVmqMP0G +60CzXik5pr2LNvOFz3Qb6sYJtqeZF+JKgGWdaTC89m63+TEnUHqk0lcCAwEAAaNN +MEswCQYDVR0TBAIwADAdBgNVHQ4EFgQU/+aAD6Q4mFq1vpHorC25/OY5zjcwHwYD +VR0jBBgwFoAU8siFCiMiYZZm/95qFC75AG/LRE0wDQYJKoZIhvcNAQELBQADggEB +AIRpCgDLpvXcgDHUk10uhxev21mlIbU+VP46ANnCuj0UELhTrdTuWvO1PAI4z+Wb +DUxryQfOOXO9R6D0dE5yR56L/J7d+KayW34zU7yRDZM7+rXpocdQ1Ex8mjP9HJ/B +f56YZTBQJpXeDrKow4FvtkI3bcIMkqmbG16LHQXeG3RS4ds4S4wCnE2nA6vIn9y+ +4R999q6y1VSBORrYULcDWxS54plHLEdiMr1vVallg82AGobS9GMcTL2U4Nx5IYZG +7sbTk3LrDxVpVg/S2wLofEdOEwqCeHug/iOihNLJBabEW6z4TDLJAVW5KCY1Dfhk +YlBfHn7vxKkfKoCUK/yLWWI= +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testClient.key b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testClient.key new file mode 100644 index 0000000000000..186e6f86745f1 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testClient.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA0eAxfZpfy3SePiwYs24ohKaZchgKP1HBGpLsvTeX02eDuQ4q +eEe7+dcO3vMxEnIeP6roUC3mSGVq9hJUJDKsPJtG4DnPfWA52pd6uuOzY5dLkdIc +2+gMCccWxphf/mB4x7BVO1z++ql3s+H9C1op+YbYUrQk1CjSTbi1yAIhzIy0GHoE +DvTIYEzQ5PwZ06B393+YDuZGFrEhQw1hDNFhGyniEyedag5ORHlDwpI7cwrD109K +k97wSfC0kTXRLezlpeaDK2u7/FCqsXJw8E2PgpspWaow/QbrQLNeKTmmvYs284XP +dBvqxgm2p5kX4kqAZZ1pMLz2brf5MSdQeqTSVwIDAQABAoIBAQDAjP767Ioc4LZZ 
+9h0HafaUlUDMs4+bPkd7OPcoNnv+AceRHZULW0zz0EIdfGM2OCrWYNfYz/Op0hpK +/s/hkfgBdriU+ZUKwyDxEu8Pzd6EbYdwlqPRgdihk92qgJv5hsro8jeQSibJFHf1 +Ok3tf2BpRTTs08fCOl2P3vowMPyPa5Ho9bf4lzP8IsR2BZvoaev3za9ZWR6ZDzE6 +EWkBBNgIU4aPn1IJ6dz2+rVtN6+xXET0eYSBEac3xMQaPWLEX0EDBYPW1d+mUva/ +3lJvTrs3g8oyiTyVu0l9Yxdgox1mtgmrqqwxJ6XuouzImuXMMDXaz0K/E/+u2yPF +V6kRvWuJAoGBAPOnEgBC3ezl+x+47cgbwpy97uZhZmV9HkMrSH9DKDwC+t57TdGX +ypt2S/IS/vbPupFv0aHaWmJ6SN/HyTN4znwuulV3kE8mEpQzIPbluWfgQzT6ukJe ++YFI/+IXwIRBLA7khtfo01LGHSmLTENsnd/aoRySY3K6zJz36Ys3vFdjAoGBANyC +7rF5YjPdgsAgOT7EboNGkc8UuW/Sh3xRp0c4Y+PBenf60yA5XkRJLYR4sZDjWTr0 +aKBY7Y8r+59U+bBrwUuhhoW08JZ/SBWja05+4DhH0ToA3vtbPv9lRyQfkF1DdBkn +XpyM2vaJE5M454acwnKJ81AyoueYtZ8pD3Q7c219AoGAJ+F1wdMwDgGKvCOB0Boz +HYK9IrpYj04OcQIZqLLuV/xI4befAiptQEr5nVLcprtTl1CNKIfb+Xh4iyBhX2pr +qcngN/MNDNd3fQhtYdwyH72GYpqTeB+hiTbQo0ot+bfNJVbkd1ylkkvZJB6nyfVy +VdysOEgBvRq0OREfCemCi28CgYEAoF1EE6NQDKICTZDhsMkQCb5PmcbbmPwFdh63 +xW64DlGNrCWoVt4BtS12wck4cUM1iE9oq3wgv6df5Z7ZuziSKVt9xk0xTnGgTcQ7 +7KkOjT+FZGZvw2K3bOsNkrK1vW2pyAU+pCE3uGU17DJNBjOIod27Kk649C61ntsw +lvoJVs0CgYBLr9pzBRPyD5/lM9hm2EI7ITa+fVcu3V3bJfXENHKzpb0lB2fhl0PI +swpiU8RUEKWyjBuHsdQdxg7AgFi/7s+SX7KLo4cudDRd73iiXYdNGB7R0/MAG8Jl +/lMXn14noS4trA8fNGGg/2fANTBtLTbOX9i4s7clAo8ETywQ33owug== +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testIntermediateCA.crt b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testIntermediateCA.crt new file mode 100644 index 0000000000000..7d8781b888901 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testIntermediateCA.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEBTCCAu2gAwIBAgIJAIx9twpbtGkCMA0GCSqGSIb3DQEBCwUAMEsxIzAhBgNV +BAMTGkVsYXN0aWNzZWFyY2ggVGVzdCBSb290IENBMRYwFAYDVQQLEw1lbGFzdGlj +c2VhcmNoMQwwCgYDVQQKEwNvcmcwHhcNMTkwNzE5MTMzMjM0WhcNMjMwNzE4MTMz +MjM0WjBTMSswKQYDVQQDEyJFbGFzdGljc2VhcmNoIFRlc3QgSW50ZXJtZWRpYXRl +IENBMRYwFAYDVQQLEw1FbGFzdGljc2VhcmNoMQwwCgYDVQQKEwNvcmcwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCnJ2KTJZnQzOt0uUf+5oLNcvDLnnWY +LzXZpOOX666Almwx+PVkDxkiGSe0QB9RWJqHSrsP1ryGIeCIzGMOctLt6QA7Peee +HdrKqOQgN620nDSd2EZ3s0Iddh1Ns/lfTtBJCP/03suaktm7j8EYKAyOlTIUhiKm +sTFlxPUSKjbtR4wR1ljnKN8X+j/ghr9mWhQrMR9rsGFObU8DQFho2Ti90C4HoMNU +dy4j+2G3VVpaq4he4/4CbPrWQQ3dKGpzVAngIuAv4eQ/y88EHAFwutxQZWAew4Va +5y3O112acSb9oC7g0NHQcBnos/WIChF5ki8V3LFnxN7jYvUUk9YxfA8hAgMBAAGj +geMwgeAwHQYDVR0OBBYEFPLIhQojImGWZv/eahQu+QBvy0RNMB8GA1UdIwQYMBaA +FM4SyNzpz82ihQ160zrLUVaWfI+1MAwGA1UdEwQFMAMBAf8wgY8GA1UdEQSBhzCB +hIIJbG9jYWxob3N0ghVsb2NhbGhvc3QubG9jYWxkb21haW6CCmxvY2FsaG9zdDSC +F2xvY2FsaG9zdDQubG9jYWxkb21haW40ggpsb2NhbGhvc3Q2ghdsb2NhbGhvc3Q2 +LmxvY2FsZG9tYWluNocEfwAAAYcQAAAAAAAAAAAAAAAAAAAAATANBgkqhkiG9w0B +AQsFAAOCAQEAMkh4nUi2yt5TX+ryBWaaA4/2ZOsxSeec5E1EjemPMUWGzFipV1YY +k/mpv51E+BbPgtmGMG8Win/PETKYuX8D+zPauFEmJmyJmm5B4mr1406RWERqNDql +36sOw89G0mDT/wIB4tkNdh830ml+d75aRVVB4X5pFAE8ZzI3g4OW4YxT3ZfUEhDl +QeGVatobvIaX8KpNSevjFAFuQzSgj61VXI+2+UIRV4tJP2xEqu5ISuArHcGhvNlS +bU3vZ80tTCa0tHyJrVqaqtQ23MDBzYPj6wJ/pvBQWAgZKnC3qJgXlJ9des117I1g +J98AXCDGu5LBW/p2C9VpSktpnfzsX4NHqg== +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testIntermediateCA.key b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testIntermediateCA.key new file mode 100644 index 0000000000000..5147725f4486a --- /dev/null +++ 
b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testIntermediateCA.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEApydikyWZ0MzrdLlH/uaCzXLwy551mC812aTjl+uugJZsMfj1 +ZA8ZIhkntEAfUViah0q7D9a8hiHgiMxjDnLS7ekAOz3nnh3ayqjkIDettJw0ndhG +d7NCHXYdTbP5X07QSQj/9N7LmpLZu4/BGCgMjpUyFIYiprExZcT1Eio27UeMEdZY +5yjfF/o/4Ia/ZloUKzEfa7BhTm1PA0BYaNk4vdAuB6DDVHcuI/tht1VaWquIXuP+ +Amz61kEN3Shqc1QJ4CLgL+HkP8vPBBwBcLrcUGVgHsOFWuctztddmnEm/aAu4NDR +0HAZ6LP1iAoReZIvFdyxZ8Te42L1FJPWMXwPIQIDAQABAoIBABp4z1C0dL6vpV5v +9Wn2AaMd3+qvZro6R9H3HiAyMAmnSO1FGz/EcFuJFlOikBMm8BobCLMCdAreFJw1 +mj5wit0ouGOpcyQEYGEWDELZ7oWa825IESjl18OosA1dQlIIvk3Cwh56pk4NkbP1 +mUQFG6/9CthbQeOaTlNqtNEypE5Bc+JGbQaUhRP6tF+Rxnpys2nIJt/Vp9khw0Du +K7Z6astunhfPDwLFGwHhflc6re1B+mxpLKTDHCcydJo2Kuh/LuuEtPkE5Ar4LwQk +D+/61iZHC4B8/4IkBlAsgCJ1B18L6JdTbSYeVlepkSkJML5t6z+cvt5VcObF7F8X +pPZn+kECgYEA2NaB0eshWNnHTMRv+sE92DCv0M7uV1eKtaopxOElAKJ/J2gpqcTh +GzdTVRg1M2LgVNk97ViL5bsXaVStRe085m8oA0bI9WbIoQRUFp40dRFRUjl+4TN0 +pdxXL4VmQMWuwlO6p8/JY8sInnHVCT+2z8lek8P3bdtTQZV9OZQTn0kCgYEAxVe8 +obJdnUSXuRDWg588TW35PNqOTJcerIU6eRKwafvCcrhMoX62Xbv6y6kKXndW/JuW +AbfSNiAOV+HGUbf8Xc54Xzk2mouoJA0S0tJ040jqOkFOaKIxYQudTU8y9bTXNsAk +oX3wOhlt2q9xffAK1gYffP5XPXnYnsb8qaMIeRkCgYBM9yaxOgJmJTbGmtscaEbp +W66sMScMPXhwruuQhFG7/fGgLSrMpaM5I9QiWitYB/qUY1/FxS4y5suSiYnPTjvV +lxLexttBr6/65yxpstHv06vHwby1dqwqyyDvLyxyRTiYpVuVgP18vG5cvw7c746W +BmXZkS9cAQN2Pfdq3pJwcQKBgEbCZd2owg5hCPIPyosZbpro4uRiDYIC8bm0b7n3 +7I+j+R3/XWLOt382pv+dlh03N1aORyRIkDReHCaAywaELRZJsTmbnyudBeYfVe+I +DOduPqYywnWcKo58hqOw0Tnu5Pg5vyi0qo16jrxKCiy5BHmnamT8IbXmWbjc6r28 +uo4JAoGAfAPvPJ2fV5vpzr4LPoVyaSiFj414D+5XYxX6CWpdTryelpP2Rs1VfJ1a +7EusUtWs26pAKwttDY4yoTvog7rrskgtXzisaoNMDbH/PfsoqjMnnIgakvKmHpUM +l6E1ecWFExEg5v6yvmxFC7JIUzIYOoysWu3X44G8rQ+vDQNRFZQ= +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testRootCA.crt b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testRootCA.crt new file mode 100644 index 0000000000000..50ba7a21727a6 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testRootCA.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID/TCCAuWgAwIBAgIJAIAPVUXOUQDNMA0GCSqGSIb3DQEBCwUAMEsxIzAhBgNV +BAMTGkVsYXN0aWNzZWFyY2ggVGVzdCBSb290IENBMRYwFAYDVQQLEw1lbGFzdGlj +c2VhcmNoMQwwCgYDVQQKEwNvcmcwHhcNMTkwNzE5MTMzMjIwWhcNMjMwNzE4MTMz +MjIwWjBLMSMwIQYDVQQDExpFbGFzdGljc2VhcmNoIFRlc3QgUm9vdCBDQTEWMBQG +A1UECxMNZWxhc3RpY3NlYXJjaDEMMAoGA1UEChMDb3JnMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAzIgn8r2kirt90id0uoi6YEGBPx+XDzthLbLsN+M0 +nXhj40OVcGPiww+cre14bJr0M6MG4CvFjRJc92RoVrE8+7XOKt0bgiHeVM+b0LEh +wVMH9koararPVMo0CjCMN4ChHMOWKBPUNZswvk+pFC+QbTcfgQLycqh+lTB1O6l3 +hPnmunEqhLIj9ke3FwA326igdb+16EbKYVL2c5unNoC5ZMc5Z9bnn4/GNXptkHhy ++SvG7IZKW2pAzei3Df/n47ZhJfQKERUCe9eO7b/ZmTEzAzYj9xucE5lYcpkOZd6g +IMU3vXe4FeD/BM4sOLkKTtMejiElEecxw8cLI9Nji/0y1wIDAQABo4HjMIHgMB0G +A1UdDgQWBBTOEsjc6c/NooUNetM6y1FWlnyPtTAfBgNVHSMEGDAWgBTOEsjc6c/N +ooUNetM6y1FWlnyPtTAMBgNVHRMEBTADAQH/MIGPBgNVHREEgYcwgYSCCWxvY2Fs +aG9zdIIVbG9jYWxob3N0LmxvY2FsZG9tYWluggpsb2NhbGhvc3Q0ghdsb2NhbGhv +c3Q0LmxvY2FsZG9tYWluNIIKbG9jYWxob3N0NoIXbG9jYWxob3N0Ni5sb2NhbGRv +bWFpbjaHBH8AAAGHEAAAAAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQELBQADggEB +ACHjwoDJILv77sQ5QN6SoAp6GYqiC9/doDIzDFCd/WP7G8EbaosHM6jM7NbrlK3g +PNTzuY1pLPoI3YJSO4Al/UfzEffaYSbZC2QZG9F6fUSWhvR+nxzPSXWkjzIInv1j 
+pPMgnUl6oJaUbsSR/evtvWNSxrM3LewkRTOoktkXM6SjTUHjdP6ikrkrarrWZgzr +K30BqGL6kDSv9LkyXe6RSgQDtQe51Yut+lKGCcy8AoEwG/3cjb7XnrWcFsJXjYbf +4m3QsS8yHU/O/xgyvVHOfki+uGVepzSjdzDMLE1GBkju05NR2eJZ8omj/QiJa0+z +1d/AOKExvWvo1yQ28ORcwo4= +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testRootCA.key b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testRootCA.key new file mode 100644 index 0000000000000..148bbd52bd76f --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testRootCA.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAzIgn8r2kirt90id0uoi6YEGBPx+XDzthLbLsN+M0nXhj40OV +cGPiww+cre14bJr0M6MG4CvFjRJc92RoVrE8+7XOKt0bgiHeVM+b0LEhwVMH9koa +rarPVMo0CjCMN4ChHMOWKBPUNZswvk+pFC+QbTcfgQLycqh+lTB1O6l3hPnmunEq +hLIj9ke3FwA326igdb+16EbKYVL2c5unNoC5ZMc5Z9bnn4/GNXptkHhy+SvG7IZK +W2pAzei3Df/n47ZhJfQKERUCe9eO7b/ZmTEzAzYj9xucE5lYcpkOZd6gIMU3vXe4 +FeD/BM4sOLkKTtMejiElEecxw8cLI9Nji/0y1wIDAQABAoIBAQC6LMnoPFW1brs1 ++3JWhTTZf2btlYzEcbGgjnhU2v0+xaJu8UrrFhEIq4JcE4gFm/rjsecFUPKu2eND +0eLj3st699+lxsRObRPbMWtMyJ/IQRNDTesA4DV/odtC1zQbJXwCGcrpyjrlXNE+ +unZWiIE32PBVV+BnHBa1KHneCAFiSRLrySAiDAnTIJxB6ufweoxevLoJPPNLlbo7 +H2jv6g1Som/Imjhof4KhD/1Q04Sed2wScSS/7Bz38eO68HG4NMFY+M2/cLzrbflg +QdeKHNhoIGnSFMEW5TCVlI4qrP8zvPPdZmLOMBT+Ocm3pc5xDAPwFYCe8wH1DVn+ +b3sVpwu5AoGBAOhFA7gUDZjRBkNAqJfbUdhdWSslePQsjeTKsu5rc4gk2aiL4bZ4 +fxG0Dq1hX7FjAmYrGqnsXsbxxDnCkhXGH1lY73kF0Zzwr2Pg1yRHyn1nCinhD4g4 +G2vBr37QtWn4wS/L7V//D3xrcCTG3QgAmvZZ99tYgqlmnUzmawdZ8kQ7AoGBAOFt +qg7sTSNWVpKkfkyX2NXvBMt5e3Qcwnge2pX+SBgljwjNUwSSMLwxdBDSyDXIhk8W +s4pJLtMDJsT/2WBKC9WJm9m3gc7yYZznLJ+5YPcieXHGGNXCRldPePhTIjnL591H +CSXoc3BZ2iKK745BYuPqSuLb2XfE3/hwoaFR4S4VAoGAQ6ywG7dECu2ELJ4vQSe2 +3hq8u1SMvGAq66mfntYR8G4EORagqkDLjUXwLNY9Qnr9nPUcLLxhFQgmS0oEtHFo +eujtxU5Lt7Vs9OXy6XA9cHJQRMl9dAwc+TWSw5ld8kV3TEzXmevAAFlxcFW82vMK +M5MdI3zTfTYXyOst7hNoAjcCgYAhz/cgAeWYFU0q9a1UA7qsbAuGEZSo1997cPVM +ZjWeGZQYt+Np3hudPrWwCE2rc4Zhun/3j/6L+/8GsXGDddfMkbVktJet2ME3bZ1N +39phdzRMEnCLL3aphewZIy8RCDqhABSpMPKPuYp0f+5qofgZQ300BdHamxcVBp/X +uJZT+QKBgQDdJQd+QxfCb8BZ11fWtyWJWQWZMmyX2EEbAIMvYQP3xh8PHmw2JoiQ +VQ103bCkegJ1S7ubrGltdt8pyjN4rrByXJmxCe1Y/LSHIp9w8D3jaiLCRSk1EmBw +jXjnZoiJn3GV5jmbV10hzrn7jqRcwhYA5zuoE7qb604V7cPZLzHtog== +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialPlugin.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialPlugin.java index b91a6f335ed47..b15d3e620d203 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialPlugin.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialPlugin.java @@ -9,7 +9,9 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.Mapper; +import org.elasticsearch.ingest.Processor; import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; @@ -17,6 +19,7 @@ import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; import org.elasticsearch.xpack.spatial.index.mapper.ShapeFieldMapper; import org.elasticsearch.xpack.spatial.index.query.ShapeQueryBuilder; +import org.elasticsearch.xpack.spatial.ingest.CircleProcessor; import 
java.util.Arrays; import java.util.Collections; @@ -26,7 +29,7 @@ import static java.util.Collections.singletonList; -public class SpatialPlugin extends Plugin implements ActionPlugin, MapperPlugin, SearchPlugin { +public class SpatialPlugin extends Plugin implements ActionPlugin, MapperPlugin, SearchPlugin, IngestPlugin { public SpatialPlugin(Settings settings) { } @@ -49,4 +52,9 @@ public Map getMappers() { public List> getQueries() { return singletonList(new QuerySpec<>(ShapeQueryBuilder.NAME, ShapeQueryBuilder::new, ShapeQueryBuilder::fromXContent)); } + + @Override + public Map getProcessors(Processor.Parameters parameters) { + return Map.of(CircleProcessor.TYPE, new CircleProcessor.Factory()); + } } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialUtils.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialUtils.java new file mode 100644 index 0000000000000..49ace55986027 --- /dev/null +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialUtils.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.spatial; + +import org.apache.lucene.util.SloppyMath; +import org.elasticsearch.geometry.Circle; +import org.elasticsearch.geometry.LinearRing; +import org.elasticsearch.geometry.Polygon; +import org.elasticsearch.index.mapper.GeoShapeIndexer; + +/** + * Utility class for storing different helpful re-usable spatial functions + */ +public class SpatialUtils { + + private SpatialUtils() {} + + /** + * Makes an n-gon, centered at the provided circle's center, and each vertex approximately + * {@link Circle#getRadiusMeters()} away from the center. + * + * This does not split the polygon across the date-line. Relies on {@link GeoShapeIndexer} to + * split prepare polygon for indexing. + * + * Adapted from from org.apache.lucene.geo.GeoTestUtil + * */ + public static Polygon createRegularGeoShapePolygon(Circle circle, int gons) { + double[][] result = new double[2][]; + result[0] = new double[gons+1]; + result[1] = new double[gons+1]; + for(int i=0; i circle.getRadiusMeters()) { + // too big + factor -= step; + if (last == 1) { + step /= 2.0; + } + last = -1; + } else if (distanceMeters < circle.getRadiusMeters()) { + // too small + factor += step; + if (last == -1) { + step /= 2.0; + } + last = 1; + } + } + } + + // close poly + result[0][gons] = result[0][0]; + result[1][gons] = result[1][0]; + return new Polygon(new LinearRing(result[0], result[1])); + } + + /** + * Makes an n-gon, centered at the provided circle's center. This assumes + * distance measured in cartesian geometry. 
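Both helpers approximate a circle by a regular n-gon whose vertices sit roughly one radius from the centre; the geographic variant additionally and iteratively adjusts a scaling factor until each vertex's haversine distance from the centre matches the requested radius. A minimal standalone sketch of the planar idea, with hypothetical names and not the code added here:

[source,java]
------------------------------------------------------------------
public class RegularPolygonSketch {

    /**
     * Vertices of a regular n-gon lying on a circle of radius r around (cx, cy).
     * The last point repeats the first so the ring is closed, as the helpers here do.
     */
    static double[][] regularPolygon(double cx, double cy, double r, int n) {
        double[] xs = new double[n + 1];
        double[] ys = new double[n + 1];
        for (int i = 0; i < n; i++) {
            double theta = 2 * Math.PI * i / n;   // angle of vertex i
            xs[i] = cx + r * Math.cos(theta);
            ys[i] = cy + r * Math.sin(theta);
        }
        xs[n] = xs[0];
        ys[n] = ys[0];
        return new double[][] { xs, ys };
    }

    public static void main(String[] args) {
        // A 4-gon of radius 10 around the origin: roughly (10,0), (0,10), (-10,0), (0,-10), then back to (10,0).
        double[][] square = regularPolygon(0, 0, 10, 4);
        for (int i = 0; i < square[0].length; i++) {
            System.out.printf("(%.1f, %.1f)%n", square[0][i], square[1][i]);
        }
    }
}
------------------------------------------------------------------

Every vertex of such a ring is one radius from the centre, which is exactly what the cartesian assertions in SpatialUtilsTests check.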
+ **/ + public static Polygon createRegularShapePolygon(Circle circle, int gons) { + double[][] result = new double[2][]; + result[0] = new double[gons+1]; + result[1] = new double[gons+1]; + for(int i=0; i valueWrapper; + if (obj instanceof Map || obj instanceof String) { + valueWrapper = Map.of("shape", obj); + } else { + throw new IllegalArgumentException("field [" + field + "] must be a WKT Circle or a GeoJSON Circle value"); + } + + MapXContentParser parser = new MapXContentParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, valueWrapper, XContentType.JSON); + try { + parser.nextToken(); // START_OBJECT + parser.nextToken(); // "shape" field key + parser.nextToken(); // shape value + GeometryFormat geometryFormat = PARSER.geometryFormat(parser); + Geometry geometry = geometryFormat.fromXContent(parser); + if (ShapeType.CIRCLE.equals(geometry.type())) { + Circle circle = (Circle) geometry; + int numSides = numSides(circle.getRadiusMeters()); + final Geometry polygonizedCircle; + switch (circleShapeFieldType) { + case GEO_SHAPE: + polygonizedCircle = SpatialUtils.createRegularGeoShapePolygon(circle, numSides); + break; + case SHAPE: + polygonizedCircle = SpatialUtils.createRegularShapePolygon(circle, numSides); + break; + default: + throw new IllegalStateException("invalid shape_type [" + circleShapeFieldType + "]"); + } + XContentBuilder newValueBuilder = XContentFactory.jsonBuilder().startObject().field("val"); + geometryFormat.toXContent(polygonizedCircle, newValueBuilder, ToXContent.EMPTY_PARAMS); + newValueBuilder.endObject(); + Map newObj = XContentHelper.convertToMap( + BytesReference.bytes(newValueBuilder), true, XContentType.JSON).v2(); + ingestDocument.setFieldValue(targetField, newObj.get("val")); + } else { + throw new IllegalArgumentException("found [" + geometry.type() + "] instead of circle"); + } + } catch (Exception e) { + throw new IllegalArgumentException("invalid circle definition", e); + } + + return ingestDocument; + } + + @Override + public String getType() { + return TYPE; + } + + String field() { + return field; + } + + String targetField() { + return targetField; + } + + double errorDistance() { + return errorDistance; + } + + CircleShapeFieldType shapeType() { + return circleShapeFieldType; + } + + int numSides(double radiusMeters) { + int val = (int) Math.ceil(2 * Math.PI / Math.acos(1 - errorDistance / radiusMeters)); + return Math.min(MAXIMUM_NUMBER_OF_SIDES, Math.max(MINIMUM_NUMBER_OF_SIDES, val)); + } + + + public static final class Factory implements Processor.Factory { + + public CircleProcessor create(Map registry, String processorTag, Map config) { + String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); + String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field", field); + boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); + double radiusDistance = Math.abs(ConfigurationUtils.readDoubleProperty(TYPE, processorTag, config, "error_distance")); + CircleShapeFieldType circleFieldType = CircleShapeFieldType.parse( + ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "shape_type")); + return new CircleProcessor(processorTag, field, targetField, ignoreMissing, radiusDistance, circleFieldType); + } + } + + enum CircleShapeFieldType { + SHAPE, GEO_SHAPE; + + public static CircleShapeFieldType parse(String value) { + EnumSet validValues = 
EnumSet.allOf(CircleShapeFieldType.class); + try { + return valueOf(value.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("illegal [shape_type] value [" + value + "]. valid values are " + + Arrays.toString(validValues.toArray())); + } + } + } +} diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/SpatialUtilsTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/SpatialUtilsTests.java new file mode 100644 index 0000000000000..df773fbb7c419 --- /dev/null +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/SpatialUtilsTests.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.spatial; + +import org.apache.lucene.util.SloppyMath; +import org.elasticsearch.geometry.Circle; +import org.elasticsearch.geometry.LinearRing; +import org.elasticsearch.geometry.Polygon; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.equalTo; + +public class SpatialUtilsTests extends ESTestCase { + + public void testCreateRegularGeoShapePolygon() { + double lon = randomDoubleBetween(-20, 20, true); + double lat = randomDoubleBetween(-20, 20, true); + double radiusMeters = randomDoubleBetween(10, 10000, true); + Circle circle = new Circle(lon, lat, radiusMeters); + int numSides = randomIntBetween(4, 1000); + Polygon polygon = SpatialUtils.createRegularGeoShapePolygon(circle, numSides); + LinearRing outerShell = polygon.getPolygon(); + int numPoints = outerShell.length(); + + // check no holes created + assertThat(polygon.getNumberOfHoles(), equalTo(0)); + // check there are numSides edges + assertThat(numPoints, equalTo(numSides + 1)); + // check that all the points are about a radius away from the center + for (int i = 0; i < numPoints ; i++) { + double actualDistance = SloppyMath + .haversinMeters(circle.getY(), circle.getX(), outerShell.getY(i), outerShell.getX(i)); + assertThat(actualDistance, closeTo(radiusMeters, 0.1)); + } + } + + public void testCreateRegularShapePolygon() { + double x = randomDoubleBetween(-20, 20, true); + double y = randomDoubleBetween(-20, 20, true); + double radius = randomDoubleBetween(10, 10000, true); + Circle circle = new Circle(x, y, radius); + int numSides = randomIntBetween(4, 1000); + Polygon polygon = SpatialUtils.createRegularShapePolygon(circle, numSides); + LinearRing outerShell = polygon.getPolygon(); + int numPoints = outerShell.length(); + + // check no holes created + assertThat(polygon.getNumberOfHoles(), equalTo(0)); + // check there are numSides edges + assertThat(numPoints, equalTo(numSides + 1)); + // check that all the points are about a radius away from the center + for (int i = 0; i < numPoints ; i++) { + double deltaX = circle.getX() - outerShell.getX(i); + double deltaY = circle.getY() - outerShell.getY(i); + double distance = Math.sqrt(deltaX * deltaX + deltaY * deltaY); + assertThat(distance, closeTo(radius, 0.0001)); + } + } +} diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorFactoryTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorFactoryTests.java new file mode 100644 index 
0000000000000..be4544c10305e --- /dev/null +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorFactoryTests.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.spatial.ingest; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.test.ESTestCase; + +import org.junit.Before; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class CircleProcessorFactoryTests extends ESTestCase { + + private CircleProcessor.Factory factory; + + @Before + public void init() { + factory = new CircleProcessor.Factory(); + } + + public void testCreateGeoShape() { + Map config = new HashMap<>(); + config.put("field", "field1"); + config.put("error_distance", 0.002); + config.put("shape_type", "geo_shape"); + String processorTag = randomAlphaOfLength(10); + CircleProcessor processor = factory.create(null, processorTag, config); + assertThat(processor.getTag(), equalTo(processorTag)); + assertThat(processor.field(), equalTo("field1")); + assertThat(processor.targetField(), equalTo("field1")); + assertThat(processor.errorDistance(), equalTo(0.002)); + assertThat(processor.shapeType(), equalTo(CircleProcessor.CircleShapeFieldType.GEO_SHAPE)); + } + + public void testCreateShape() { + Map config = new HashMap<>(); + config.put("field", "field1"); + config.put("error_distance", 0.002); + config.put("shape_type", "shape"); + String processorTag = randomAlphaOfLength(10); + CircleProcessor processor = factory.create(null, processorTag, config); + assertThat(processor.getTag(), equalTo(processorTag)); + assertThat(processor.field(), equalTo("field1")); + assertThat(processor.targetField(), equalTo("field1")); + assertThat(processor.errorDistance(), equalTo(0.002)); + assertThat(processor.shapeType(), equalTo(CircleProcessor.CircleShapeFieldType.SHAPE)); + } + + public void testCreateInvalidShapeType() { + Map config = new HashMap<>(); + config.put("field", "field1"); + config.put("error_distance", 0.002); + config.put("shape_type", "invalid"); + String processorTag = randomAlphaOfLength(10); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> factory.create(null, processorTag, config)); + assertThat(e.getMessage(), equalTo("illegal [shape_type] value [invalid]. 
valid values are [SHAPE, GEO_SHAPE]")); + } + + public void testCreateMissingField() { + Map config = new HashMap<>(); + String processorTag = randomAlphaOfLength(10); + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, processorTag, config)); + assertThat(e.getMessage(), equalTo("[field] required property is missing")); + } + + public void testCreateWithTargetField() { + Map config = new HashMap<>(); + config.put("field", "field1"); + config.put("target_field", "other"); + config.put("error_distance", 0.002); + config.put("shape_type", "geo_shape"); + String processorTag = randomAlphaOfLength(10); + CircleProcessor processor = factory.create(null, processorTag, config); + assertThat(processor.getTag(), equalTo(processorTag)); + assertThat(processor.field(), equalTo("field1")); + assertThat(processor.targetField(), equalTo("other")); + assertThat(processor.errorDistance(), equalTo(0.002)); + assertThat(processor.shapeType(), equalTo(CircleProcessor.CircleShapeFieldType.GEO_SHAPE)); + } + + public void testCreateWithNoErrorDistanceDefined() { + Map config = new HashMap<>(); + config.put("field", "field1"); + String processorTag = randomAlphaOfLength(10); + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, processorTag, config)); + assertThat(e.getMessage(), equalTo("[error_distance] required property is missing")); + } +} diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java new file mode 100644 index 0000000000000..55f3e84ed56fe --- /dev/null +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java @@ -0,0 +1,277 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.spatial.ingest; + +import org.apache.lucene.document.Document; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.geo.GeoJson; +import org.elasticsearch.common.geo.ShapeRelation; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.geometry.Circle; +import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.Polygon; +import org.elasticsearch.geometry.utils.StandardValidator; +import org.elasticsearch.geometry.utils.WellKnownText; +import org.elasticsearch.index.mapper.GeoShapeFieldMapper; +import org.elasticsearch.index.mapper.GeoShapeIndexer; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.VectorGeoShapeQueryProcessor; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.RandomDocumentPicks; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.spatial.SpatialUtils; +import org.elasticsearch.xpack.spatial.index.mapper.ShapeFieldMapper; +import org.elasticsearch.xpack.spatial.index.mapper.ShapeIndexer; +import org.elasticsearch.xpack.spatial.index.query.ShapeQueryProcessor; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument; +import static org.elasticsearch.xpack.spatial.ingest.CircleProcessor.CircleShapeFieldType; +import static org.elasticsearch.xpack.spatial.ingest.CircleProcessor.CircleShapeFieldType.GEO_SHAPE; +import static org.elasticsearch.xpack.spatial.ingest.CircleProcessor.CircleShapeFieldType.SHAPE; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class CircleProcessorTests extends ESTestCase { + private static final WellKnownText WKT = new WellKnownText(true, new StandardValidator(true)); + + public void testNumSides() { + double radiusDistanceMeters = randomDoubleBetween(0.01, 6371000, true); + CircleShapeFieldType shapeType = randomFrom(SHAPE, GEO_SHAPE); + CircleProcessor processor = new CircleProcessor("tag", "field", "field", false, radiusDistanceMeters, shapeType); + + // radius is same as error distance + assertThat(processor.numSides(radiusDistanceMeters), equalTo(4)); + // radius is much smaller than error distance + assertThat(processor.numSides(0), equalTo(4)); + // radius is much larger than error distance + assertThat(processor.numSides(Math.pow(radiusDistanceMeters, 100)), equalTo(1000)); + // radius is 5 times longer than error distance + assertThat(processor.numSides(5*radiusDistanceMeters), equalTo(10)); + + } 
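For reference, the expectations in testNumSides follow directly from the processor's side-count formula, ceil(2π / acos(1 - errorDistance / radius)), clamped between a floor and a cap of sides (4 and 1000, as the assertions above imply). A standalone restatement with a few sample values, using a hypothetical class name:

[source,java]
------------------------------------------------------------------
public class NumSidesSketch {

    /**
     * Restatement of the side-count formula in CircleProcessor#numSides, assuming
     * the clamp bounds are 4 and 1000 as the test expectations above suggest.
     */
    static int numSides(double errorDistance, double radiusMeters) {
        int val = (int) Math.ceil(2 * Math.PI / Math.acos(1 - errorDistance / radiusMeters));
        return Math.min(1000, Math.max(4, val));
    }

    public static void main(String[] args) {
        System.out.println(numSides(10, 10));        // radius equals the error distance -> 4
        System.out.println(numSides(10, 50));        // radius is 5x the error distance  -> 10
        System.out.println(numSides(10, 1_000_000)); // tiny relative error              -> capped at 1000
    }
}
------------------------------------------------------------------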
+ + public void testFieldNotFound() throws Exception { + CircleProcessor processor = new CircleProcessor("tag", "field", "field", false, 10, GEO_SHAPE); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); + Exception e = expectThrows(Exception.class, () -> processor.execute(ingestDocument)); + assertThat(e.getMessage(), containsString("not present as part of path [field]")); + } + + public void testFieldNotFoundWithIgnoreMissing() throws Exception { + CircleProcessor processor = new CircleProcessor("tag", "field", "field", true, 10, GEO_SHAPE); + IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); + IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); + processor.execute(ingestDocument); + assertIngestDocument(originalIngestDocument, ingestDocument); + } + + public void testNullValue() throws Exception { + CircleProcessor processor = new CircleProcessor("tag", "field", "field", false, 10, GEO_SHAPE); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", null)); + Exception e = expectThrows(Exception.class, () -> processor.execute(ingestDocument)); + assertThat(e.getMessage(), equalTo("field [field] is null, cannot process it.")); + } + + public void testNullValueWithIgnoreMissing() throws Exception { + CircleProcessor processor = new CircleProcessor("tag", "field", "field", true, 10, GEO_SHAPE); + IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", null)); + IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); + processor.execute(ingestDocument); + assertIngestDocument(originalIngestDocument, ingestDocument); + } + + @SuppressWarnings("unchecked") + public void testJson() throws IOException { + Circle circle = new Circle(101.0, 1.0, 10); + HashMap map = new HashMap<>(); + HashMap circleMap = new HashMap<>(); + circleMap.put("type", "Circle"); + circleMap.put("coordinates", List.of(circle.getLon(), circle.getLat())); + circleMap.put("radius", circle.getRadiusMeters() + "m"); + map.put("field", circleMap); + Geometry expectedPoly = SpatialUtils.createRegularGeoShapePolygon(circle, 4); + assertThat(expectedPoly, instanceOf(Polygon.class)); + IngestDocument ingestDocument = new IngestDocument(map, Collections.emptyMap()); + CircleProcessor processor = new CircleProcessor("tag", "field", "field", false, 10, GEO_SHAPE); + processor.execute(ingestDocument); + Map polyMap = ingestDocument.getFieldValue("field", Map.class); + XContentBuilder builder = XContentFactory.jsonBuilder(); + GeoJson.toXContent(expectedPoly, builder, ToXContent.EMPTY_PARAMS); + Tuple> expected = XContentHelper.convertToMap(BytesReference.bytes(builder), + true, XContentType.JSON); + assertThat(polyMap, equalTo(expected.v2())); + } + + public void testWKT() { + Circle circle = new Circle(101.0, 0.0, 2); + HashMap map = new HashMap<>(); + map.put("field", WKT.toWKT(circle)); + Geometry expectedPoly = SpatialUtils.createRegularGeoShapePolygon(circle, 4); + IngestDocument ingestDocument = new IngestDocument(map, Collections.emptyMap()); + CircleProcessor processor = new CircleProcessor("tag", "field", "field",false, 2, GEO_SHAPE); + processor.execute(ingestDocument); + String polyString = ingestDocument.getFieldValue("field", String.class); + assertThat(polyString, equalTo(WKT.toWKT(expectedPoly))); + } + + public void testInvalidWKT() { + 
HashMap map = new HashMap<>(); + map.put("field", "invalid"); + IngestDocument ingestDocument = new IngestDocument(map, Collections.emptyMap()); + CircleProcessor processor = new CircleProcessor("tag", "field", "field", false, 10, GEO_SHAPE); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); + assertThat(e.getMessage(), equalTo("invalid circle definition")); + map.put("field", "POINT (30 10)"); + e = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); + assertThat(e.getMessage(), equalTo("invalid circle definition")); + } + + public void testMissingField() { + IngestDocument ingestDocument = new IngestDocument(new HashMap<>(), Collections.emptyMap()); + CircleProcessor processor = new CircleProcessor("tag", "field", "field", false, 10, GEO_SHAPE); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); + assertThat(e.getMessage(), equalTo("field [field] not present as part of path [field]")); + } + + public void testInvalidType() { + Map field = new HashMap<>(); + field.put("coordinates", List.of(100, 100)); + field.put("radius", "10m"); + Map map = new HashMap<>(); + map.put("field", field); + IngestDocument ingestDocument = new IngestDocument(map, Collections.emptyMap()); + CircleProcessor processor = new CircleProcessor("tag", "field", "field", false, 10, GEO_SHAPE); + + for (Object value : new Object[] { null, 4.0, "not_circle"}) { + field.put("type", value); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); + assertThat(e.getMessage(), equalTo("invalid circle definition")); + } + } + + public void testInvalidCoordinates() { + Map field = new HashMap<>(); + field.put("type", "circle"); + field.put("radius", "10m"); + Map map = new HashMap<>(); + map.put("field", field); + IngestDocument ingestDocument = new IngestDocument(map, Collections.emptyMap()); + CircleProcessor processor = new CircleProcessor("tag", "field", "field", false, 10, GEO_SHAPE); + + for (Object value : new Object[] { null, "not_circle"}) { + field.put("coordinates", value); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); + assertThat(e.getMessage(), equalTo("invalid circle definition")); + } + } + + public void testInvalidRadius() { + Map field = new HashMap<>(); + field.put("type", "circle"); + field.put("coordinates", List.of(100.0, 1.0)); + Map map = new HashMap<>(); + map.put("field", field); + IngestDocument ingestDocument = new IngestDocument(map, Collections.emptyMap()); + CircleProcessor processor = new CircleProcessor("tag", "field", "field", false, 10, GEO_SHAPE); + + for (Object value : new Object[] { null, "NotNumber", "10.0fs"}) { + field.put("radius", value); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); + assertThat(e.getMessage(), equalTo("invalid circle definition")); + } + } + + public void testGeoShapeQueryAcrossDateline() throws IOException { + String fieldName = "circle"; + Circle circle = new Circle(179.999746, 67.1726, randomDoubleBetween(1000, 300000, true)); + int numSides = randomIntBetween(4, 1000); + Geometry geometry = SpatialUtils.createRegularGeoShapePolygon(circle, numSides); + + MappedFieldType shapeType = new GeoShapeFieldMapper.GeoShapeFieldType(); + shapeType.setHasDocValues(false); + shapeType.setName(fieldName); 
+ + VectorGeoShapeQueryProcessor processor = new VectorGeoShapeQueryProcessor(); + QueryShardContext mockedContext = mock(QueryShardContext.class); + when(mockedContext.fieldMapper(any())).thenReturn(shapeType); + Query sameShapeQuery = processor.process(geometry, fieldName, ShapeRelation.INTERSECTS, mockedContext); + Query pointOnDatelineQuery = processor.process(new Point(180, circle.getLat()), fieldName, + ShapeRelation.INTERSECTS, mockedContext); + + try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + Document doc = new Document(); + GeoShapeIndexer indexer = new GeoShapeIndexer(true, fieldName); + Geometry normalized = indexer.prepareForIndexing(geometry); + for (IndexableField field : indexer.indexShape(null, normalized)) { + doc.add(field); + } + w.addDocument(doc); + + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + assertThat(searcher.search(sameShapeQuery, 1).totalHits.value, equalTo(1L)); + assertThat(searcher.search(pointOnDatelineQuery, 1).totalHits.value, equalTo(1L)); + } + } + } + + public void testShapeQuery() throws IOException { + String fieldName = "circle"; + Circle circle = new Circle(0, 0, 10); + int numSides = randomIntBetween(4, 1000); + Geometry geometry = SpatialUtils.createRegularShapePolygon(circle, numSides); + + MappedFieldType shapeType = new ShapeFieldMapper.ShapeFieldType(); + shapeType.setHasDocValues(false); + shapeType.setName(fieldName); + + ShapeQueryProcessor processor = new ShapeQueryProcessor(); + QueryShardContext mockedContext = mock(QueryShardContext.class); + when(mockedContext.fieldMapper(any())).thenReturn(shapeType); + Query sameShapeQuery = processor.process(geometry, fieldName, ShapeRelation.INTERSECTS, mockedContext); + Query centerPointQuery = processor.process(new Point(circle.getLon(), circle.getLat()), fieldName, + ShapeRelation.INTERSECTS, mockedContext); + + try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + Document doc = new Document(); + ShapeIndexer indexer = new ShapeIndexer(fieldName); + Geometry normalized = indexer.prepareForIndexing(geometry); + for (IndexableField field : indexer.indexShape(null, normalized)) { + doc.add(field); + } + w.addDocument(doc); + + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + assertThat(searcher.search(sameShapeQuery, 1).totalHits.value, equalTo(1L)); + assertThat(searcher.search(centerPointQuery, 1).totalHits.value, equalTo(1L)); + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/analytics/cumulative_cardinality.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/analytics/cumulative_cardinality.yml new file mode 100644 index 0000000000000..b59912e86f2a5 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/analytics/cumulative_cardinality.yml @@ -0,0 +1,86 @@ +setup: + - skip: + features: headers + - do: + indices.create: + index: foo + body: + mappings: + properties: + timestamp: + type: date + user: + type: keyword + + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + bulk: + refresh: true + body: + - index: + _index: "foo" + - timestamp: "2017-01-01T05:00:00Z" + user: "a" + + - index: + _index: "foo" + - timestamp: "2017-01-01T05:00:00Z" + user: "b" + + - index: + _index: "foo" + - timestamp: "2017-01-01T05:00:00Z" + user: "c" + + - index: + _index: "foo" + - timestamp: "2017-01-02T05:00:00Z" + user: "a" + + - index: + _index: "foo" + - timestamp: "2017-01-02T05:00:00Z" + user: "b" + + - index: + _index: "foo" + - timestamp: "2017-01-03T05:00:00Z" + user: "d" + +--- +"Basic Search": + + - do: + search: + index: "foo" + body: + size: 0 + aggs: + histo: + date_histogram: + field: "timestamp" + calendar_interval: "day" + aggs: + distinct_users: + cardinality: + field: "user" + total_users: + cumulative_cardinality: + buckets_path: "distinct_users" + + - length: { aggregations.histo.buckets: 3 } + - match: { aggregations.histo.buckets.0.key_as_string: "2017-01-01T00:00:00.000Z" } + - match: { aggregations.histo.buckets.0.doc_count: 3 } + - match: { aggregations.histo.buckets.0.distinct_users.value: 3 } + - match: { aggregations.histo.buckets.0.total_users.value: 3 } + - match: { aggregations.histo.buckets.1.key_as_string: "2017-01-02T00:00:00.000Z" } + - match: { aggregations.histo.buckets.1.doc_count: 2 } + - match: { aggregations.histo.buckets.1.distinct_users.value: 2 } + - match: { aggregations.histo.buckets.1.total_users.value: 3 } + - match: { aggregations.histo.buckets.2.key_as_string: "2017-01-03T00:00:00.000Z" } + - match: { aggregations.histo.buckets.2.doc_count: 1 } + - match: { aggregations.histo.buckets.2.distinct_users.value: 1 } + - match: { aggregations.histo.buckets.2.total_users.value: 4 } + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml index ed53c5a52c7b0..95c838509f0b8 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml @@ -1142,6 +1142,52 @@ setup: } } +--- +"Test put regression given training_percent is less than one": + + - do: + catch: /\[training_percent\] must be a double in \[1, 100\]/ + ml.put_data_frame_analytics: + id: "regression-training-percent-is-less-than-one" + body: > + { + "source": { + "index": "index-source" + }, + "dest": { + "index": "index-dest" + }, + "analysis": { + "regression": { + "dependent_variable": "foo", + "training_percent": 0.999 + } + } + } + +--- +"Test put regression given training_percent is greater than hundred": + + - do: + catch: /\[training_percent\] must be a double in \[1, 100\]/ + ml.put_data_frame_analytics: + id: "regression-training-percent-is-greater-than-hundred" + body: > + { + "source": { + "index": "index-source" + }, + "dest": { + "index": "index-dest" + }, + "analysis": { + "regression": { + "dependent_variable": "foo", + "training_percent": 100.1 + } + } + } + --- "Test put regression given valid": @@ -1163,7 +1209,8 @@ setup: "gamma": 0.42, "eta": 0.5, "maximum_number_trees": 400, - "feature_bag_fraction": 0.3 + "feature_bag_fraction": 0.3, + "training_percent": 60.3 } } } @@ -1177,7 +1224,8 @@ setup: "gamma": 0.42, "eta": 0.5, "maximum_number_trees": 400, - "feature_bag_fraction": 0.3 + "feature_bag_fraction": 0.3, + "training_percent": 60.3 } }} - is_true: create_time @@ -1210,7 +1258,8 @@ setup: - match: { dest.index: "index-dest" } - match: { analysis: { "regression":{ - 
"dependent_variable": "foo" + "dependent_variable": "foo", + "training_percent": 100.0 } }} - is_true: create_time diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/start_data_frame_analytics.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/start_data_frame_analytics.yml index 6417ef1e4c6b3..3e9c73a3fa8c6 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/start_data_frame_analytics.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/start_data_frame_analytics.yml @@ -62,6 +62,34 @@ id: "foo" --- +"Test start with compatible fields but no data": + - do: + indices.create: + index: empty-index-with-compatible-fields + body: + mappings: + properties: + long_field: { "type": "long" } + + - do: + ml.put_data_frame_analytics: + id: "empty-with-compatible-fields" + body: > + { + "source": { + "index": "empty-index-with-compatible-fields" + }, + "dest": { + "index": "empty-index-with-compatible-fields-dest" + }, + "analysis": {"outlier_detection":{}} + } + + - do: + catch: /Unable to start empty-with-compatible-fields as there are no analyzable data in source indices \[empty-index-with-compatible-fields\]/ + ml.start_data_frame_analytics: + id: "empty-with-compatible-fields" +--- "Test start with inconsistent body/param ids": - do: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/11_builtin.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/11_builtin.yml index 2e23a85b7e737..dd36e6e603080 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/11_builtin.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/11_builtin.yml @@ -15,5 +15,5 @@ setup: # This is fragile - it needs to be updated every time we add a new cluster/index privilege # I would much prefer we could just check that specific entries are in the array, but we don't have # an assertion for that - - length: { "cluster" : 28 } + - length: { "cluster" : 30 } - length: { "index" : 16 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/vectors/15_dense_vector_l1l2.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/vectors/15_dense_vector_l1l2.yml index 5845c17f5a080..dbb274d077645 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/vectors/15_dense_vector_l1l2.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/vectors/15_dense_vector_l1l2.yml @@ -65,7 +65,7 @@ setup: - match: {hits.hits.1._id: "2"} - gte: {hits.hits.1._score: 12.29} - - lte: {hits.hits.1._score: 12.30} + - lte: {hits.hits.1._score: 12.31} - match: {hits.hits.2._id: "3"} - gte: {hits.hits.2._score: 0.00} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/vectors/35_sparse_vector_l1l2.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/vectors/35_sparse_vector_l1l2.yml index 05d210df7578a..3e24c8c211a69 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/vectors/35_sparse_vector_l1l2.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/vectors/35_sparse_vector_l1l2.yml @@ -63,7 +63,7 @@ setup: - match: {hits.hits.1._id: "2"} - gte: {hits.hits.1._score: 12.29} - - lte: {hits.hits.1._score: 12.30} + - lte: {hits.hits.1._score: 12.31} - match: {hits.hits.2._id: "3"} - gte: {hits.hits.2._score: 0.00} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml index 1f2e5ce9625e8..5a228d1be54a8 100644 --- 
a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml @@ -28,6 +28,8 @@ - is_true: features.monitoring - is_true: features.monitoring.enabled # - is_false: features.monitoring.available TODO fix once licensing is fixed + - is_true: features.analytics + - is_true: features.analytics.enabled - do: license.post: @@ -77,6 +79,8 @@ - is_true: features.monitoring - is_true: features.monitoring.enabled - is_true: features.monitoring.available + - is_true: features.analytics.enabled + - is_true: features.analytics.available - is_true: tagline - do: @@ -89,6 +93,7 @@ - is_true: graph.available - is_true: monitoring.enabled - is_true: monitoring.available + - is_true: analytics.available - do: xpack.info: diff --git a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/mapper/VectorEncoderDecoder.java b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/mapper/VectorEncoderDecoder.java index 31b94ae108e63..67078b370ea98 100644 --- a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/mapper/VectorEncoderDecoder.java +++ b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/mapper/VectorEncoderDecoder.java @@ -130,7 +130,7 @@ public void swap(int i, int j) { * @param values - values for the sparse query vector * @param n - number of dimensions */ - public static void sortSparseDimsDoubleValues(int[] dims, double[] values, int n) { + public static void sortSparseDimsFloatValues(int[] dims, float[] values, int n) { new InPlaceMergeSorter() { @Override public int compare(int i, int j) { @@ -143,7 +143,7 @@ public void swap(int i, int j) { dims[i] = dims[j]; dims[j] = tempDim; - double tempValue = values[j]; + float tempValue = values[j]; values[j] = values[i]; values[i] = tempValue; } diff --git a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtils.java b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtils.java index 10631aba4ce2d..9c54f267ca143 100644 --- a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtils.java +++ b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtils.java @@ -14,7 +14,7 @@ import java.util.List; import java.util.Map; -import static org.elasticsearch.xpack.vectors.mapper.VectorEncoderDecoder.sortSparseDimsDoubleValues; +import static org.elasticsearch.xpack.vectors.mapper.VectorEncoderDecoder.sortSparseDimsFloatValues; public class ScoreScriptUtils { @@ -37,7 +37,7 @@ public static double l1norm(List queryVector, VectorScriptDocValues.Dens Iterator queryVectorIter = queryVector.iterator(); double l1norm = 0; for (int dim = 0; dim < docVector.length; dim++){ - l1norm += Math.abs(queryVectorIter.next().doubleValue() - docVector[dim]); + l1norm += Math.abs(queryVectorIter.next().floatValue() - docVector[dim]); } return l1norm; } @@ -59,7 +59,7 @@ public static double l2norm(List queryVector, VectorScriptDocValues.Dens Iterator queryVectorIter = queryVector.iterator(); double l2norm = 0; for (int dim = 0; dim < docVector.length; dim++){ - double diff = queryVectorIter.next().doubleValue() - docVector[dim]; + double diff = queryVectorIter.next().floatValue() - docVector[dim]; l2norm += diff * diff; } return Math.sqrt(l2norm); @@ -97,11 +97,11 @@ public static final class CosineSimilarity { // calculate queryVectorMagnitude once per query execution public CosineSimilarity(List 
queryVector) { this.queryVector = queryVector; - double doubleValue; + double dotProduct = 0; for (Number value : queryVector) { - doubleValue = value.doubleValue(); - dotProduct += doubleValue * doubleValue; + float floatValue = value.floatValue(); + dotProduct += floatValue * floatValue; } this.queryVectorMagnitude = Math.sqrt(dotProduct); } @@ -130,7 +130,7 @@ private static double intDotProduct(List v1, float[] v2){ double v1v2DotProduct = 0; Iterator v1Iter = v1.iterator(); for (int dim = 0; dim < v2.length; dim++) { - v1v2DotProduct += v1Iter.next().doubleValue() * v2[dim]; + v1v2DotProduct += v1Iter.next().floatValue() * v2[dim]; } return v1v2DotProduct; } @@ -139,7 +139,7 @@ private static double intDotProduct(List v1, float[] v2){ //**************FUNCTIONS FOR SPARSE VECTORS public static class VectorSparseFunctions { - final double[] queryValues; + final float[] queryValues; final int[] queryDims; // prepare queryVector once per script execution @@ -147,7 +147,7 @@ public static class VectorSparseFunctions { public VectorSparseFunctions(Map queryVector) { //break vector into two arrays dims and values int n = queryVector.size(); - queryValues = new double[n]; + queryValues = new float[n]; queryDims = new int[n]; int i = 0; for (Map.Entry dimValue : queryVector.entrySet()) { @@ -156,11 +156,11 @@ public VectorSparseFunctions(Map queryVector) { } catch (final NumberFormatException e) { throw new IllegalArgumentException("Failed to parse a query vector dimension, it must be an integer!", e); } - queryValues[i] = dimValue.getValue().doubleValue(); + queryValues[i] = dimValue.getValue().floatValue(); i++; } // Sort dimensions in the ascending order and sort values in the same order as their corresponding dimensions - sortSparseDimsDoubleValues(queryDims, queryValues, n); + sortSparseDimsFloatValues(queryDims, queryValues, n); } } @@ -317,7 +317,7 @@ public double cosineSimilaritySparse(VectorScriptDocValues.SparseVectorScriptDoc } } - private static double intDotProductSparse(double[] v1Values, int[] v1Dims, float[] v2Values, int[] v2Dims) { + private static double intDotProductSparse(float[] v1Values, int[] v1Dims, float[] v2Values, int[] v2Dims) { double v1v2DotProduct = 0; int v1Index = 0; int v2Index = 0; diff --git a/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtilsTests.java b/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtilsTests.java index f9bb87ece0ca8..87f8f83c06bd7 100644 --- a/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtilsTests.java +++ b/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtilsTests.java @@ -36,11 +36,11 @@ public void testDenseVectorFunctions() { BytesRef encodedDocVector = mockEncodeDenseVector(docVector); VectorScriptDocValues.DenseVectorScriptDocValues dvs = mock(VectorScriptDocValues.DenseVectorScriptDocValues.class); when(dvs.getEncodedValue()).thenReturn(encodedDocVector); - List queryVector = Arrays.asList(0.5, 111.3, -13.0, 14.8, -156.0); + List queryVector = Arrays.asList(0.5f, 111.3f, -13.0f, 14.8f, -156.0f); // test dotProduct double result = dotProduct(queryVector, dvs); - assertEquals("dotProduct result is not equal to the expected value!", 65425.626, result, 0.001); + assertEquals("dotProduct result is not equal to the expected value!", 65425.624, result, 0.001); // test cosineSimilarity CosineSimilarity cosineSimilarity = new CosineSimilarity(queryVector); @@ -91,7 +91,7 @@ public 
void testSparseVectorFunctions() { // test dotProduct DotProductSparse docProductSparse = new DotProductSparse(queryVector); double result = docProductSparse.dotProductSparse(dvs); - assertEquals("dotProductSparse result is not equal to the expected value!", 65425.626, result, 0.001); + assertEquals("dotProductSparse result is not equal to the expected value!", 65425.624, result, 0.001); // test cosineSimilarity CosineSimilaritySparse cosineSimilaritySparse = new CosineSimilaritySparse(queryVector); @@ -128,7 +128,7 @@ public void testSparseVectorMissingDimensions1() { // test dotProduct DotProductSparse docProductSparse = new DotProductSparse(queryVector); double result = docProductSparse.dotProductSparse(dvs); - assertEquals("dotProductSparse result is not equal to the expected value!", 65425.626, result, 0.001); + assertEquals("dotProductSparse result is not equal to the expected value!", 65425.624, result, 0.001); // test cosineSimilarity CosineSimilaritySparse cosineSimilaritySparse = new CosineSimilaritySparse(queryVector); @@ -165,7 +165,7 @@ public void testSparseVectorMissingDimensions2() { // test dotProduct DotProductSparse docProductSparse = new DotProductSparse(queryVector); double result = docProductSparse.dotProductSparse(dvs); - assertEquals("dotProductSparse result is not equal to the expected value!", 65425.626, result, 0.001); + assertEquals("dotProductSparse result is not equal to the expected value!", 65425.624, result, 0.001); // test cosineSimilarity CosineSimilaritySparse cosineSimilaritySparse = new CosineSimilaritySparse(queryVector); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java index 99945c0937538..49e27f51ea7c4 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java @@ -334,12 +334,13 @@ static Tuple createURI(HttpRequest request) { String part = pathParts[i]; boolean isLast = i == pathParts.length - 1; if (Strings.isEmpty(part) == false) { - String appendPart = part; + unescapedPathParts.add(URLDecoder.decode(part, StandardCharsets.UTF_8.name())); + // if the passed URL ends with a slash, adding an empty string to the + // unescaped paths will ensure the slash will be added back boolean appendSlash = isPathEndsWithSlash && isLast; if (appendSlash) { - appendPart += "/"; + unescapedPathParts.add(""); } - unescapedPathParts.add(URLDecoder.decode(appendPart, StandardCharsets.UTF_8.name())); } } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/ActionWrapperTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/ActionWrapperTests.java index 8931319501b6d..e6825a0cac93e 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/ActionWrapperTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/ActionWrapperTests.java @@ -54,7 +54,7 @@ public class ActionWrapperTests extends ESTestCase { private Watch watch = mock(Watch.class); @SuppressWarnings("unchecked") private ExecutableAction executableAction = mock(ExecutableAction.class); - private ActionWrapper actionWrapper = new ActionWrapper("_action", null, NeverCondition.INSTANCE, null, executableAction, null); + private ActionWrapper actionWrapper = new 
ActionWrapper("_action", null, NeverCondition.INSTANCE, null, executableAction, null, null); public void testThatUnmetActionConditionResetsAckStatus() throws Exception { WatchStatus watchStatus = new WatchStatus(now, Collections.singletonMap("_action", createActionStatus(State.ACKED))); @@ -84,7 +84,7 @@ public void testThatMultipleResultsCanBeReturned() throws Exception { final ExecutableAction executableAction = new ExecutableLoggingAction(loggingAction, logger, new MockTextTemplateEngine()); ActionWrapper wrapper = new ActionWrapper("_action", null, InternalAlwaysCondition.INSTANCE, null, executableAction, - "ctx.payload.my_path"); + "ctx.payload.my_path", null); WatchExecutionContext ctx = mockExecutionContent(watch); Payload.Simple payload = new Payload.Simple(Map.of("my_path", @@ -111,7 +111,7 @@ public void testThatMultipleResultsCanBeReturned() throws Exception { public void testThatSpecifiedPathIsNotCollection() { ActionWrapper wrapper = new ActionWrapper("_action", null, InternalAlwaysCondition.INSTANCE, null, executableAction, - "ctx.payload.my_path"); + "ctx.payload.my_path", null); WatchExecutionContext ctx = mockExecutionContent(watch); Payload.Simple payload = new Payload.Simple(Map.of("my_path", "not a map")); when(ctx.payload()).thenReturn(payload); @@ -127,7 +127,7 @@ public void testThatSpecifiedPathIsNotCollection() { public void testEmptyCollection() { ActionWrapper wrapper = new ActionWrapper("_action", null, InternalAlwaysCondition.INSTANCE, null, executableAction, - "ctx.payload.my_path"); + "ctx.payload.my_path", null); WatchExecutionContext ctx = mockExecutionContent(watch); Payload.Simple payload = new Payload.Simple(Map.of("my_path", Collections.emptyList())); when(ctx.payload()).thenReturn(payload); @@ -143,7 +143,7 @@ public void testEmptyCollection() { public void testPartialFailure() throws Exception { ActionWrapper wrapper = new ActionWrapper("_action", null, InternalAlwaysCondition.INSTANCE, null, executableAction, - "ctx.payload.my_path"); + "ctx.payload.my_path", null); WatchExecutionContext ctx = mockExecutionContent(watch); Payload.Simple payload = new Payload.Simple(Map.of("my_path", List.of( @@ -165,9 +165,9 @@ public void testPartialFailure() throws Exception { assertThat(result.action().status(), is(Action.Result.Status.PARTIAL_FAILURE)); } - public void testLimitOfNumberOfActionsExecuted() throws Exception { + public void testDefaultLimitOfNumberOfActionsExecuted() throws Exception { ActionWrapper wrapper = new ActionWrapper("_action", null, InternalAlwaysCondition.INSTANCE, null, executableAction, - "ctx.payload.my_path"); + "ctx.payload.my_path", null); WatchExecutionContext ctx = mockExecutionContent(watch); List> itemsPayload = new ArrayList<>(); for (int i = 0; i < 101; i++) { @@ -193,11 +193,49 @@ public void testLimitOfNumberOfActionsExecuted() throws Exception { assertThat(map.get("foreach"), instanceOf(List.class)); List> actions = (List) map.get("foreach"); assertThat(actions, hasSize(100)); + assertThat(map, hasKey("max_iterations")); + assertThat(map.get("max_iterations"), is(100)); assertThat(map, hasKey("number_of_actions_executed")); assertThat(map.get("number_of_actions_executed"), is(100)); } } + public void testConfiguredLimitOfNumberOfActionsExecuted() throws Exception { + int randomMaxIterations = randomIntBetween(1, 1000); + ActionWrapper wrapper = new ActionWrapper("_action", null, InternalAlwaysCondition.INSTANCE, null, executableAction, + "ctx.payload.my_path", randomMaxIterations); + WatchExecutionContext ctx = 
mockExecutionContent(watch); + List> itemsPayload = new ArrayList<>(); + for (int i = 0; i < randomMaxIterations + 1; i++) { + final Action.Result actionResult = new LoggingAction.Result.Success("log_message " + i);; + final Payload singleItemPayload = new Payload.Simple(Map.of("key", String.valueOf(i))); + itemsPayload.add(Map.of("key", String.valueOf(i))); + when(executableAction.execute(eq("_action"), eq(ctx), eq(singleItemPayload))).thenReturn(actionResult); + } + + Payload.Simple payload = new Payload.Simple(Map.of("my_path", itemsPayload)); + when(ctx.payload()).thenReturn(payload); + when(executableAction.logger()).thenReturn(logger); + + ActionWrapperResult result = wrapper.execute(ctx); + assertThat(result.action().status(), is(Action.Result.Status.SUCCESS)); + + // check that action toXContent contains all the results + try (XContentBuilder builder = jsonBuilder()) { + result.toXContent(builder, ToXContent.EMPTY_PARAMS); + final String json = Strings.toString(builder); + final Map map = XContentHelper.convertToMap(JsonXContent.jsonXContent, json, true); + assertThat(map, hasKey("foreach")); + assertThat(map.get("foreach"), instanceOf(List.class)); + List> actions = (List) map.get("foreach"); + assertThat(actions, hasSize(randomMaxIterations)); + assertThat(map, hasKey("max_iterations")); + assertThat(map.get("max_iterations"), is(randomMaxIterations)); + assertThat(map, hasKey("number_of_actions_executed")); + assertThat(map.get("number_of_actions_executed"), is(randomMaxIterations)); + } + } + private WatchExecutionContext mockExecutionContent(Watch watch) { WatchExecutionContext ctx = mock(WatchExecutionContext.class); when(watch.id()).thenReturn("watchId"); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java index 55febe4f9a602..4d93e58924491 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java @@ -751,7 +751,7 @@ public void testCreateUri() throws Exception { private void assertCreateUri(String uri, String expectedPath) { final HttpRequest request = HttpRequest.builder().fromUrl(uri).build(); final Tuple tuple = HttpClient.createURI(request); - assertThat(tuple.v2().getPath(), is(expectedPath)); + assertThat(tuple.v2().getRawPath(), is(expectedPath)); } public static ClusterService mockClusterService() { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java index 53f060b22c3e1..d42ddfeb2fe2c 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java @@ -227,7 +227,7 @@ public void testExecute() throws Exception { when(action.type()).thenReturn("MY_AWESOME_TYPE"); when(action.execute("_action", context, payload)).thenReturn(actionResult); - ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null); + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null, null); WatchStatus watchStatus = new 
WatchStatus(now, singletonMap("_action", new ActionStatus(now))); @@ -313,7 +313,7 @@ public void testExecuteFailedInput() throws Exception { ExecutableAction action = mock(ExecutableAction.class); when(action.execute("_action", context, payload)).thenReturn(actionResult); - ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null); + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null, null); WatchStatus watchStatus = new WatchStatus(now, singletonMap("_action", new ActionStatus(now))); when(watch.input()).thenReturn(input); @@ -378,7 +378,7 @@ public void testExecuteFailedCondition() throws Exception { ExecutableAction action = mock(ExecutableAction.class); when(action.execute("_action", context, payload)).thenReturn(actionResult); - ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null); + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null, null); WatchStatus watchStatus = new WatchStatus(now, singletonMap("_action", new ActionStatus(now))); when(watch.input()).thenReturn(input); @@ -442,7 +442,7 @@ public void testExecuteFailedWatchTransform() throws Exception { ExecutableAction action = mock(ExecutableAction.class); when(action.execute("_action", context, payload)).thenReturn(actionResult); - ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null); + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null, null); WatchStatus watchStatus = new WatchStatus(now, singletonMap("_action", new ActionStatus(now))); when(watch.input()).thenReturn(input); @@ -520,7 +520,7 @@ public void testExecuteFailedActionTransform() throws Exception { when(action.logger()).thenReturn(logger); when(action.execute("_action", context, payload)).thenReturn(actionResult); - ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null); + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null, null); WatchStatus watchStatus = new WatchStatus(now, singletonMap("_action", new ActionStatus(now))); @@ -600,7 +600,7 @@ public void testExecuteInner() throws Exception { ExecutableAction action = mock(ExecutableAction.class); when(action.execute("_action", context, payload)).thenReturn(actionResult); - ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null); + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null, null); ZonedDateTime time = clock.instant().atZone(ZoneOffset.UTC); WatchStatus watchStatus = new WatchStatus(time, singletonMap("_action", new ActionStatus(now))); @@ -649,7 +649,7 @@ public void testExecuteInnerThrottled() throws Exception { ExecutableAction action = mock(ExecutableAction.class); when(action.type()).thenReturn("_type"); - ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null); + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null, null); ZonedDateTime time = clock.instant().atZone(ZoneOffset.UTC); WatchStatus watchStatus = new WatchStatus(time, 
singletonMap("_action", new ActionStatus(now))); @@ -712,7 +712,7 @@ public void testExecuteInnerConditionNotMet() throws Exception { ExecutableAction action = mock(ExecutableAction.class); when(action.type()).thenReturn("_type"); - ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null); + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null, null); ZonedDateTime time = clock.instant().atZone(ZoneOffset.UTC); WatchStatus watchStatus = new WatchStatus(time, singletonMap("_action", new ActionStatus(now))); @@ -769,7 +769,7 @@ public void testExecuteInnerConditionNotMetDueToException() throws Exception { ExecutableAction action = mock(ExecutableAction.class); when(action.type()).thenReturn("_type"); when(action.logger()).thenReturn(logger); - ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null); + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null, null); ZonedDateTime time = clock.instant().atZone(ZoneOffset.UTC); WatchStatus watchStatus = new WatchStatus(time, singletonMap("_action", new ActionStatus(now))); @@ -817,7 +817,7 @@ public void testExecuteConditionNotMet() throws Exception { ExecutableCondition actionCondition = mock(ExecutableCondition.class); ExecutableTransform actionTransform = mock(ExecutableTransform.class); ExecutableAction action = mock(ExecutableAction.class); - ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null); + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action, null, null); ZonedDateTime time = clock.instant().atZone(ZoneOffset.UTC); WatchStatus watchStatus = new WatchStatus(time, singletonMap("_action", new ActionStatus(now))); @@ -946,7 +946,7 @@ public void testThatTriggeredWatchDeletionHappensOnlyIfWatchExists() throws Exce when(action.type()).thenReturn("MY_AWESOME_TYPE"); when(action.execute("_action", context, payload)).thenReturn(actionResult); - ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, null, null, action, null); + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, null, null, action, null, null); WatchStatus watchStatus = new WatchStatus(now, singletonMap("_action", new ActionStatus(now))); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherTestUtils.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherTestUtils.java index aa1231baa17a4..c10f010f98ef3 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherTestUtils.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherTestUtils.java @@ -158,7 +158,7 @@ public static Watch createTestWatch(String watchName, Client client, HttpClient httpRequest.path(new TextTemplate("/foobarbaz/{{ctx.watch_id}}")); httpRequest.body(new TextTemplate("{{ctx.watch_id}} executed with {{ctx.payload.response.hits.total_hits}} hits")); actions.add(new ActionWrapper("_webhook", null, null, null, new ExecutableWebhookAction(new WebhookAction(httpRequest.build()), - logger, httpClient, engine), null)); + logger, httpClient, engine), null, null)); EmailTemplate email = EmailTemplate.builder().from("from@test.com").to("to@test.com").build(); @@ -166,7 +166,7 @@ 
public static Watch createTestWatch(String watchName, Client client, HttpClient EmailAction action = new EmailAction(email, "testaccount", auth, Profile.STANDARD, null, null); ExecutableEmailAction executale = new ExecutableEmailAction(action, logger, emailService, engine, new HtmlSanitizer(Settings.EMPTY), Collections.emptyMap()); - actions.add(new ActionWrapper("_email", null, null, null, executale, null)); + actions.add(new ActionWrapper("_email", null, null, null, executale, null, null)); ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC); Map statuses = new HashMap<>(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java index feadeba084d3e..e810c14615b73 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java @@ -441,7 +441,7 @@ public void testParseWatchWithoutTriggerDoesNotWork() throws Exception { private WatchParser createWatchparser() throws Exception { LoggingAction loggingAction = new LoggingAction(new TextTemplate("foo"), null, null); List actions = Collections.singletonList(new ActionWrapper("_logging_", randomThrottler(), null, null, - new ExecutableLoggingAction(loggingAction, logger, new MockTextTemplateEngine()), null)); + new ExecutableLoggingAction(loggingAction, logger, new MockTextTemplateEngine()), null, null)); ScheduleRegistry scheduleRegistry = registry(new IntervalSchedule(new IntervalSchedule.Interval(1, IntervalSchedule.Interval.Unit.SECONDS))); @@ -585,7 +585,8 @@ private List randomActions() { randomFrom(DataAttachment.JSON, DataAttachment.YAML), EmailAttachments.EMPTY_ATTACHMENTS); list.add(new ActionWrapper("_email_" + randomAlphaOfLength(8), randomThrottler(), AlwaysConditionTests.randomCondition(scriptService), randomTransform(), - new ExecutableEmailAction(action, logger, emailService, templateEngine, htmlSanitizer, Collections.emptyMap()), null)); + new ExecutableEmailAction(action, logger, emailService, templateEngine, htmlSanitizer, + Collections.emptyMap()), null, null)); } if (randomBoolean()) { ZoneOffset timeZone = randomBoolean() ? 
ZoneOffset.UTC : null; @@ -596,7 +597,7 @@ private List randomActions() { list.add(new ActionWrapper("_index_" + randomAlphaOfLength(8), randomThrottler(), AlwaysConditionTests.randomCondition(scriptService), randomTransform(), new ExecutableIndexAction(action, logger, client, TimeValue.timeValueSeconds(30), - TimeValue.timeValueSeconds(30)), null)); + TimeValue.timeValueSeconds(30)), null, null)); } if (randomBoolean()) { HttpRequestTemplate httpRequest = HttpRequestTemplate.builder("test.host", randomIntBetween(8000, 9000)) @@ -606,7 +607,7 @@ private List randomActions() { WebhookAction action = new WebhookAction(httpRequest); list.add(new ActionWrapper("_webhook_" + randomAlphaOfLength(8), randomThrottler(), AlwaysConditionTests.randomCondition(scriptService), randomTransform(), - new ExecutableWebhookAction(action, logger, httpClient, templateEngine), null)); + new ExecutableWebhookAction(action, logger, httpClient, templateEngine), null, null)); } return list; } diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle index 70767faf33499..b962e912cf438 100644 --- a/x-pack/qa/full-cluster-restart/build.gradle +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -80,8 +80,7 @@ task bwcTest { task copyTestNodeKeyMaterial(type: Copy) { from project(':x-pack:plugin:core').files('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt', - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') + 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt') into outputDir } @@ -116,21 +115,15 @@ for (Version version : bwcVersions.indexCompatible) { setting 'xpack.security.enabled', 'true' setting 'xpack.security.transport.ssl.enabled', 'true' - rootProject.globalInfo.ready { - if (project.inFipsJvm) { - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - } else { - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' - } - } + + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + setting 'xpack.license.self_generated.type', 'trial' dependsOn copyTestNodeKeyMaterial extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') - extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') keystoreFile 'xpack.watcher.encryption_key', "${project.projectDir}/src/test/resources/system_key" setting 'xpack.watcher.encrypt_sensitive_data', 'true' @@ -162,19 +155,13 @@ for (Version version : bwcVersions.indexCompatible) { // some tests rely on the translog not being flushed setting 'indices.memory.shard_inactive_time', '20m' setting 'xpack.security.enabled', 'true' - rootProject.globalInfo.ready { - if (project.inFipsJvm) { - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - } else { - setting 
'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' - } - } + + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + setting 'xpack.license.self_generated.type', 'trial' dependsOn copyTestNodeKeyMaterial - extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index 46e9bc10b21ed..3298373209941 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -83,8 +83,7 @@ task bwcTest { task copyTestNodeKeyMaterial(type: Copy) { from project(':x-pack:plugin:core').files('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt', - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') + 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt') into outputDir } @@ -122,18 +121,11 @@ for (Version version : bwcVersions.wireCompatible) { setting 'xpack.security.authc.token.enabled', 'true' setting 'xpack.security.authc.token.timeout', '60m' setting 'xpack.security.audit.enabled', 'true' - rootProject.globalInfo.ready { - if (project.inFipsJvm) { - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - } else { - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' - } - } + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + dependsOn copyTestNodeKeyMaterial - extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') if (version.onOrAfter('7.0.0')) { @@ -188,22 +180,15 @@ for (Version version : bwcVersions.wireCompatible) { setting 'xpack.security.enabled', 'true' setting 'xpack.security.transport.ssl.enabled', 'true' setting 'xpack.security.authc.token.timeout', '60m' - rootProject.globalInfo.ready { - if (project.inFipsJvm) { - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - } else { - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' - } - } + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + setting 'node.attr.upgraded', 'true' setting 'xpack.security.authc.token.enabled', 'true' setting 
'xpack.security.audit.enabled', 'true' setting 'node.name', "upgraded-node-${stopNode}" dependsOn copyTestNodeKeyMaterial - extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') if (version.onOrAfter('7.0.0')) {