diff --git a/.ci/os.ps1 b/.ci/os.ps1 index 028bb460a71d6..7ec09daceed7e 100644 --- a/.ci/os.ps1 +++ b/.ci/os.ps1 @@ -27,10 +27,6 @@ New-Item -ItemType directory -Path \tmp $ErrorActionPreference="Continue" # TODO: remove the task exclusions once dependencies are set correctly and these don't run for Windows or buldiung the deb on windows is fixed -& .\gradlew.bat -g "C:\Users\$env:username\.gradle" --parallel --scan --console=plain destructiveDistroTest ` - -x :distribution:packages:buildOssDeb ` - -x :distribution:packages:buildDeb ` - -x :distribution:packages:buildOssRpm ` - -x :distribution:packages:buildRpm ` +& .\gradlew.bat -g "C:\Users\$env:username\.gradle" --parallel --scan --console=plain destructiveDistroTest exit $LastExitCode diff --git a/TESTING.asciidoc b/TESTING.asciidoc index e9279c3c7ce41..1ec29e781564d 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -43,6 +43,12 @@ supports a remote debugging option: ./gradlew run --debug-jvm --------------------------------------------------------------------------- +This will instruct all JVMs (including any that run CLI tools such as creating the keyring or adding users) +to suspend and initiate a debug connection on ports incrementing from `5005`. +As such the IDE needs to be instructed to listen for connections on these ports. +Since we might run multiple JVMs as part of configuring and starting the cluster, it's +recommended to have the auto restart option checked. + ==== Distribution By default a node is started with the zip distribution. diff --git a/buildSrc/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java b/buildSrc/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java index 361e483d4a840..27cde3b6e1bf7 100644 --- a/buildSrc/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java +++ b/buildSrc/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java @@ -37,7 +37,7 @@ * Since how to reap a given service is platform and service dependent, this tool * operates on system commands to execute. It takes a single argument, a directory * that will contain files with reaping commands. Each line in each file will be - * executed with {@link Runtime#getRuntime()#exec}. + * executed with {@link Runtime#exec(String)}. * * The main method will wait indefinitely on the parent process (Gradle) by * reading from stdin.
When Gradle shuts down, whether normally or abruptly, the diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 5accd45d647ba..18b10b9e3104b 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -676,6 +676,10 @@ class BuildPlugin implements Plugin { */ (javadoc.options as CoreJavadocOptions).addBooleanOption('html5', true) } + // ensure javadoc task is run with 'check' + project.pluginManager.withPlugin('lifecycle-base') { + project.tasks.getByName(LifecycleBasePlugin.CHECK_TASK_NAME).dependsOn(project.tasks.withType(Javadoc)) + } configureJavadocJar(project) } @@ -891,7 +895,6 @@ class BuildPlugin implements Plugin { test.systemProperty('io.netty.noUnsafe', 'true') test.systemProperty('io.netty.noKeySetOptimization', 'true') test.systemProperty('io.netty.recycler.maxCapacityPerThread', '0') - test.systemProperty('io.netty.allocator.numDirectArenas', '0') test.testLogging { TestLoggingContainer logging -> logging.showExceptions = true diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java index c9152486a1c51..1fb22812683ee 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java @@ -42,7 +42,7 @@ public JarHellTask() { @TaskAction public void runJarHellCheck() { LoggedExec.javaexec(getProject(), spec -> { - spec.classpath(getClasspath()); + spec.environment("CLASSPATH", getClasspath().getAsPath()); spec.setMain("org.elasticsearch.bootstrap.JarHell"); }); } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java index 1512e9f2cf73c..129c74df7276d 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java @@ -36,7 +36,7 @@ public Boolean getDebug() { @Override public void beforeStart() { - int debugPort = 8000; + int debugPort = 5005; int httpPort = 9200; int transportPort = 9300; Map additionalSettings = System.getProperties().entrySet().stream() @@ -57,7 +57,7 @@ public void beforeStart() { "Running elasticsearch in debug mode, {} suspending until connected on debugPort {}", node, debugPort ); - node.jvmArgs("-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=" + debugPort); + node.jvmArgs("-agentlib:jdwp=transport=dt_socket,server=n,suspend=y,address=" + debugPort); debugPort += 1; } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsResponse.java index 8f4232cca6e2c..f53f125cbf788 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsResponse.java @@ -133,7 +133,7 @@ public boolean equals(Object obj) { && Objects.equals(id, other.id) && docVersion == other.docVersion && found == other.found - && tookInMillis == tookInMillis + && tookInMillis == other.tookInMillis && Objects.equals(termVectorList, other.termVectorList); } diff --git 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/enrich/NamedPolicy.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/enrich/NamedPolicy.java index ea0ea52e892bd..4e158fc532667 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/enrich/NamedPolicy.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/enrich/NamedPolicy.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -60,7 +59,7 @@ private static void declareParserOptions(ConstructingObjectParser parser) parser.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> { XContentBuilder builder = XContentBuilder.builder(p.contentType().xContent()); builder.copyCurrentStructure(p); - return BytesArray.bytes(builder); + return BytesReference.bytes(builder); }, QUERY_FIELD); parser.declareStringArray(ConstructingObjectParser.constructorArg(), INDICES_FIELD); parser.declareString(ConstructingObjectParser.constructorArg(), MATCH_FIELD_FIELD); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ilm/IndexLifecycleExplainResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ilm/IndexLifecycleExplainResponse.java index 11a401271fcac..b5e87b496f80d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ilm/IndexLifecycleExplainResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ilm/IndexLifecycleExplainResponse.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -86,7 +85,7 @@ public class IndexLifecycleExplainResponse implements ToXContentObject { PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> { XContentBuilder builder = JsonXContent.contentBuilder(); builder.copyCurrentStructure(p); - return BytesArray.bytes(builder); + return BytesReference.bytes(builder); }, STEP_INFO_FIELD); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> PhaseExecutionInfo.parse(p, ""), PHASE_EXECUTION_INFO); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/classification/MulticlassConfusionMatrixMetric.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/classification/MulticlassConfusionMatrixMetric.java index a8e8545009b25..7199660e94d0c 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/classification/MulticlassConfusionMatrixMetric.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/classification/MulticlassConfusionMatrixMetric.java @@ -21,17 +21,17 @@ import org.elasticsearch.client.ml.dataframe.evaluation.EvaluationMetric; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import 
org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.Collections; -import java.util.Map; +import java.util.List; import java.util.Objects; -import java.util.TreeMap; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; /** @@ -97,32 +97,28 @@ public int hashCode() { public static class Result implements EvaluationMetric.Result { private static final ParseField CONFUSION_MATRIX = new ParseField("confusion_matrix"); - private static final ParseField OTHER_CLASSES_COUNT = new ParseField("_other_"); + private static final ParseField OTHER_ACTUAL_CLASS_COUNT = new ParseField("other_actual_class_count"); @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "multiclass_confusion_matrix_result", true, a -> new Result((Map>) a[0], (long) a[1])); + "multiclass_confusion_matrix_result", true, a -> new Result((List) a[0], (Long) a[1])); static { - PARSER.declareObject( - constructorArg(), - (p, c) -> p.map(TreeMap::new, p2 -> p2.map(TreeMap::new, XContentParser::longValue)), - CONFUSION_MATRIX); - PARSER.declareLong(constructorArg(), OTHER_CLASSES_COUNT); + PARSER.declareObjectArray(optionalConstructorArg(), ActualClass.PARSER, CONFUSION_MATRIX); + PARSER.declareLong(optionalConstructorArg(), OTHER_ACTUAL_CLASS_COUNT); } public static Result fromXContent(XContentParser parser) { return PARSER.apply(parser, null); } - // Immutable - private final Map> confusionMatrix; - private final long otherClassesCount; + private final List confusionMatrix; + private final Long otherActualClassCount; - public Result(Map> confusionMatrix, long otherClassesCount) { - this.confusionMatrix = Collections.unmodifiableMap(Objects.requireNonNull(confusionMatrix)); - this.otherClassesCount = otherClassesCount; + public Result(@Nullable List confusionMatrix, @Nullable Long otherActualClassCount) { + this.confusionMatrix = confusionMatrix != null ? 
Collections.unmodifiableList(Objects.requireNonNull(confusionMatrix)) : null; + this.otherActualClassCount = otherActualClassCount; } @Override @@ -130,19 +126,23 @@ public String getMetricName() { return NAME; } - public Map> getConfusionMatrix() { + public List getConfusionMatrix() { return confusionMatrix; } - public long getOtherClassesCount() { - return otherClassesCount; + public Long getOtherActualClassCount() { + return otherActualClassCount; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(CONFUSION_MATRIX.getPreferredName(), confusionMatrix); - builder.field(OTHER_CLASSES_COUNT.getPreferredName(), otherClassesCount); + if (confusionMatrix != null) { + builder.field(CONFUSION_MATRIX.getPreferredName(), confusionMatrix); + } + if (otherActualClassCount != null) { + builder.field(OTHER_ACTUAL_CLASS_COUNT.getPreferredName(), otherActualClassCount); + } builder.endObject(); return builder; } @@ -153,12 +153,140 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; Result that = (Result) o; return Objects.equals(this.confusionMatrix, that.confusionMatrix) - && this.otherClassesCount == that.otherClassesCount; + && Objects.equals(this.otherActualClassCount, that.otherActualClassCount); } @Override public int hashCode() { - return Objects.hash(confusionMatrix, otherClassesCount); + return Objects.hash(confusionMatrix, otherActualClassCount); + } + } + + public static class ActualClass implements ToXContentObject { + + private static final ParseField ACTUAL_CLASS = new ParseField("actual_class"); + private static final ParseField ACTUAL_CLASS_DOC_COUNT = new ParseField("actual_class_doc_count"); + private static final ParseField PREDICTED_CLASSES = new ParseField("predicted_classes"); + private static final ParseField OTHER_PREDICTED_CLASS_DOC_COUNT = new ParseField("other_predicted_class_doc_count"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>( + "multiclass_confusion_matrix_actual_class", + true, + a -> new ActualClass((String) a[0], (Long) a[1], (List) a[2], (Long) a[3])); + + static { + PARSER.declareString(optionalConstructorArg(), ACTUAL_CLASS); + PARSER.declareLong(optionalConstructorArg(), ACTUAL_CLASS_DOC_COUNT); + PARSER.declareObjectArray(optionalConstructorArg(), PredictedClass.PARSER, PREDICTED_CLASSES); + PARSER.declareLong(optionalConstructorArg(), OTHER_PREDICTED_CLASS_DOC_COUNT); + } + + private final String actualClass; + private final Long actualClassDocCount; + private final List predictedClasses; + private final Long otherPredictedClassDocCount; + + public ActualClass(@Nullable String actualClass, + @Nullable Long actualClassDocCount, + @Nullable List predictedClasses, + @Nullable Long otherPredictedClassDocCount) { + this.actualClass = actualClass; + this.actualClassDocCount = actualClassDocCount; + this.predictedClasses = predictedClasses != null ? 
Collections.unmodifiableList(predictedClasses) : null; + this.otherPredictedClassDocCount = otherPredictedClassDocCount; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (actualClass != null) { + builder.field(ACTUAL_CLASS.getPreferredName(), actualClass); + } + if (actualClassDocCount != null) { + builder.field(ACTUAL_CLASS_DOC_COUNT.getPreferredName(), actualClassDocCount); + } + if (predictedClasses != null) { + builder.field(PREDICTED_CLASSES.getPreferredName(), predictedClasses); + } + if (otherPredictedClassDocCount != null) { + builder.field(OTHER_PREDICTED_CLASS_DOC_COUNT.getPreferredName(), otherPredictedClassDocCount); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ActualClass that = (ActualClass) o; + return Objects.equals(this.actualClass, that.actualClass) + && Objects.equals(this.actualClassDocCount, that.actualClassDocCount) + && Objects.equals(this.predictedClasses, that.predictedClasses) + && Objects.equals(this.otherPredictedClassDocCount, that.otherPredictedClassDocCount); + } + + @Override + public int hashCode() { + return Objects.hash(actualClass, actualClassDocCount, predictedClasses, otherPredictedClassDocCount); + } + + @Override + public String toString() { + return Strings.toString(this); + } + } + + public static class PredictedClass implements ToXContentObject { + + private static final ParseField PREDICTED_CLASS = new ParseField("predicted_class"); + private static final ParseField COUNT = new ParseField("count"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>( + "multiclass_confusion_matrix_predicted_class", true, a -> new PredictedClass((String) a[0], (Long) a[1])); + + static { + PARSER.declareString(optionalConstructorArg(), PREDICTED_CLASS); + PARSER.declareLong(optionalConstructorArg(), COUNT); + } + + private final String predictedClass; + private final Long count; + + public PredictedClass(@Nullable String predictedClass, @Nullable Long count) { + this.predictedClass = predictedClass; + this.count = count; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (predictedClass != null) { + builder.field(PREDICTED_CLASS.getPreferredName(), predictedClass); + } + if (count != null) { + builder.field(COUNT.getPreferredName(), count); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PredictedClass that = (PredictedClass) o; + return Objects.equals(this.predictedClass, that.predictedClass) + && Objects.equals(this.count, that.count); + } + + @Override + public int hashCode() { + return Objects.hash(predictedClass, count); } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 7a96846dcdce8..d48c07ede60e5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -127,6 +127,8 @@ import org.elasticsearch.client.ml.dataframe.QueryConfig; import 
org.elasticsearch.client.ml.dataframe.evaluation.classification.Classification; import org.elasticsearch.client.ml.dataframe.evaluation.classification.MulticlassConfusionMatrixMetric; +import org.elasticsearch.client.ml.dataframe.evaluation.classification.MulticlassConfusionMatrixMetric.ActualClass; +import org.elasticsearch.client.ml.dataframe.evaluation.classification.MulticlassConfusionMatrixMetric.PredictedClass; import org.elasticsearch.client.ml.dataframe.evaluation.regression.MeanSquaredErrorMetric; import org.elasticsearch.client.ml.dataframe.evaluation.regression.RSquaredMetric; import org.elasticsearch.client.ml.dataframe.evaluation.regression.Regression; @@ -1777,7 +1779,7 @@ public void testEvaluateDataFrame_Classification() throws IOException { .add(docForClassification(indexName, "dog", "dog")) .add(docForClassification(indexName, "dog", "dog")) .add(docForClassification(indexName, "dog", "dog")) - .add(docForClassification(indexName, "horse", "cat")); + .add(docForClassification(indexName, "ant", "cat")); highLevelClient().bulk(regressionBulk, RequestOptions.DEFAULT); MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); @@ -1800,11 +1802,23 @@ public void testEvaluateDataFrame_Classification() throws IOException { assertThat( mcmResult.getConfusionMatrix(), equalTo( - Map.of( - "cat", Map.of("cat", 3L, "dog", 1L, "horse", 0L, "_other_", 1L), - "dog", Map.of("cat", 1L, "dog", 3L, "horse", 0L), - "horse", Map.of("cat", 1L, "dog", 0L, "horse", 0L)))); - assertThat(mcmResult.getOtherClassesCount(), equalTo(0L)); + List.of( + new ActualClass( + "ant", + 1L, + List.of(new PredictedClass("ant", 0L), new PredictedClass("cat", 1L), new PredictedClass("dog", 0L)), + 0L), + new ActualClass( + "cat", + 5L, + List.of(new PredictedClass("ant", 0L), new PredictedClass("cat", 3L), new PredictedClass("dog", 1L)), + 1L), + new ActualClass( + "dog", + 4L, + List.of(new PredictedClass("ant", 0L), new PredictedClass("cat", 1L), new PredictedClass("dog", 3L)), + 0L)))); + assertThat(mcmResult.getOtherActualClassCount(), equalTo(0L)); } { // Explicit size provided for MulticlassConfusionMatrixMetric metric EvaluateDataFrameRequest evaluateDataFrameRequest = @@ -1824,10 +1838,11 @@ public void testEvaluateDataFrame_Classification() throws IOException { assertThat( mcmResult.getConfusionMatrix(), equalTo( - Map.of( - "cat", Map.of("cat", 3L, "dog", 1L, "_other_", 1L), - "dog", Map.of("cat", 1L, "dog", 3L)))); - assertThat(mcmResult.getOtherClassesCount(), equalTo(1L)); + List.of( + new ActualClass("cat", 5L, List.of(new PredictedClass("cat", 3L), new PredictedClass("dog", 1L)), 1L), + new ActualClass("dog", 4L, List.of(new PredictedClass("cat", 1L), new PredictedClass("dog", 3L)), 0L) + ))); + assertThat(mcmResult.getOtherActualClassCount(), equalTo(1L)); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/EnrichDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/EnrichDocumentationIT.java index 14e46bc9ef09f..96bae2d62152e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/EnrichDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/EnrichDocumentationIT.java @@ -58,6 +58,10 @@ public void cleanup() { public void testPutPolicy() throws Exception { RestHighLevelClient client = highLevelClient(); + CreateIndexRequest createIndexRequest = new CreateIndexRequest("users") + .mapping(Map.of("properties", 
Map.of("email", Map.of("type", "keyword")))); + client.indices().create(createIndexRequest, RequestOptions.DEFAULT); + // tag::enrich-put-policy-request PutPolicyRequest putPolicyRequest = new PutPolicyRequest( "users-policy", "match", List.of("users"), @@ -104,6 +108,10 @@ public void testDeletePolicy() throws Exception { RestHighLevelClient client = highLevelClient(); { + CreateIndexRequest createIndexRequest = new CreateIndexRequest("users") + .mapping(Map.of("properties", Map.of("email", Map.of("type", "keyword")))); + client.indices().create(createIndexRequest, RequestOptions.DEFAULT); + // Add a policy, so that it can be deleted: PutPolicyRequest putPolicyRequest = new PutPolicyRequest( "users-policy", "match", List.of("users"), @@ -155,6 +163,10 @@ public void onFailure(Exception e) { public void testGetPolicy() throws Exception { RestHighLevelClient client = highLevelClient(); + CreateIndexRequest createIndexRequest = new CreateIndexRequest("users") + .mapping(Map.of("properties", Map.of("email", Map.of("type", "keyword")))); + client.indices().create(createIndexRequest, RequestOptions.DEFAULT); + PutPolicyRequest putPolicyRequest = new PutPolicyRequest( "users-policy", "match", List.of("users"), "email", List.of("address", "zip", "city", "state")); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index cfc0d2a191942..36947fd1b3b99 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -142,6 +142,8 @@ import org.elasticsearch.client.ml.dataframe.evaluation.Evaluation; import org.elasticsearch.client.ml.dataframe.evaluation.EvaluationMetric; import org.elasticsearch.client.ml.dataframe.evaluation.classification.MulticlassConfusionMatrixMetric; +import org.elasticsearch.client.ml.dataframe.evaluation.classification.MulticlassConfusionMatrixMetric.ActualClass; +import org.elasticsearch.client.ml.dataframe.evaluation.classification.MulticlassConfusionMatrixMetric.PredictedClass; import org.elasticsearch.client.ml.dataframe.evaluation.regression.MeanSquaredErrorMetric; import org.elasticsearch.client.ml.dataframe.evaluation.regression.RSquaredMetric; import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.AucRocMetric; @@ -3355,18 +3357,30 @@ public void testEvaluateDataFrame_Classification() throws Exception { MulticlassConfusionMatrixMetric.Result multiclassConfusionMatrix = response.getMetricByName(MulticlassConfusionMatrixMetric.NAME); // <1> - Map> confusionMatrix = multiclassConfusionMatrix.getConfusionMatrix(); // <2> - long otherClassesCount = multiclassConfusionMatrix.getOtherClassesCount(); // <3> + List confusionMatrix = multiclassConfusionMatrix.getConfusionMatrix(); // <2> + long otherClassesCount = multiclassConfusionMatrix.getOtherActualClassCount(); // <3> // end::evaluate-data-frame-results-classification assertThat(multiclassConfusionMatrix.getMetricName(), equalTo(MulticlassConfusionMatrixMetric.NAME)); assertThat( confusionMatrix, equalTo( - Map.of( - "cat", Map.of("cat", 3L, "dog", 1L, "ant", 0L, "_other_", 1L), - "dog", Map.of("cat", 1L, "dog", 3L, "ant", 0L), - "ant", Map.of("cat", 1L, "dog", 0L, "ant", 0L)))); + List.of( + new ActualClass( + "ant", + 1L, + List.of(new PredictedClass("ant", 0L), 
new PredictedClass("cat", 1L), new PredictedClass("dog", 0L)), + 0L), + new ActualClass( + "cat", + 5L, + List.of(new PredictedClass("ant", 0L), new PredictedClass("cat", 3L), new PredictedClass("dog", 1L)), + 1L), + new ActualClass( + "dog", + 4L, + List.of(new PredictedClass("ant", 0L), new PredictedClass("cat", 1L), new PredictedClass("dog", 3L)), + 0L)))); assertThat(otherClassesCount, equalTo(0L)); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/enrich/GetPolicyResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/enrich/GetPolicyResponseTests.java index fc0cfb733390f..ee9b25cd6a6c0 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/enrich/GetPolicyResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/enrich/GetPolicyResponseTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.client.enrich; import org.elasticsearch.client.AbstractResponseTestCase; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -80,7 +79,7 @@ private static EnrichPolicy createRandomEnrichPolicy(XContentType xContentType){ try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { builder.startObject(); builder.endObject(); - BytesReference querySource = BytesArray.bytes(builder); + BytesReference querySource = BytesReference.bytes(builder); return new EnrichPolicy( randomAlphaOfLength(4), randomBoolean() ? new EnrichPolicy.QuerySource(querySource, xContentType) : null, diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/classification/MulticlassConfusionMatrixMetricResultTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/classification/MulticlassConfusionMatrixMetricResultTests.java index 800a2cf7b9836..55b74eb94ea21 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/classification/MulticlassConfusionMatrixMetricResultTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/classification/MulticlassConfusionMatrixMetricResultTests.java @@ -19,19 +19,21 @@ package org.elasticsearch.client.ml.dataframe.evaluation.classification; import org.elasticsearch.client.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider; +import org.elasticsearch.client.ml.dataframe.evaluation.classification.MulticlassConfusionMatrixMetric.ActualClass; +import org.elasticsearch.client.ml.dataframe.evaluation.classification.MulticlassConfusionMatrixMetric.PredictedClass; +import org.elasticsearch.client.ml.dataframe.evaluation.classification.MulticlassConfusionMatrixMetric.Result; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.List; -import java.util.Map; -import java.util.TreeMap; import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.Stream; -public class MulticlassConfusionMatrixMetricResultTests extends AbstractXContentTestCase { +public class MulticlassConfusionMatrixMetricResultTests extends AbstractXContentTestCase { @Override protected NamedXContentRegistry xContentRegistry() { @@ -39,26 
+41,28 @@ protected NamedXContentRegistry xContentRegistry() { } @Override - protected MulticlassConfusionMatrixMetric.Result createTestInstance() { + protected Result createTestInstance() { int numClasses = randomIntBetween(2, 100); List classNames = Stream.generate(() -> randomAlphaOfLength(10)).limit(numClasses).collect(Collectors.toList()); - Map> confusionMatrix = new TreeMap<>(); + List actualClasses = new ArrayList<>(numClasses); for (int i = 0; i < numClasses; i++) { - Map row = new TreeMap<>(); - confusionMatrix.put(classNames.get(i), row); + List predictedClasses = new ArrayList<>(numClasses); for (int j = 0; j < numClasses; j++) { - if (randomBoolean()) { - row.put(classNames.get(i), randomNonNegativeLong()); - } + predictedClasses.add(new PredictedClass(classNames.get(j), randomBoolean() ? randomNonNegativeLong() : null)); } + actualClasses.add( + new ActualClass( + classNames.get(i), + randomBoolean() ? randomNonNegativeLong() : null, + predictedClasses, + randomBoolean() ? randomNonNegativeLong() : null)); } - long otherClassesCount = randomNonNegativeLong(); - return new MulticlassConfusionMatrixMetric.Result(confusionMatrix, otherClassesCount); + return new Result(actualClasses, randomBoolean() ? randomNonNegativeLong() : null); } @Override - protected MulticlassConfusionMatrixMetric.Result doParseInstance(XContentParser parser) throws IOException { - return MulticlassConfusionMatrixMetric.Result.fromXContent(parser); + protected Result doParseInstance(XContentParser parser) throws IOException { + return Result.fromXContent(parser); } @Override diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index a1ab72890a43c..25c50c0fa7392 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -19,6 +19,7 @@ import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.MavenFilteringHack import org.redline_rpm.header.Flags +import org.elasticsearch.gradle.OS import java.nio.file.Files import java.nio.file.Path @@ -105,6 +106,9 @@ addProcessFilesTask('rpm', false, false) // is the same Closure commonPackageConfig(String type, boolean oss, boolean jdk) { return { + onlyIf { + OS.current().equals(OS.WINDOWS) == false + } dependsOn "process${oss ? 'Oss' : ''}${jdk ? '' : 'NoJdk'}${type.capitalize()}Files" packageName "elasticsearch${oss ? '-oss' : ''}" arch (type == 'deb' ? 
'amd64' : 'X86_64') diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index 6ebf089e381b6..2002c97df8244 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -80,7 +80,6 @@ -Dio.netty.noUnsafe=true -Dio.netty.noKeySetOptimization=true -Dio.netty.recycler.maxCapacityPerThread=0 --Dio.netty.allocator.numDirectArenas=0 # log4j 2 -Dlog4j.shutdownHookEnabled=false diff --git a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java index d0d5bef9cfcf4..4a0eab45fb6ee 100644 --- a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java +++ b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java @@ -55,14 +55,6 @@ static List choose(final List userDefinedJvmOptions) throws Inte final List ergonomicChoices = new ArrayList<>(); final Map> finalJvmOptions = finalJvmOptions(userDefinedJvmOptions); final long heapSize = extractHeapSize(finalJvmOptions); - final Map systemProperties = extractSystemProperties(userDefinedJvmOptions); - if (systemProperties.containsKey("io.netty.allocator.type") == false) { - if (heapSize <= 1 << 30) { - ergonomicChoices.add("-Dio.netty.allocator.type=unpooled"); - } else { - ergonomicChoices.add("-Dio.netty.allocator.type=pooled"); - } - } final long maxDirectMemorySize = extractMaxDirectMemorySize(finalJvmOptions); if (maxDirectMemorySize == 0) { ergonomicChoices.add("-XX:MaxDirectMemorySize=" + heapSize / 2); diff --git a/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java b/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java index 7fe5cd0cf98b0..ee049a57d8528 100644 --- a/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java +++ b/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java @@ -117,20 +117,6 @@ public void testExtractNoSystemProperties() { assertTrue(parsedSystemProperties.isEmpty()); } - public void testPooledMemoryChoiceOnSmallHeap() throws InterruptedException, IOException { - final String smallHeap = randomFrom(Arrays.asList("64M", "512M", "1024M", "1G")); - assertThat( - JvmErgonomics.choose(Arrays.asList("-Xms" + smallHeap, "-Xmx" + smallHeap)), - hasItem("-Dio.netty.allocator.type=unpooled")); - } - - public void testPooledMemoryChoiceOnNotSmallHeap() throws InterruptedException, IOException { - final String largeHeap = randomFrom(Arrays.asList("1025M", "2048M", "2G", "8G")); - assertThat( - JvmErgonomics.choose(Arrays.asList("-Xms" + largeHeap, "-Xmx" + largeHeap)), - hasItem("-Dio.netty.allocator.type=pooled")); - } - public void testMaxDirectMemorySizeChoice() throws InterruptedException, IOException { final Map heapMaxDirectMemorySize = Map.of( "64M", Long.toString((64L << 20) / 2), diff --git a/docs/reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc index 8ad2403f38e0a..712538ec2786c 100644 --- a/docs/reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc @@ -1,18 +1,176 @@ [[analysis-cjk-bigram-tokenfilter]] -=== CJK Bigram Token Filter +=== CJK bigram token filter +++++ +CJK bigram +++++ -The 
`cjk_bigram` token filter forms bigrams out of the CJK -terms that are generated by the <> -or the `icu_tokenizer` (see {plugins}/analysis-icu-tokenizer.html[`analysis-icu` plugin]). +Forms https://en.wikipedia.org/wiki/Bigram[bigrams] out of CJK (Chinese, +Japanese, and Korean) tokens. -By default, when a CJK character has no adjacent characters to form a bigram, -it is output in unigram form. If you always want to output both unigrams and -bigrams, set the `output_unigrams` flag to `true`. This can be used for a -combined unigram+bigram approach. +This filter is included in {es}'s built-in <>. It uses Lucene's +https://lucene.apache.org/core/{lucene_version_path}/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html[CJKBigramFilter]. -Bigrams are generated for characters in `han`, `hiragana`, `katakana` and -`hangul`, but bigrams can be disabled for particular scripts with the -`ignored_scripts` parameter. All non-CJK input is passed through unmodified. + +[[analysis-cjk-bigram-tokenfilter-analyze-ex]] +==== Example + +The following <> request demonstrates how the +CJK bigram token filter works. + +[source,console] +-------------------------------------------------- +GET /_analyze +{ + "tokenizer" : "standard", + "filter" : ["cjk_bigram"], + "text" : "東京都は、日本の首都であり" +} +-------------------------------------------------- + +The filter produces the following tokens: + +[source,text] +-------------------------------------------------- +[ 東京, 京都, 都は, 日本, 本の, の首, 首都, 都で, であ, あり ] +-------------------------------------------------- + +///////////////////// +[source,console-result] +-------------------------------------------------- +{ + "tokens" : [ + { + "token" : "東京", + "start_offset" : 0, + "end_offset" : 2, + "type" : "", + "position" : 0 + }, + { + "token" : "京都", + "start_offset" : 1, + "end_offset" : 3, + "type" : "", + "position" : 1 + }, + { + "token" : "都は", + "start_offset" : 2, + "end_offset" : 4, + "type" : "", + "position" : 2 + }, + { + "token" : "日本", + "start_offset" : 5, + "end_offset" : 7, + "type" : "", + "position" : 3 + }, + { + "token" : "本の", + "start_offset" : 6, + "end_offset" : 8, + "type" : "", + "position" : 4 + }, + { + "token" : "の首", + "start_offset" : 7, + "end_offset" : 9, + "type" : "", + "position" : 5 + }, + { + "token" : "首都", + "start_offset" : 8, + "end_offset" : 10, + "type" : "", + "position" : 6 + }, + { + "token" : "都で", + "start_offset" : 9, + "end_offset" : 11, + "type" : "", + "position" : 7 + }, + { + "token" : "であ", + "start_offset" : 10, + "end_offset" : 12, + "type" : "", + "position" : 8 + }, + { + "token" : "あり", + "start_offset" : 11, + "end_offset" : 13, + "type" : "", + "position" : 9 + } + ] +} +-------------------------------------------------- +///////////////////// + +[[analysis-cjk-bigram-tokenfilter-analyzer-ex]] +==== Add to an analyzer + +The following <> request uses the +CJK bigram token filter to configure a new +<>. + +[source,console] +-------------------------------------------------- +PUT /cjk_bigram_example +{ + "settings" : { + "analysis" : { + "analyzer" : { + "standard_cjk_bigram" : { + "tokenizer" : "standard", + "filter" : ["cjk_bigram"] + } + } + } + } +} +-------------------------------------------------- + + +[[analysis-cjk-bigram-tokenfilter-configure-parms]] +==== Configurable parameters + +`ignored_scripts`:: ++ +-- +(Optional, array of character scripts) +Array of character scripts for which to disable bigrams. 
+Possible values: + +* `han` +* `hangul` +* `hiragana` +* `katakana` + +All non-CJK input is passed through unmodified. +-- + +`output_unigrams` +(Optional, boolean) +If `true`, emit tokens in both bigram and +https://en.wikipedia.org/wiki/N-gram[unigram] form. If `false`, a CJK character +is output in unigram form when it has no adjacent characters. Defaults to +`false`. + +[[analysis-cjk-bigram-tokenfilter-customize]] +==== Customize + +To customize the CJK bigram token filter, duplicate it to create the basis +for a new custom token filter. You can modify the filter using its configurable +parameters. [source,console] -------------------------------------------------- @@ -30,9 +188,9 @@ PUT /cjk_bigram_example "han_bigrams_filter" : { "type" : "cjk_bigram", "ignored_scripts": [ + "hangul", "hiragana", - "katakana", - "hangul" + "katakana" ], "output_unigrams" : true } diff --git a/docs/reference/analysis/tokenfilters/cjk-width-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/cjk-width-tokenfilter.asciidoc index 21bde5509a6a1..83b3ba8dee776 100644 --- a/docs/reference/analysis/tokenfilters/cjk-width-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/cjk-width-tokenfilter.asciidoc @@ -1,12 +1,83 @@ [[analysis-cjk-width-tokenfilter]] -=== CJK Width Token Filter +=== CJK width token filter +++++ +CJK width +++++ -The `cjk_width` token filter normalizes CJK width differences: +Normalizes width differences in CJK (Chinese, Japanese, and Korean) characters +as follows: -* Folds fullwidth ASCII variants into the equivalent basic Latin -* Folds halfwidth Katakana variants into the equivalent Kana +* Folds full-width ASCII character variants into the equivalent basic Latin +characters +* Folds half-width Katakana character variants into the equivalent Kana +characters -NOTE: This token filter can be viewed as a subset of NFKC/NFKD -Unicode normalization. See the {plugins}/analysis-icu-normalization-charfilter.html[`analysis-icu` plugin] -for full normalization support. +This filter is included in {es}'s built-in <>. It uses Lucene's +https://lucene.apache.org/core/{lucene_version_path}/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html[CJKWidthFilter]. +NOTE: This token filter can be viewed as a subset of NFKC/NFKD Unicode +normalization. See the +{plugins}/analysis-icu-normalization-charfilter.html[`analysis-icu` plugin] for +full normalization support. + +[[analysis-cjk-width-tokenfilter-analyze-ex]] +==== Example + +[source,console] +-------------------------------------------------- +GET /_analyze +{ + "tokenizer" : "standard", + "filter" : ["cjk_width"], + "text" : "シーサイドライナー" +} +-------------------------------------------------- + +The filter produces the following token: + +[source,text] +-------------------------------------------------- +シーサイドライナー +-------------------------------------------------- + +///////////////////// +[source,console-result] +-------------------------------------------------- +{ + "tokens" : [ + { + "token" : "シーサイドライナー", + "start_offset" : 0, + "end_offset" : 10, + "type" : "", + "position" : 0 + } + ] +} +-------------------------------------------------- +///////////////////// + +[[analysis-cjk-width-tokenfilter-analyzer-ex]] +==== Add to an analyzer + +The following <> request uses the +CJK width token filter to configure a new +<>. 
+ +[source,console] +-------------------------------------------------- +PUT /cjk_width_example +{ + "settings" : { + "analysis" : { + "analyzer" : { + "standard_cjk_width" : { + "tokenizer" : "standard", + "filter" : ["cjk_width"] + } + } + } + } +} +-------------------------------------------------- diff --git a/docs/reference/ingest/apis/enrich/delete-enrich-policy.asciidoc b/docs/reference/ingest/apis/enrich/delete-enrich-policy.asciidoc index f0ebd40ff4112..3052e33cb5e65 100644 --- a/docs/reference/ingest/apis/enrich/delete-enrich-policy.asciidoc +++ b/docs/reference/ingest/apis/enrich/delete-enrich-policy.asciidoc @@ -12,6 +12,13 @@ Deletes an existing enrich policy and its enrich index. [source,console] ---- PUT /users +{ + "mappings" : { + "properties" : { + "email" : { "type" : "keyword" } + } + } +} PUT /_enrich/policy/my-policy { diff --git a/docs/reference/ingest/apis/enrich/get-enrich-policy.asciidoc b/docs/reference/ingest/apis/enrich/get-enrich-policy.asciidoc index 2ddc4c8ffc951..4c41a31643654 100644 --- a/docs/reference/ingest/apis/enrich/get-enrich-policy.asciidoc +++ b/docs/reference/ingest/apis/enrich/get-enrich-policy.asciidoc @@ -12,6 +12,13 @@ Returns information about an enrich policy. [source,console] ---- PUT /users +{ + "mappings" : { + "properties" : { + "email" : { "type" : "keyword" } + } + } +} PUT /_enrich/policy/my-policy { diff --git a/docs/reference/ingest/apis/enrich/put-enrich-policy.asciidoc b/docs/reference/ingest/apis/enrich/put-enrich-policy.asciidoc index 59054fd5aa6b8..df92ef265312c 100644 --- a/docs/reference/ingest/apis/enrich/put-enrich-policy.asciidoc +++ b/docs/reference/ingest/apis/enrich/put-enrich-policy.asciidoc @@ -12,6 +12,13 @@ Creates an enrich policy. [source,console] ---- PUT /users +{ + "mappings" : { + "properties" : { + "email" : { "type" : "keyword" } + } + } +} ---- //// diff --git a/docs/reference/modules/cross-cluster-search.asciidoc b/docs/reference/modules/cross-cluster-search.asciidoc index e85fe99944684..48526e4f66549 100644 --- a/docs/reference/modules/cross-cluster-search.asciidoc +++ b/docs/reference/modules/cross-cluster-search.asciidoc @@ -242,9 +242,31 @@ PUT _cluster/settings If `cluster_two` is disconnected or unavailable during a {ccs}, {es} won't include matching documents from that cluster in the final results. -[float] +[discrete] [[ccs-works]] == How {ccs} works + +include::./remote-clusters.asciidoc[tag=how-remote-clusters-work] + +[discrete] +[[ccs-gateway-seed-nodes]] +=== Selecting gateway and seed nodes + +Gateway and seed nodes need to be accessible from the local cluster via your +network. + +By default, any master-ineligible node can act as a gateway node. If wanted, +you can define the gateway nodes for a cluster by setting +`cluster.remote.node.attr.gateway` to `true`. + +For {ccs}, we recommend you use gateway nodes that are capable of serving as +<> for search requests. If +wanted, the seed nodes for a cluster can be a subset of these gateway nodes. + +[discrete] +[[ccs-network-delays]] +=== How {ccs} handles network delays + Because {ccs} involves sending requests to remote clusters, any network delays can impact search speed. To avoid slow searches, {ccs} offers two options for handling network delays: @@ -268,11 +290,9 @@ latency. + See <> to learn how this option works. - - [float] [[ccs-min-roundtrips]] -=== Minimize network roundtrips +==== Minimize network roundtrips Here's how {ccs} works when you minimize network roundtrips. 
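The gateway and seed node selection and the roundtrip-minimization behaviour documented above can be exercised end to end from the Java high-level REST client. The following sketch is illustrative only and is not part of this change; the cluster alias `cluster_two`, the seed address `127.0.0.1:9301`, and the index name `my-index` are assumptions.

[source,java]
----
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class CrossClusterSearchSketch {

    static SearchResponse searchRemote(RestHighLevelClient client) throws Exception {
        // Register a remote cluster by pointing at one of its seed nodes (hypothetical address).
        ClusterUpdateSettingsRequest remoteCluster = new ClusterUpdateSettingsRequest()
            .persistentSettings(Settings.builder()
                .put("cluster.remote.cluster_two.seeds", "127.0.0.1:9301")
                .build());
        client.cluster().putSettings(remoteCluster, RequestOptions.DEFAULT);

        // Query the remote cluster with the <cluster>:<index> syntax, keeping the default
        // roundtrip-minimizing behaviour described above.
        SearchRequest searchRequest = new SearchRequest("cluster_two:my-index")
            .source(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()));
        searchRequest.setCcsMinimizeRoundtrips(true);
        return client.search(searchRequest, RequestOptions.DEFAULT);
    }
}
----

Requests addressed to `cluster_two:my-index` are then routed through the gateway nodes that the local cluster selected for `cluster_two`, as described in the remote-clusters documentation below.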
@@ -297,7 +317,7 @@ image:images/ccs/ccs-min-roundtrip-client-response.png[] [float] [[ccs-unmin-roundtrips]] -=== Don't minimize network roundtrips +==== Don't minimize network roundtrips Here's how {ccs} works when you don't minimize network roundtrips. diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index 1ab58ac2bf54b..e7e820d71cf67 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -13,12 +13,16 @@ connections to a remote cluster. This functionality is used in <>. endif::[] +// tag::how-remote-clusters-work[] Remote cluster connections work by configuring a remote cluster and connecting only to a limited number of nodes in that remote cluster. Each remote cluster is referenced by a name and a list of seed nodes. When a remote cluster is registered, its cluster state is retrieved from one of the seed nodes and up to three _gateway nodes_ are selected to be connected to as part of remote -cluster requests. All the communication required between different clusters +cluster requests. +// end::how-remote-clusters-work[] + +All the communication required between different clusters goes through the <>. Remote cluster connections consist of uni-directional connections from the coordinating node to the selected remote _gateway nodes_ only. diff --git a/docs/reference/transform/limitations.asciidoc b/docs/reference/transform/limitations.asciidoc index 1d3d38c943691..463f66f24e3e5 100644 --- a/docs/reference/transform/limitations.asciidoc +++ b/docs/reference/transform/limitations.asciidoc @@ -33,6 +33,14 @@ upgrade from 7.2 to a newer version, and {transforms} have been created in 7.2, the {transforms} UI (earler {dataframe} UI) will not work. Please wait until all nodes have been upgraded to the newer version before using the {transforms} UI. +[float] +[[transform-rolling-upgrade-limitation]] +==== {transforms-cap} reassignment suspended during a rolling upgrade from 7.2 and 7.3 + +If your cluster contains mixed version nodes, for example during a rolling +upgrade from 7.2 or 7.3 to a newer version, {transforms} whose nodes are stopped will +not be reassigned until the upgrade is complete. After the upgrade is done, {transforms} +resume automatically; no action is required. [float] [[transform-datatype-limitations]] @@ -181,9 +189,9 @@ for the {transform} checkpoint to complete. [float] [[transform-scheduling-limitations]] -==== {cdataframe-cap} scheduling limitations +==== {ctransform-cap} scheduling limitations -A {cdataframe} periodically checks for changes to source data. The functionality +A {ctransform} periodically checks for changes to source data. The functionality of the scheduler is currently limited to a basic periodic timer which can be within the `frequency` range from 1s to 1h. The default is 1m. This is designed to run little and often. When choosing a `frequency` for this timer consider @@ -206,7 +214,7 @@ When using the API to delete a failed {transform}, first stop it using [float] [[transform-availability-limitations]] -==== {cdataframes-cap} may give incorrect results if documents are not yet available to search +==== {ctransforms-cap} may give incorrect results if documents are not yet available to search After a document is indexed, there is a very small delay until it is available to search. 
diff --git a/gradle/build-scan.gradle b/gradle/build-scan.gradle index 3e47058596c61..09e9e950db49b 100644 --- a/gradle/build-scan.gradle +++ b/gradle/build-scan.gradle @@ -18,8 +18,8 @@ buildScan { tag 'CI' tag System.getenv('JOB_NAME') link 'Jenkins Build', System.getenv('BUILD_URL') - link 'GCP Upload', - "https://console.cloud.google.com/storage/elasticsearch-ci-artifacts/jobs/${jobName}/${buildNumber}.tar.bz2" + link 'Additional Logs', + "https://console.cloud.google.com/storage/elasticsearch-ci-artifacts/jobs/${jobName}/build/${buildNumber}.tar.bz2" System.getenv('NODE_LABELS').split(' ').each { value 'Jenkins Worker Label', it } diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 62e2d6aa2bf86..98cb23ae28515 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -66,7 +66,7 @@ integTestRunner { TaskProvider pooledTest = tasks.register("pooledTest", Test) { include '**/*Tests.class' systemProperty 'es.set.netty.runtime.available.processors', 'false' - systemProperty 'io.netty.allocator.type', 'pooled' + systemProperty 'es.use_unpooled_allocator', 'false' } // TODO: we can't use task avoidance here because RestIntegTestTask does the testcluster creation RestIntegTestTask pooledIntegTest = tasks.create("pooledIntegTest", RestIntegTestTask) { @@ -75,7 +75,7 @@ RestIntegTestTask pooledIntegTest = tasks.create("pooledIntegTest", RestIntegTes } } testClusters.pooledIntegTest { - systemProperty 'io.netty.allocator.type', 'pooled' + systemProperty 'es.use_unpooled_allocator', 'false' } check.dependsOn(pooledTest, pooledIntegTest) diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 6c1579bc28362..cefa589a103f7 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -20,7 +20,6 @@ package org.elasticsearch.http.netty4; import io.netty.bootstrap.ServerBootstrap; -import io.netty.buffer.ByteBufAllocator; import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelHandler; @@ -32,7 +31,6 @@ import io.netty.channel.RecvByteBufAllocator; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.nio.NioChannelOption; -import io.netty.channel.socket.nio.NioServerSocketChannel; import io.netty.handler.codec.ByteToMessageDecoder; import io.netty.handler.codec.http.HttpContentCompressor; import io.netty.handler.codec.http.HttpContentDecompressor; @@ -63,7 +61,7 @@ import org.elasticsearch.http.HttpServerChannel; import org.elasticsearch.http.netty4.cors.Netty4CorsHandler; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.CopyBytesServerSocketChannel; +import org.elasticsearch.transport.NettyAllocator; import org.elasticsearch.transport.netty4.Netty4Utils; import java.net.InetSocketAddress; @@ -186,14 +184,12 @@ protected void doStart() { serverBootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX))); - // If direct buffer pooling is disabled, use the CopyBytesServerSocketChannel which will create child - // channels of type CopyBytesSocketChannel. 
CopyBytesSocketChannel pool a single direct buffer - // per-event-loop thread to be used for IO operations. - if (ByteBufAllocator.DEFAULT.isDirectBufferPooled()) { - serverBootstrap.channel(NioServerSocketChannel.class); - } else { - serverBootstrap.channel(CopyBytesServerSocketChannel.class); - } + // NettyAllocator will return the channel type designed to work with the configuredAllocator + serverBootstrap.channel(NettyAllocator.getServerChannelType()); + + // Set the allocators for both the server channel and the child channels created + serverBootstrap.option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()); + serverBootstrap.childOption(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()); serverBootstrap.childHandler(configureServerChannelHandler()); serverBootstrap.handler(new ServerChannelExceptionHandler(this)); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/NettyAllocator.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/NettyAllocator.java new file mode 100644 index 0000000000000..bfe0a92a9f2b8 --- /dev/null +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/NettyAllocator.java @@ -0,0 +1,214 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.CompositeByteBuf; +import io.netty.buffer.PooledByteBufAllocator; +import io.netty.buffer.UnpooledByteBufAllocator; +import io.netty.channel.Channel; +import io.netty.channel.ServerChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.monitor.jvm.JvmInfo; + +public class NettyAllocator { + + private static final ByteBufAllocator ALLOCATOR; + + private static final String USE_UNPOOLED = "es.use_unpooled_allocator"; + private static final String USE_NETTY_DEFAULT = "es.unsafe.use_netty_default_allocator"; + + static { + if (Booleans.parseBoolean(System.getProperty(USE_NETTY_DEFAULT), false)) { + ALLOCATOR = ByteBufAllocator.DEFAULT; + } else { + ByteBufAllocator delegate; + if (useUnpooled()) { + delegate = new NoDirectBuffers(UnpooledByteBufAllocator.DEFAULT); + } else { + int nHeapArena = PooledByteBufAllocator.defaultNumHeapArena(); + int pageSize = PooledByteBufAllocator.defaultPageSize(); + int maxOrder = PooledByteBufAllocator.defaultMaxOrder(); + int tinyCacheSize = PooledByteBufAllocator.defaultTinyCacheSize(); + int smallCacheSize = PooledByteBufAllocator.defaultSmallCacheSize(); + int normalCacheSize = PooledByteBufAllocator.defaultNormalCacheSize(); + boolean useCacheForAllThreads = PooledByteBufAllocator.defaultUseCacheForAllThreads(); + delegate = new PooledByteBufAllocator(false, nHeapArena, 0, pageSize, maxOrder, tinyCacheSize, + smallCacheSize, normalCacheSize, useCacheForAllThreads); + } + ALLOCATOR = new NoDirectBuffers(delegate); + } + } + + public static boolean useCopySocket() { + return ALLOCATOR instanceof NoDirectBuffers; + } + + public static ByteBufAllocator getAllocator() { + return ALLOCATOR; + } + + public static Class getChannelType() { + if (ALLOCATOR instanceof NoDirectBuffers) { + return CopyBytesSocketChannel.class; + } else { + return NioSocketChannel.class; + } + } + + public static Class getServerChannelType() { + if (ALLOCATOR instanceof NoDirectBuffers) { + return CopyBytesServerSocketChannel.class; + } else { + return NioServerSocketChannel.class; + } + } + + private static boolean useUnpooled() { + if (System.getProperty(USE_UNPOOLED) != null) { + return Booleans.parseBoolean(System.getProperty(USE_UNPOOLED)); + } else { + long heapSize = JvmInfo.jvmInfo().getMem().getHeapMax().getBytes(); + return heapSize <= 1 << 30; + } + } + + private static class NoDirectBuffers implements ByteBufAllocator { + + private final ByteBufAllocator delegate; + + private NoDirectBuffers(ByteBufAllocator delegate) { + this.delegate = delegate; + } + + @Override + public ByteBuf buffer() { + return heapBuffer(); + } + + @Override + public ByteBuf buffer(int initialCapacity) { + return heapBuffer(initialCapacity); + } + + @Override + public ByteBuf buffer(int initialCapacity, int maxCapacity) { + return heapBuffer(initialCapacity, maxCapacity); + } + + @Override + public ByteBuf ioBuffer() { + return heapBuffer(); + } + + @Override + public ByteBuf ioBuffer(int initialCapacity) { + return heapBuffer(initialCapacity); + } + + @Override + public ByteBuf ioBuffer(int initialCapacity, int maxCapacity) { + return heapBuffer(initialCapacity, maxCapacity); + } + + @Override + public ByteBuf heapBuffer() { + return delegate.heapBuffer(); + } + + @Override + public ByteBuf heapBuffer(int 
initialCapacity) { + return delegate.heapBuffer(initialCapacity); + } + + @Override + public ByteBuf heapBuffer(int initialCapacity, int maxCapacity) { + return delegate.heapBuffer(initialCapacity, maxCapacity); + } + + @Override + public ByteBuf directBuffer() { + // TODO: Currently the Netty SslHandler requests direct ByteBufs even when interacting with the + // JDK SSLEngine. This will be fixed in a future version of Netty. For now, return a heap + // ByteBuf. After a Netty upgrade, return to throwing UnsupportedOperationException + return heapBuffer(); + } + + @Override + public ByteBuf directBuffer(int initialCapacity) { + // TODO: Currently the Netty SslHandler requests direct ByteBufs even when interacting with the + // JDK SSLEngine. This will be fixed in a future version of Netty. For now, return a heap + // ByteBuf. After a Netty upgrade, return to throwing UnsupportedOperationException + return heapBuffer(initialCapacity); + } + + @Override + public ByteBuf directBuffer(int initialCapacity, int maxCapacity) { + // TODO: Currently the Netty SslHandler requests direct ByteBufs even when interacting with the + // JDK SSLEngine. This will be fixed in a future version of Netty. For now, return a heap + // ByteBuf. After a Netty upgrade, return to throwing UnsupportedOperationException + return heapBuffer(initialCapacity, maxCapacity); + } + + @Override + public CompositeByteBuf compositeBuffer() { + return compositeHeapBuffer(); + } + + @Override + public CompositeByteBuf compositeBuffer(int maxNumComponents) { + return compositeHeapBuffer(maxNumComponents); + } + + @Override + public CompositeByteBuf compositeHeapBuffer() { + return delegate.compositeHeapBuffer(); + } + + @Override + public CompositeByteBuf compositeHeapBuffer(int maxNumComponents) { + return delegate.compositeHeapBuffer(maxNumComponents); + } + + @Override + public CompositeByteBuf compositeDirectBuffer() { + throw new UnsupportedOperationException("Direct buffers not supported."); + } + + @Override + public CompositeByteBuf compositeDirectBuffer(int maxNumComponents) { + throw new UnsupportedOperationException("Direct buffers not supported."); + } + + @Override + public boolean isDirectBufferPooled() { + assert delegate.isDirectBufferPooled() == false; + return false; + } + + @Override + public int calculateNewCapacity(int minNewCapacity, int maxCapacity) { + return delegate.calculateNewCapacity(minNewCapacity, maxCapacity); + } + } +} diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/ByteBufBytesReference.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/ByteBufBytesReference.java index d8af523bf17df..3f7ff0d6e2a0f 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/ByteBufBytesReference.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/ByteBufBytesReference.java @@ -20,6 +20,7 @@ import io.netty.buffer.ByteBuf; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.AbstractBytesReference; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; @@ -27,7 +28,7 @@ import java.io.OutputStream; import java.nio.charset.StandardCharsets; -final class ByteBufBytesReference extends BytesReference { +final class ByteBufBytesReference extends AbstractBytesReference { private final ByteBuf buffer; private final int length; diff --git 
a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index d3e43e16dd5f4..93f41285c5f3d 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -21,7 +21,6 @@ import io.netty.bootstrap.Bootstrap; import io.netty.bootstrap.ServerBootstrap; -import io.netty.buffer.ByteBufAllocator; import io.netty.channel.AdaptiveRecvByteBufAllocator; import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; @@ -34,8 +33,6 @@ import io.netty.channel.RecvByteBufAllocator; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.nio.NioChannelOption; -import io.netty.channel.socket.nio.NioServerSocketChannel; -import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.util.AttributeKey; import io.netty.util.concurrent.Future; import org.apache.logging.log4j.LogManager; @@ -59,8 +56,7 @@ import org.elasticsearch.core.internal.net.NetUtils; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.CopyBytesServerSocketChannel; -import org.elasticsearch.transport.CopyBytesSocketChannel; +import org.elasticsearch.transport.NettyAllocator; import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.TransportSettings; @@ -152,13 +148,9 @@ private Bootstrap createClientBootstrap(NioEventLoopGroup eventLoopGroup) { final Bootstrap bootstrap = new Bootstrap(); bootstrap.group(eventLoopGroup); - // If direct buffer pooling is disabled, use the CopyBytesSocketChannel which will pool a single - // direct buffer per-event-loop thread which will be used for IO operations. - if (ByteBufAllocator.DEFAULT.isDirectBufferPooled()) { - bootstrap.channel(NioSocketChannel.class); - } else { - bootstrap.channel(CopyBytesSocketChannel.class); - } + // NettyAllocator will return the channel type designed to work with the configured allocator + bootstrap.channel(NettyAllocator.getChannelType()); + bootstrap.option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()); bootstrap.option(ChannelOption.TCP_NODELAY, TransportSettings.TCP_NO_DELAY.get(settings)); bootstrap.option(ChannelOption.SO_KEEPALIVE, TransportSettings.TCP_KEEP_ALIVE.get(settings)); @@ -216,14 +208,12 @@ private void createServerBootstrap(ProfileSettings profileSettings, NioEventLoop serverBootstrap.group(eventLoopGroup); - // If direct buffer pooling is disabled, use the CopyBytesServerSocketChannel which will create child - // channels of type CopyBytesSocketChannel. CopyBytesSocketChannel pool a single direct buffer - // per-event-loop thread to be used for IO operations. 
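The hunk that follows replaces this conditional with the NettyAllocator-driven selection, mirroring the HTTP server and test-client hunks earlier in the diff: ask NettyAllocator for the channel implementation and install the matching allocator on the bootstrap, so a heap-only allocator is never paired with a channel that expects pooled direct buffers. A minimal standalone sketch of that pairing, assuming only the NettyAllocator API introduced above (the wrapper class and method below are illustrative, not part of the change):

    import io.netty.bootstrap.Bootstrap;
    import io.netty.channel.ChannelOption;
    import io.netty.channel.nio.NioEventLoopGroup;
    import org.elasticsearch.transport.NettyAllocator;

    final class AllocatorAwareBootstrapSketch {
        // Pair the channel type with the allocator it was designed for; with the default
        // Elasticsearch settings this selects CopyBytesSocketChannel plus a heap-only allocator.
        static Bootstrap clientBootstrap() {
            return new Bootstrap()
                .group(new NioEventLoopGroup(1))
                .channel(NettyAllocator.getChannelType())
                .option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator());
        }
    }

Keeping the channel type and the allocator behind one class means the copying socket channels, which manage their own per-event-loop direct buffer for IO, are always used whenever direct-buffer pooling is disabled.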
- if (ByteBufAllocator.DEFAULT.isDirectBufferPooled()) { - serverBootstrap.channel(NioServerSocketChannel.class); - } else { - serverBootstrap.channel(CopyBytesServerSocketChannel.class); - } + // NettyAllocator will return the channel type designed to work with the configured allocator + serverBootstrap.channel(NettyAllocator.getServerChannelType()); + + // Set the allocators for both the server channel and the child channels created + serverBootstrap.option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()); + serverBootstrap.childOption(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()); serverBootstrap.childHandler(getServerChannelInitializer(name)); serverBootstrap.handler(new ServerChannelExceptionHandler()); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java b/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java index 9d8baf9e3f871..d4a6706153a9b 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java @@ -18,6 +18,7 @@ */ package org.elasticsearch; +import io.netty.buffer.PooledByteBufAllocator; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; @@ -25,8 +26,8 @@ import org.elasticsearch.transport.Netty4Plugin; import org.elasticsearch.transport.netty4.Netty4Transport; -import java.util.Arrays; import java.util.Collection; +import java.util.Collections; public abstract class ESNetty4IntegTestCase extends ESIntegTestCase { @@ -54,6 +55,13 @@ protected Settings nodeSettings(int nodeOrdinal) { @Override protected Collection<Class<? extends Plugin>> nodePlugins() { - return Arrays.asList(Netty4Plugin.class); + return Collections.singletonList(Netty4Plugin.class); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + assertEquals(0, PooledByteBufAllocator.DEFAULT.metric().usedHeapMemory()); + assertEquals(0, PooledByteBufAllocator.DEFAULT.metric().usedDirectMemory()); } } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java index a595de3a47ed9..558e833c74bed 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java @@ -25,10 +25,10 @@ import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.SocketChannel; -import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; @@ -45,6 +45,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.NettyAllocator; import java.io.Closeable; import java.net.SocketAddress; @@ -84,7 +85,10 @@ static Collection returnOpaqueIds(Collection responses private final Bootstrap clientBootstrap; Netty4HttpClient() { - clientBootstrap = new 
Bootstrap().channel(NioSocketChannel.class).group(new NioEventLoopGroup()); + clientBootstrap = new Bootstrap() + .channel(NettyAllocator.getChannelType()) + .option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()) + .group(new NioEventLoopGroup(1)); } public Collection get(SocketAddress remoteAddress, String... uris) throws InterruptedException { diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java index 0b210995795bf..884cf46a1688f 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.transport.netty4; +import io.netty.buffer.PooledByteBufAllocator; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -45,6 +46,13 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase { + @Override + public void tearDown() throws Exception { + super.tearDown(); + assertEquals(0, PooledByteBufAllocator.DEFAULT.metric().usedHeapMemory()); + assertEquals(0, PooledByteBufAllocator.DEFAULT.metric().usedDirectMemory()); + } + @Override protected Transport build(Settings settings, final Version version, ClusterSettings clusterSettings, boolean doHandshake) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); @@ -73,5 +81,4 @@ public void testConnectException() throws UnknownHostException { assertThat(e.getMessage(), containsString("[127.0.0.1:9876]")); } } - } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java index a43a96a438d03..28993bd475a06 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -265,10 +265,4 @@ protected String requestUniqueId(final HttpExchange exchange) { + (range != null ? 
" " + range : ""); } } - - @Override - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/47948") - public void testIndicesDeletedFromRepository() throws Exception { - - } } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/ByteBufUtils.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/ByteBufUtils.java index f805773c46c0c..78bc8f3728209 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/ByteBufUtils.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/ByteBufUtils.java @@ -23,6 +23,7 @@ import io.netty.buffer.Unpooled; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.common.bytes.AbstractBytesReference; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; @@ -72,7 +73,7 @@ static BytesReference toBytesReference(final ByteBuf buffer) { return new ByteBufBytesReference(buffer, buffer.readableBytes()); } - private static class ByteBufBytesReference extends BytesReference { + private static class ByteBufBytesReference extends AbstractBytesReference { private final ByteBuf buffer; private final int length; diff --git a/server/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReference.java new file mode 100644 index 0000000000000..6bfeb023bf3aa --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReference.java @@ -0,0 +1,264 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.common.bytes; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.function.ToIntBiFunction; + +public abstract class AbstractBytesReference implements BytesReference { + + private Integer hash = null; // we cache the hash of this reference since it can be quite costly to re-calculated it + + @Override + public int getInt(int index) { + return (get(index) & 0xFF) << 24 | (get(index + 1) & 0xFF) << 16 | (get(index + 2) & 0xFF) << 8 | get(index + 3) & 0xFF; + } + + @Override + public int indexOf(byte marker, int from) { + final int to = length(); + for (int i = from; i < to; i++) { + if (get(i) == marker) { + return i; + } + } + return -1; + } + + @Override + public StreamInput streamInput() throws IOException { + return new MarkSupportingStreamInputWrapper(this); + } + + @Override + public void writeTo(OutputStream os) throws IOException { + final BytesRefIterator iterator = iterator(); + BytesRef ref; + while ((ref = iterator.next()) != null) { + os.write(ref.bytes, ref.offset, ref.length); + } + } + + @Override + public String utf8ToString() { + return toBytesRef().utf8ToString(); + } + + @Override + public BytesRefIterator iterator() { + return new BytesRefIterator() { + BytesRef ref = length() == 0 ? null : toBytesRef(); + @Override + public BytesRef next() throws IOException { + BytesRef r = ref; + ref = null; // only return it once... + return r; + } + }; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other instanceof BytesReference) { + final BytesReference otherRef = (BytesReference) other; + if (length() != otherRef.length()) { + return false; + } + return compareIterators(this, otherRef, (a, b) -> + a.bytesEquals(b) ? 0 : 1 // this is a call to BytesRef#bytesEquals - this method is the hot one in the comparison + ) == 0; + } + return false; + } + + @Override + public int hashCode() { + if (hash == null) { + final BytesRefIterator iterator = iterator(); + BytesRef ref; + int result = 1; + try { + while ((ref = iterator.next()) != null) { + for (int i = 0; i < ref.length; i++) { + result = 31 * result + ref.bytes[ref.offset + i]; + } + } + } catch (IOException ex) { + throw new AssertionError("wont happen", ex); + } + return hash = result; + } else { + return hash.intValue(); + } + } + + @Override + public int compareTo(final BytesReference other) { + return compareIterators(this, other, BytesRef::compareTo); + } + + /** + * Compares the two references using the given int function. + */ + private static int compareIterators(final BytesReference a, final BytesReference b, final ToIntBiFunction f) { + try { + // we use the iterators since it's a 0-copy comparison where possible! + final long lengthToCompare = Math.min(a.length(), b.length()); + final BytesRefIterator aIter = a.iterator(); + final BytesRefIterator bIter = b.iterator(); + BytesRef aRef = aIter.next(); + BytesRef bRef = bIter.next(); + if (aRef != null && bRef != null) { // do we have any data? + aRef = aRef.clone(); // we clone since we modify the offsets and length in the iteration below + bRef = bRef.clone(); + if (aRef.length == a.length() && bRef.length == b.length()) { // is it only one array slice we are comparing? 
+ return f.applyAsInt(aRef, bRef); + } else { + for (int i = 0; i < lengthToCompare;) { + if (aRef.length == 0) { + aRef = aIter.next().clone(); // must be non null otherwise we have a bug + } + if (bRef.length == 0) { + bRef = bIter.next().clone(); // must be non null otherwise we have a bug + } + final int aLength = aRef.length; + final int bLength = bRef.length; + final int length = Math.min(aLength, bLength); // shrink to the same length and use the fast compare in lucene + aRef.length = bRef.length = length; + // now we move to the fast comparison - this is the hot part of the loop + int diff = f.applyAsInt(aRef, bRef); + aRef.length = aLength; + bRef.length = bLength; + + if (diff != 0) { + return diff; + } + advance(aRef, length); + advance(bRef, length); + i += length; + } + } + } + // One is a prefix of the other, or, they are equal: + return a.length() - b.length(); + } catch (IOException ex) { + throw new AssertionError("can not happen", ex); + } + } + + private static void advance(final BytesRef ref, final int length) { + assert ref.length >= length : " ref.length: " + ref.length + " length: " + length; + assert ref.offset+length < ref.bytes.length || (ref.offset+length == ref.bytes.length && ref.length-length == 0) + : "offset: " + ref.offset + " ref.bytes.length: " + ref.bytes.length + " length: " + length + " ref.length: " + ref.length; + ref.length -= length; + ref.offset += length; + } + + /** + * Instead of adding the complexity of {@link InputStream#reset()} etc to the actual impl + * this wrapper builds it on top of the BytesReferenceStreamInput which is much simpler + * that way. + */ + private static final class MarkSupportingStreamInputWrapper extends StreamInput { + // can't use FilterStreamInput it needs to reset the delegate + private final BytesReference reference; + private BytesReferenceStreamInput input; + private int mark = 0; + + private MarkSupportingStreamInputWrapper(BytesReference reference) throws IOException { + this.reference = reference; + this.input = new BytesReferenceStreamInput(reference.iterator(), reference.length()); + } + + @Override + public byte readByte() throws IOException { + return input.readByte(); + } + + @Override + public void readBytes(byte[] b, int offset, int len) throws IOException { + input.readBytes(b, offset, len); + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + return input.read(b, off, len); + } + + @Override + public void close() throws IOException { + input.close(); + } + + @Override + public int read() throws IOException { + return input.read(); + } + + @Override + public int available() throws IOException { + return input.available(); + } + + @Override + protected void ensureCanReadBytes(int length) throws EOFException { + input.ensureCanReadBytes(length); + } + + @Override + public void reset() throws IOException { + input = new BytesReferenceStreamInput(reference.iterator(), reference.length()); + input.skip(mark); + } + + @Override + public boolean markSupported() { + return true; + } + + @Override + public void mark(int readLimit) { + // readLimit is optional it only guarantees that the stream remembers data upto this limit but it can remember more + // which we do in our case + this.mark = input.getOffset(); + } + + @Override + public long skip(long n) throws IOException { + return input.skip(n); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + BytesRef bytes = toBytesRef(); + return 
builder.value(bytes.bytes, bytes.offset, bytes.length); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/bytes/ByteBufferReference.java b/server/src/main/java/org/elasticsearch/common/bytes/ByteBufferReference.java index d696f060802f3..07353ea67eced 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/ByteBufferReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/ByteBufferReference.java @@ -31,7 +31,7 @@ * changed, those changes will not be reflected in this reference. Any changes to the underlying data in the * byte buffer will be reflected in this reference. */ -public class ByteBufferReference extends BytesReference { +public class ByteBufferReference extends AbstractBytesReference { private final ByteBuffer buffer; private final int length; diff --git a/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java b/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java index 9761ad0c42c67..b54617dec1248 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java @@ -23,7 +23,7 @@ import java.util.Objects; -public final class BytesArray extends BytesReference { +public final class BytesArray extends AbstractBytesReference { public static final BytesArray EMPTY = new BytesArray(BytesRef.EMPTY_BYTES, 0, 0); private final byte[] bytes; diff --git a/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java index 2c4867cbdfed9..8e4ecd2d3d3ca 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.common.bytes; import org.apache.lucene.util.BytesRef; @@ -26,26 +27,22 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.ByteArrayOutputStream; -import java.io.EOFException; import java.io.IOException; -import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.function.ToIntBiFunction; + /** * A reference to bytes. */ -public abstract class BytesReference implements Comparable, ToXContentFragment { - - private Integer hash = null; // we cache the hash of this reference since it can be quite costly to re-calculated it +public interface BytesReference extends Comparable, ToXContentFragment { /** * Convert an {@link XContentBuilder} into a BytesReference. This method closes the builder, * so no further fields may be added. */ - public static BytesReference bytes(XContentBuilder xContentBuilder) { + static BytesReference bytes(XContentBuilder xContentBuilder) { xContentBuilder.close(); OutputStream stream = xContentBuilder.getOutputStream(); if (stream instanceof ByteArrayOutputStream) { @@ -55,139 +52,11 @@ public static BytesReference bytes(XContentBuilder xContentBuilder) { } } - /** - * Returns the byte at the specified index. Need to be between 0 and length. - */ - public abstract byte get(int index); - - /** - * Returns the integer read from the 4 bytes (BE) starting at the given index. 
- */ - public int getInt(int index) { - return (get(index) & 0xFF) << 24 | (get(index + 1) & 0xFF) << 16 | (get(index + 2) & 0xFF) << 8 | get(index + 3) & 0xFF; - } - - /** - * Finds the index of the first occurrence of the given marker between within the given bounds. - * @param marker marker byte to search - * @param from lower bound for the index to check (inclusive) - * @return first index of the marker or {@code -1} if not found - */ - public int indexOf(byte marker, int from) { - final int to = length(); - for (int i = from; i < to; i++) { - if (get(i) == marker) { - return i; - } - } - return -1; - } - - /** - * The length. - */ - public abstract int length(); - - /** - * Slice the bytes from the {@code from} index up to {@code length}. - */ - public abstract BytesReference slice(int from, int length); - - /** - * The amount of memory used by this BytesReference - */ - public abstract long ramBytesUsed(); - - /** - * A stream input of the bytes. - */ - public StreamInput streamInput() throws IOException { - return new MarkSupportingStreamInputWrapper(this); - } - - /** - * Writes the bytes directly to the output stream. - */ - public void writeTo(OutputStream os) throws IOException { - final BytesRefIterator iterator = iterator(); - BytesRef ref; - while ((ref = iterator.next()) != null) { - os.write(ref.bytes, ref.offset, ref.length); - } - } - - /** - * Interprets the referenced bytes as UTF8 bytes, returning the resulting string - */ - public String utf8ToString() { - return toBytesRef().utf8ToString(); - } - - /** - * Converts to Lucene BytesRef. - */ - public abstract BytesRef toBytesRef(); - - /** - * Returns a BytesRefIterator for this BytesReference. This method allows - * access to the internal pages of this reference without copying them. Use with care! - * @see BytesRefIterator - */ - public BytesRefIterator iterator() { - return new BytesRefIterator() { - BytesRef ref = length() == 0 ? null : toBytesRef(); - @Override - public BytesRef next() throws IOException { - BytesRef r = ref; - ref = null; // only return it once... - return r; - } - }; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } - if (other instanceof BytesReference) { - final BytesReference otherRef = (BytesReference) other; - if (length() != otherRef.length()) { - return false; - } - return compareIterators(this, otherRef, (a, b) -> - a.bytesEquals(b) ? 0 : 1 // this is a call to BytesRef#bytesEquals - this method is the hot one in the comparison - ) == 0; - } - return false; - } - - @Override - public int hashCode() { - if (hash == null) { - final BytesRefIterator iterator = iterator(); - BytesRef ref; - int result = 1; - try { - while ((ref = iterator.next()) != null) { - for (int i = 0; i < ref.length; i++) { - result = 31 * result + ref.bytes[ref.offset + i]; - } - } - } catch (IOException ex) { - throw new AssertionError("wont happen", ex); - } - return hash = result; - } else { - return hash.intValue(); - } - } - /** * Returns a compact array from the given BytesReference. The returned array won't be copied unless necessary. 
If you need * to modify the returned array use {@code BytesRef.deepCopyOf(reference.toBytesRef()} instead */ - public static byte[] toBytes(BytesReference reference) { + static byte[] toBytes(BytesReference reference) { final BytesRef bytesRef = reference.toBytesRef(); if (bytesRef.offset == 0 && bytesRef.length == bytesRef.bytes.length) { return bytesRef.bytes; @@ -198,7 +67,7 @@ public static byte[] toBytes(BytesReference reference) { /** * Returns an array of byte buffers from the given BytesReference. */ - public static ByteBuffer[] toByteBuffers(BytesReference reference) { + static ByteBuffer[] toByteBuffers(BytesReference reference) { BytesRefIterator byteRefIterator = reference.iterator(); BytesRef r; try { @@ -217,7 +86,7 @@ public static ByteBuffer[] toByteBuffers(BytesReference reference) { /** * Returns BytesReference composed of the provided ByteBuffers. */ - public static BytesReference fromByteBuffers(ByteBuffer[] buffers) { + static BytesReference fromByteBuffers(ByteBuffer[] buffers) { int bufferCount = buffers.length; if (bufferCount == 0) { return BytesArray.EMPTY; @@ -233,146 +102,63 @@ public static BytesReference fromByteBuffers(ByteBuffer[] buffers) { } } - @Override - public int compareTo(final BytesReference other) { - return compareIterators(this, other, BytesRef::compareTo); - } - /** - * Compares the two references using the given int function. + * Returns the byte at the specified index. Need to be between 0 and length. */ - private static int compareIterators(final BytesReference a, final BytesReference b, final ToIntBiFunction f) { - try { - // we use the iterators since it's a 0-copy comparison where possible! - final long lengthToCompare = Math.min(a.length(), b.length()); - final BytesRefIterator aIter = a.iterator(); - final BytesRefIterator bIter = b.iterator(); - BytesRef aRef = aIter.next(); - BytesRef bRef = bIter.next(); - if (aRef != null && bRef != null) { // do we have any data? - aRef = aRef.clone(); // we clone since we modify the offsets and length in the iteration below - bRef = bRef.clone(); - if (aRef.length == a.length() && bRef.length == b.length()) { // is it only one array slice we are comparing? 
- return f.applyAsInt(aRef, bRef); - } else { - for (int i = 0; i < lengthToCompare;) { - if (aRef.length == 0) { - aRef = aIter.next().clone(); // must be non null otherwise we have a bug - } - if (bRef.length == 0) { - bRef = bIter.next().clone(); // must be non null otherwise we have a bug - } - final int aLength = aRef.length; - final int bLength = bRef.length; - final int length = Math.min(aLength, bLength); // shrink to the same length and use the fast compare in lucene - aRef.length = bRef.length = length; - // now we move to the fast comparison - this is the hot part of the loop - int diff = f.applyAsInt(aRef, bRef); - aRef.length = aLength; - bRef.length = bLength; - - if (diff != 0) { - return diff; - } - advance(aRef, length); - advance(bRef, length); - i += length; - } - } - } - // One is a prefix of the other, or, they are equal: - return a.length() - b.length(); - } catch (IOException ex) { - throw new AssertionError("can not happen", ex); - } - } - - private static void advance(final BytesRef ref, final int length) { - assert ref.length >= length : " ref.length: " + ref.length + " length: " + length; - assert ref.offset+length < ref.bytes.length || (ref.offset+length == ref.bytes.length && ref.length-length == 0) - : "offset: " + ref.offset + " ref.bytes.length: " + ref.bytes.length + " length: " + length + " ref.length: " + ref.length; - ref.length -= length; - ref.offset += length; - } + byte get(int index); /** - * Instead of adding the complexity of {@link InputStream#reset()} etc to the actual impl - * this wrapper builds it on top of the BytesReferenceStreamInput which is much simpler - * that way. + * Returns the integer read from the 4 bytes (BE) starting at the given index. */ - private static final class MarkSupportingStreamInputWrapper extends StreamInput { - // can't use FilterStreamInput it needs to reset the delegate - private final BytesReference reference; - private BytesReferenceStreamInput input; - private int mark = 0; + int getInt(int index); - private MarkSupportingStreamInputWrapper(BytesReference reference) throws IOException { - this.reference = reference; - this.input = new BytesReferenceStreamInput(reference.iterator(), reference.length()); - } - - @Override - public byte readByte() throws IOException { - return input.readByte(); - } - - @Override - public void readBytes(byte[] b, int offset, int len) throws IOException { - input.readBytes(b, offset, len); - } - - @Override - public int read(byte[] b, int off, int len) throws IOException { - return input.read(b, off, len); - } - - @Override - public void close() throws IOException { - input.close(); - } + /** + * Finds the index of the first occurrence of the given marker between within the given bounds. + * @param marker marker byte to search + * @param from lower bound for the index to check (inclusive) + * @return first index of the marker or {@code -1} if not found + */ + int indexOf(byte marker, int from); - @Override - public int read() throws IOException { - return input.read(); - } + /** + * The length. + */ + int length(); - @Override - public int available() throws IOException { - return input.available(); - } + /** + * Slice the bytes from the {@code from} index up to {@code length}. 
+ */ + BytesReference slice(int from, int length); - @Override - protected void ensureCanReadBytes(int length) throws EOFException { - input.ensureCanReadBytes(length); - } + /** + * The amount of memory used by this BytesReference + */ + long ramBytesUsed(); - @Override - public void reset() throws IOException { - input = new BytesReferenceStreamInput(reference.iterator(), reference.length()); - input.skip(mark); - } + /** + * A stream input of the bytes. + */ + StreamInput streamInput() throws IOException; - @Override - public boolean markSupported() { - return true; - } + /** + * Writes the bytes directly to the output stream. + */ + void writeTo(OutputStream os) throws IOException; - @Override - public void mark(int readLimit) { - // readLimit is optional it only guarantees that the stream remembers data upto this limit but it can remember more - // which we do in our case - this.mark = input.getOffset(); - } + /** + * Interprets the referenced bytes as UTF8 bytes, returning the resulting string + */ + String utf8ToString(); - @Override - public long skip(long n) throws IOException { - return input.skip(n); - } - } + /** + * Converts to Lucene BytesRef. + */ + BytesRef toBytesRef(); - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - BytesRef bytes = toBytesRef(); - return builder.value(bytes.bytes, bytes.offset, bytes.length); - } + /** + * Returns a BytesRefIterator for this BytesReference. This method allows + * access to the internal pages of this reference without copying them. Use with care! + * @see BytesRefIterator + */ + BytesRefIterator iterator(); } diff --git a/server/src/main/java/org/elasticsearch/common/bytes/CompositeBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/CompositeBytesReference.java index 4845102b89bcd..84a2b06eb801f 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/CompositeBytesReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/CompositeBytesReference.java @@ -34,7 +34,7 @@ * * Note, {@link #toBytesRef()} will materialize all pages in this BytesReference. */ -public final class CompositeBytesReference extends BytesReference { +public final class CompositeBytesReference extends AbstractBytesReference { private final BytesReference[] references; private final int[] offsets; diff --git a/server/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java index 9f2619cd1aa72..cfb13608ac63e 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java @@ -31,7 +31,7 @@ * A page based bytes reference, internally holding the bytes in a paged * data structure. */ -public class PagedBytesReference extends BytesReference { +public class PagedBytesReference extends AbstractBytesReference { private static final int PAGE_SIZE = PageCacheRecycler.BYTE_PAGE_SIZE; diff --git a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java new file mode 100644 index 0000000000000..ad23f8156bf58 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.bytes; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.io.OutputStream; + +/** + * An extension to {@link BytesReference} that requires releasing its content. This + * class exists to make it explicit when a bytes reference needs to be released, and when not. + */ +public final class ReleasableBytesReference implements Releasable, BytesReference { + + private final BytesReference delegate; + private final Releasable releasable; + + public ReleasableBytesReference(BytesReference delegate, Releasable releasable) { + this.delegate = delegate; + this.releasable = releasable; + } + + @Override + public void close() { + Releasables.close(releasable); + } + + @Override + public byte get(int index) { + return delegate.get(index); + } + + @Override + public int getInt(int index) { + return delegate.getInt(index); + } + + @Override + public int indexOf(byte marker, int from) { + return delegate.indexOf(marker, from); + } + + @Override + public int length() { + return delegate.length(); + } + + @Override + public BytesReference slice(int from, int length) { + return delegate.slice(from, length); + } + + @Override + public long ramBytesUsed() { + return delegate.ramBytesUsed(); + } + + @Override + public StreamInput streamInput() throws IOException { + return delegate.streamInput(); + } + + @Override + public void writeTo(OutputStream os) throws IOException { + delegate.writeTo(os); + } + + @Override + public String utf8ToString() { + return delegate.utf8ToString(); + } + + @Override + public BytesRef toBytesRef() { + return delegate.toBytesRef(); + } + + @Override + public BytesRefIterator iterator() { + return delegate.iterator(); + } + + @Override + public int compareTo(BytesReference o) { + return delegate.compareTo(o); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return delegate.toXContent(builder, params); + } + + @Override + public boolean isFragment() { + return delegate.isFragment(); + } + + @Override + public boolean equals(Object obj) { + return delegate.equals(obj); + } + + @Override + public int hashCode() { + return delegate.hashCode(); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java deleted file mode 100644 index 209a6edc5696a..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java 
+++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.bytes; - -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.util.ByteArray; - -/** - * An extension to {@link PagedBytesReference} that requires releasing its content. This - * class exists to make it explicit when a bytes reference needs to be released, and when not. - */ -public final class ReleasablePagedBytesReference extends PagedBytesReference implements Releasable { - - private final Releasable releasable; - - public ReleasablePagedBytesReference(ByteArray byteArray, int length, Releasable releasable) { - super(byteArray, length); - this.releasable = releasable; - } - - @Override - public void close() { - Releasables.close(releasable); - } - -} diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutput.java index 725ecd1c3cc4f..64a91e9cdd891 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutput.java @@ -19,7 +19,8 @@ package org.elasticsearch.common.io.stream; -import org.elasticsearch.common.bytes.ReleasablePagedBytesReference; +import org.elasticsearch.common.bytes.PagedBytesReference; +import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; @@ -31,7 +32,7 @@ * expecting it to require releasing its content ({@link #bytes()}) once done. *

* Please note, closing this stream will release the bytes that are in use by any - * {@link ReleasablePagedBytesReference} returned from {@link #bytes()}, so this + * {@link ReleasableBytesReference} returned from {@link #bytes()}, so this * stream should only be closed after the bytes have been output or copied * elsewhere. */ @@ -55,8 +56,8 @@ public ReleasableBytesStreamOutput(int expectedSize, BigArrays bigArrays) { * the bytes in the stream. */ @Override - public ReleasablePagedBytesReference bytes() { - return new ReleasablePagedBytesReference(bytes, count, releasable); + public ReleasableBytesReference bytes() { + return new ReleasableBytesReference(new PagedBytesReference(bytes, count), releasable); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 858e6edd3fbfc..2b537e8806165 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -457,8 +457,8 @@ private void restore(final IndexShard indexShard, final Repository repository, f } final IndexId indexId = repository.getRepositoryData().resolveIndexId(indexName); assert indexShard.getEngineOrNull() == null; - repository.restoreShard(indexShard.store(), restoreSource.snapshot().getSnapshotId(), - restoreSource.version(), indexId, snapshotShardId, indexShard.recoveryState()); + repository.restoreShard(indexShard.store(), restoreSource.snapshot().getSnapshotId(), indexId, snapshotShardId, + indexShard.recoveryState()); final Store store = indexShard.store(); bootstrap(indexShard, store); assert indexShard.shardRouting.primary() : "only primary shards can recover from store"; diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index e4d977c1ebb21..e38880797785b 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -26,7 +26,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.bytes.ReleasablePagedBytesReference; +import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -535,7 +535,7 @@ public Location add(final Operation operation) throws IOException { out.seek(start); out.writeInt(operationSize); out.seek(end); - final ReleasablePagedBytesReference bytes = out.bytes(); + final ReleasableBytesReference bytes = out.bytes(); try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); if (operation.primaryTerm() > current.getPrimaryTerm()) { @@ -1593,7 +1593,7 @@ public static void writeOperations(StreamOutput outStream, List toWri out.seek(start); out.writeInt(operationSize); out.seek(end); - ReleasablePagedBytesReference bytes = out.bytes(); + ReleasableBytesReference bytes = out.bytes(); bytes.writeTo(outStream); } } finally { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileTransfer.java b/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileTransfer.java index 09366a38a9957..fedf601961476 100644 --- 
a/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileTransfer.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileTransfer.java @@ -57,7 +57,7 @@ * one of the networking threads which receive/handle the responses of the current pending file chunk requests. This process will continue * until all chunk requests are sent/responded. */ -abstract class MultiFileTransfer implements Closeable { +public abstract class MultiFileTransfer implements Closeable { private Status status = Status.PROCESSING; private final Logger logger; private final ActionListener listener; @@ -121,7 +121,7 @@ private void handleItems(List>> return; } final long requestSeqId = requestSeqIdTracker.generateSeqNo(); - sendChunkRequest(request.v2(), ActionListener.wrap( + executeChunkRequest(request.v2(), ActionListener.wrap( r -> addItem(requestSeqId, request.v1(), null), e -> addItem(requestSeqId, request.v1(), e))); } @@ -179,7 +179,7 @@ private Tuple getNextRequest() throws Exception { protected abstract Request nextChunkRequest(StoreFileMetaData md) throws IOException; - protected abstract void sendChunkRequest(Request request, ActionListener listener); + protected abstract void executeChunkRequest(Request request, ActionListener listener); protected abstract void handleError(StoreFileMetaData md, Exception e) throws Exception; @@ -195,7 +195,7 @@ private static class FileChunkResponseItem { } } - protected interface ChunkRequest { + public interface ChunkRequest { /** * @return {@code true} if this chunk request is the last chunk of the current file */ diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileWriter.java b/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileWriter.java index 87a6d18671a6f..b6a5d2c908842 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileWriter.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileWriter.java @@ -27,9 +27,11 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.util.concurrent.AbstractRefCounted; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; +import org.elasticsearch.transport.Transports; import java.io.IOException; import java.util.Arrays; @@ -39,10 +41,12 @@ import java.util.Map; import java.util.PriorityQueue; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicBoolean; -public class MultiFileWriter implements Releasable { +public class MultiFileWriter extends AbstractRefCounted implements Releasable { public MultiFileWriter(Store store, RecoveryState.Index indexState, String tempFilePrefix, Logger logger, Runnable ensureOpen) { + super("multi_file_writer"); this.store = store; this.indexState = indexState; this.tempFilePrefix = tempFilePrefix; @@ -51,6 +55,7 @@ public MultiFileWriter(Store store, RecoveryState.Index indexState, String tempF } private final Runnable ensureOpen; + private final AtomicBoolean closed = new AtomicBoolean(false); private final Logger logger; private final Store store; private final RecoveryState.Index indexState; @@ -64,6 +69,7 @@ public MultiFileWriter(Store store, RecoveryState.Index indexState, String tempF public void writeFileChunk(StoreFileMetaData fileMetaData, long position, BytesReference content, boolean lastChunk) throws 
IOException { + assert Transports.assertNotTransportThread("multi_file_writer"); final FileChunkWriter writer = fileChunkWriters.computeIfAbsent(fileMetaData.name(), name -> new FileChunkWriter()); writer.writeChunk(new FileChunk(fileMetaData, content, position, lastChunk)); } @@ -138,6 +144,13 @@ private void innerWriteFileChunk(StoreFileMetaData fileMetaData, long position, @Override public void close() { + if (closed.compareAndSet(false, true)) { + decRef(); + } + } + + @Override + protected void closeInternal() { fileChunkWriters.clear(); // clean open index outputs Iterator> iterator = openIndexOutputs.entrySet().iterator(); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index c14fbca308910..1d45d048c9ba4 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -890,7 +890,7 @@ protected FileChunk nextChunkRequest(StoreFileMetaData md) throws IOException { } @Override - protected void sendChunkRequest(FileChunk request, ActionListener listener) { + protected void executeChunkRequest(FileChunk request, ActionListener listener) { cancellableThreads.checkForCancel(); recoveryTarget.writeFileChunk( request.md, request.position, request.content, request.lastChunk, translogOps.getAsInt(), listener); diff --git a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java index f048528ab789e..1379ad74c95ce 100644 --- a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java @@ -19,7 +19,6 @@ package org.elasticsearch.repositories; import org.apache.lucene.index.IndexCommit; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -122,14 +121,13 @@ public void snapshotShard(Store store, MapperService mapperService, SnapshotId s in.snapshotShard(store, mapperService, snapshotId, indexId, snapshotIndexCommit, snapshotStatus, listener); } @Override - public void restoreShard(Store store, SnapshotId snapshotId, - Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { - in.restoreShard(store, snapshotId, version, indexId, snapshotShardId, recoveryState); + public void restoreShard(Store store, SnapshotId snapshotId, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { + in.restoreShard(store, snapshotId, indexId, snapshotShardId, recoveryState); } @Override - public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId) { - return in.getShardSnapshotStatus(snapshotId, version, indexId, shardId); + public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) { + return in.getShardSnapshotStatus(snapshotId, indexId, shardId); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java index 4804b0852ea5b..53142920dc835 100644 --- a/server/src/main/java/org/elasticsearch/repositories/Repository.java +++ 
b/server/src/main/java/org/elasticsearch/repositories/Repository.java @@ -19,7 +19,6 @@ package org.elasticsearch.repositories; import org.apache.lucene.index.IndexCommit; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -204,24 +203,19 @@ void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshot * The index can be renamed on restore, hence different {@code shardId} and {@code snapshotShardId} are supplied. * @param store the store to restore the index into * @param snapshotId snapshot id - * @param version version of elasticsearch that created this snapshot * @param indexId id of the index in the repository from which the restore is occurring * @param snapshotShardId shard id (in the snapshot) * @param recoveryState recovery state */ - void restoreShard(Store store, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, - RecoveryState recoveryState); + void restoreShard(Store store, SnapshotId snapshotId, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState); /** * Retrieve shard snapshot status for the stored snapshot * * @param snapshotId snapshot id - * @param version version of elasticsearch that created this snapshot * @param indexId the snapshotted index id for the shard to get status for * @param shardId shard id * @return snapshot status */ - IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId); - - + IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId); } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 59c6a248ca0f4..82af167a04f6d 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -22,13 +22,16 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexFormatTooNewException; +import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.RateLimiter; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.StepListener; @@ -37,6 +40,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; @@ -1142,24 +1146,58 @@ public void onFailure(Exception e) { } @Override - public void restoreShard(Store store, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, + public void restoreShard(Store store, SnapshotId 
snapshotId, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { ShardId shardId = store.shardId(); try { final BlobContainer container = shardContainer(indexId, snapshotShardId); BlobStoreIndexShardSnapshot snapshot = loadShardSnapshot(container, snapshotId); SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles()); - new FileRestoreContext(metadata.name(), shardId, snapshotId, recoveryState, BUFFER_SIZE) { + new FileRestoreContext(metadata.name(), shardId, snapshotId, recoveryState) { @Override - protected InputStream fileInputStream(BlobStoreIndexShardSnapshot.FileInfo fileInfo) { - final InputStream dataBlobCompositeStream = new SlicedInputStream(fileInfo.numberOfParts()) { - @Override - protected InputStream openSlice(long slice) throws IOException { - return container.readBlob(fileInfo.partName(slice)); + protected void restoreFiles(List filesToRecover, Store store) throws IOException { + // restore the files from the snapshot to the Lucene store + for (final BlobStoreIndexShardSnapshot.FileInfo fileToRecover : filesToRecover) { + logger.trace("[{}] [{}] restoring file [{}]", shardId, snapshotId, fileToRecover.name()); + restoreFile(fileToRecover, store); + } + } + + private void restoreFile(BlobStoreIndexShardSnapshot.FileInfo fileInfo, Store store) throws IOException { + boolean success = false; + + try (InputStream stream = maybeRateLimit(new SlicedInputStream(fileInfo.numberOfParts()) { + @Override + protected InputStream openSlice(long slice) throws IOException { + return container.readBlob(fileInfo.partName(slice)); + } + }, + restoreRateLimiter, restoreRateLimitingTimeInNanos)) { + try (IndexOutput indexOutput = + store.createVerifyingOutput(fileInfo.physicalName(), fileInfo.metadata(), IOContext.DEFAULT)) { + final byte[] buffer = new byte[BUFFER_SIZE]; + int length; + while ((length = stream.read(buffer)) > 0) { + indexOutput.writeBytes(buffer, 0, length); + recoveryState.getIndex().addRecoveredBytesToFile(fileInfo.physicalName(), length); + } + Store.verify(indexOutput); + indexOutput.close(); + store.directory().sync(Collections.singleton(fileInfo.physicalName())); + success = true; + } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) { + try { + store.markStoreCorrupted(ex); + } catch (IOException e) { + logger.warn("store cannot be marked as corrupted", e); + } + throw ex; + } finally { + if (success == false) { + store.deleteQuiet(fileInfo.physicalName()); + } } - }; - return restoreRateLimiter == null ? dataBlobCompositeStream - : new RateLimitingInputStream(dataBlobCompositeStream, restoreRateLimiter, restoreRateLimitingTimeInNanos::inc); + } } }.restore(snapshotFiles, store); } catch (Exception e) { @@ -1167,8 +1205,12 @@ protected InputStream openSlice(long slice) throws IOException { } } + private static InputStream maybeRateLimit(InputStream stream, @Nullable RateLimiter rateLimiter, CounterMetric metric) { + return rateLimiter == null ? 
stream : new RateLimitingInputStream(stream, rateLimiter, metric::inc); + } + @Override - public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId) { + public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) { BlobStoreIndexShardSnapshot snapshot = loadShardSnapshot(shardContainer(indexId, shardId), snapshotId); return IndexShardSnapshotStatus.newDone(snapshot.startTime(), snapshot.time(), snapshot.incrementalFileCount(), snapshot.totalFileCount(), @@ -1328,13 +1370,9 @@ private void snapshotFile(BlobStoreIndexShardSnapshot.FileInfo fileInfo, IndexId for (int i = 0; i < fileInfo.numberOfParts(); i++) { final long partBytes = fileInfo.partBytes(i); - InputStream inputStream = new InputStreamIndexInput(indexInput, partBytes); - if (snapshotRateLimiter != null) { - inputStream = new RateLimitingInputStream(inputStream, snapshotRateLimiter, - snapshotRateLimitingTimeInNanos::inc); - } // Make reads abortable by mutating the snapshotStatus object - inputStream = new FilterInputStream(inputStream) { + final InputStream inputStream = new FilterInputStream(maybeRateLimit( + new InputStreamIndexInput(indexInput, partBytes), snapshotRateLimiter, snapshotRateLimitingTimeInNanos)) { @Override public int read() throws IOException { checkAborted(); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java index 7a848bf0f3b63..914d87202c682 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java @@ -21,11 +21,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.lucene.index.CorruptIndexException; -import org.apache.lucene.index.IndexFormatTooNewException; -import org.apache.lucene.index.IndexFormatTooOldException; -import org.apache.lucene.store.IOContext; -import org.apache.lucene.store.IndexOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.index.shard.ShardId; @@ -38,10 +33,8 @@ import org.elasticsearch.snapshots.SnapshotId; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -63,7 +56,6 @@ public abstract class FileRestoreContext { protected final RecoveryState recoveryState; protected final SnapshotId snapshotId; protected final ShardId shardId; - protected final int bufferSize; /** * Constructs new restore context @@ -71,15 +63,12 @@ public abstract class FileRestoreContext { * @param shardId shard id to restore into * @param snapshotId snapshot id * @param recoveryState recovery state to report progress - * @param bufferSize buffer size for restore */ - protected FileRestoreContext(String repositoryName, ShardId shardId, SnapshotId snapshotId, RecoveryState recoveryState, - int bufferSize) { + protected FileRestoreContext(String repositoryName, ShardId shardId, SnapshotId snapshotId, RecoveryState recoveryState) { this.repositoryName = repositoryName; this.recoveryState = recoveryState; this.snapshotId = snapshotId; this.shardId = shardId; - 
this.bufferSize = bufferSize; } /** @@ -194,54 +183,16 @@ public void restore(SnapshotFiles snapshotFiles, Store store) { } } - protected void restoreFiles(List filesToRecover, Store store) throws IOException { - // restore the files from the snapshot to the Lucene store - for (final BlobStoreIndexShardSnapshot.FileInfo fileToRecover : filesToRecover) { - logger.trace("[{}] [{}] restoring file [{}]", shardId, snapshotId, fileToRecover.name()); - restoreFile(fileToRecover, store); - } - } - - protected abstract InputStream fileInputStream(BlobStoreIndexShardSnapshot.FileInfo fileInfo); + /** + * Restores given list of {@link BlobStoreIndexShardSnapshot.FileInfo} to the given {@link Store}. + * + * @param filesToRecover List of files to restore + * @param store Store to restore into + */ + protected abstract void restoreFiles(List filesToRecover, Store store) throws IOException; @SuppressWarnings("unchecked") private static Iterable concat(Store.RecoveryDiff diff) { return Iterables.concat(diff.different, diff.missing); } - - /** - * Restores a file - * - * @param fileInfo file to be restored - */ - private void restoreFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, final Store store) throws IOException { - boolean success = false; - - try (InputStream stream = fileInputStream(fileInfo)) { - try (IndexOutput indexOutput = store.createVerifyingOutput(fileInfo.physicalName(), fileInfo.metadata(), IOContext.DEFAULT)) { - final byte[] buffer = new byte[bufferSize]; - int length; - while ((length = stream.read(buffer)) > 0) { - indexOutput.writeBytes(buffer, 0, length); - recoveryState.getIndex().addRecoveredBytesToFile(fileInfo.physicalName(), length); - } - Store.verify(indexOutput); - indexOutput.close(); - store.directory().sync(Collections.singleton(fileInfo.physicalName())); - success = true; - } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) { - try { - store.markStoreCorrupted(ex); - } catch (IOException e) { - logger.warn("store cannot be marked as corrupted", e); - } - throw ex; - } finally { - if (success == false) { - store.deleteQuiet(fileInfo.physicalName()); - } - } - } - } - } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index ac4869938ea4e..46854b5983352 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -691,7 +691,6 @@ public Map snapshotShards(final String reposi } else { shardSnapshotStatus = repository.getShardSnapshotStatus( snapshotInfo.snapshotId(), - snapshotInfo.version(), indexId, shardId); } diff --git a/server/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTests.java b/server/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTests.java index 40e02c560a9c0..64f960040aab7 100644 --- a/server/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTests.java +++ b/server/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.bytes; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.util.ByteArray; import org.hamcrest.Matchers; @@ -35,13 +34,12 @@ protected BytesReference newBytesReference(int length) throws IOException { @Override protected BytesReference newBytesReferenceWithOffsetOfZero(int 
length) throws IOException { - // we know bytes stream output always creates a paged bytes reference, we use it to create randomized content - ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(length, bigarrays); + ByteArray byteArray = bigarrays.newByteArray(length); for (int i = 0; i < length; i++) { - out.writeByte((byte) random().nextInt(1 << 8)); + byteArray.set(i, (byte) random().nextInt(1 << 8)); } - assertThat(out.size(), Matchers.equalTo(length)); - BytesReference ref = out.bytes(); + assertThat(byteArray.size(), Matchers.equalTo((long) length)); + BytesReference ref = new PagedBytesReference(byteArray, length); assertThat(ref.length(), Matchers.equalTo(length)); assertThat(ref, Matchers.instanceOf(PagedBytesReference.class)); return ref; diff --git a/server/src/test/java/org/elasticsearch/common/bytes/ReleasableBytesReferenceTests.java b/server/src/test/java/org/elasticsearch/common/bytes/ReleasableBytesReferenceTests.java new file mode 100644 index 0000000000000..58818a83cca9a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/bytes/ReleasableBytesReferenceTests.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.bytes; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.elasticsearch.common.util.ByteArray; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.CoreMatchers.equalTo; + +public class ReleasableBytesReferenceTests extends AbstractBytesReferenceTestCase { + + @Override + protected BytesReference newBytesReference(int length) throws IOException { + return newBytesReferenceWithOffsetOfZero(length); + } + + @Override + protected BytesReference newBytesReferenceWithOffsetOfZero(int length) throws IOException { + BytesReference delegate; + String composite = "composite"; + String paged = "paged"; + String array = "array"; + String type = randomFrom(composite, paged, array); + if (array.equals(type)) { + final BytesStreamOutput out = new BytesStreamOutput(length); + for (int i = 0; i < length; i++) { + out.writeByte((byte) random().nextInt(1 << 8)); + } + assertThat(length, equalTo(out.size())); + BytesArray ref = new BytesArray(out.bytes().toBytesRef().bytes, 0, length); + assertThat(length, equalTo(ref.length())); + assertThat(ref.length(), Matchers.equalTo(length)); + delegate = ref; + } else if (paged.equals(type)) { + ByteArray byteArray = bigarrays.newByteArray(length); + for (int i = 0; i < length; i++) { + byteArray.set(i, (byte) random().nextInt(1 << 8)); + } + assertThat(byteArray.size(), Matchers.equalTo((long) length)); + BytesReference ref = new PagedBytesReference(byteArray, length); + assertThat(ref.length(), Matchers.equalTo(length)); + delegate = ref; + } else { + assert composite.equals(type); + List referenceList = new ArrayList<>(); + for (int i = 0; i < length; ) { + int remaining = length - i; + int sliceLength = randomIntBetween(1, remaining); + ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(sliceLength, bigarrays); + for (int j = 0; j < sliceLength; j++) { + out.writeByte((byte) random().nextInt(1 << 8)); + } + assertThat(sliceLength, equalTo(out.size())); + referenceList.add(out.bytes()); + i += sliceLength; + } + BytesReference ref = new CompositeBytesReference(referenceList.toArray(new BytesReference[0])); + assertThat(length, equalTo(ref.length())); + delegate = ref; + } + return new ReleasableBytesReference(delegate, () -> { + }); + } + + @Override + public void testToBytesRefSharedPage() throws IOException { + // CompositeBytesReference doesn't share pages + } + + @Override + public void testSliceArrayOffset() throws IOException { + // the assertions in this test only work on no-composite buffers + } + + @Override + public void testSliceToBytesRef() throws IOException { + // CompositeBytesReference shifts offsets + } +} diff --git a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java index 3bfc649820fee..85670e893b970 100644 --- a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java +++ b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java @@ -22,7 +22,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.bytes.ReleasablePagedBytesReference; +import org.elasticsearch.common.bytes.PagedBytesReference; +import 
org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.lease.Releasable; @@ -331,7 +332,7 @@ public RestRequest.Method method() { // ESTestCase#after will invoke ensureAllArraysAreReleased which will fail if the response content was not released final BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); final ByteArray byteArray = bigArrays.newByteArray(0, false); - final BytesReference content = new ReleasablePagedBytesReference(byteArray, 0 , byteArray); + final BytesReference content = new ReleasableBytesReference(new PagedBytesReference(byteArray, 0) , byteArray); channel.sendResponse(new TestRestResponse(RestStatus.METHOD_NOT_ALLOWED, content)); Class> listenerClass = (Class>) (Class) ActionListener.class; @@ -368,7 +369,7 @@ public HttpResponse createResponse(RestStatus status, BytesReference content) { // ESTestCase#after will invoke ensureAllArraysAreReleased which will fail if the response content was not released final BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); final ByteArray byteArray = bigArrays.newByteArray(0, false); - final BytesReference content = new ReleasablePagedBytesReference(byteArray, 0 , byteArray); + final BytesReference content = new ReleasableBytesReference(new PagedBytesReference(byteArray, 0) , byteArray); expectThrows(IllegalArgumentException.class, () -> channel.sendResponse(new TestRestResponse(RestStatus.OK, content))); diff --git a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java index 9141481b88219..dbff49460015b 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java @@ -104,12 +104,12 @@ public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { () -> IndexingSlowLogMessage.of(index, doc, 10, true, 3)); assertThat(e, hasToString(containsString("_failed_to_convert_[Unrecognized token 'invalid':" + " was expecting ('true', 'false' or 'null')\\n" - + " at [Source: org.elasticsearch.common.bytes.BytesReference$MarkSupportingStreamInputWrapper"))); + + " at [Source: org.elasticsearch.common.bytes.AbstractBytesReference$MarkSupportingStreamInputWrapper"))); assertNotNull(e.getCause()); assertThat(e.getCause(), instanceOf(JsonParseException.class)); assertThat(e.getCause(), hasToString(containsString("Unrecognized token 'invalid':" + " was expecting ('true', 'false' or 'null')\n" - + " at [Source: org.elasticsearch.common.bytes.BytesReference$MarkSupportingStreamInputWrapper"))); + + " at [Source: org.elasticsearch.common.bytes.AbstractBytesReference$MarkSupportingStreamInputWrapper"))); } public void testReformatSetting() { diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 1b57f822c284c..da6c1807f0e34 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -2340,8 +2340,8 @@ public void testRestoreShard() throws IOException { target.markAsRecovering("store", new RecoveryState(routing, localNode, null)); 
assertTrue(target.restoreFromRepository(new RestoreOnlyRepository("test") { @Override - public void restoreShard(Store store, SnapshotId snapshotId, - Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { + public void restoreShard(Store store, SnapshotId snapshotId, IndexId indexId, ShardId snapshotShardId, + RecoveryState recoveryState) { try { cleanLuceneIndex(targetStore.directory()); for (String file : sourceStore.directory().listAll()) { diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java index e27aefdf13fff..6575503341c4e 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java @@ -32,6 +32,7 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.AbstractBytesReference; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; @@ -455,7 +456,7 @@ public void testEqualsKey() throws IOException { assertNotEquals(key1, key5); } - private class TestBytesReference extends BytesReference { + private class TestBytesReference extends AbstractBytesReference { int dummyValue; TestBytesReference(int dummyValue) { diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java index 3fcfd05a665a4..ff6e7194a653b 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.repositories; import org.apache.lucene.index.IndexCommit; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -204,13 +203,12 @@ public void snapshotShard(Store store, MapperService mapperService, SnapshotId s } @Override - public void restoreShard(Store store, SnapshotId snapshotId, - Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { - + public void restoreShard(Store store, SnapshotId snapshotId, IndexId indexId, ShardId snapshotShardId, + RecoveryState recoveryState) { } @Override - public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId) { + public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) { return null; } diff --git a/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java index 3c2d59564deac..45ad368535380 100644 --- a/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java @@ -118,8 +118,7 @@ public void testSnapshotAndRestore() throws IOException, InterruptedException { new UnassignedInfo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED, "")); routing = ShardRoutingHelper.initialize(routing, 
localNode.getId(), 0); RecoveryState state = new RecoveryState(routing, localNode, null); - runGeneric(threadPool, () -> - repository.restoreShard(store, snapshotId, Version.CURRENT, indexId, shardId, state)); + runGeneric(threadPool, () -> repository.restoreShard(store, snapshotId, indexId, shardId, state)); assertTrue(state.getIndex().recoveredBytes() > 0); assertEquals(0, state.getIndex().reusedFileCount()); assertEquals(indexCommit.getFileNames().size(), state.getIndex().recoveredFileCount()); @@ -141,13 +140,13 @@ public void testSnapshotAndRestore() throws IOException, InterruptedException { // roll back to the first snap and then incrementally restore RecoveryState firstState = new RecoveryState(routing, localNode, null); runGeneric(threadPool, () -> - repository.restoreShard(store, snapshotId, Version.CURRENT, indexId, shardId, firstState)); + repository.restoreShard(store, snapshotId, indexId, shardId, firstState)); assertEquals("should reuse everything except of .liv and .si", commitFileNames.size()-2, firstState.getIndex().reusedFileCount()); RecoveryState secondState = new RecoveryState(routing, localNode, null); runGeneric(threadPool, () -> - repository.restoreShard(store, incSnapshotId, Version.CURRENT, indexId, shardId, secondState)); + repository.restoreShard(store, incSnapshotId, indexId, shardId, secondState)); assertEquals(secondState.getIndex().reusedFileCount(), commitFileNames.size()-2); assertEquals(secondState.getIndex().recoveredFileCount(), 2); List recoveredFiles = diff --git a/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java index 478ac149dd1f8..2e8a44faa0d06 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java @@ -591,7 +591,7 @@ public void testCompareTo() throws IOException { for (int j = crazyStream.size(); j < crazyLength; j++) { crazyStream.writeByte((byte) random().nextInt(1 << 8)); } - PagedBytesReference crazyReference = crazyStream.bytes(); + ReleasableBytesReference crazyReference = crazyStream.bytes(); assertFalse(crazyReference.compareTo(bytesReference) == 0); assertEquals(0, crazyReference.slice(offset, length).compareTo( diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 28c18398797fa..5824e35e41bb1 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -806,7 +806,7 @@ protected void recoverShardFromSnapshot(final IndexShard shard, final ShardRouting shardRouting = newShardRouting(shardId, node.getId(), true, ShardRoutingState.INITIALIZING, recoverySource); shard.markAsRecovering("from snapshot", new RecoveryState(shardRouting, node, null)); repository.restoreShard(shard.store(), - snapshot.getSnapshotId(), version, + snapshot.getSnapshotId(), indexId, shard.shardId(), shard.recoveryState()); diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java index b837b9379cd9a..6891ab1385cbe 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.shard; import org.apache.lucene.index.IndexCommit; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -136,7 +135,7 @@ public void snapshotShard(Store store, MapperService mapperService, SnapshotId s } @Override - public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId) { + public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) { return null; } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java index e47bdeee3c225..4876f64301204 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java @@ -157,7 +157,7 @@ public void handle(final HttpExchange exchange) throws IOException { final boolean canFailRequest = canFailRequest(exchange); final int count = requests.computeIfAbsent(requestId, req -> new AtomicInteger(0)).incrementAndGet(); - if (count >= maxErrorsPerRequest || canFailRequest == false || randomBoolean()) { + if (count >= maxErrorsPerRequest || canFailRequest == false) { requests.remove(requestId); delegate.handle(exchange); } else { diff --git a/test/framework/src/test/java/org/elasticsearch/test/AbstractXContentTestCaseTests.java b/test/framework/src/test/java/org/elasticsearch/test/AbstractXContentTestCaseTests.java index 2acb89befabf4..09524111fd6a8 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/AbstractXContentTestCaseTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/AbstractXContentTestCaseTests.java @@ -55,4 +55,4 @@ public void testInsertRandomFieldsAndShuffle() throws Exception { assertThat(mapOrdered.keySet().iterator().next(), not(equalTo("field"))); } } -} \ No newline at end of file +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java index 9f348c5a470b4..58afee18ddbf2 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java @@ -12,7 +12,8 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.bytes.ReleasablePagedBytesReference; +import org.elasticsearch.common.bytes.PagedBytesReference; +import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -59,7 +60,8 @@ protected void 
doExecute(Task task, GetCcrRestoreFileChunkRequest request, String sessionUUID = request.getSessionUUID(); // This is currently safe to do because calling `onResponse` will serialize the bytes to the network layer data // structure on the same thread. So the bytes will be copied before the reference is released. - try (ReleasablePagedBytesReference reference = new ReleasablePagedBytesReference(array, bytesRequested, array)) { + PagedBytesReference pagedBytesReference = new PagedBytesReference(array, bytesRequested); + try (ReleasableBytesReference reference = new ReleasableBytesReference(pagedBytesReference, array)) { try (CcrRestoreSourceService.SessionReader sessionReader = restoreSourceService.getSessionReader(sessionUUID)) { long offsetAfterRead = sessionReader.readFileBytes(fileName, reference); long offsetBeforeRead = offsetAfterRead - reference.length(); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index 156b7d927c7d0..12c8e72a0cb02 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -12,16 +12,15 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.IndexCommit; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.support.ListenerTimeouts; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -32,18 +31,16 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.Index; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.seqno.LocalCheckpointTracker; import org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException; import org.elasticsearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException; import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; @@ -55,6 +52,7 @@ import org.elasticsearch.index.snapshots.blobstore.SnapshotFiles; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; +import 
org.elasticsearch.indices.recovery.MultiFileTransfer; import org.elasticsearch.indices.recovery.MultiFileWriter; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.repositories.IndexId; @@ -81,7 +79,6 @@ import java.io.Closeable; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -89,12 +86,11 @@ import java.util.Map; import java.util.Optional; import java.util.Set; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.LongConsumer; import java.util.function.Supplier; +import java.util.stream.Collectors; import static org.elasticsearch.index.seqno.RetentionLeaseActions.RETAIN_ALL; -import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.elasticsearch.xpack.ccr.CcrRetentionLeases.retentionLeaseId; import static org.elasticsearch.xpack.ccr.CcrRetentionLeases.syncAddRetentionLease; import static org.elasticsearch.xpack.ccr.CcrRetentionLeases.syncRenewRetentionLease; @@ -297,8 +293,7 @@ public void snapshotShard(Store store, MapperService mapperService, SnapshotId s } @Override - public void restoreShard(Store store, SnapshotId snapshotId, - Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { + public void restoreShard(Store store, SnapshotId snapshotId, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { // TODO: Add timeouts to network calls / the restore process. createEmptyStore(store); ShardId shardId = store.shardId(); @@ -409,7 +404,7 @@ void acquireRetentionLeaseOnLeader( } @Override - public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId leaderShardId) { + public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId leaderShardId) { throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); } @@ -454,7 +449,7 @@ private static class RestoreSession extends FileRestoreContext implements Closea RestoreSession(String repositoryName, Client remoteClient, String sessionUUID, DiscoveryNode node, ShardId shardId, RecoveryState recoveryState, Store.MetadataSnapshot sourceMetaData, long mappingVersion, ThreadPool threadPool, CcrSettings ccrSettings, LongConsumer throttleListener) { - super(repositoryName, shardId, SNAPSHOT_ID, recoveryState, Math.toIntExact(ccrSettings.getChunkSize().getBytes())); + super(repositoryName, shardId, SNAPSHOT_ID, recoveryState); this.remoteClient = remoteClient; this.sessionUUID = sessionUUID; this.node = node; @@ -476,102 +471,82 @@ void restoreFiles(Store store) { } @Override - protected void restoreFiles(List<FileInfo> filesToRecover, Store store) throws IOException { + protected void restoreFiles(List<FileInfo> filesToRecover, Store store) { logger.trace("[{}] starting CCR restore of {} files", shardId, filesToRecover); + final PlainActionFuture<Void> restoreFilesFuture = new PlainActionFuture<>(); + final List<StoreFileMetaData> mds = filesToRecover.stream().map(FileInfo::metadata).collect(Collectors.toList()); + final MultiFileTransfer<FileChunk> multiFileTransfer = new MultiFileTransfer<>( + logger, threadPool.getThreadContext(), restoreFilesFuture, ccrSettings.getMaxConcurrentFileChunks(), mds) { - try (MultiFileWriter multiFileWriter = new MultiFileWriter(store, recoveryState.getIndex(), "", logger, () -> { - })) { - final LocalCheckpointTracker requestSeqIdTracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); -
final AtomicReference> error = new AtomicReference<>(); + final MultiFileWriter multiFileWriter = new MultiFileWriter(store, recoveryState.getIndex(), "", logger, () -> {}); + long offset = 0; - for (FileInfo fileInfo : filesToRecover) { - final long fileLength = fileInfo.length(); - long offset = 0; - while (offset < fileLength && error.get() == null) { - final long requestSeqId = requestSeqIdTracker.generateSeqNo(); - try { - requestSeqIdTracker.waitForProcessedOpsToComplete(requestSeqId - ccrSettings.getMaxConcurrentFileChunks()); - - if (error.get() != null) { - requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); - break; - } - - final int bytesRequested = Math.toIntExact( - Math.min(ccrSettings.getChunkSize().getBytes(), fileLength - offset)); - offset += bytesRequested; - - final GetCcrRestoreFileChunkRequest request = - new GetCcrRestoreFileChunkRequest(node, sessionUUID, fileInfo.name(), bytesRequested); - logger.trace("[{}] [{}] fetching chunk for file [{}], expected offset: {}, size: {}", shardId, snapshotId, - fileInfo.name(), offset, bytesRequested); - - TimeValue timeout = ccrSettings.getRecoveryActionTimeout(); - ActionListener listener = - ListenerTimeouts.wrapWithTimeout(threadPool, ActionListener.wrap( - r -> threadPool.generic().execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - error.compareAndSet(null, Tuple.tuple(fileInfo.metadata(), e)); - requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); - } - - @Override - protected void doRun() throws Exception { - final int actualChunkSize = r.getChunk().length(); - logger.trace("[{}] [{}] got response for file [{}], offset: {}, length: {}", shardId, - snapshotId, fileInfo.name(), r.getOffset(), actualChunkSize); - final long nanosPaused = ccrSettings.getRateLimiter().maybePause(actualChunkSize); - throttleListener.accept(nanosPaused); - final boolean lastChunk = r.getOffset() + actualChunkSize >= fileLength; - multiFileWriter.writeFileChunk(fileInfo.metadata(), r.getOffset(), r.getChunk(), lastChunk); - requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); - } - }), - e -> { - error.compareAndSet(null, Tuple.tuple(fileInfo.metadata(), e)); - requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); - } - ), timeout, ThreadPool.Names.GENERIC, GetCcrRestoreFileChunkAction.NAME); - remoteClient.execute(GetCcrRestoreFileChunkAction.INSTANCE, request, listener); - } catch (Exception e) { - error.compareAndSet(null, Tuple.tuple(fileInfo.metadata(), e)); - requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); - } - } + @Override + protected void onNewFile(StoreFileMetaData md) { + offset = 0; } - try { - requestSeqIdTracker.waitForProcessedOpsToComplete(requestSeqIdTracker.getMaxSeqNo()); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new ElasticsearchException(e); + @Override + protected FileChunk nextChunkRequest(StoreFileMetaData md) { + final int bytesRequested = Math.toIntExact(Math.min(ccrSettings.getChunkSize().getBytes(), md.length() - offset)); + offset += bytesRequested; + return new FileChunk(md, bytesRequested, offset == md.length()); } - if (error.get() != null) { - handleError(store, error.get().v2()); + + @Override + protected void executeChunkRequest(FileChunk request, ActionListener listener) { + final ActionListener threadedListener + = new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.GENERIC, ActionListener.wrap( + r -> { + writeFileChunk(request.md, r); + listener.onResponse(null); + }, listener::onFailure), false); + 
+ remoteClient.execute(GetCcrRestoreFileChunkAction.INSTANCE, + new GetCcrRestoreFileChunkRequest(node, sessionUUID, request.md.name(), request.bytesRequested), + ListenerTimeouts.wrapWithTimeout(threadPool, threadedListener, ccrSettings.getRecoveryActionTimeout(), + ThreadPool.Names.GENERIC, GetCcrRestoreFileChunkAction.NAME)); } - } - logger.trace("[{}] completed CCR restore", shardId); - } + private void writeFileChunk(StoreFileMetaData md, + GetCcrRestoreFileChunkAction.GetCcrRestoreFileChunkResponse r) throws Exception { + final int actualChunkSize = r.getChunk().length(); + logger.trace("[{}] [{}] got response for file [{}], offset: {}, length: {}", + shardId, snapshotId, md.name(), r.getOffset(), actualChunkSize); + final long nanosPaused = ccrSettings.getRateLimiter().maybePause(actualChunkSize); + throttleListener.accept(nanosPaused); + multiFileWriter.incRef(); + try (Releasable ignored = multiFileWriter::decRef) { + final boolean lastChunk = r.getOffset() + actualChunkSize >= md.length(); + multiFileWriter.writeFileChunk(md, r.getOffset(), r.getChunk(), lastChunk); + } catch (Exception e) { + handleError(md, e); + throw e; + } + } - private void handleError(Store store, Exception e) throws IOException { - final IOException corruptIndexException; - if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(e)) != null) { - try { - store.markStoreCorrupted(corruptIndexException); - } catch (IOException ioe) { - logger.warn("store cannot be marked as corrupted", e); + @Override + protected void handleError(StoreFileMetaData md, Exception e) throws Exception { + final IOException corruptIndexException; + if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(e)) != null) { + try { + store.markStoreCorrupted(corruptIndexException); + } catch (IOException ioe) { + logger.warn("store cannot be marked as corrupted", e); + } + throw corruptIndexException; + } + throw e; } - throw corruptIndexException; - } else { - ExceptionsHelper.reThrowIfNotNull(e); - } - } - @Override - protected InputStream fileInputStream(FileInfo fileInfo) { - throw new UnsupportedOperationException(); + @Override + public void close() { + multiFileWriter.close(); + } + }; + multiFileTransfer.start(); + restoreFilesFuture.actionGet(); + logger.trace("[{}] completed CCR restore", shardId); } @Override @@ -580,5 +555,22 @@ public void close() { ClearCcrRestoreSessionAction.ClearCcrRestoreSessionResponse response = remoteClient.execute(ClearCcrRestoreSessionAction.INSTANCE, clearRequest).actionGet(ccrSettings.getRecoveryActionTimeout()); } + + private static class FileChunk implements MultiFileTransfer.ChunkRequest { + final StoreFileMetaData md; + final int bytesRequested; + final boolean lastChunk; + + FileChunk(StoreFileMetaData md, int bytesRequested, boolean lastChunk) { + this.md = md; + this.bytesRequested = bytesRequested; + this.lastChunk = lastChunk; + } + + @Override + public boolean lastChunk() { + return lastChunk; + } + } } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java index 6ec062eaeee08..472660f102db6 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.ccr; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import 
org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; @@ -13,11 +12,11 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.ESIntegTestCase; @@ -33,24 +32,24 @@ import org.elasticsearch.xpack.core.ccr.action.GetAutoFollowPatternAction; import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Locale; -import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import static java.util.stream.Collectors.toUnmodifiableList; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/47917") public class AutoFollowIT extends CcrIntegTestCase { @Override @@ -164,7 +163,7 @@ public void testAutoFollowManyIndices() throws Exception { // Delete auto follow pattern and make sure that in the background the auto follower has stopped // then the leader index created after that should never be auto followed: - deleteAutoFollowPatternSetting(); + deleteAutoFollowPattern("my-pattern"); try { assertBusy(() -> { metaData[0] = getFollowerCluster().clusterService().state().metaData(); @@ -471,34 +470,40 @@ public void testPauseAndResumeWithMultipleAutoFollowPatterns() throws Exception .build(); final String[] prefixes = {"logs-", "users-", "docs-", "monitoring-", "data-", "system-", "events-", "files-"}; - if (randomBoolean()) { - // sometimes create indices in the remote cluster that match the future auto follow patterns - Arrays.stream(prefixes).forEach(prefix -> createLeaderIndex(prefix + "ignored", leaderIndexSettings)); - } - // create auto follow patterns - final List autoFollowPatterns = new ArrayList<>(prefixes.length); - for (String prefix : prefixes) { - String name = prefix + "pattern"; - putAutoFollowPatterns(name, new String[]{prefix + "*"}); - autoFollowPatterns.add(name); - assertBusy(() -> assertThat(getAutoFollowStats().getAutoFollowedClusters().size(), equalTo(1))); - assertTrue(getAutoFollowPattern(name).isActive()); - } + // create an auto follow pattern for each prefix + final List autoFollowPatterns = Arrays.stream(prefixes) + .map(prefix -> { + final String pattern = prefix + "pattern"; + putAutoFollowPatterns(pattern, new String[]{prefix + "*"}); + return pattern; + }).collect(toUnmodifiableList()); - // no following indices are created yet - 
assertThat(followerClient().admin().indices().prepareStats("copy-*").get().getIndices().size(), equalTo(0)); + // pick up some random pattern to pause + final List pausedAutoFollowerPatterns = randomSubsetOf(randomIntBetween(1, 3), autoFollowPatterns); + + // all patterns should be active + assertBusy(() -> autoFollowPatterns.forEach(pattern -> assertTrue(getAutoFollowPattern(pattern).isActive()))); + assertBusy(() -> assertThat(getAutoFollowStats().getAutoFollowedClusters().size(), equalTo(1))); - // create random indices in the remote cluster that match the patterns final AtomicBoolean running = new AtomicBoolean(true); - final Set leaderIndices = ConcurrentCollections.newConcurrentSet(); + final AtomicInteger leaderIndices = new AtomicInteger(0); + + // start creating new indices on the remote cluster final Thread createNewLeaderIndicesThread = new Thread(() -> { - while (running.get()) { + int leaderIndicesCount; + while (running.get() && (leaderIndicesCount = leaderIndices.incrementAndGet()) < 20) { + final String prefix = randomFrom(prefixes); + final String leaderIndex = prefix + leaderIndicesCount; try { - String indexName = randomFrom(prefixes) + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - createLeaderIndex(indexName, leaderIndexSettings); - leaderIndices.add(indexName); - Thread.sleep(randomIntBetween(100, 500)); + createLeaderIndex(leaderIndex, leaderIndexSettings); + ensureLeaderGreen(leaderIndex); + if (pausedAutoFollowerPatterns.stream().noneMatch(pattern -> pattern.startsWith(prefix))) { + final String followingIndex = "copy-" + leaderIndex; + assertBusy(() -> assertTrue(ESIntegTestCase.indexExists(followingIndex, followerClient()))); + } else { + Thread.sleep(200L); + } } catch (Exception e) { throw new AssertionError(e); } @@ -506,46 +511,45 @@ public void testPauseAndResumeWithMultipleAutoFollowPatterns() throws Exception }); createNewLeaderIndicesThread.start(); - // wait for some leader indices to be auto-followed - assertBusy(() -> - assertThat(getAutoFollowStats().getNumberOfSuccessfulFollowIndices(), greaterThanOrEqualTo((long) prefixes.length))); + // wait for 3 leader indices to be created on the remote cluster + assertBusy(() -> assertThat(leaderIndices.get(), greaterThanOrEqualTo(3))); + assertBusy(() -> assertThat(getAutoFollowStats().getNumberOfSuccessfulFollowIndices(), greaterThanOrEqualTo(3L))); - final int nbLeaderIndices = leaderIndices.size(); - - // pause some random patterns - final List pausedAutoFollowerPatterns = randomSubsetOf(autoFollowPatterns); + // now pause some random patterns pausedAutoFollowerPatterns.forEach(this::pauseAutoFollowPattern); - assertBusy(() -> pausedAutoFollowerPatterns.forEach(pattern -> assertFalse(getAutoFollowPattern(pattern).isActive()))); + assertBusy(() -> autoFollowPatterns.forEach(pattern -> + assertThat(getAutoFollowPattern(pattern).isActive(), equalTo(pausedAutoFollowerPatterns.contains(pattern) == false)))); - assertBusy(() -> { - final int expectedAutoFollowedClusters = pausedAutoFollowerPatterns.size() != autoFollowPatterns.size() ? 
1 : 0; - assertThat(getAutoFollowStats().getAutoFollowedClusters().size(), equalTo(expectedAutoFollowedClusters)); - if (expectedAutoFollowedClusters > 0) { - // wait for more indices to be created in the remote cluster while some patterns are paused - assertThat(leaderIndices.size(), greaterThan(nbLeaderIndices + 3)); - } - }); - ensureFollowerGreen(true, "copy-*"); + // wait for more leader indices to be created on the remote cluster + assertBusy(() -> assertThat(leaderIndices.get(), greaterThanOrEqualTo(6))); + assertBusy(() -> assertThat(getAutoFollowStats().getNumberOfSuccessfulFollowIndices(), greaterThanOrEqualTo(6L))); // resume auto follow patterns pausedAutoFollowerPatterns.forEach(this::resumeAutoFollowPattern); - assertBusy(() -> pausedAutoFollowerPatterns.forEach(pattern -> assertTrue(getAutoFollowPattern(pattern).isActive()))); + assertBusy(() -> autoFollowPatterns.forEach(pattern -> assertTrue(getAutoFollowPattern(pattern).isActive()))); + + // wait for more leader indices to be created on the remote cluster + assertBusy(() -> assertThat(leaderIndices.get(), greaterThanOrEqualTo(9))); + assertBusy(() -> assertThat(getAutoFollowStats().getNumberOfSuccessfulFollowIndices(), greaterThanOrEqualTo(9L))); - // stop creating indices in the remote cluster running.set(false); createNewLeaderIndicesThread.join(); - ensureLeaderGreen(leaderIndices.toArray(new String[0])); - // check that all leader indices have been correctly auto followed - assertBusy(() -> { - final Client client = followerClient(); - assertThat(client.admin().indices().prepareStats("copy-*").get().getIndices().size(), equalTo(leaderIndices.size())); - leaderIndices.stream() - .map(leaderIndex -> "copy-" + leaderIndex) - .forEach(followerIndex -> - assertTrue("following index must exist: " + followerIndex, ESIntegTestCase.indexExists(followerIndex, client))); - }); + List matchingPrefixes = Arrays.stream(prefixes).map(prefix -> prefix + "*").collect(Collectors.toList()); + for (IndexMetaData leaderIndexMetaData : leaderClient().admin().cluster().prepareState().get().getState().metaData()) { + final String leaderIndex = leaderIndexMetaData.getIndex().getName(); + if (Regex.simpleMatch(matchingPrefixes, leaderIndex)) { + String followingIndex = "copy-" + leaderIndex; + assertBusy(() -> assertThat("Following index [" + followingIndex + "] must exists", + ESIntegTestCase.indexExists(followingIndex, followerClient()), is(true))); + } + } + + autoFollowPatterns.forEach(this::deleteAutoFollowPattern); + + ensureFollowerGreen("copy-*"); + assertThat(followerClient().admin().indices().prepareStats("copy-*").get().getIndices().size(), equalTo(leaderIndices.get())); } private void putAutoFollowPatterns(String name, String[] patterns) { @@ -558,8 +562,8 @@ private void putAutoFollowPatterns(String name, String[] patterns) { assertTrue(followerClient().execute(PutAutoFollowPatternAction.INSTANCE, request).actionGet().isAcknowledged()); } - private void deleteAutoFollowPatternSetting() { - DeleteAutoFollowPatternAction.Request request = new DeleteAutoFollowPatternAction.Request("my-pattern"); + private void deleteAutoFollowPattern(final String name) { + DeleteAutoFollowPatternAction.Request request = new DeleteAutoFollowPatternAction.Request(name); assertTrue(followerClient().execute(DeleteAutoFollowPatternAction.INSTANCE, request).actionGet().isAcknowledged()); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java 
b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index 778091eaa745a..fcfc62fb8a194 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -453,8 +453,8 @@ protected synchronized void recoverPrimary(IndexShard primary) { primary.markAsRecovering("remote recovery from leader", new RecoveryState(routing, localNode, null)); primary.restoreFromRepository(new RestoreOnlyRepository(index.getName()) { @Override - public void restoreShard(Store store, SnapshotId snapshotId, - Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { + public void restoreShard(Store store, SnapshotId snapshotId, IndexId indexId, ShardId snapshotShardId, + RecoveryState recoveryState) { try { IndexShard leader = leaderGroup.getPrimary(); Lucene.cleanLuceneIndex(primary.store().directory()); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java index d5a5d8cd7811f..232d48833107e 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java @@ -127,8 +127,8 @@ public void testRestoreShard() throws IOException { target.markAsRecovering("store", new RecoveryState(routing, localNode, null)); assertTrue(target.restoreFromRepository(new RestoreOnlyRepository("test") { @Override - public void restoreShard(Store store, SnapshotId snapshotId, - Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { + public void restoreShard(Store store, SnapshotId snapshotId, IndexId indexId, ShardId snapshotShardId, + RecoveryState recoveryState) { try { cleanLuceneIndex(targetStore.directory()); for (String file : sourceStore.directory().listAll()) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index 6adee04991500..69ff6dddcbe48 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -16,10 +16,12 @@ import org.elasticsearch.xpack.core.monitoring.MonitoringField; import java.util.Collections; +import java.util.EnumSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.function.BiFunction; @@ -28,6 +30,9 @@ */ public class XPackLicenseState { + public static final Set FIPS_ALLOWED_LICENSE_OPERATION_MODES = + EnumSet.of(License.OperationMode.PLATINUM, License.OperationMode.TRIAL); + /** Messages for each feature which are printed when the license expires. 
*/ static final Map EXPIRATION_MESSAGES; static { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java index 4de7c51f8daac..61f2cbaccd0f2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java @@ -50,6 +50,7 @@ public final class ClientHelper { public static final String DEPRECATION_ORIGIN = "deprecation"; public static final String PERSISTENT_TASK_ORIGIN = "persistent_tasks"; public static final String ROLLUP_ORIGIN = "rollup"; + public static final String ENRICH_ORIGIN = "enrich"; public static final String TRANSFORM_ORIGIN = "transform"; private ClientHelper() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/ExecuteEnrichPolicyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/ExecuteEnrichPolicyAction.java index e5c6ab5eb67ee..661cc4f9467b0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/ExecuteEnrichPolicyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/ExecuteEnrichPolicyAction.java @@ -68,12 +68,6 @@ public ActionRequestValidationException validate() { return null; } - // This will be displayed in tasks api and allows stats api to figure out which policies are being executed. - @Override - public String getDescription() { - return name; - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java index f9f851a2af908..0a0b082e5bd1b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java @@ -8,7 +8,6 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -78,7 +77,7 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> { XContentBuilder builder = JsonXContent.contentBuilder(); builder.copyCurrentStructure(p); - return BytesArray.bytes(builder); + return BytesReference.bytes(builder); }, STEP_INFO_FIELD); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> PhaseExecutionInfo.parse(p, ""), PHASE_EXECUTION_INFO); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/MulticlassConfusionMatrix.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/MulticlassConfusionMatrix.java index d9d47ab9aab20..9f0150c5b8fe6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/MulticlassConfusionMatrix.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/MulticlassConfusionMatrix.java @@ 
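The parser fix above (BytesArray.bytes replaced by BytesReference.bytes) uses the static factory that converts a finished XContentBuilder into a BytesReference. A small sketch follows, assuming the Elasticsearch x-content and bytes utilities are on the classpath; the StepInfoBytes class and the field it writes are illustrative only.

import java.io.IOException;

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;

// Builds a small JSON object and captures it as a BytesReference,
// the same shape as the step_info parsing above.
final class StepInfoBytes {

    static BytesReference stepInfo() throws IOException {
        XContentBuilder builder = JsonXContent.contentBuilder();
        builder.startObject().field("message", "waiting for active shards").endObject();
        return BytesReference.bytes(builder); // returns the builder's content as bytes
    }

    public static void main(String[] args) throws IOException {
        System.out.println(stepInfo().utf8ToString()); // {"message":"waiting for active shards"}
    }
}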
-9,7 +9,9 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilders; @@ -25,14 +27,14 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Objects; import java.util.Optional; -import java.util.TreeMap; import java.util.stream.Collectors; +import static java.util.Comparator.comparing; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; @@ -111,18 +113,19 @@ public final List aggs(String actualField, String predictedF .size(size)); } if (result == null) { // This is step 2 - KeyedFilter[] keyedFilters = + KeyedFilter[] keyedFiltersActual = + topActualClassNames.stream() + .map(className -> new KeyedFilter(className, QueryBuilders.termQuery(actualField, className))) + .toArray(KeyedFilter[]::new); + KeyedFilter[] keyedFiltersPredicted = topActualClassNames.stream() .map(className -> new KeyedFilter(className, QueryBuilders.termQuery(predictedField, className))) .toArray(KeyedFilter[]::new); return List.of( AggregationBuilders.cardinality(STEP_2_CARDINALITY_OF_ACTUAL_CLASS) .field(actualField), - AggregationBuilders.terms(STEP_2_AGGREGATE_BY_ACTUAL_CLASS) - .field(actualField) - .order(List.of(BucketOrder.count(false), BucketOrder.key(true))) - .size(size) - .subAggregation(AggregationBuilders.filters(STEP_2_AGGREGATE_BY_PREDICTED_CLASS, keyedFilters) + AggregationBuilders.filters(STEP_2_AGGREGATE_BY_ACTUAL_CLASS, keyedFiltersActual) + .subAggregation(AggregationBuilders.filters(STEP_2_AGGREGATE_BY_PREDICTED_CLASS, keyedFiltersPredicted) .otherBucket(true) .otherBucketKey(OTHER_BUCKET_KEY))); } @@ -133,26 +136,31 @@ public final List aggs(String actualField, String predictedF public void process(Aggregations aggs) { if (topActualClassNames == null && aggs.get(STEP_1_AGGREGATE_BY_ACTUAL_CLASS) != null) { Terms termsAgg = aggs.get(STEP_1_AGGREGATE_BY_ACTUAL_CLASS); - topActualClassNames = termsAgg.getBuckets().stream().map(Terms.Bucket::getKeyAsString).collect(Collectors.toList()); + topActualClassNames = termsAgg.getBuckets().stream().map(Terms.Bucket::getKeyAsString).sorted().collect(Collectors.toList()); } if (result == null && aggs.get(STEP_2_AGGREGATE_BY_ACTUAL_CLASS) != null) { Cardinality cardinalityAgg = aggs.get(STEP_2_CARDINALITY_OF_ACTUAL_CLASS); - Terms termsAgg = aggs.get(STEP_2_AGGREGATE_BY_ACTUAL_CLASS); - Map> counts = new TreeMap<>(); - for (Terms.Bucket bucket : termsAgg.getBuckets()) { + Filters filtersAgg = aggs.get(STEP_2_AGGREGATE_BY_ACTUAL_CLASS); + List actualClasses = new ArrayList<>(filtersAgg.getBuckets().size()); + for (Filters.Bucket bucket : filtersAgg.getBuckets()) { String actualClass = bucket.getKeyAsString(); - Map subCounts = new TreeMap<>(); - counts.put(actualClass, subCounts); + long actualClassDocCount = bucket.getDocCount(); Filters subAgg = bucket.getAggregations().get(STEP_2_AGGREGATE_BY_PREDICTED_CLASS); + List 
predictedClasses = new ArrayList<>(); + long otherPredictedClassDocCount = 0; for (Filters.Bucket subBucket : subAgg.getBuckets()) { String predictedClass = subBucket.getKeyAsString(); - Long docCount = subBucket.getDocCount(); - if ((OTHER_BUCKET_KEY.equals(predictedClass) && docCount == 0L) == false) { - subCounts.put(predictedClass, docCount); + long docCount = subBucket.getDocCount(); + if (OTHER_BUCKET_KEY.equals(predictedClass)) { + otherPredictedClassDocCount = docCount; + } else { + predictedClasses.add(new PredictedClass(predictedClass, docCount)); } } + predictedClasses.sort(comparing(PredictedClass::getPredictedClass)); + actualClasses.add(new ActualClass(actualClass, actualClassDocCount, predictedClasses, otherPredictedClassDocCount)); } - result = new Result(counts, termsAgg.getSumOfOtherDocCounts() == 0 ? 0 : cardinalityAgg.getValue() - size); + result = new Result(actualClasses, Math.max(cardinalityAgg.getValue() - size, 0)); } } @@ -190,37 +198,35 @@ public int hashCode() { public static class Result implements EvaluationMetricResult { private static final ParseField CONFUSION_MATRIX = new ParseField("confusion_matrix"); - private static final ParseField OTHER_CLASSES_COUNT = new ParseField("_other_"); + private static final ParseField OTHER_ACTUAL_CLASS_COUNT = new ParseField("other_actual_class_count"); + @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "multiclass_confusion_matrix_result", true, a -> new Result((Map>) a[0], (long) a[1])); + "multiclass_confusion_matrix_result", true, a -> new Result((List) a[0], (long) a[1])); static { - PARSER.declareObject( - constructorArg(), - (p, c) -> p.map(TreeMap::new, p2 -> p2.map(TreeMap::new, XContentParser::longValue)), - CONFUSION_MATRIX); - PARSER.declareLong(constructorArg(), OTHER_CLASSES_COUNT); + PARSER.declareObjectArray(constructorArg(), ActualClass.PARSER, CONFUSION_MATRIX); + PARSER.declareLong(constructorArg(), OTHER_ACTUAL_CLASS_COUNT); } public static Result fromXContent(XContentParser parser) { return PARSER.apply(parser, null); } - // Immutable - private final Map> confusionMatrix; - private final long otherClassesCount; + /** List of actual classes. */ + private final List actualClasses; + /** Number of actual classes that were not included in the confusion matrix because there were too many of them. 
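For orientation, the reworked process(...) above turns the two aggregation steps into, per actual class, a document count for each predicted class plus an "other" bucket. The framework-free sketch below computes the same kind of per-class counts directly from (actual, predicted) label pairs; the class and method names are illustrative.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Builds a nested count map: actual class -> (predicted class -> doc count),
// i.e. the information the two aggregation steps extract from the index.
final class ConfusionMatrixSketch {

    static Map<String, Map<String, Long>> confusionMatrix(List<String[]> actualPredictedPairs) {
        Map<String, Map<String, Long>> matrix = new LinkedHashMap<>();
        for (String[] pair : actualPredictedPairs) {
            String actual = pair[0];
            String predicted = pair[1];
            matrix.computeIfAbsent(actual, k -> new LinkedHashMap<>())
                  .merge(predicted, 1L, Long::sum);
        }
        return matrix;
    }

    public static void main(String[] args) {
        List<String[]> pairs = List.of(
            new String[] { "dog", "dog" },
            new String[] { "dog", "cat" },
            new String[] { "cat", "cat" });
        // {dog={dog=1, cat=1}, cat={cat=1}}
        System.out.println(confusionMatrix(pairs));
    }
}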
*/ + private final long otherActualClassCount; - public Result(Map> confusionMatrix, long otherClassesCount) { - this.confusionMatrix = Collections.unmodifiableMap(Objects.requireNonNull(confusionMatrix)); - this.otherClassesCount = otherClassesCount; + public Result(List actualClasses, long otherActualClassCount) { + this.actualClasses = Collections.unmodifiableList(ExceptionsHelper.requireNonNull(actualClasses, CONFUSION_MATRIX)); + this.otherActualClassCount = requireNonNegative(otherActualClassCount, OTHER_ACTUAL_CLASS_COUNT); } public Result(StreamInput in) throws IOException { - this.confusionMatrix = Collections.unmodifiableMap( - in.readMap(StreamInput::readString, in2 -> in2.readMap(StreamInput::readString, StreamInput::readLong))); - this.otherClassesCount = in.readLong(); + this.actualClasses = Collections.unmodifiableList(in.readList(ActualClass::new)); + this.otherActualClassCount = in.readVLong(); } @Override @@ -233,28 +239,25 @@ public String getMetricName() { return NAME.getPreferredName(); } - public Map> getConfusionMatrix() { - return confusionMatrix; + public List getConfusionMatrix() { + return actualClasses; } - public long getOtherClassesCount() { - return otherClassesCount; + public long getOtherActualClassCount() { + return otherActualClassCount; } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap( - confusionMatrix, - StreamOutput::writeString, - (out2, row) -> out2.writeMap(row, StreamOutput::writeString, StreamOutput::writeLong)); - out.writeLong(otherClassesCount); + out.writeList(actualClasses); + out.writeVLong(otherActualClassCount); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(CONFUSION_MATRIX.getPreferredName(), confusionMatrix); - builder.field(OTHER_CLASSES_COUNT.getPreferredName(), otherClassesCount); + builder.field(CONFUSION_MATRIX.getPreferredName(), actualClasses); + builder.field(OTHER_ACTUAL_CLASS_COUNT.getPreferredName(), otherActualClassCount); builder.endObject(); return builder; } @@ -264,13 +267,163 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Result that = (Result) o; - return Objects.equals(this.confusionMatrix, that.confusionMatrix) - && this.otherClassesCount == that.otherClassesCount; + return Objects.equals(this.actualClasses, that.actualClasses) + && this.otherActualClassCount == that.otherActualClassCount; + } + + @Override + public int hashCode() { + return Objects.hash(actualClasses, otherActualClassCount); + } + } + + public static class ActualClass implements ToXContentObject, Writeable { + + private static final ParseField ACTUAL_CLASS = new ParseField("actual_class"); + private static final ParseField ACTUAL_CLASS_DOC_COUNT = new ParseField("actual_class_doc_count"); + private static final ParseField PREDICTED_CLASSES = new ParseField("predicted_classes"); + private static final ParseField OTHER_PREDICTED_CLASS_DOC_COUNT = new ParseField("other_predicted_class_doc_count"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>( + "multiclass_confusion_matrix_actual_class", + true, + a -> new ActualClass((String) a[0], (long) a[1], (List) a[2], (long) a[3])); + + static { + PARSER.declareString(constructorArg(), ACTUAL_CLASS); + PARSER.declareLong(constructorArg(), ACTUAL_CLASS_DOC_COUNT); + PARSER.declareObjectArray(constructorArg(), 
PredictedClass.PARSER, PREDICTED_CLASSES); + PARSER.declareLong(constructorArg(), OTHER_PREDICTED_CLASS_DOC_COUNT); + } + + /** Name of the actual class. */ + private final String actualClass; + /** Number of documents (examples) belonging to the {code actualClass} class. */ + private final long actualClassDocCount; + /** List of predicted classes. */ + private final List predictedClasses; + /** Number of documents that were not predicted as any of the {@code predictedClasses}. */ + private final long otherPredictedClassDocCount; + + public ActualClass( + String actualClass, long actualClassDocCount, List predictedClasses, long otherPredictedClassDocCount) { + this.actualClass = ExceptionsHelper.requireNonNull(actualClass, ACTUAL_CLASS); + this.actualClassDocCount = requireNonNegative(actualClassDocCount, ACTUAL_CLASS_DOC_COUNT); + this.predictedClasses = Collections.unmodifiableList(ExceptionsHelper.requireNonNull(predictedClasses, PREDICTED_CLASSES)); + this.otherPredictedClassDocCount = requireNonNegative(otherPredictedClassDocCount, OTHER_PREDICTED_CLASS_DOC_COUNT); + } + + public ActualClass(StreamInput in) throws IOException { + this.actualClass = in.readString(); + this.actualClassDocCount = in.readVLong(); + this.predictedClasses = Collections.unmodifiableList(in.readList(PredictedClass::new)); + this.otherPredictedClassDocCount = in.readVLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(actualClass); + out.writeVLong(actualClassDocCount); + out.writeList(predictedClasses); + out.writeVLong(otherPredictedClassDocCount); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ACTUAL_CLASS.getPreferredName(), actualClass); + builder.field(ACTUAL_CLASS_DOC_COUNT.getPreferredName(), actualClassDocCount); + builder.field(PREDICTED_CLASSES.getPreferredName(), predictedClasses); + builder.field(OTHER_PREDICTED_CLASS_DOC_COUNT.getPreferredName(), otherPredictedClassDocCount); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ActualClass that = (ActualClass) o; + return Objects.equals(this.actualClass, that.actualClass) + && this.actualClassDocCount == that.actualClassDocCount + && Objects.equals(this.predictedClasses, that.predictedClasses) + && this.otherPredictedClassDocCount == that.otherPredictedClassDocCount; + } + + @Override + public int hashCode() { + return Objects.hash(actualClass, actualClassDocCount, predictedClasses, otherPredictedClassDocCount); + } + } + + public static class PredictedClass implements ToXContentObject, Writeable { + + private static final ParseField PREDICTED_CLASS = new ParseField("predicted_class"); + private static final ParseField COUNT = new ParseField("count"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>( + "multiclass_confusion_matrix_predicted_class", true, a -> new PredictedClass((String) a[0], (long) a[1])); + + static { + PARSER.declareString(constructorArg(), PREDICTED_CLASS); + PARSER.declareLong(constructorArg(), COUNT); + } + + private final String predictedClass; + private final long count; + + public PredictedClass(String predictedClass, long count) { + this.predictedClass = ExceptionsHelper.requireNonNull(predictedClass, PREDICTED_CLASS); + this.count = 
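ActualClass above follows the usual immutable value-object pattern: validate in the constructor, wrap lists as unmodifiable, and derive equals/hashCode from every field. A plain-Java sketch of the same pattern, with illustrative names:

import java.util.Collections;
import java.util.List;
import java.util.Objects;

// Immutable value class: non-null validation, unmodifiable list, field-based equality.
final class ClassCount {
    private final String className;
    private final long count;
    private final List<String> examples;

    ClassCount(String className, long count, List<String> examples) {
        this.className = Objects.requireNonNull(className, "className");
        if (count < 0) {
            throw new IllegalArgumentException("count must be >= 0, was: " + count);
        }
        this.count = count;
        this.examples = Collections.unmodifiableList(Objects.requireNonNull(examples, "examples"));
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        ClassCount that = (ClassCount) o;
        return count == that.count
            && className.equals(that.className)
            && examples.equals(that.examples);
    }

    @Override
    public int hashCode() {
        return Objects.hash(className, count, examples);
    }

    public static void main(String[] args) {
        ClassCount a = new ClassCount("dog", 30L, List.of("husky"));
        ClassCount b = new ClassCount("dog", 30L, List.of("husky"));
        System.out.println(a.equals(b)); // true
    }
}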
requireNonNegative(count, COUNT); + } + + public PredictedClass(StreamInput in) throws IOException { + this.predictedClass = in.readString(); + this.count = in.readVLong(); + } + + public String getPredictedClass() { + return predictedClass; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(predictedClass); + out.writeVLong(count); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(PREDICTED_CLASS.getPreferredName(), predictedClass); + builder.field(COUNT.getPreferredName(), count); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PredictedClass that = (PredictedClass) o; + return Objects.equals(this.predictedClass, that.predictedClass) + && this.count == that.count; } @Override public int hashCode() { - return Objects.hash(confusionMatrix, otherClassesCount); + return Objects.hash(predictedClass, count); + } + } + + private static long requireNonNegative(long value, ParseField field) { + if (value < 0) { + throw ExceptionsHelper.serverError("[" + field.getPreferredName() + "] must be >= 0, was: " + value); } + return value; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java index ae31966a34712..48aa1d4024be5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java @@ -242,7 +242,7 @@ public static String getKeyStoreType(Setting> setting, Settings return setting.get(settings).orElseGet(() -> inferKeyStoreType(path)); } - private static String inferKeyStoreType(String path) { + public static String inferKeyStoreType(String path) { String name = path == null ? 
"" : path.toLowerCase(Locale.ROOT); if (name.endsWith(".p12") || name.endsWith(".pfx") || name.endsWith(".pkcs12")) { return PKCS12_KEYSTORE_TYPE; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/MulticlassConfusionMatrixResultTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/MulticlassConfusionMatrixResultTests.java index 24b13d372d528..a2c30eaeb4979 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/MulticlassConfusionMatrixResultTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/MulticlassConfusionMatrixResultTests.java @@ -5,50 +5,53 @@ */ package org.elasticsearch.xpack.core.ml.dataframe.evaluation.classification; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.classification.MulticlassConfusionMatrix.ActualClass; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.classification.MulticlassConfusionMatrix.PredictedClass; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.classification.MulticlassConfusionMatrix.Result; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; -import java.util.Map; -import java.util.TreeMap; import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.Stream; -public class MulticlassConfusionMatrixResultTests extends AbstractSerializingTestCase { +import static org.hamcrest.Matchers.equalTo; - public static MulticlassConfusionMatrix.Result createRandom() { +public class MulticlassConfusionMatrixResultTests extends AbstractSerializingTestCase { + + public static Result createRandom() { int numClasses = randomIntBetween(2, 100); List classNames = Stream.generate(() -> randomAlphaOfLength(10)).limit(numClasses).collect(Collectors.toList()); - Map> confusionMatrix = new TreeMap<>(); + List actualClasses = new ArrayList<>(numClasses); for (int i = 0; i < numClasses; i++) { - Map row = new TreeMap<>(); - confusionMatrix.put(classNames.get(i), row); + List predictedClasses = new ArrayList<>(numClasses); for (int j = 0; j < numClasses; j++) { - if (randomBoolean()) { - row.put(classNames.get(i), randomNonNegativeLong()); - } + predictedClasses.add(new PredictedClass(classNames.get(j), randomNonNegativeLong())); } + actualClasses.add(new ActualClass(classNames.get(i), randomNonNegativeLong(), predictedClasses, randomNonNegativeLong())); } - long otherClassesCount = randomNonNegativeLong(); - return new MulticlassConfusionMatrix.Result(confusionMatrix, otherClassesCount); + return new Result(actualClasses, randomNonNegativeLong()); } @Override - protected MulticlassConfusionMatrix.Result doParseInstance(XContentParser parser) throws IOException { - return MulticlassConfusionMatrix.Result.fromXContent(parser); + protected Result doParseInstance(XContentParser parser) throws IOException { + return Result.fromXContent(parser); } @Override - protected MulticlassConfusionMatrix.Result createTestInstance() { + protected Result createTestInstance() { return createRandom(); } @Override - protected Writeable.Reader instanceReader() { - return MulticlassConfusionMatrix.Result::new; + 
protected Writeable.Reader instanceReader() { + return Result::new; } @Override @@ -61,4 +64,67 @@ protected Predicate getRandomFieldsExcludeFilter() { // allow unknown fields in the root of the object only return field -> !field.isEmpty(); } + + public void testConstructor_ValidationFailures() { + { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new Result(null, 0)); + assertThat(e.getMessage(), equalTo("[confusion_matrix] must not be null.")); + } + { + ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> new Result(Collections.emptyList(), -1)); + assertThat(e.status().getStatus(), equalTo(500)); + assertThat(e.getMessage(), equalTo("[other_actual_class_count] must be >= 0, was: -1")); + } + { + IllegalArgumentException e = + expectThrows( + IllegalArgumentException.class, + () -> new Result(Collections.singletonList(new ActualClass(null, 0, Collections.emptyList(), 0)), 0)); + assertThat(e.getMessage(), equalTo("[actual_class] must not be null.")); + } + { + ElasticsearchException e = + expectThrows( + ElasticsearchException.class, + () -> new Result(Collections.singletonList(new ActualClass("actual_class", -1, Collections.emptyList(), 0)), 0)); + assertThat(e.status().getStatus(), equalTo(500)); + assertThat(e.getMessage(), equalTo("[actual_class_doc_count] must be >= 0, was: -1")); + } + { + IllegalArgumentException e = + expectThrows( + IllegalArgumentException.class, + () -> new Result(Collections.singletonList(new ActualClass("actual_class", 0, null, 0)), 0)); + assertThat(e.getMessage(), equalTo("[predicted_classes] must not be null.")); + } + { + ElasticsearchException e = + expectThrows( + ElasticsearchException.class, + () -> new Result(Collections.singletonList(new ActualClass("actual_class", 0, Collections.emptyList(), -1)), 0)); + assertThat(e.status().getStatus(), equalTo(500)); + assertThat(e.getMessage(), equalTo("[other_predicted_class_doc_count] must be >= 0, was: -1")); + } + { + IllegalArgumentException e = + expectThrows( + IllegalArgumentException.class, + () -> new Result( + Collections.singletonList( + new ActualClass("actual_class", 0, Collections.singletonList(new PredictedClass(null, 0)), 0)), + 0)); + assertThat(e.getMessage(), equalTo("[predicted_class] must not be null.")); + } + { + ElasticsearchException e = + expectThrows( + ElasticsearchException.class, + () -> new Result( + Collections.singletonList( + new ActualClass("actual_class", 0, Collections.singletonList(new PredictedClass("predicted_class", -1)), 0)), + 0)); + assertThat(e.status().getStatus(), equalTo(500)); + assertThat(e.getMessage(), equalTo("[count] must be >= 0, was: -1")); + } + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/MulticlassConfusionMatrixTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/MulticlassConfusionMatrixTests.java index a4e989bce898a..0b4f724549e1a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/MulticlassConfusionMatrixTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/MulticlassConfusionMatrixTests.java @@ -14,10 +14,11 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.Cardinality; import org.elasticsearch.test.AbstractSerializingTestCase; +import 
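The new testConstructor_ValidationFailures above relies on expectThrows to capture an exception and assert on its message. A minimal framework-free equivalent of that helper is sketched below; the Throws class name is illustrative.

// Runs the action, asserts it throws the expected exception type, and returns the exception
// so the caller can assert on its message, the same shape as expectThrows in the tests above.
final class Throws {

    static <T extends Throwable> T expectThrows(Class<T> expected, Runnable action) {
        try {
            action.run();
        } catch (Throwable t) {
            if (expected.isInstance(t)) {
                return expected.cast(t);
            }
            throw new AssertionError("expected " + expected.getName() + " but got " + t.getClass().getName(), t);
        }
        throw new AssertionError("expected " + expected.getName() + " but nothing was thrown");
    }

    public static void main(String[] args) {
        IllegalArgumentException e =
            expectThrows(IllegalArgumentException.class, () -> { throw new IllegalArgumentException("boom"); });
        System.out.println(e.getMessage()); // boom
    }
}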
org.elasticsearch.xpack.core.ml.dataframe.evaluation.classification.MulticlassConfusionMatrix.ActualClass; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.classification.MulticlassConfusionMatrix.PredictedClass; import java.io.IOException; import java.util.List; -import java.util.Map; import java.util.Optional; import static org.hamcrest.Matchers.empty; @@ -85,20 +86,21 @@ public void testEvaluate() { mockTermsBucket("dog", new Aggregations(List.of())), mockTermsBucket("cat", new Aggregations(List.of()))), 0L), - mockTerms( + mockFilters( "multiclass_confusion_matrix_step_2_by_actual_class", List.of( - mockTermsBucket( + mockFiltersBucket( "dog", + 30, new Aggregations(List.of(mockFilters( "multiclass_confusion_matrix_step_2_by_predicted_class", List.of(mockFiltersBucket("cat", 10L), mockFiltersBucket("dog", 20L), mockFiltersBucket("_other_", 0L)))))), - mockTermsBucket( + mockFiltersBucket( "cat", + 70, new Aggregations(List.of(mockFilters( "multiclass_confusion_matrix_step_2_by_predicted_class", - List.of(mockFiltersBucket("cat", 30L), mockFiltersBucket("dog", 40L), mockFiltersBucket("_other_", 0L))))))), - 0L), + List.of(mockFiltersBucket("cat", 30L), mockFiltersBucket("dog", 40L), mockFiltersBucket("_other_", 0L)))))))), mockCardinality("multiclass_confusion_matrix_step_2_cardinality_of_actual_class", 2L))); MulticlassConfusionMatrix confusionMatrix = new MulticlassConfusionMatrix(2); @@ -109,8 +111,11 @@ public void testEvaluate() { assertThat(result.getMetricName(), equalTo("multiclass_confusion_matrix")); assertThat( result.getConfusionMatrix(), - equalTo(Map.of("dog", Map.of("cat", 10L, "dog", 20L), "cat", Map.of("cat", 30L, "dog", 40L)))); - assertThat(result.getOtherClassesCount(), equalTo(0L)); + equalTo( + List.of( + new ActualClass("dog", 30, List.of(new PredictedClass("cat", 10L), new PredictedClass("dog", 20L)), 0), + new ActualClass("cat", 70, List.of(new PredictedClass("cat", 30L), new PredictedClass("dog", 40L)), 0)))); + assertThat(result.getOtherActualClassCount(), equalTo(0L)); } public void testEvaluate_OtherClassesCountGreaterThanZero() { @@ -121,20 +126,21 @@ public void testEvaluate_OtherClassesCountGreaterThanZero() { mockTermsBucket("dog", new Aggregations(List.of())), mockTermsBucket("cat", new Aggregations(List.of()))), 100L), - mockTerms( + mockFilters( "multiclass_confusion_matrix_step_2_by_actual_class", List.of( - mockTermsBucket( + mockFiltersBucket( "dog", + 30, new Aggregations(List.of(mockFilters( "multiclass_confusion_matrix_step_2_by_predicted_class", List.of(mockFiltersBucket("cat", 10L), mockFiltersBucket("dog", 20L), mockFiltersBucket("_other_", 0L)))))), - mockTermsBucket( + mockFiltersBucket( "cat", + 85, new Aggregations(List.of(mockFilters( "multiclass_confusion_matrix_step_2_by_predicted_class", - List.of(mockFiltersBucket("cat", 30L), mockFiltersBucket("dog", 40L), mockFiltersBucket("_other_", 15L))))))), - 100L), + List.of(mockFiltersBucket("cat", 30L), mockFiltersBucket("dog", 40L), mockFiltersBucket("_other_", 15L)))))))), mockCardinality("multiclass_confusion_matrix_step_2_cardinality_of_actual_class", 5L))); MulticlassConfusionMatrix confusionMatrix = new MulticlassConfusionMatrix(2); @@ -145,8 +151,11 @@ public void testEvaluate_OtherClassesCountGreaterThanZero() { assertThat(result.getMetricName(), equalTo("multiclass_confusion_matrix")); assertThat( result.getConfusionMatrix(), - equalTo(Map.of("dog", Map.of("cat", 10L, "dog", 20L), "cat", Map.of("cat", 30L, "dog", 40L, "_other_", 15L)))); - 
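The test changes above stub Filters buckets with Mockito, returning only the key, doc count and sub-aggregations that the code under test reads. A self-contained sketch of that stubbing style, using a hypothetical Bucket interface and assuming mockito-core on the classpath:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

// Stubs only the accessors the code under test reads, as the mockFiltersBucket helpers above do.
final class BucketMockSketch {

    interface Bucket {
        String getKeyAsString();
        long getDocCount();
    }

    static Bucket mockBucket(String key, long docCount) {
        Bucket bucket = mock(Bucket.class);
        when(bucket.getKeyAsString()).thenReturn(key);
        when(bucket.getDocCount()).thenReturn(docCount);
        return bucket;
    }

    public static void main(String[] args) {
        Bucket dog = mockBucket("dog", 30L);
        System.out.println(dog.getKeyAsString() + " -> " + dog.getDocCount()); // dog -> 30
    }
}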
assertThat(result.getOtherClassesCount(), equalTo(3L)); + equalTo( + List.of( + new ActualClass("dog", 30, List.of(new PredictedClass("cat", 10L), new PredictedClass("dog", 20L)), 0), + new ActualClass("cat", 85, List.of(new PredictedClass("cat", 30L), new PredictedClass("dog", 40L)), 15)))); + assertThat(result.getOtherActualClassCount(), equalTo(3L)); } private static Terms mockTerms(String name, List buckets, long sumOfOtherDocCounts) { @@ -157,9 +166,9 @@ private static Terms mockTerms(String name, List buckets, long sum return aggregation; } - private static Terms.Bucket mockTermsBucket(String actualClass, Aggregations subAggs) { + private static Terms.Bucket mockTermsBucket(String key, Aggregations subAggs) { Terms.Bucket bucket = mock(Terms.Bucket.class); - when(bucket.getKeyAsString()).thenReturn(actualClass); + when(bucket.getKeyAsString()).thenReturn(key); when(bucket.getAggregations()).thenReturn(subAggs); return bucket; } @@ -171,9 +180,15 @@ private static Filters mockFilters(String name, List buckets) { return aggregation; } - private static Filters.Bucket mockFiltersBucket(String predictedClass, long docCount) { + private static Filters.Bucket mockFiltersBucket(String key, long docCount, Aggregations subAggs) { + Filters.Bucket bucket = mockFiltersBucket(key, docCount); + when(bucket.getAggregations()).thenReturn(subAggs); + return bucket; + } + + private static Filters.Bucket mockFiltersBucket(String key, long docCount) { Filters.Bucket bucket = mock(Filters.Bucket.class); - when(bucket.getKeyAsString()).thenReturn(predictedClass); + when(bucket.getKeyAsString()).thenReturn(key); when(bucket.getDocCount()).thenReturn(docCount); return bucket; } diff --git a/x-pack/plugin/enrich/qa/common/src/main/java/org/elasticsearch/test/enrich/CommonEnrichRestTestCase.java b/x-pack/plugin/enrich/qa/common/src/main/java/org/elasticsearch/test/enrich/CommonEnrichRestTestCase.java index 06e15730788bf..75d7d18b45d30 100644 --- a/x-pack/plugin/enrich/qa/common/src/main/java/org/elasticsearch/test/enrich/CommonEnrichRestTestCase.java +++ b/x-pack/plugin/enrich/qa/common/src/main/java/org/elasticsearch/test/enrich/CommonEnrichRestTestCase.java @@ -10,6 +10,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -38,6 +39,15 @@ public void deletePolicies() throws Exception { for (Map entry: policies) { client().performRequest(new Request("DELETE", "/_enrich/policy/" + XContentMapValues.extractValue("config.match.name", entry))); + + List sourceIndices = (List) XContentMapValues.extractValue("config.match.indices", entry); + for (Object sourceIndex : sourceIndices) { + try { + client().performRequest(new Request("DELETE", "/" + sourceIndex)); + } catch (ResponseException e) { + // and that is ok + } + } } } @@ -48,6 +58,8 @@ protected boolean preserveIndicesUponCompletion() { } private void setupGenericLifecycleTest(boolean deletePipeilne) throws Exception { + // Create source index: + createSourceIndex("my-source-index"); // Create the policy: Request putPolicyRequest = new Request("PUT", "/_enrich/policy/my_policy"); putPolicyRequest.setJsonEntity(generatePolicySource("my-source-index")); @@ -99,6 +111,7 @@ public void testBasicFlow() throws Exception { } public void 
testImmutablePolicy() throws IOException { + createSourceIndex("my-source-index"); Request putPolicyRequest = new Request("PUT", "/_enrich/policy/my_policy"); putPolicyRequest.setJsonEntity(generatePolicySource("my-source-index")); assertOK(client().performRequest(putPolicyRequest)); @@ -108,6 +121,7 @@ public void testImmutablePolicy() throws IOException { } public void testDeleteIsCaseSensitive() throws Exception { + createSourceIndex("my-source-index"); Request putPolicyRequest = new Request("PUT", "/_enrich/policy/my_policy"); putPolicyRequest.setJsonEntity(generatePolicySource("my-source-index")); assertOK(client().performRequest(putPolicyRequest)); @@ -155,6 +169,20 @@ public static String generatePolicySource(String index) throws IOException { return Strings.toString(source); } + public static void createSourceIndex(String index) throws IOException { + String mapping = createSourceIndexMapping(); + createIndex(index, Settings.EMPTY, mapping); + } + + public static String createSourceIndexMapping() { + return "\"properties\":" + + "{\"host\": {\"type\":\"keyword\"}," + + "\"globalRank\":{\"type\":\"keyword\"}," + + "\"tldRank\":{\"type\":\"keyword\"}," + + "\"tld\":{\"type\":\"keyword\"}" + + "}"; + } + private static Map toMap(Response response) throws IOException { return toMap(EntityUtils.toString(response.getEntity())); } diff --git a/x-pack/plugin/enrich/qa/rest-with-security/src/test/java/org/elasticsearch/xpack/enrich/EnrichSecurityIT.java b/x-pack/plugin/enrich/qa/rest-with-security/src/test/java/org/elasticsearch/xpack/enrich/EnrichSecurityIT.java index 0f7838c4a45c5..7ea64a121c32b 100644 --- a/x-pack/plugin/enrich/qa/rest-with-security/src/test/java/org/elasticsearch/xpack/enrich/EnrichSecurityIT.java +++ b/x-pack/plugin/enrich/qa/rest-with-security/src/test/java/org/elasticsearch/xpack/enrich/EnrichSecurityIT.java @@ -36,6 +36,9 @@ protected Settings restAdminSettings() { public void testInsufficientPermissionsOnNonExistentIndex() throws Exception { // This test is here because it requires a valid user that has permission to execute policy PUTs but should fail if the user // does not have access to read the backing indices used to enrich the data. 
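Several tests above now create the source index, with keyword mappings, before putting a policy. Below is a sketch of doing the same through the low-level REST client; the host, index name and field names are placeholders taken from the helper above, and the example assumes the elasticsearch-rest-client dependency.

import java.io.IOException;

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

// Creates a source index with keyword mappings before a policy references it,
// mirroring createSourceIndex/createSourceIndexMapping above.
final class CreateSourceIndexSketch {
    public static void main(String[] args) throws IOException {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("PUT", "/my-source-index");
            request.setJsonEntity(
                "{ \"mappings\": { \"properties\": {"
                    + "\"host\": {\"type\": \"keyword\"},"
                    + "\"globalRank\": {\"type\": \"keyword\"},"
                    + "\"tldRank\": {\"type\": \"keyword\"},"
                    + "\"tld\": {\"type\": \"keyword\"}"
                    + "} } }");
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}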
+ Request request = new Request("PUT", "/some-other-index"); + request.setJsonEntity("{\n \"mappings\" : {" + createSourceIndexMapping() + "} }"); + adminClient().performRequest(request); Request putPolicyRequest = new Request("PUT", "/_enrich/policy/my_policy"); putPolicyRequest.setJsonEntity(generatePolicySource("some-other-index")); ResponseException exc = expectThrows(ResponseException.class, () -> client().performRequest(putPolicyRequest)); diff --git a/x-pack/plugin/enrich/qa/rest/src/test/resources/rest-api-spec/test/enrich/10_basic.yml b/x-pack/plugin/enrich/qa/rest/src/test/resources/rest-api-spec/test/enrich/10_basic.yml index 2a837d9c3b645..e580b188c9ba4 100644 --- a/x-pack/plugin/enrich/qa/rest/src/test/resources/rest-api-spec/test/enrich/10_basic.yml +++ b/x-pack/plugin/enrich/qa/rest/src/test/resources/rest-api-spec/test/enrich/10_basic.yml @@ -1,6 +1,20 @@ --- "Test enrich crud apis": + - do: + indices.create: + index: bar + body: + mappings: + properties: + baz: + type: keyword + a: + type: keyword + b: + type: keyword + - is_true: acknowledged + - do: enrich.put_policy: name: policy-crud diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutor.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutor.java index 361f4f6b285cb..916fe8afd491b 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutor.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutor.java @@ -29,6 +29,8 @@ public class EnrichPolicyExecutor { + public static final String TASK_ACTION = "policy_execution"; + private final ClusterService clusterService; private final Client client; private final TaskManager taskManager; @@ -165,7 +167,7 @@ private Task runPolicy(ExecuteEnrichPolicyAction.Request request, EnrichPolicy p private Task runPolicyTask(final ExecuteEnrichPolicyAction.Request request, EnrichPolicy policy, BiConsumer onResponse, BiConsumer onFailure) { - Task asyncTask = taskManager.register("enrich", "policy_execution", new TaskAwareRequest() { + Task asyncTask = taskManager.register("enrich", TASK_ACTION, new TaskAwareRequest() { @Override public void setParentTask(TaskId taskId) { request.setParentTask(taskId); diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceService.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceService.java index 0c734083755b6..594f0a264c4f1 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceService.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceService.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.cluster.LocalNodeMasterListener; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; @@ -32,6 +33,8 @@ import java.util.Map; import java.util.concurrent.Semaphore; +import static org.elasticsearch.xpack.core.ClientHelper.ENRICH_ORIGIN; + public class EnrichPolicyMaintenanceService implements LocalNodeMasterListener { private static final Logger logger = LogManager.getLogger(EnrichPolicyMaintenanceService.class); @@ -52,7 +55,7 @@ public class 
EnrichPolicyMaintenanceService implements LocalNodeMasterListener { EnrichPolicyMaintenanceService(Settings settings, Client client, ClusterService clusterService, ThreadPool threadPool, EnrichPolicyLocks enrichPolicyLocks) { this.settings = settings; - this.client = client; + this.client = new OriginSettingClient(client, ENRICH_ORIGIN); this.clusterService = clusterService; this.threadPool = threadPool; this.enrichPolicyLocks = enrichPolicyLocks; diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java index cbd18755718cc..409438f7e1fed 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java @@ -134,27 +134,34 @@ private void validateMappings(final GetIndexResponse getIndexResponse) { logger.debug("Policy [{}]: Validating [{}] source mappings", policyName, sourceIndices); for (String sourceIndex : sourceIndices) { Map mapping = getMappings(getIndexResponse, sourceIndex); - // First ensure mapping is set - if (mapping.get("properties") == null) { - throw new ElasticsearchException( - "Enrich policy execution for [{}] failed. Could not read mapping for source [{}] included by pattern [{}]", - policyName, sourceIndex, policy.getIndices()); - } - // Validate the key and values - try { - validateField(mapping, policy.getMatchField(), true); - for (String valueFieldName : policy.getEnrichFields()) { - validateField(mapping, valueFieldName, false); - } - } catch (ElasticsearchException e) { - throw new ElasticsearchException( - "Enrich policy execution for [{}] failed while validating field mappings for index [{}]", - e, policyName, sourceIndex); + validateMappings(policyName, policy, sourceIndex, mapping); + } + } + + static void validateMappings(final String policyName, + final EnrichPolicy policy, + final String sourceIndex, + final Map mapping) { + // First ensure mapping is set + if (mapping.get("properties") == null) { + throw new ElasticsearchException( + "Enrich policy execution for [{}] failed. 
Could not read mapping for source [{}] included by pattern [{}]", + policyName, sourceIndex, policy.getIndices()); + } + // Validate the key and values + try { + validateField(mapping, policy.getMatchField(), true); + for (String valueFieldName : policy.getEnrichFields()) { + validateField(mapping, valueFieldName, false); } + } catch (ElasticsearchException e) { + throw new ElasticsearchException( + "Enrich policy execution for [{}] failed while validating field mappings for index [{}]", + e, policyName, sourceIndex); } } - private void validateField(Map properties, String fieldName, boolean fieldRequired) { + private static void validateField(Map properties, String fieldName, boolean fieldRequired) { assert Strings.isEmpty(fieldName) == false: "Field name cannot be null or empty"; String[] fieldParts = fieldName.split("\\."); StringBuilder parent = new StringBuilder(); diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichStore.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichStore.java index cab3f6994b490..ebc28b0be180c 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichStore.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichStore.java @@ -8,8 +8,12 @@ import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; import org.elasticsearch.cluster.service.ClusterService; @@ -39,7 +43,11 @@ private EnrichStore() {} * @param policy The policy to store * @param handler The handler that gets invoked if policy has been stored or a failure has occurred. */ - public static void putPolicy(String name, EnrichPolicy policy, ClusterService clusterService, Consumer handler) { + public static void putPolicy(final String name, + final EnrichPolicy policy, + final ClusterService clusterService, + final IndexNameExpressionResolver indexNameExpressionResolver, + final Consumer handler) { assert clusterService.localNode().isMasterNode(); if (Strings.isNullOrEmpty(name)) { @@ -75,6 +83,22 @@ public static void putPolicy(String name, EnrichPolicy policy, ClusterService cl finalPolicy = policy; } updateClusterState(clusterService, handler, current -> { + for (String indexExpression : finalPolicy.getIndices()) { + // indices field in policy can contain wildcards, aliases etc. 
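validateMappings above becomes static so that EnrichStore can run the same check at put-policy time. The core of the check is that the mapping must have a properties section and that every match/enrich field must exist in it. A framework-free sketch with illustrative names, representing the mapping as a plain Map (the real validateField also walks dotted field paths, which this sketch omits):

import java.util.List;
import java.util.Map;

// Checks that a mapping has a "properties" section and that each required field is present,
// the same shape of validation the static validateMappings above performs.
final class MappingCheckSketch {

    @SuppressWarnings("unchecked")
    static void validate(Map<String, Object> mapping, List<String> requiredFields) {
        Object properties = mapping.get("properties");
        if (properties == null) {
            throw new IllegalArgumentException("mapping has no properties");
        }
        Map<String, Object> fields = (Map<String, Object>) properties;
        for (String required : requiredFields) {
            if (fields.containsKey(required) == false) {
                throw new IllegalArgumentException("required field [" + required + "] is missing from mapping");
            }
        }
    }

    public static void main(String[] args) {
        Map<String, Object> mapping = Map.of("properties", Map.of("host", Map.of("type", "keyword")));
        validate(mapping, List.of("host"));            // passes
        try {
            validate(mapping, List.of("tld"));
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());        // required field [tld] is missing from mapping
        }
    }
}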
+ String[] concreteIndices = + indexNameExpressionResolver.concreteIndexNames(current, IndicesOptions.strictExpandOpen(), indexExpression); + for (String concreteIndex : concreteIndices) { + IndexMetaData imd = current.getMetaData().index(concreteIndex); + assert imd != null; + MappingMetaData mapping = imd.mapping(); + if (mapping == null) { + throw new IllegalArgumentException("source index [" + concreteIndex + "] has no mapping"); + } + Map mappingSource = mapping.getSourceAsMap(); + EnrichPolicyRunner.validateMappings(name, finalPolicy, concreteIndex, mappingSource); + } + } + final Map policies = getPolicies(current); if (policies.get(name) != null) { throw new ResourceAlreadyExistsException("policy [{}] already exists", name); diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportEnrichStatsAction.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportEnrichStatsAction.java index 30c84a127fcf6..b57c231effcce 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportEnrichStatsAction.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportEnrichStatsAction.java @@ -23,7 +23,7 @@ import org.elasticsearch.xpack.core.enrich.action.EnrichStatsAction; import org.elasticsearch.xpack.core.enrich.action.EnrichStatsAction.Response.CoordinatorStats; import org.elasticsearch.xpack.core.enrich.action.EnrichStatsAction.Response.ExecutingPolicy; -import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyAction; +import org.elasticsearch.xpack.enrich.EnrichPolicyExecutor; import java.io.IOException; import java.util.Comparator; @@ -80,7 +80,7 @@ protected void masterOperation(Task task, .sorted(Comparator.comparing(CoordinatorStats::getNodeId)) .collect(Collectors.toList()); List policyExecutionTasks = taskManager.getTasks().values().stream() - .filter(t -> t.getAction().equals(ExecuteEnrichPolicyAction.NAME)) + .filter(t -> t.getAction().equals(EnrichPolicyExecutor.TASK_ACTION)) .map(t -> t.taskInfo(clusterService.localNode().getId(), true)) .map(t -> new ExecutingPolicy(t.getDescription(), t)) .sorted(Comparator.comparing(ExecutingPolicy::getName)) diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportExecuteEnrichPolicyAction.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportExecuteEnrichPolicyAction.java index 7402c2daef6e7..23d756f3a83a4 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportExecuteEnrichPolicyAction.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportExecuteEnrichPolicyAction.java @@ -62,6 +62,12 @@ protected ExecuteEnrichPolicyAction.Response read(StreamInput in) throws IOExcep @Override protected void masterOperation(Task task, ExecuteEnrichPolicyAction.Request request, ClusterState state, ActionListener listener) { + if (state.getNodes().getIngestNodes().isEmpty()) { + // if we don't fail here then reindex will fail with a more complicated error. 
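putPolicy above now resolves each index expression, which may contain wildcards or aliases, to concrete indices and validates every resolved mapping before the policy is stored. The sketch below shows the resolve-then-fail-fast loop with a simple prefix wildcard matcher standing in for IndexNameExpressionResolver; all names are illustrative.

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

// Expands "users-*"-style expressions against a known set of index names and fails fast
// when an expression matches nothing, echoing the validation now done at put-policy time.
final class ResolveIndicesSketch {

    static List<String> resolve(Set<String> existingIndices, String expression) {
        List<String> matches;
        if (expression.endsWith("*")) {
            String prefix = expression.substring(0, expression.length() - 1);
            matches = existingIndices.stream()
                .filter(name -> name.startsWith(prefix))
                .sorted()
                .collect(Collectors.toList());
        } else {
            matches = existingIndices.contains(expression) ? List.of(expression) : List.of();
        }
        if (matches.isEmpty()) {
            throw new IllegalArgumentException("no such index [" + expression + "]");
        }
        return matches;
    }

    public static void main(String[] args) {
        Set<String> indices = Set.of("users-1", "users-2", "tweets");
        System.out.println(resolve(indices, "users-*")); // [users-1, users-2]
        System.out.println(resolve(indices, "tweets"));  // [tweets]
    }
}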
+ // (EnrichPolicyRunner uses a pipeline with reindex) + throw new IllegalStateException("no ingest nodes in this cluster"); + } + if (request.isWaitForCompletion()) { executor.runPolicy(request, new ActionListener<>() { @Override diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportPutEnrichPolicyAction.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportPutEnrichPolicyAction.java index ec1d80a355d5f..2753172469c6a 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportPutEnrichPolicyAction.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportPutEnrichPolicyAction.java @@ -103,7 +103,7 @@ protected void masterOperation(Task task, PutEnrichPolicyAction.Request request, } private void putPolicy(PutEnrichPolicyAction.Request request, ActionListener listener ) { - EnrichStore.putPolicy(request.getName(), request.getPolicy(), clusterService, e -> { + EnrichStore.putPolicy(request.getName(), request.getPolicy(), clusterService, indexNameExpressionResolver, e -> { if (e == null) { listener.onResponse(new AcknowledgedResponse(true)); } else { diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/AbstractEnrichTestCase.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/AbstractEnrichTestCase.java index 0b8ddd0288008..7a5a3aef8c883 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/AbstractEnrichTestCase.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/AbstractEnrichTestCase.java @@ -5,6 +5,10 @@ */ package org.elasticsearch.xpack.enrich; +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -24,9 +28,13 @@ protected Collection> getPlugins() { protected AtomicReference saveEnrichPolicy(String name, EnrichPolicy policy, ClusterService clusterService) throws InterruptedException { + if (policy != null) { + createSourceIndices(policy); + } + IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(); CountDownLatch latch = new CountDownLatch(1); AtomicReference error = new AtomicReference<>(); - EnrichStore.putPolicy(name, policy, clusterService, e -> { + EnrichStore.putPolicy(name, policy, clusterService, resolver, e -> { error.set(e); latch.countDown(); }); @@ -46,4 +54,20 @@ protected void deleteEnrichPolicy(String name, ClusterService clusterService) th throw error.get(); } } + + protected void createSourceIndices(EnrichPolicy policy) { + createSourceIndices(client(), policy); + } + + protected static void createSourceIndices(Client client, EnrichPolicy policy) { + for (String sourceIndex : policy.getIndices()) { + CreateIndexRequest createIndexRequest = new CreateIndexRequest(sourceIndex); + createIndexRequest.mapping("_doc", policy.getMatchField(), "type=keyword"); + try { + client.admin().indices().create(createIndexRequest).actionGet(); + } catch (ResourceAlreadyExistsException e) { + // and that is okay + } + } + } } diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichMultiNodeIT.java 
b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichMultiNodeIT.java index 1b19958ee2ad5..63b92cea674b6 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichMultiNodeIT.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichMultiNodeIT.java @@ -119,6 +119,19 @@ public void testEnrichDedicatedIngestNode() { enrich(keys, ingestOnlyNode); } + public void testEnrichNoIngestNodes() { + Settings settings = Settings.builder() + .put(Node.NODE_MASTER_SETTING.getKey(), true) + .put(Node.NODE_DATA_SETTING.getKey(), true) + .put(Node.NODE_INGEST_SETTING.getKey(), false) + .build(); + internalCluster().startNode(settings); + + createSourceIndex(64); + Exception e = expectThrows(IllegalStateException.class, EnrichMultiNodeIT::createAndExecutePolicy); + assertThat(e.getMessage(), equalTo("no ingest nodes in this cluster")); + } + private static void enrich(List keys, String coordinatingNode) { int numDocs = 256; BulkRequest bulkRequest = new BulkRequest("my-index"); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceServiceTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceServiceTests.java index fc5e77e377971..ad984f92f014b 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceServiceTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceServiceTests.java @@ -10,6 +10,7 @@ import java.util.Collection; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Phaser; @@ -23,6 +24,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -33,6 +35,7 @@ import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import static org.elasticsearch.xpack.core.enrich.EnrichPolicy.MATCH_TYPE; +import static org.elasticsearch.xpack.enrich.AbstractEnrichTestCase.createSourceIndices; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; @@ -113,12 +116,15 @@ private EnrichPolicy randomPolicy() { for (int i = 0; i < randomIntBetween(1, 3); i++) { enrichKeys.add(randomAlphaOfLength(10)); } - return new EnrichPolicy(MATCH_TYPE, null, List.of(randomAlphaOfLength(10)), randomAlphaOfLength(10), enrichKeys); + String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + return new EnrichPolicy(MATCH_TYPE, null, List.of(sourceIndex), randomAlphaOfLength(10), enrichKeys); } private void addPolicy(String policyName, EnrichPolicy policy) throws InterruptedException { + IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(); + createSourceIndices(client(), policy); doSyncronously((clusterService, exceptionConsumer) -> - EnrichStore.putPolicy(policyName, policy, clusterService, exceptionConsumer)); + EnrichStore.putPolicy(policyName, policy, clusterService, resolver, exceptionConsumer)); } private void removePolicy(String policyName) throws InterruptedException { diff --git 
a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyTests.java index 3c87867f9dfe5..645bd0277de61 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyTests.java @@ -22,6 +22,8 @@ import java.io.IOException; import java.io.UncheckedIOException; import java.util.Arrays; +import java.util.Locale; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; @@ -59,7 +61,9 @@ public static EnrichPolicy randomEnrichPolicy(XContentType xContentType) { return new EnrichPolicy( randomFrom(EnrichPolicy.SUPPORTED_POLICY_TYPES), randomBoolean() ? querySource : null, - Arrays.asList(generateRandomStringArray(8, 4, false, false)), + Arrays.stream(generateRandomStringArray(8, 4, false, false)) + .map(s -> s.toLowerCase(Locale.ROOT)) + .collect(Collectors.toList()), randomAlphaOfLength(4), Arrays.asList(generateRandomStringArray(8, 4, false, false)) ); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyUpdateTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyUpdateTests.java index 91cb2776bad93..5fff3c12e2c3a 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyUpdateTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyUpdateTests.java @@ -24,6 +24,7 @@ import java.util.List; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.enrich.AbstractEnrichTestCase.createSourceIndices; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -40,6 +41,7 @@ public void testUpdatePolicyOnly() { EnrichPolicy instance1 = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("index"), "key1", List.of("field1")); + createSourceIndices(client(), instance1); PutEnrichPolicyAction.Request putPolicyRequest = new PutEnrichPolicyAction.Request("my_policy", instance1); assertAcked(client().execute(PutEnrichPolicyAction.INSTANCE, putPolicyRequest).actionGet()); assertThat("Execute failed", client().execute(ExecuteEnrichPolicyAction.INSTANCE, @@ -53,7 +55,8 @@ public void testUpdatePolicyOnly() { assertThat(pipelineInstance1.getProcessors().get(0), instanceOf(MatchProcessor.class)); EnrichPolicy instance2 = - new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("index"), "key2", List.of("field2")); + new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("index2"), "key2", List.of("field2")); + createSourceIndices(client(), instance2); ResourceAlreadyExistsException exc = expectThrows(ResourceAlreadyExistsException.class, () -> client().execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request("my_policy", instance2)).actionGet()); assertTrue(exc.getMessage().contains("policy [my_policy] already exists")); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichRestartIT.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichRestartIT.java index 5abd83893a606..2462534308b38 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichRestartIT.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichRestartIT.java @@ -16,6 
+16,7 @@ import java.util.List; import java.util.Optional; +import static org.elasticsearch.xpack.enrich.AbstractEnrichTestCase.createSourceIndices; import static org.elasticsearch.xpack.enrich.EnrichMultiNodeIT.DECORATE_FIELDS; import static org.elasticsearch.xpack.enrich.EnrichMultiNodeIT.MATCH_FIELD; import static org.elasticsearch.xpack.enrich.EnrichMultiNodeIT.POLICY_NAME; @@ -37,6 +38,7 @@ public void testRestart() throws Exception { EnrichPolicy enrichPolicy = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of(SOURCE_INDEX_NAME), MATCH_FIELD, List.of(DECORATE_FIELDS)); + createSourceIndices(client(), enrichPolicy); for (int i = 0; i < numPolicies; i++) { String policyName = POLICY_NAME + i; PutEnrichPolicyAction.Request request = new PutEnrichPolicyAction.Request(policyName, enrichPolicy); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java index 622c1fb164f4a..293fc7883e19b 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java @@ -32,7 +32,7 @@ public class TransportDeleteEnrichPolicyActionTests extends AbstractEnrichTestCase { @After - private void cleanupPolicy() { + public void cleanupPolicy() { ClusterService clusterService = getInstanceFromNode(ClusterService.class); String name = "my-policy"; @@ -57,7 +57,7 @@ public void testDeletePolicyDoesNotExistUnlocksPolicy() throws InterruptedExcept final TransportDeleteEnrichPolicyAction transportAction = node().injector().getInstance(TransportDeleteEnrichPolicyAction.class); ActionTestUtils.execute(transportAction, null, new DeleteEnrichPolicyAction.Request(fakeId), - new ActionListener() { + new ActionListener<>() { @Override public void onResponse(AcknowledgedResponse acknowledgedResponse) { fail(); @@ -91,7 +91,7 @@ public void testDeleteWithoutIndex() throws Exception { final TransportDeleteEnrichPolicyAction transportAction = node().injector().getInstance(TransportDeleteEnrichPolicyAction.class); ActionTestUtils.execute(transportAction, null, new DeleteEnrichPolicyAction.Request(name), - new ActionListener() { + new ActionListener<>() { @Override public void onResponse(AcknowledgedResponse acknowledgedResponse) { reference.set(acknowledgedResponse); @@ -132,7 +132,7 @@ public void testDeleteIsNotLocked() throws Exception { final TransportDeleteEnrichPolicyAction transportAction = node().injector().getInstance(TransportDeleteEnrichPolicyAction.class); ActionTestUtils.execute(transportAction, null, new DeleteEnrichPolicyAction.Request(name), - new ActionListener() { + new ActionListener<>() { @Override public void onResponse(AcknowledgedResponse acknowledgedResponse) { reference.set(acknowledgedResponse); @@ -179,7 +179,7 @@ public void testDeleteLocked() throws InterruptedException { final AtomicReference reference = new AtomicReference<>(); ActionTestUtils.execute(transportAction, null, new DeleteEnrichPolicyAction.Request(name), - new ActionListener() { + new ActionListener<>() { @Override public void onResponse(AcknowledgedResponse acknowledgedResponse) { fail(); @@ -205,7 +205,7 @@ public void onFailure(final Exception e) { ActionTestUtils.execute(transportAction, null, new DeleteEnrichPolicyAction.Request(name), - new 
ActionListener() { + new ActionListener<>() { @Override public void onResponse(AcknowledgedResponse acknowledgedResponse) { reference.set(acknowledgedResponse); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportGetEnrichPolicyActionTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportGetEnrichPolicyActionTests.java index 31212218c771b..e6470b87f1225 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportGetEnrichPolicyActionTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportGetEnrichPolicyActionTests.java @@ -27,7 +27,7 @@ public class TransportGetEnrichPolicyActionTests extends AbstractEnrichTestCase { @After - private void cleanupPolicies() throws InterruptedException { + public void cleanupPolicies() throws InterruptedException { ClusterService clusterService = getInstanceFromNode(ClusterService.class); final CountDownLatch latch = new CountDownLatch(1); @@ -35,7 +35,7 @@ private void cleanupPolicies() throws InterruptedException { final TransportGetEnrichPolicyAction transportAction = node().injector().getInstance(TransportGetEnrichPolicyAction.class); ActionTestUtils.execute(transportAction, null, new GetEnrichPolicyAction.Request(), - new ActionListener() { + new ActionListener<>() { @Override public void onResponse(GetEnrichPolicyAction.Response response) { reference.set(response); @@ -108,7 +108,7 @@ public void testListEmptyPolicies() throws InterruptedException { final TransportGetEnrichPolicyAction transportAction = node().injector().getInstance(TransportGetEnrichPolicyAction.class); ActionTestUtils.execute(transportAction, null, new GetEnrichPolicyAction.Request(), - new ActionListener() { + new ActionListener<>() { @Override public void onResponse(GetEnrichPolicyAction.Response response) { reference.set(response); @@ -144,7 +144,7 @@ public void testGetPolicy() throws InterruptedException { final TransportGetEnrichPolicyAction transportAction = node().injector().getInstance(TransportGetEnrichPolicyAction.class); ActionTestUtils.execute(transportAction, null, new GetEnrichPolicyAction.Request(new String[]{name}), - new ActionListener() { + new ActionListener<>() { @Override public void onResponse(GetEnrichPolicyAction.Response response) { reference.set(response); @@ -187,7 +187,7 @@ public void testGetMultiplePolicies() throws InterruptedException { final TransportGetEnrichPolicyAction transportAction = node().injector().getInstance(TransportGetEnrichPolicyAction.class); ActionTestUtils.execute(transportAction, null, new GetEnrichPolicyAction.Request(new String[]{name, anotherName}), - new ActionListener() { + new ActionListener<>() { @Override public void onResponse(GetEnrichPolicyAction.Response response) { reference.set(response); @@ -218,7 +218,7 @@ public void testGetPolicyThrowsError() throws InterruptedException { final TransportGetEnrichPolicyAction transportAction = node().injector().getInstance(TransportGetEnrichPolicyAction.class); ActionTestUtils.execute(transportAction, null, new GetEnrichPolicyAction.Request(new String[]{"non-exists"}), - new ActionListener() { + new ActionListener<>() { @Override public void onResponse(GetEnrichPolicyAction.Response response) { reference.set(response); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ClassificationEvaluationIT.java 
b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ClassificationEvaluationIT.java index 2cfa98a28aaa9..196ca87fb1213 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ClassificationEvaluationIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ClassificationEvaluationIT.java @@ -12,11 +12,12 @@ import org.elasticsearch.xpack.core.ml.action.EvaluateDataFrameAction; import org.elasticsearch.xpack.core.ml.dataframe.evaluation.classification.Classification; import org.elasticsearch.xpack.core.ml.dataframe.evaluation.classification.MulticlassConfusionMatrix; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.classification.MulticlassConfusionMatrix.ActualClass; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.classification.MulticlassConfusionMatrix.PredictedClass; import org.junit.After; import org.junit.Before; import java.util.List; -import java.util.Map; import static org.hamcrest.Matchers.equalTo; @@ -53,13 +54,53 @@ public void testEvaluate_MulticlassClassification_DefaultMetrics() { assertThat(confusionMatrixResult.getMetricName(), equalTo(MulticlassConfusionMatrix.NAME.getPreferredName())); assertThat( confusionMatrixResult.getConfusionMatrix(), - equalTo(Map.of( - "ant", Map.of("ant", 1L, "cat", 4L, "dog", 3L, "fox", 2L, "mouse", 5L), - "cat", Map.of("ant", 3L, "cat", 1L, "dog", 5L, "fox", 4L, "mouse", 2L), - "dog", Map.of("ant", 4L, "cat", 2L, "dog", 1L, "fox", 5L, "mouse", 3L), - "fox", Map.of("ant", 5L, "cat", 3L, "dog", 2L, "fox", 1L, "mouse", 4L), - "mouse", Map.of("ant", 2L, "cat", 5L, "dog", 4L, "fox", 3L, "mouse", 1L)))); - assertThat(confusionMatrixResult.getOtherClassesCount(), equalTo(0L)); + equalTo(List.of( + new ActualClass("ant", + 15, + List.of( + new PredictedClass("ant", 1L), + new PredictedClass("cat", 4L), + new PredictedClass("dog", 3L), + new PredictedClass("fox", 2L), + new PredictedClass("mouse", 5L)), + 0), + new ActualClass("cat", + 15, + List.of( + new PredictedClass("ant", 3L), + new PredictedClass("cat", 1L), + new PredictedClass("dog", 5L), + new PredictedClass("fox", 4L), + new PredictedClass("mouse", 2L)), + 0), + new ActualClass("dog", + 15, + List.of( + new PredictedClass("ant", 4L), + new PredictedClass("cat", 2L), + new PredictedClass("dog", 1L), + new PredictedClass("fox", 5L), + new PredictedClass("mouse", 3L)), + 0), + new ActualClass("fox", + 15, + List.of( + new PredictedClass("ant", 5L), + new PredictedClass("cat", 3L), + new PredictedClass("dog", 2L), + new PredictedClass("fox", 1L), + new PredictedClass("mouse", 4L)), + 0), + new ActualClass("mouse", + 15, + List.of( + new PredictedClass("ant", 2L), + new PredictedClass("cat", 5L), + new PredictedClass("dog", 4L), + new PredictedClass("fox", 3L), + new PredictedClass("mouse", 1L)), + 0)))); + assertThat(confusionMatrixResult.getOtherActualClassCount(), equalTo(0L)); } public void testEvaluate_MulticlassClassification_ConfusionMatrixMetricWithDefaultSize() { @@ -78,13 +119,53 @@ public void testEvaluate_MulticlassClassification_ConfusionMatrixMetricWithDefau assertThat(confusionMatrixResult.getMetricName(), equalTo(MulticlassConfusionMatrix.NAME.getPreferredName())); assertThat( confusionMatrixResult.getConfusionMatrix(), - equalTo(Map.of( - "ant", Map.of("ant", 1L, "cat", 4L, "dog", 3L, "fox", 2L, "mouse", 5L), - "cat", Map.of("ant", 3L, "cat", 1L, "dog", 5L, "fox", 4L, "mouse", 2L), - "dog", Map.of("ant", 
4L, "cat", 2L, "dog", 1L, "fox", 5L, "mouse", 3L), - "fox", Map.of("ant", 5L, "cat", 3L, "dog", 2L, "fox", 1L, "mouse", 4L), - "mouse", Map.of("ant", 2L, "cat", 5L, "dog", 4L, "fox", 3L, "mouse", 1L)))); - assertThat(confusionMatrixResult.getOtherClassesCount(), equalTo(0L)); + equalTo(List.of( + new ActualClass("ant", + 15, + List.of( + new PredictedClass("ant", 1L), + new PredictedClass("cat", 4L), + new PredictedClass("dog", 3L), + new PredictedClass("fox", 2L), + new PredictedClass("mouse", 5L)), + 0), + new ActualClass("cat", + 15, + List.of( + new PredictedClass("ant", 3L), + new PredictedClass("cat", 1L), + new PredictedClass("dog", 5L), + new PredictedClass("fox", 4L), + new PredictedClass("mouse", 2L)), + 0), + new ActualClass("dog", + 15, + List.of( + new PredictedClass("ant", 4L), + new PredictedClass("cat", 2L), + new PredictedClass("dog", 1L), + new PredictedClass("fox", 5L), + new PredictedClass("mouse", 3L)), + 0), + new ActualClass("fox", + 15, + List.of( + new PredictedClass("ant", 5L), + new PredictedClass("cat", 3L), + new PredictedClass("dog", 2L), + new PredictedClass("fox", 1L), + new PredictedClass("mouse", 4L)), + 0), + new ActualClass("mouse", + 15, + List.of( + new PredictedClass("ant", 2L), + new PredictedClass("cat", 5L), + new PredictedClass("dog", 4L), + new PredictedClass("fox", 3L), + new PredictedClass("mouse", 1L)), + 0)))); + assertThat(confusionMatrixResult.getOtherActualClassCount(), equalTo(0L)); } public void testEvaluate_MulticlassClassification_ConfusionMatrixMetricWithUserProvidedSize() { @@ -103,11 +184,20 @@ public void testEvaluate_MulticlassClassification_ConfusionMatrixMetricWithUserP assertThat(confusionMatrixResult.getMetricName(), equalTo(MulticlassConfusionMatrix.NAME.getPreferredName())); assertThat( confusionMatrixResult.getConfusionMatrix(), - equalTo(Map.of( - "ant", Map.of("ant", 1L, "cat", 4L, "dog", 3L, "_other_", 7L), - "cat", Map.of("ant", 3L, "cat", 1L, "dog", 5L, "_other_", 6L), - "dog", Map.of("ant", 4L, "cat", 2L, "dog", 1L, "_other_", 8L)))); - assertThat(confusionMatrixResult.getOtherClassesCount(), equalTo(2L)); + equalTo(List.of( + new ActualClass("ant", + 15, + List.of(new PredictedClass("ant", 1L), new PredictedClass("cat", 4L), new PredictedClass("dog", 3L)), + 7), + new ActualClass("cat", + 15, + List.of(new PredictedClass("ant", 3L), new PredictedClass("cat", 1L), new PredictedClass("dog", 5L)), + 6), + new ActualClass("dog", + 15, + List.of(new PredictedClass("ant", 4L), new PredictedClass("cat", 2L), new PredictedClass("dog", 1L)), + 8)))); + assertThat(confusionMatrixResult.getOtherActualClassCount(), equalTo(2L)); } private static void indexAnimalsData(String indexName) { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ClassificationIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ClassificationIT.java index ce344ec97d3db..a2fd3b194e29c 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ClassificationIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ClassificationIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ml.integration; import com.google.common.collect.Ordering; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import 
org.elasticsearch.action.bulk.BulkRequestBuilder; @@ -39,6 +40,7 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.startsWith; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/48337") public class ClassificationIT extends MlNativeDataFrameAnalyticsIntegTestCase { private static final String BOOLEAN_FIELD = "boolean-field"; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java deleted file mode 100644 index 6961c377f55e5..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security; - -import org.elasticsearch.bootstrap.BootstrapCheck; -import org.elasticsearch.bootstrap.BootstrapContext; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.core.XPackSettings; - - -public class FIPS140JKSKeystoreBootstrapCheck implements BootstrapCheck { - - /** - * Test if the node fails the check. - * - * @param context the bootstrap context - * @return the result of the bootstrap check - */ - @Override - public BootstrapCheckResult check(BootstrapContext context) { - - if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings())) { - final Settings settings = context.settings(); - Settings keystoreTypeSettings = settings.filter(k -> k.endsWith("keystore.type")) - .filter(k -> settings.get(k).equalsIgnoreCase("jks")); - if (keystoreTypeSettings.isEmpty() == false) { - return BootstrapCheckResult.failure("JKS Keystores cannot be used in a FIPS 140 compliant JVM. Please " + - "revisit [" + keystoreTypeSettings.toDelimitedString(',') + "] settings"); - } - // Default Keystore type is JKS if not explicitly set - Settings keystorePathSettings = settings.filter(k -> k.endsWith("keystore.path")) - .filter(k -> settings.hasValue(k.replace(".path", ".type")) == false); - if (keystorePathSettings.isEmpty() == false) { - return BootstrapCheckResult.failure("JKS Keystores cannot be used in a FIPS 140 compliant JVM. Please " + - "revisit [" + keystorePathSettings.toDelimitedString(',') + "] settings"); - } - - } - return BootstrapCheckResult.success(); - } - - @Override - public boolean alwaysEnforce() { - return true; - } -} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java deleted file mode 100644 index 4b0d9cd2f8c58..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.security; - -import org.elasticsearch.bootstrap.BootstrapCheck; -import org.elasticsearch.bootstrap.BootstrapContext; -import org.elasticsearch.license.License; -import org.elasticsearch.license.LicenseService; -import org.elasticsearch.xpack.core.XPackSettings; - -import java.util.EnumSet; - -/** - * A bootstrap check which enforces the licensing of FIPS - */ -final class FIPS140LicenseBootstrapCheck implements BootstrapCheck { - - static final EnumSet ALLOWED_LICENSE_OPERATION_MODES = - EnumSet.of(License.OperationMode.PLATINUM, License.OperationMode.TRIAL); - - @Override - public BootstrapCheckResult check(BootstrapContext context) { - if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings())) { - License license = LicenseService.getLicense(context.metaData()); - if (license != null && ALLOWED_LICENSE_OPERATION_MODES.contains(license.operationMode()) == false) { - return BootstrapCheckResult.failure("FIPS mode is only allowed with a Platinum or Trial license"); - } - } - return BootstrapCheckResult.success(); - } -} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java deleted file mode 100644 index 8a754a2f25b93..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security; - -import org.elasticsearch.bootstrap.BootstrapCheck; -import org.elasticsearch.bootstrap.BootstrapContext; -import org.elasticsearch.xpack.core.XPackSettings; - -import java.util.Locale; - -public class FIPS140PasswordHashingAlgorithmBootstrapCheck implements BootstrapCheck { - - /** - * Test if the node fails the check. - * - * @param context the bootstrap context - * @return the result of the bootstrap check - */ - @Override - public BootstrapCheckResult check(final BootstrapContext context) { - if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings())) { - final String selectedAlgorithm = XPackSettings.PASSWORD_HASHING_ALGORITHM.get(context.settings()); - if (selectedAlgorithm.toLowerCase(Locale.ROOT).startsWith("pbkdf2") == false) { - return BootstrapCheckResult.failure("Only PBKDF2 is allowed for password hashing in a FIPS-140 JVM. Please set the " + - "appropriate value for [ " + XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey() + " ] setting."); - } - } - return BootstrapCheckResult.success(); - } - -} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140SecureSettingsBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140SecureSettingsBootstrapCheck.java deleted file mode 100644 index 82a58b94a83fe..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140SecureSettingsBootstrapCheck.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security; - -import org.elasticsearch.bootstrap.BootstrapCheck; -import org.elasticsearch.bootstrap.BootstrapContext; -import org.elasticsearch.common.settings.KeyStoreWrapper; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.xpack.core.XPackSettings; - -import java.io.IOException; -import java.io.UncheckedIOException; - -public class FIPS140SecureSettingsBootstrapCheck implements BootstrapCheck { - - private final boolean fipsModeEnabled; - private final Environment environment; - - FIPS140SecureSettingsBootstrapCheck(Settings settings, Environment environment) { - this.fipsModeEnabled = XPackSettings.FIPS_MODE_ENABLED.get(settings); - this.environment = environment; - } - - /** - * Test if the node fails the check. - * - * @param context the bootstrap context - * @return the result of the bootstrap check - */ - @Override - public BootstrapCheckResult check(BootstrapContext context) { - if (fipsModeEnabled) { - try (KeyStoreWrapper secureSettings = KeyStoreWrapper.load(environment.configFile())) { - if (secureSettings != null && secureSettings.getFormatVersion() < 3) { - return BootstrapCheckResult.failure("Secure settings store is not of the appropriate version. Please use " + - "bin/elasticsearch-keystore create to generate a new secure settings store and migrate the secure settings there."); - } - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - return BootstrapCheckResult.success(); - } - - @Override - public boolean alwaysEnforce() { - return fipsModeEnabled; - } -} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index b2ebf9733c25b..9c9aea7111442 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -263,6 +263,7 @@ import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING; +import static org.elasticsearch.license.XPackLicenseState.FIPS_ALLOWED_LICENSE_OPERATION_MODES; import static org.elasticsearch.xpack.core.XPackSettings.API_KEY_SERVICE_ENABLED_SETTING; import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED; import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_MAIN_ALIAS; @@ -311,11 +312,7 @@ public Security(Settings settings, final Path configPath) { new ApiKeySSLBootstrapCheck(), new TokenSSLBootstrapCheck(), new PkiRealmBootstrapCheck(getSslService()), - new TLSLicenseBootstrapCheck(), - new FIPS140SecureSettingsBootstrapCheck(settings, env), - new FIPS140JKSKeystoreBootstrapCheck(), - new FIPS140PasswordHashingAlgorithmBootstrapCheck(), - new FIPS140LicenseBootstrapCheck())); + new TLSLicenseBootstrapCheck())); checks.addAll(InternalRealms.getBootstrapChecks(settings, env)); this.bootstrapChecks = Collections.unmodifiableList(checks); Automatons.updateConfiguration(settings); @@ -328,6 +325,9 @@ public Security(Settings settings, final Path configPath) { private static void runStartupChecks(Settings settings) { validateRealmSettings(settings); + if 
(XPackSettings.FIPS_MODE_ENABLED.get(settings)) { + validateForFips(settings); + } } // overridable by tests @@ -830,6 +830,37 @@ static void validateRealmSettings(Settings settings) { } } + static void validateForFips(Settings settings) { + final List validationErrors = new ArrayList<>(); + Settings keystoreTypeSettings = settings.filter(k -> k.endsWith("keystore.type")) + .filter(k -> settings.get(k).equalsIgnoreCase("jks")); + if (keystoreTypeSettings.isEmpty() == false) { + validationErrors.add("JKS Keystores cannot be used in a FIPS 140 compliant JVM. Please " + + "revisit [" + keystoreTypeSettings.toDelimitedString(',') + "] settings"); + } + Settings keystorePathSettings = settings.filter(k -> k.endsWith("keystore.path")) + .filter(k -> settings.hasValue(k.replace(".path", ".type")) == false); + if (keystorePathSettings.isEmpty() == false && SSLConfigurationSettings.inferKeyStoreType(null).equals("jks")) { + validationErrors.add("JKS Keystores cannot be used in a FIPS 140 compliant JVM. Please " + + "revisit [" + keystorePathSettings.toDelimitedString(',') + "] settings"); + } + final String selectedAlgorithm = XPackSettings.PASSWORD_HASHING_ALGORITHM.get(settings); + if (selectedAlgorithm.toLowerCase(Locale.ROOT).startsWith("pbkdf2") == false) { + validationErrors.add("Only PBKDF2 is allowed for password hashing in a FIPS 140 JVM. Please set the " + + "appropriate value for [ " + XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey() + " ] setting."); + } + + if (validationErrors.isEmpty() == false) { + final StringBuilder sb = new StringBuilder(); + sb.append("Validation for FIPS 140 mode failed: \n"); + int index = 0; + for (String error : validationErrors) { + sb.append(++index).append(": ").append(error).append(";\n"); + } + throw new IllegalArgumentException(sb.toString()); + } + } + @Override public List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry, ThreadContext threadContext) { if (enabled == false) { // don't register anything if we are not enabled @@ -998,7 +1029,7 @@ public void accept(DiscoveryNode node, ClusterState state) { if (inFipsMode) { License license = LicenseService.getLicense(state.metaData()); if (license != null && - FIPS140LicenseBootstrapCheck.ALLOWED_LICENSE_OPERATION_MODES.contains(license.operationMode()) == false) { + FIPS_ALLOWED_LICENSE_OPERATION_MODES.contains(license.operationMode()) == false) { throw new IllegalStateException("FIPS mode cannot be used with a [" + license.operationMode() + "] license. 
It is only allowed with a Platinum or Trial license."); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java index 2097c5176d1fb..cd33ff10f0f05 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java @@ -19,6 +19,7 @@ import java.util.function.Predicate; import static org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction.TASKS_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.ENRICH_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.TRANSFORM_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.DEPRECATION_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.INDEX_LIFECYCLE_ORIGIN; @@ -111,6 +112,7 @@ public static void switchUserBasedOnActionOriginAndExecute(ThreadContext threadC case PERSISTENT_TASK_ORIGIN: case ROLLUP_ORIGIN: case INDEX_LIFECYCLE_ORIGIN: + case ENRICH_ORIGIN: case TASKS_ORIGIN: // TODO use a more limited user for tasks securityContext.executeAsUser(XPackUser.INSTANCE, consumer, Version.CURRENT); break; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java deleted file mode 100644 index b35b8009f12ee..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.security; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.AbstractBootstrapCheckTestCase; - -public class FIPS140JKSKeystoreBootstrapCheckTests extends AbstractBootstrapCheckTestCase { - - public void testNoKeystoreIsAllowed() { - final Settings.Builder settings = Settings.builder() - .put("xpack.security.fips_mode.enabled", "true"); - assertFalse(new FIPS140JKSKeystoreBootstrapCheck().check(createTestContext(settings.build(), null)).isFailure()); - } - - public void testTransportSSLKeystoreTypeIsNotAllowed() { - final Settings.Builder settings = Settings.builder() - .put("xpack.security.fips_mode.enabled", "true") - .put("xpack.security.transport.ssl.keystore.path", "/this/is/the/path") - .put("xpack.security.transport.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(createTestContext(settings.build(), null)).isFailure()); - } - - public void testHttpSSLKeystoreTypeIsNotAllowed() { - final Settings.Builder settings = Settings.builder() - .put("xpack.security.fips_mode.enabled", "true") - .put("xpack.security.http.ssl.keystore.path", "/this/is/the/path") - .put("xpack.security.http.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(createTestContext(settings.build(), null)).isFailure()); - } - - public void testRealmKeystoreTypeIsNotAllowed() { - final Settings.Builder settings = Settings.builder() - .put("xpack.security.fips_mode.enabled", "true") - .put("xpack.security.authc.realms.ldap.ssl.keystore.path", "/this/is/the/path") - .put("xpack.security.authc.realms.ldap.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(createTestContext(settings.build(), null)).isFailure()); - } - - public void testImplicitRealmKeystoreTypeIsNotAllowed() { - final Settings.Builder settings = Settings.builder() - .put("xpack.security.fips_mode.enabled", "true") - .put("xpack.security.authc.realms.ldap.ssl.keystore.path", "/this/is/the/path"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(createTestContext(settings.build(), null)).isFailure()); - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java deleted file mode 100644 index 9f3cc0ef951bf..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.security; - -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.license.License; -import org.elasticsearch.license.TestUtils; -import org.elasticsearch.test.AbstractBootstrapCheckTestCase; - -public class FIPS140LicenseBootstrapCheckTests extends AbstractBootstrapCheckTestCase { - - public void testBootstrapCheck() throws Exception { - assertTrue(new FIPS140LicenseBootstrapCheck() - .check(emptyContext).isSuccess()); - assertTrue(new FIPS140LicenseBootstrapCheck() - .check(createTestContext(Settings.builder().put("xpack.security.fips_mode.enabled", randomBoolean()).build(), MetaData - .EMPTY_META_DATA)).isSuccess()); - - MetaData.Builder builder = MetaData.builder(); - License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); - TestUtils.putLicense(builder, license); - MetaData metaData = builder.build(); - - if (FIPS140LicenseBootstrapCheck.ALLOWED_LICENSE_OPERATION_MODES.contains(license.operationMode())) { - assertTrue(new FIPS140LicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.fips_mode.enabled", true).build(), metaData)).isSuccess()); - assertTrue(new FIPS140LicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.fips_mode.enabled", false).build(), metaData)).isSuccess()); - } else { - assertTrue(new FIPS140LicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.fips_mode.enabled", false).build(), metaData)).isSuccess()); - assertTrue(new FIPS140LicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.fips_mode.enabled", true).build(), metaData)).isFailure()); - assertEquals("FIPS mode is only allowed with a Platinum or Trial license", - new FIPS140LicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.fips_mode.enabled", true).build(), metaData)).getMessage()); - } - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java deleted file mode 100644 index 0dcaf1128f988..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.security; - -import org.elasticsearch.bootstrap.BootstrapCheck; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.AbstractBootstrapCheckTestCase; -import org.elasticsearch.xpack.core.XPackSettings; - -import java.util.Arrays; - -import static org.hamcrest.Matchers.equalTo; - -public class FIPS140PasswordHashingAlgorithmBootstrapCheckTests extends AbstractBootstrapCheckTestCase { - - public void testPBKDF2AlgorithmIsAllowed() { - { - final Settings settings = Settings.builder() - .put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true) - .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), "PBKDF2_10000") - .build(); - final BootstrapCheck.BootstrapCheckResult result = - new FIPS140PasswordHashingAlgorithmBootstrapCheck().check(createTestContext(settings, null)); - assertFalse(result.isFailure()); - } - - { - final Settings settings = Settings.builder() - .put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true) - .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), "PBKDF2") - .build(); - final BootstrapCheck.BootstrapCheckResult result = - new FIPS140PasswordHashingAlgorithmBootstrapCheck().check(createTestContext(settings, null)); - assertFalse(result.isFailure()); - } - } - - public void testBCRYPTAlgorithmDependsOnFipsMode() { - for (final Boolean fipsModeEnabled : Arrays.asList(true, false)) { - for (final String passwordHashingAlgorithm : Arrays.asList(null, "BCRYPT", "BCRYPT11")) { - runBCRYPTTest(fipsModeEnabled, passwordHashingAlgorithm); - } - } - } - - private void runBCRYPTTest(final boolean fipsModeEnabled, final String passwordHashingAlgorithm) { - final Settings.Builder builder = Settings.builder().put(XPackSettings.FIPS_MODE_ENABLED.getKey(), fipsModeEnabled); - if (passwordHashingAlgorithm != null) { - builder.put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), passwordHashingAlgorithm); - } - final Settings settings = builder.build(); - final BootstrapCheck.BootstrapCheckResult result = - new FIPS140PasswordHashingAlgorithmBootstrapCheck().check(createTestContext(settings, null)); - assertThat(result.isFailure(), equalTo(fipsModeEnabled)); - } - -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140SecureSettingsBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140SecureSettingsBootstrapCheckTests.java deleted file mode 100644 index 5497dcfe46045..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140SecureSettingsBootstrapCheckTests.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.security; - -import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.store.IOContext; -import org.apache.lucene.store.IndexOutput; -import org.apache.lucene.store.SimpleFSDirectory; -import org.elasticsearch.common.settings.KeyStoreWrapper; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.TestEnvironment; -import org.elasticsearch.test.AbstractBootstrapCheckTestCase; - -import javax.crypto.SecretKey; -import javax.crypto.SecretKeyFactory; -import javax.crypto.spec.PBEKeySpec; -import java.io.ByteArrayOutputStream; -import java.nio.file.Path; -import java.security.AccessControlException; -import java.security.KeyStore; -import java.util.Base64; - -public class FIPS140SecureSettingsBootstrapCheckTests extends AbstractBootstrapCheckTestCase { - - public void testLegacySecureSettingsIsNotAllowed() throws Exception { - assumeFalse("Can't run in a FIPS JVM, PBE is not available", inFipsJvm()); - final Settings.Builder builder = Settings.builder() - .put("path.home", createTempDir()) - .put("xpack.security.fips_mode.enabled", "true"); - Environment env = TestEnvironment.newEnvironment(builder.build()); - generateV2Keystore(env); - assertTrue(new FIPS140SecureSettingsBootstrapCheck(builder.build(), env).check(createTestContext(builder.build(), - null)).isFailure()); - } - - public void testCorrectSecureSettingsVersionIsAllowed() throws Exception { - final Settings.Builder builder = Settings.builder() - .put("path.home", createTempDir()) - .put("xpack.security.fips_mode.enabled", "true"); - Environment env = TestEnvironment.newEnvironment(builder.build()); - final KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create(); - try { - keyStoreWrapper.save(env.configFile(), "password".toCharArray()); - } catch (final AccessControlException e) { - if (e.getPermission() instanceof RuntimePermission && e.getPermission().getName().equals("accessUserInformation")) { - // this is expected:but we don't care in tests - } else { - throw e; - } - } - assertFalse(new FIPS140SecureSettingsBootstrapCheck(builder.build(), env).check(createTestContext(builder.build(), - null)).isFailure()); - } - - private void generateV2Keystore(Environment env) throws Exception { - Path configDir = env.configFile(); - SimpleFSDirectory directory = new SimpleFSDirectory(configDir); - byte[] fileBytes = new byte[20]; - random().nextBytes(fileBytes); - try (IndexOutput output = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT)) { - - CodecUtil.writeHeader(output, "elasticsearch.keystore", 2); - output.writeByte((byte) 0); // hasPassword = false - output.writeString("PKCS12"); - output.writeString("PBE"); // string algo - output.writeString("PBE"); // file algo - - output.writeVInt(2); // num settings - output.writeString("string_setting"); - output.writeString("STRING"); - output.writeString("file_setting"); - output.writeString("FILE"); - - SecretKeyFactory secretFactory = SecretKeyFactory.getInstance("PBE"); - KeyStore keystore = KeyStore.getInstance("PKCS12"); - keystore.load(null, null); - SecretKey secretKey = secretFactory.generateSecret(new PBEKeySpec("stringSecretValue".toCharArray())); - KeyStore.ProtectionParameter protectionParameter = new KeyStore.PasswordProtection(new char[0]); - keystore.setEntry("string_setting", new KeyStore.SecretKeyEntry(secretKey), protectionParameter); - - byte[] base64Bytes = Base64.getEncoder().encode(fileBytes); - char[] chars = new 
char[base64Bytes.length]; - for (int i = 0; i < chars.length; ++i) { - chars[i] = (char) base64Bytes[i]; // PBE only stores the lower 8 bits, so this narrowing is ok - } - secretKey = secretFactory.generateSecret(new PBEKeySpec(chars)); - keystore.setEntry("file_setting", new KeyStore.SecretKeyEntry(secretKey), protectionParameter); - - ByteArrayOutputStream keystoreBytesStream = new ByteArrayOutputStream(); - keystore.store(keystoreBytesStream, new char[0]); - byte[] keystoreBytes = keystoreBytesStream.toByteArray(); - output.writeInt(keystoreBytes.length); - output.writeBytes(keystoreBytes, keystoreBytes.length); - CodecUtil.writeFooter(output); - } - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 364b32f18b356..99d101744e656 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; import org.elasticsearch.xpack.core.security.authz.permission.DocumentPermissions; @@ -65,6 +66,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING; +import static org.elasticsearch.license.XPackLicenseState.FIPS_ALLOWED_LICENSE_OPERATION_MODES; import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_MAIN_ALIAS; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_MAIN_INDEX_FORMAT; import static org.hamcrest.Matchers.containsString; @@ -243,24 +245,36 @@ public void testJoinValidatorOnDisabledSecurity() throws Exception { assertNull(joinValidator); } - public void testJoinValidatorForFIPSLicense() throws Exception { + public void testJoinValidatorForFIPSOnAllowedLicense() throws Exception { DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), VersionUtils.randomVersionBetween(random(), null, Version.CURRENT)); MetaData.Builder builder = MetaData.builder(); - License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); + License license = + TestUtils.generateSignedLicense(randomFrom(FIPS_ALLOWED_LICENSE_OPERATION_MODES).toString(), TimeValue.timeValueHours(24)); TestUtils.putLicense(builder, license); ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metaData(builder.build()).build(); new Security.ValidateLicenseForFIPS(false).accept(node, state); + // no exception thrown + new Security.ValidateLicenseForFIPS(true).accept(node, state); + // no exception thrown + } + + public void testJoinValidatorForFIPSOnForbiddenLicense() throws Exception { + DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), + VersionUtils.randomVersionBetween(random(), null, Version.CURRENT)); + MetaData.Builder builder = MetaData.builder(); + final String forbiddenLicenseType = + randomFrom(List.of(License.OperationMode.values()).stream() + .filter(l -> 
FIPS_ALLOWED_LICENSE_OPERATION_MODES.contains(l) == false).collect(Collectors.toList())).toString(); + License license = TestUtils.generateSignedLicense(forbiddenLicenseType, TimeValue.timeValueHours(24)); + TestUtils.putLicense(builder, license); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metaData(builder.build()).build(); + new Security.ValidateLicenseForFIPS(false).accept(node, state); + // no exception thrown + IllegalStateException e = expectThrows(IllegalStateException.class, + () -> new Security.ValidateLicenseForFIPS(true).accept(node, state)); + assertThat(e.getMessage(), containsString("FIPS mode cannot be used")); - final boolean isLicenseValidForFips = - FIPS140LicenseBootstrapCheck.ALLOWED_LICENSE_OPERATION_MODES.contains(license.operationMode()); - if (isLicenseValidForFips) { - new Security.ValidateLicenseForFIPS(true).accept(node, state); - } else { - IllegalStateException e = expectThrows(IllegalStateException.class, - () -> new Security.ValidateLicenseForFIPS(true).accept(node, state)); - assertThat(e.getMessage(), containsString("FIPS mode cannot be used")); - } } public void testIndexJoinValidator_FullyCurrentCluster() throws Exception { @@ -377,4 +391,71 @@ public void testValidateRealmsWhenSettingsAreCorrect() { Security.validateRealmSettings(settings); // no-exception } + + public void testValidateForFipsKeystoreWithImplicitJksType() { + final Settings settings = Settings.builder() + .put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true) + .put("xpack.security.transport.ssl.keystore.path", "path/to/keystore") + .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), + randomFrom(Hasher.getAvailableAlgoStoredHash().stream() + .filter(alg -> alg.startsWith("pbkdf2") == false).collect(Collectors.toList()))) + .build(); + final IllegalArgumentException iae = + expectThrows(IllegalArgumentException.class, () -> Security.validateForFips(settings)); + assertThat(iae.getMessage(), containsString("JKS Keystores cannot be used in a FIPS 140 compliant JVM")); + } + + public void testValidateForFipsKeystoreWithExplicitJksType() { + final Settings settings = Settings.builder() + .put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true) + .put("xpack.security.transport.ssl.keystore.path", "path/to/keystore") + .put("xpack.security.transport.ssl.keystore.type", "JKS") + .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), + randomFrom(Hasher.getAvailableAlgoStoredHash().stream() + .filter(alg -> alg.startsWith("pbkdf2")).collect(Collectors.toList()))) + .build(); + final IllegalArgumentException iae = + expectThrows(IllegalArgumentException.class, () -> Security.validateForFips(settings)); + assertThat(iae.getMessage(), containsString("JKS Keystores cannot be used in a FIPS 140 compliant JVM")); + } + + public void testValidateForFipsInvalidPasswordHashingAlgorithm() { + final Settings settings = Settings.builder() + .put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true) + .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), + randomFrom(Hasher.getAvailableAlgoStoredHash().stream() + .filter(alg -> alg.startsWith("pbkdf2") == false).collect(Collectors.toList()))) + .build(); + final IllegalArgumentException iae = + expectThrows(IllegalArgumentException.class, () -> Security.validateForFips(settings)); + assertThat(iae.getMessage(), containsString("Only PBKDF2 is allowed for password hashing in a FIPS 140 JVM.")); + } + + public void testValidateForFipsMultipleValidationErrors() { + final Settings settings = Settings.builder() + 
.put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true) + .put("xpack.security.transport.ssl.keystore.path", "path/to/keystore") + .put("xpack.security.transport.ssl.keystore.type", "JKS") + .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), + randomFrom(Hasher.getAvailableAlgoStoredHash().stream() + .filter(alg -> alg.startsWith("pbkdf2") == false).collect(Collectors.toList()))) + .build(); + final IllegalArgumentException iae = + expectThrows(IllegalArgumentException.class, () -> Security.validateForFips(settings)); + assertThat(iae.getMessage(), containsString("JKS Keystores cannot be used in a FIPS 140 compliant JVM")); + assertThat(iae.getMessage(), containsString("Only PBKDF2 is allowed for password hashing in a FIPS 140 JVM.")); + } + + public void testValidateForFipsNoErrors() { + final Settings settings = Settings.builder() + .put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true) + .put("xpack.security.transport.ssl.keystore.path", "path/to/keystore") + .put("xpack.security.transport.ssl.keystore.type", "BCFKS") + .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), + randomFrom(Hasher.getAvailableAlgoStoredHash().stream() + .filter(alg -> alg.startsWith("pbkdf2")).collect(Collectors.toList()))) + .build(); + Security.validateForFips(settings); + // no exception thrown + } } diff --git a/x-pack/plugin/sql/qa/src/main/resources/date.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/date.csv-spec index 46557c77884e8..828d110556720 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/date.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/date.csv-spec @@ -85,12 +85,12 @@ YEAR(CAST(birth_date AS DATE)) y, birth_date, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; d:i | dm:i | dw:i | dy:i | iso_dw:i | w:i |iso_w:i | q:i | y:i | birth_date:ts | l:s -2 |2 |4 |245 |3 |36 |35 |3 |1953 |1953-09-02T00:00:00Z |Facello -2 |2 |3 |154 |2 |23 |22 |2 |1964 |1964-06-02T00:00:00Z |Simmel +2 |2 |4 |245 |3 |36 |36 |3 |1953 |1953-09-02T00:00:00Z |Facello +2 |2 |3 |154 |2 |23 |23 |2 |1964 |1964-06-02T00:00:00Z |Simmel 3 |3 |5 |337 |4 |49 |49 |4 |1959 |1959-12-03T00:00:00Z |Bamford -1 |1 |7 |121 |6 |18 |18 |2 |1954 |1954-05-01T00:00:00Z |Koblick +1 |1 |7 |121 |6 |18 |17 |2 |1954 |1954-05-01T00:00:00Z |Koblick 21 |21 |6 |21 |5 |4 |3 |1 |1955 |1955-01-21T00:00:00Z |Maliniak -20 |20 |2 |110 |1 |17 |16 |2 |1953 |1953-04-20T00:00:00Z |Preusig +20 |20 |2 |110 |1 |17 |17 |2 |1953 |1953-04-20T00:00:00Z |Preusig 23 |23 |5 |143 |4 |21 |21 |2 |1957 |1957-05-23T00:00:00Z |Zielinski 19 |19 |4 |50 |3 |8 |8 |1 |1958 |1958-02-19T00:00:00Z |Kalloufi 19 |19 |7 |110 |6 |16 |16 |2 |1952 |1952-04-19T00:00:00Z |Peac diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java index f4cccb9e7fd58..5839bfc090afe 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java @@ -15,8 +15,8 @@ import java.time.ZoneId; import java.time.ZonedDateTime; -import java.time.temporal.ChronoField; import java.time.temporal.Temporal; +import java.time.temporal.TemporalField; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; @@ -31,10 +31,10 @@ public abstract class DateTimeFunction extends 
BaseDateTimeFunction { public static Integer dateTimeChrono(ZonedDateTime dateTime, String tzId, String chronoName) { ZonedDateTime zdt = dateTime.withZoneSameInstant(ZoneId.of(tzId)); - return dateTimeChrono(zdt, ChronoField.valueOf(chronoName)); + return dateTimeChrono(zdt, DateTimeProcessor.DateTimeExtractor.valueOf(chronoName).chronoField()); } - protected static Integer dateTimeChrono(Temporal dateTime, ChronoField field) { + protected static Integer dateTimeChrono(Temporal dateTime, TemporalField field) { return Integer.valueOf(dateTime.get(field)); } @@ -46,7 +46,7 @@ public ScriptTemplate asScript() { String template = formatTemplate("{sql}.dateTimeChrono(" + script.template() + ", {}, {})"); params.script(script.params()) .variable(zoneId().getId()) - .variable(extractor.chronoField().name()); + .variable(extractor.name()); return new ScriptTemplate(template, params.build(), dataType()); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java index d0f7b5d9afc3a..30734a2690b43 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java @@ -13,25 +13,27 @@ import java.time.ZoneId; import java.time.ZonedDateTime; import java.time.temporal.ChronoField; +import java.time.temporal.TemporalField; +import java.time.temporal.WeekFields; import java.util.Objects; public class DateTimeProcessor extends BaseDateTimeProcessor { public enum DateTimeExtractor { DAY_OF_MONTH(ChronoField.DAY_OF_MONTH), - ISO_DAY_OF_WEEK(ChronoField.DAY_OF_WEEK), + ISO_DAY_OF_WEEK(WeekFields.ISO.dayOfWeek()),//the same as ChronoField.DAY_OF_WEEK but more clear DAY_OF_YEAR(ChronoField.DAY_OF_YEAR), HOUR_OF_DAY(ChronoField.HOUR_OF_DAY), MINUTE_OF_DAY(ChronoField.MINUTE_OF_DAY), MINUTE_OF_HOUR(ChronoField.MINUTE_OF_HOUR), MONTH_OF_YEAR(ChronoField.MONTH_OF_YEAR), SECOND_OF_MINUTE(ChronoField.SECOND_OF_MINUTE), - ISO_WEEK_OF_YEAR(ChronoField.ALIGNED_WEEK_OF_YEAR), + ISO_WEEK_OF_YEAR(WeekFields.ISO.weekOfWeekBasedYear()), YEAR(ChronoField.YEAR); - private final ChronoField field; + private final TemporalField field; - DateTimeExtractor(ChronoField field) { + DateTimeExtractor(TemporalField field) { this.field = field; } @@ -43,7 +45,7 @@ public int extract(OffsetTime time) { return time.get(field); } - public ChronoField chronoField() { + public TemporalField chronoField() { return field; } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NonIsoDateTimeProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NonIsoDateTimeProcessor.java index e0ccc46c79cd3..feabea2f53838 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NonIsoDateTimeProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NonIsoDateTimeProcessor.java @@ -11,36 +11,21 @@ import java.io.IOException; import java.time.DayOfWeek; -import java.time.LocalDateTime; import java.time.ZoneId; import java.time.ZonedDateTime; -import java.time.temporal.ChronoField; import java.time.temporal.WeekFields; -import java.util.Calendar; -import 
java.util.Locale; import java.util.Objects; -import java.util.TimeZone; import java.util.function.Function; public class NonIsoDateTimeProcessor extends BaseDateTimeProcessor { public enum NonIsoDateTimeExtractor { DAY_OF_WEEK(zdt -> { - // by ISO 8601 standard, Monday is the first day of the week and has the value 1 - // non-ISO 8601 standard considers Sunday as the first day of the week and value 1 - int dayOfWeek = zdt.get(ChronoField.DAY_OF_WEEK) + 1; - return dayOfWeek == 8 ? 1 : dayOfWeek; + return zdt.get(WeekFields.of(DayOfWeek.SUNDAY,1).dayOfWeek()); }), WEEK_OF_YEAR(zdt -> { - // by ISO 8601 standard, the first week of a year is the first week with a majority (4 or more) of its days in January. - // Other Locales may have their own standards (see Arabic or Japanese calendars). - LocalDateTime ld = zdt.toLocalDateTime(); - Calendar cal = Calendar.getInstance(TimeZone.getTimeZone(zdt.getZone()), Locale.ROOT); - cal.clear(); - cal.set(ld.get(ChronoField.YEAR), ld.get(ChronoField.MONTH_OF_YEAR) - 1, ld.get(ChronoField.DAY_OF_MONTH), - ld.get(ChronoField.HOUR_OF_DAY), ld.get(ChronoField.MINUTE_OF_HOUR), ld.get(ChronoField.SECOND_OF_MINUTE)); -// for Locale.ROOT I would expect the same behavior as ISO, if there is a different locale, then it should be used WeekFields.of(Locale) - return zdt.get(WeekFields.of(DayOfWeek.SUNDAY,1).weekOfWeekBasedYear());//cal.get(Calendar.WEEK_OF_YEAR); + // for non-ISO week of year we expect a week to start on Sunday and require only 1 day in the first week of the year + return zdt.get(WeekFields.of(DayOfWeek.SUNDAY,1).weekOfWeekBasedYear()); }); private final Function apply; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java index a3f1f385346e8..25ff9e9879797 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java @@ -108,11 +108,13 @@ import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.util.Check; import org.elasticsearch.xpack.sql.util.DateUtils; +import org.elasticsearch.xpack.sql.util.Holder; import org.elasticsearch.xpack.sql.util.ReflectionUtils; import java.time.OffsetTime; import java.time.Period; import java.time.ZonedDateTime; +import java.time.temporal.TemporalAccessor; import java.util.Arrays; import java.util.LinkedHashMap; import java.util.List; @@ -821,9 +823,36 @@ protected QueryTranslation asQuery(Range r, boolean onAggs) { if (onAggs) { aggFilter = new AggFilter(at.id().toString(), r.asScript()); } else { + Holder lower = new Holder<>(valueOf(r.lower())); + Holder upper = new Holder<>(valueOf(r.upper())); + Holder format = new Holder<>(dateFormat(r.value())); + + // for a date constant comparison, we need to use a format for the date, to make sure that the format is the same + // no matter the timezone provided by the user + if (format.get() == null) { + DateFormatter formatter = null; + if (lower.get() instanceof ZonedDateTime || upper.get() instanceof ZonedDateTime) { + formatter = DateFormatter.forPattern(DATE_FORMAT); + } else if (lower.get() instanceof OffsetTime || upper.get() instanceof OffsetTime) { + formatter = DateFormatter.forPattern(TIME_FORMAT); + } + if (formatter != null) { + // RangeQueryBuilder accepts an Object as its parameter, but it will call .toString() on the ZonedDateTime + // instance which can have a 
slightly different format depending on the ZoneId used to create the ZonedDateTime + // Since RangeQueryBuilder can handle date as String as well, we'll format it as String and provide the format. + if (lower.get() instanceof ZonedDateTime || lower.get() instanceof OffsetTime) { + lower.set(formatter.format((TemporalAccessor) lower.get())); + } + if (upper.get() instanceof ZonedDateTime || upper.get() instanceof OffsetTime) { + upper.set(formatter.format((TemporalAccessor) upper.get())); + } + format.set(formatter.pattern()); + } + } + query = handleQuery(r, r.value(), - () -> new RangeQuery(r.source(), nameOf(r.value()), valueOf(r.lower()), r.includeLower(), - valueOf(r.upper()), r.includeUpper(), dateFormat(r.value()))); + () -> new RangeQuery(r.source(), nameOf(r.value()), lower.get(), r.includeLower(), + upper.get(), r.includeUpper(), format.get())); } return new QueryTranslation(query, aggFilter); } else { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NonIsoDateTimeProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NonIsoDateTimeProcessorTests.java index a2ee6796f668d..974a4474287b6 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NonIsoDateTimeProcessorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NonIsoDateTimeProcessorTests.java @@ -38,11 +38,6 @@ protected NonIsoDateTimeProcessor mutateInstance(NonIsoDateTimeProcessor instanc return new NonIsoDateTimeProcessor(replaced, UTC); } - @Override - protected ZoneId instanceZoneId(NonIsoDateTimeProcessor instance) { - return instance.zoneId(); - } - public void testNonISOWeekOfYearInUTC() { NonIsoDateTimeProcessor proc = new NonIsoDateTimeProcessor(NonIsoDateTimeExtractor.WEEK_OF_YEAR, UTC); assertEquals(2, proc.process(dateTime(568372930000L))); //1988-01-05T09:22:10Z[UTC] @@ -84,14 +79,14 @@ public void testNonISODayOfWeekInUTC() { public void testNonISODayOfWeekInNonUTCTimeZone() { NonIsoDateTimeProcessor proc = new NonIsoDateTimeProcessor(NonIsoDateTimeExtractor.DAY_OF_WEEK, ZoneId.of("GMT-10:00")); - assertEquals(2, proc.process(dateTime(568372930000L))); - assertEquals(7, proc.process(dateTime(981278530000L))); - assertEquals(2, proc.process(dateTime(224241730000L))); - - assertEquals(7, proc.process(dateTime(132744130000L))); - assertEquals(3, proc.process(dateTime(230376130000L))); - assertEquals(3, proc.process(dateTime(766833730000L))); - assertEquals(6, proc.process(dateTime(333451330000L))); - assertEquals(5, proc.process(dateTime(874660930000L))); + assertEquals(2, proc.process(dateTime(568372930000L)));//1988-01-05T09:22:10Z[UTC] Tuesday + assertEquals(7, proc.process(dateTime(981278530000L)));//2001-02-04T09:22:10Z[UTC] Sunday + assertEquals(2, proc.process(dateTime(224241730000L)));//1977-02-08T09:22:10Z[UTC] + //568372930000L + assertEquals(7, proc.process(dateTime(132744130000L)));//1974-03-17T09:22:10Z[UTC] + assertEquals(3, proc.process(dateTime(230376130000L)));//1977-04-20T09:22:10Z[UTC] + assertEquals(3, proc.process(dateTime(766833730000L)));//1994-04-20T09:22:10Z[UTC] + assertEquals(6, proc.process(dateTime(333451330000L)));//1980-07-26T09:22:10Z[UTC] + assertEquals(5, proc.process(dateTime(874660930000L)));//1997-09-19T09:22:10Z[UTC] } -} \ No newline at end of file +} diff --git 
a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java index 9b78c4791095e..25a90509d73da 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java @@ -239,22 +239,37 @@ public void testDateRangeCast() { public void testDateRangeWithCurrentTimestamp() { testDateRangeWithCurrentFunctions("CURRENT_TIMESTAMP()", DATE_FORMAT, TestUtils.TEST_CFG.now()); + testDateRangeWithCurrentFunctions_AndRangeOptimization("CURRENT_TIMESTAMP()", DATE_FORMAT, + TestUtils.TEST_CFG.now().minusDays(1L).minusSeconds(1L), + TestUtils.TEST_CFG.now().plusDays(1L).plusSeconds(1L)); } public void testDateRangeWithCurrentDate() { testDateRangeWithCurrentFunctions("CURRENT_DATE()", DATE_FORMAT, DateUtils.asDateOnly(TestUtils.TEST_CFG.now())); + testDateRangeWithCurrentFunctions_AndRangeOptimization("CURRENT_DATE()", DATE_FORMAT, + DateUtils.asDateOnly(TestUtils.TEST_CFG.now().minusDays(2L)), + DateUtils.asDateOnly(TestUtils.TEST_CFG.now().plusDays(1L))); } public void testDateRangeWithToday() { testDateRangeWithCurrentFunctions("TODAY()", DATE_FORMAT, DateUtils.asDateOnly(TestUtils.TEST_CFG.now())); + testDateRangeWithCurrentFunctions_AndRangeOptimization("TODAY()", DATE_FORMAT, + DateUtils.asDateOnly(TestUtils.TEST_CFG.now().minusDays(2L)), + DateUtils.asDateOnly(TestUtils.TEST_CFG.now().plusDays(1L))); } public void testDateRangeWithNow() { testDateRangeWithCurrentFunctions("NOW()", DATE_FORMAT, TestUtils.TEST_CFG.now()); + testDateRangeWithCurrentFunctions_AndRangeOptimization("NOW()", DATE_FORMAT, + TestUtils.TEST_CFG.now().minusDays(1L).minusSeconds(1L), + TestUtils.TEST_CFG.now().plusDays(1L).plusSeconds(1L)); } public void testDateRangeWithCurrentTime() { testDateRangeWithCurrentFunctions("CURRENT_TIME()", TIME_FORMAT, TestUtils.TEST_CFG.now()); + testDateRangeWithCurrentFunctions_AndRangeOptimization("CURRENT_TIME()", TIME_FORMAT, + TestUtils.TEST_CFG.now().minusDays(1L).minusSeconds(1L), + TestUtils.TEST_CFG.now().plusDays(1L).plusSeconds(1L)); } private void testDateRangeWithCurrentFunctions(String function, String pattern, ZonedDateTime now) { @@ -292,6 +307,38 @@ private void testDateRangeWithCurrentFunctions(String function, String pattern, assertEquals(operator.equals("=") || operator.equals("!=") || operator.equals(">="), rq.includeLower()); assertEquals(pattern, rq.format()); } + + private void testDateRangeWithCurrentFunctions_AndRangeOptimization(String function, String pattern, ZonedDateTime lowerValue, + ZonedDateTime upperValue) { + String lowerOperator = randomFrom(new String[] {"<", "<="}); + String upperOperator = randomFrom(new String[] {">", ">="}); + // use both date-only interval (1 DAY) and time-only interval (1 second) to cover CURRENT_TIMESTAMP and TODAY scenarios + String interval = "(INTERVAL 1 DAY + INTERVAL 1 SECOND)"; + + PhysicalPlan p = optimizeAndPlan("SELECT some.string FROM test WHERE date" + lowerOperator + function + " + " + interval + + " AND date " + upperOperator + function + " - " + interval); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec eqe = (EsQueryExec) p; + assertEquals(1, eqe.output().size()); + assertEquals("test.some.string", eqe.output().get(0).qualifiedName()); + assertEquals(DataType.TEXT, eqe.output().get(0).dataType()); + + Query query = 
eqe.queryContainer().query(); + // the range queries optimization should create a single "range" query with "from" and "to" populated with the values + // in the two branches of the AND condition + assertTrue(query instanceof RangeQuery); + RangeQuery rq = (RangeQuery) query; + assertEquals("date", rq.field()); + + assertEquals(DateFormatter.forPattern(pattern) + .format(upperValue.withNano(DateUtils.getNanoPrecision(null, upperValue.getNano()))), rq.upper()); + assertEquals(DateFormatter.forPattern(pattern) + .format(lowerValue.withNano(DateUtils.getNanoPrecision(null, lowerValue.getNano()))), rq.lower()); + + assertEquals(lowerOperator.equals("<="), rq.includeUpper()); + assertEquals(upperOperator.equals(">="), rq.includeLower()); + assertEquals(pattern, rq.format()); + } public void testTranslateDateAdd_WhereClause_Painless() { LogicalPlan p = plan("SELECT int FROM test WHERE DATE_ADD('quarter',int, date) > '2018-09-04'::date"); @@ -1241,7 +1288,7 @@ public void testChronoFieldBasedDateTimeFunctionsWithMathIntervalAndGroupBy() { + "InternalSqlScriptUtils.add(InternalSqlScriptUtils.docValue(doc,params.v0)," + "InternalSqlScriptUtils.intervalYearMonth(params.v1,params.v2)),params.v3,params.v4)\"," + "\"lang\":\"painless\",\"params\":{\"v0\":\"date\",\"v1\":\"P1Y\",\"v2\":\"INTERVAL_YEAR\"," - + "\"v3\":\"Z\",\"v4\":\"" + randomFunction.chronoField().name() + "\"}},\"missing_bucket\":true," + + "\"v3\":\"Z\",\"v4\":\"" + randomFunction.name() + "\"}},\"missing_bucket\":true," + "\"value_type\":\"long\",\"order\":\"asc\"}}}]}}}}")); } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/evaluate_data_frame.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/evaluate_data_frame.yml index 1bcde11f2fb74..f35346fc78582 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/evaluate_data_frame.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/evaluate_data_frame.yml @@ -618,8 +618,40 @@ setup: } } - - match: { classification.multiclass_confusion_matrix.confusion_matrix: {cat: {cat: 2, dog: 1, mouse: 0}, dog: {cat: 1, dog: 2, mouse: 0}, mouse: {cat: 1, dog: 0, mouse: 1} } } - - match: { classification.multiclass_confusion_matrix._other_: 0 } + - match: + classification.multiclass_confusion_matrix: + confusion_matrix: + - actual_class: "cat" + actual_class_doc_count: 3 + predicted_classes: + - predicted_class: "cat" + count: 2 + - predicted_class: "dog" + count: 1 + - predicted_class: "mouse" + count: 0 + other_predicted_class_doc_count: 0 + - actual_class: "dog" + actual_class_doc_count: 3 + predicted_classes: + - predicted_class: "cat" + count: 1 + - predicted_class: "dog" + count: 2 + - predicted_class: "mouse" + count: 0 + other_predicted_class_doc_count: 0 + - actual_class: "mouse" + actual_class_doc_count: 2 + predicted_classes: + - predicted_class: "cat" + count: 1 + - predicted_class: "dog" + count: 0 + - predicted_class: "mouse" + count: 1 + other_predicted_class_doc_count: 0 + other_actual_class_count: 0 --- "Test classification multiclass_confusion_matrix with explicit size": - do: @@ -636,8 +668,26 @@ setup: } } - - match: { classification.multiclass_confusion_matrix.confusion_matrix: {cat: {cat: 2, dog: 1}, dog: {cat: 1, dog: 2} } } - - match: { classification.multiclass_confusion_matrix._other_: 1 } + - match: + classification.multiclass_confusion_matrix: + confusion_matrix: + - actual_class: "cat" + actual_class_doc_count: 3 + predicted_classes: + - predicted_class: "cat" + count: 2 + - predicted_class: "dog" + count: 1 + 
other_predicted_class_doc_count: 0 + - actual_class: "dog" + actual_class_doc_count: 3 + predicted_classes: + - predicted_class: "cat" + count: 1 + - predicted_class: "dog" + count: 2 + other_predicted_class_doc_count: 0 + other_actual_class_count: 1 --- "Test classification with null metrics": - do: @@ -653,8 +703,7 @@ setup: } } - - match: { classification.multiclass_confusion_matrix.confusion_matrix: {cat: {cat: 2, dog: 1, mouse: 0}, dog: {cat: 1, dog: 2, mouse: 0}, mouse: {cat: 1, dog: 0, mouse: 1} } } - - match: { classification.multiclass_confusion_matrix._other_: 0 } + - is_true: classification.multiclass_confusion_matrix --- "Test classification given missing actual_field": - do: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml index 6e0b3eb69a5b5..8e3b7f0ddee52 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml @@ -6,7 +6,7 @@ setup: # wait for long enough that we give delayed unassigned shards to stop being delayed timeout: 70s --- -"Get start, stop, and delete old and mixed cluster batch data frame transforms": +"Get start, stop, and delete old cluster batch data frame transforms": # Simple and complex OLD transforms - do: transform.get_transform: @@ -75,7 +75,17 @@ setup: - match: { count: 1 } - match: { transforms.0.id: "old-complex-transform" } - match: { transforms.0.state: "stopped" } + # Delete old transform + - do: + transform.delete_transform: + transform_id: "old-simple-transform" + - do: + transform.get_transform_stats: + transform_id: "old-simple-transform" + - match: { count: 0 } +--- +"Get start, stop mixed cluster batch data frame transforms": # Simple and complex Mixed cluster transforms - do: transform.get_transform: @@ -145,23 +155,20 @@ setup: - match: { count: 1 } - match: { transforms.0.id: "mixed-complex-transform" } - match: { transforms.0.state: "stopped" } - -# Delete all old and mixed transforms + # Delete mixed transform - do: transform.delete_transform: - transform_id: "old-simple-transform" - + transform_id: "mixed-simple-transform" - do: transform.delete_transform: - transform_id: "mixed-simple-transform" - + transform_id: "mixed-complex-transform" - do: transform.get_transform_stats: - transform_id: "old-simple-transform,mixed-simple-transform" + transform_id: "mixed-simple-transform,mixed-complex-transform" - match: { count: 0 } --- -"Test GET, stop, delete, old and mixed continuous transforms": +"Test GET, stop, delete, old continuous transforms": - do: transform.get_transform: transform_id: "old-simple-continuous-transform" @@ -206,7 +213,8 @@ setup: - do: transform.delete_transform: transform_id: "old-simple-continuous-transform" - +--- +"Test GET, mixed continuous transforms": - do: transform.get_transform: transform_id: "mixed-simple-continuous-transform"
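For reference, the non-ISO week semantics that the DateTimeProcessor and NonIsoDateTimeProcessor changes above rely on can be sanity-checked with plain java.time. This is an illustrative sketch only; the class name is arbitrary and the sample instant is the 1988-01-05T09:22:10Z timestamp already used in NonIsoDateTimeProcessorTests.

import java.time.DayOfWeek;
import java.time.ZonedDateTime;
import java.time.temporal.WeekFields;

public class WeekFieldsCheck {
    public static void main(String[] args) {
        // 1988-01-05T09:22:10Z is a Tuesday (epoch millis 568372930000, as in the tests)
        ZonedDateTime zdt = ZonedDateTime.parse("1988-01-05T09:22:10Z");

        // ISO weeks start on Monday, so Tuesday maps to 2
        int isoDayOfWeek = zdt.get(WeekFields.ISO.dayOfWeek());
        // Sunday-based weeks map Sunday to 1 and Saturday to 7, so Tuesday maps to 3
        int nonIsoDayOfWeek = zdt.get(WeekFields.of(DayOfWeek.SUNDAY, 1).dayOfWeek());

        // ISO week 1 must contain at least 4 January days, so 1988-01-05 falls in ISO week 1;
        // with a Sunday start and a 1-day minimum, the same instant falls in week 2
        int isoWeekOfYear = zdt.get(WeekFields.ISO.weekOfWeekBasedYear());
        int nonIsoWeekOfYear = zdt.get(WeekFields.of(DayOfWeek.SUNDAY, 1).weekOfWeekBasedYear());

        // prints: ISO dow=2, non-ISO dow=3, ISO week=1, non-ISO week=2
        System.out.printf("ISO dow=%d, non-ISO dow=%d, ISO week=%d, non-ISO week=%d%n",
                isoDayOfWeek, nonIsoDayOfWeek, isoWeekOfYear, nonIsoWeekOfYear);
    }
}

WeekFields.of(DayOfWeek.SUNDAY, 1) is the same factory the new DAY_OF_WEEK and WEEK_OF_YEAR extractors use, so these values line up with the expectations in NonIsoDateTimeProcessorTests (for example, week 2 for 1988-01-05 in testNonISOWeekOfYearInUTC).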