diff --git a/.ci/packer_cache.sh b/.ci/packer_cache.sh index ce78ebcc1cae7..04511f81281b9 100755 --- a/.ci/packer_cache.sh +++ b/.ci/packer_cache.sh @@ -21,4 +21,4 @@ export JAVA_HOME="${HOME}"/.java/${ES_BUILD_JAVA} export JAVA8_HOME="${HOME}"/.java/java8 export JAVA11_HOME="${HOME}"/.java/java11 export JAVA12_HOME="${HOME}"/.java/java12 -./gradlew --parallel clean pullFixture --scan -Porg.elasticsearch.acceptScanTOS=true -s resolveAllDependencies +./gradlew --parallel clean --scan -Porg.elasticsearch.acceptScanTOS=true -s resolveAllDependencies diff --git a/build.gradle b/build.gradle index e1ced30f85b85..cc841f7e8af3b 100644 --- a/build.gradle +++ b/build.gradle @@ -616,9 +616,10 @@ if (System.properties.get("build.compare") != null) { allprojects { task resolveAllDependencies { - doLast { - configurations.findAll { it.isCanBeResolved() }.each { it.resolve() } - } + dependsOn tasks.matching { it.name == "pullFixture"} + doLast { + configurations.findAll { it.isCanBeResolved() }.each { it.resolve() } + } } } diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingPlugin.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingPlugin.groovy index 3b0348b48990d..7d554386c3920 100644 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingPlugin.groovy +++ b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingPlugin.groovy @@ -3,15 +3,14 @@ package com.carrotsearch.gradle.junit4 import com.carrotsearch.ant.tasks.junit4.JUnit4 import org.gradle.api.Plugin import org.gradle.api.Project -import org.gradle.api.Task import org.gradle.api.tasks.TaskContainer class RandomizedTestingPlugin implements Plugin { void apply(Project project) { - setupSeed(project) + String seed = setupSeed(project) createUnitTestTask(project.tasks) - configureAnt(project.ant) + configureAnt(project.ant, seed) } /** @@ -21,12 +20,12 @@ class RandomizedTestingPlugin implements Plugin { * outcome of subsequent runs. Pinning the seed up front like this makes * the reproduction line from one run be useful on another run. */ - static void setupSeed(Project project) { + static String setupSeed(Project project) { if (project.rootProject.ext.has('testSeed')) { /* Skip this if we've already pinned the testSeed. It is important * that this checks the rootProject so that we know we've only ever * initialized one time. 
*/ - return + return project.rootProject.ext.testSeed } String testSeed = System.getProperty('tests.seed') if (testSeed == null) { @@ -39,6 +38,8 @@ class RandomizedTestingPlugin implements Plugin { project.rootProject.subprojects { project.ext.testSeed = testSeed } + + return testSeed } static void createUnitTestTask(TaskContainer tasks) { @@ -52,7 +53,8 @@ class RandomizedTestingPlugin implements Plugin { } } - static void configureAnt(AntBuilder ant) { + static void configureAnt(AntBuilder ant, String seed) { ant.project.addTaskDefinition('junit4:junit4', JUnit4.class) + ant.properties.put('tests.seed', seed) } } diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy index 005e43b9db434..05248fc581e96 100644 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy +++ b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy @@ -32,6 +32,7 @@ import com.carrotsearch.ant.tasks.junit4.events.aggregated.HeartBeatEvent import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener import org.gradle.internal.logging.progress.ProgressLogger import org.gradle.internal.logging.progress.ProgressLoggerFactory +import org.junit.runner.Description import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDurationInSeconds import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.ERROR @@ -113,7 +114,7 @@ class TestProgressLogger implements AggregatedEventListener { @Subscribe void onSuiteStart(AggregatedSuiteStartedEvent e) throws IOException { - String suiteName = simpleName(e.suiteStartedEvent.description.className) + String suiteName = simpleName(e.suiteStartedEvent.description) slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${suiteName} - initializing") } @@ -146,31 +147,45 @@ class TestProgressLogger implements AggregatedEventListener { throw new IllegalArgumentException("Unknown test status: [${e.status}]") } testLogger.progress("Tests: completed: ${testsCompleted}, failed: ${testsFailed}, ignored: ${testsIgnored}") - String testName = simpleName(e.description.className) + '.' + e.description.methodName + String testName = testName(e.description) slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${testName} ${statusMessage}") } @Subscribe void onTestStarted(TestStartedEvent e) throws IOException { - String testName = simpleName(e.description.className) + '.' + e.description.methodName + String testName = testName(e.description) slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${testName} ...") } @Subscribe void onHeartbeat(HeartBeatEvent e) throws IOException { - String testName = simpleName(e.description.className) + '.' + e.description.methodName + String testName = testName(e.description) String time = formatDurationInSeconds(e.getNoEventDuration()) slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${testName} stalled for ${time}") } + /** + * Build the test name in the format of . + */ + private static String testName(Description description) { + String className = simpleName(description) + if (description == null) { + return className + "." + "" + } + return className + "." + description.methodName + } + /** * Extract a Class#getSimpleName style name from Class#getName style * string. We can't just use Class#getSimpleName because junit descriptions * don't always set the class field but they always set the className * field. 
*/ - private static String simpleName(String className) { - return className.substring(className.lastIndexOf('.') + 1) + private static String simpleName(Description description) { + if (description == null) { + return "" + } + return description.className.substring(description.className.lastIndexOf('.') + 1) } @Override diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index 0a53787c10597..e64f30d48bd3f 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -118,11 +118,16 @@ class ClusterConfiguration { if (seedNode == node) { return null } - ant.waitfor(maxwait: '40', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond') { + ant.waitfor(maxwait: '40', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', + timeoutproperty: "failed.${seedNode.transportPortsFile.path}") { resourceexists { file(file: seedNode.transportPortsFile.toString()) } } + if (ant.properties.containsKey("failed.${seedNode.transportPortsFile.path}".toString())) { + throw new GradleException("Failed to locate seed node transport file [${seedNode.transportPortsFile}]: " + + "timed out waiting for it to be created after ${waitSeconds} seconds") + } return seedNode.transportUri() } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index 603e217ecda86..210fb939c7113 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -547,7 +547,11 @@ class VagrantTestPlugin implements Plugin { project.gradle.removeListener(batsPackagingReproListener) } if (project.extensions.esvagrant.boxes.contains(box)) { - packagingTest.dependsOn(batsPackagingTest) + // these tests are temporarily disabled for suse boxes while we debug an issue + // https://github.com/elastic/elasticsearch/issues/30295 + if (box.equals("opensuse-42") == false && box.equals("sles-12") == false) { + packagingTest.dependsOn(batsPackagingTest) + } } } @@ -586,7 +590,11 @@ class VagrantTestPlugin implements Plugin { project.gradle.removeListener(javaPackagingReproListener) } if (project.extensions.esvagrant.boxes.contains(box)) { - packagingTest.dependsOn(javaPackagingTest) + // these tests are temporarily disabled for suse boxes while we debug an issue + // https://github.com/elastic/elasticsearch/issues/30295 + if (box.equals("opensuse-42") == false && box.equals("sles-12") == false) { + packagingTest.dependsOn(javaPackagingTest) + } } /* diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index cb7986b9a3051..4deb47f15ae4e 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -243,7 +243,8 @@ synchronized void stop(boolean tailLogs) { } logger.info("Stopping `{}`, tailLogs: {}", this, tailLogs); requireNonNull(esProcess, "Can't stop `" + this + "` as it was not started or already stopped."); - stopHandle(esProcess.toHandle()); + // Test clusters are 
not reused, don't spend time on a graceful shutdown + stopHandle(esProcess.toHandle(), true); if (tailLogs) { logFileContents("Standard output of node", esStdoutFile); logFileContents("Standard error of node", esStderrFile); @@ -251,27 +252,37 @@ synchronized void stop(boolean tailLogs) { esProcess = null; } - private void stopHandle(ProcessHandle processHandle) { + private void stopHandle(ProcessHandle processHandle, boolean forcibly) { // Stop all children first, ES could actually be a child when there's some wrapper process like on Windows. - if (processHandle.isAlive()) { - processHandle.children().forEach(this::stopHandle); - } - logProcessInfo("Terminating elasticsearch process:", processHandle.info()); - if (processHandle.isAlive()) { - processHandle.destroy(); - } else { + if (processHandle.isAlive() == false) { logger.info("Process was not running when we tried to terminate it."); + return; } - waitForProcessToExit(processHandle); - if (processHandle.isAlive()) { + + // Stop all children first, ES could actually be a child when there's some wrapper process like on Windows. + processHandle.children().forEach(each -> stopHandle(each, forcibly)); + + logProcessInfo( + "Terminating elasticsearch process" + (forcibly ? " forcibly " : "gracefully") + ":", + processHandle.info() + ); + + if (forcibly) { + processHandle.destroyForcibly(); + } else { + processHandle.destroy(); + waitForProcessToExit(processHandle); + if (processHandle.isAlive() == false) { + return; + } logger.info("process did not terminate after {} {}, stopping it forcefully", - ES_DESTROY_TIMEOUT, ES_DESTROY_TIMEOUT_UNIT - ); + ES_DESTROY_TIMEOUT, ES_DESTROY_TIMEOUT_UNIT); processHandle.destroyForcibly(); } + waitForProcessToExit(processHandle); if (processHandle.isAlive()) { - throw new TestClustersException("Was not able to terminate es process"); + throw new TestClustersException("Was not able to terminate elasticsearch process"); } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/SyncTestClustersConfiguration.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/SyncTestClustersConfiguration.java deleted file mode 100644 index d1a86a38c66ff..0000000000000 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/SyncTestClustersConfiguration.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.gradle.testclusters; - -import org.gradle.api.DefaultTask; -import org.gradle.api.Project; -import org.gradle.api.file.FileCollection; -import org.gradle.api.tasks.InputFiles; -import org.gradle.api.tasks.OutputDirectory; -import org.gradle.api.tasks.TaskAction; - -import java.io.File; -import java.util.Set; -import java.util.stream.Collectors; - -public class SyncTestClustersConfiguration extends DefaultTask { - - @InputFiles - public FileCollection getDependencies() { - Set nonZip = getProject().getConfigurations() - .getByName(TestClustersPlugin.HELPER_CONFIGURATION_NAME) - .getFiles() - .stream() - .filter(file -> file.getName().endsWith(".zip") == false) - .collect(Collectors.toSet()); - if(nonZip.isEmpty() == false) { - throw new IllegalStateException("Expected only zip files in configuration : " + - TestClustersPlugin.HELPER_CONFIGURATION_NAME + " but found " + - nonZip - ); - } - return getProject().files( - getProject().getConfigurations() - .getByName(TestClustersPlugin.HELPER_CONFIGURATION_NAME) - .getFiles() - ); - } - - @OutputDirectory - public File getOutputDir() { - return getTestClustersConfigurationExtractDir(getProject()); - } - - @TaskAction - public void doExtract() { - File outputDir = getOutputDir(); - getProject().delete(outputDir); - outputDir.mkdirs(); - getDependencies().forEach(dep -> - getProject().copy(spec -> { - spec.from(getProject().zipTree(dep)); - spec.into(new File(outputDir, "zip")); - }) - ); - } - - static File getTestClustersConfigurationExtractDir(Project project) { - return new File(TestClustersPlugin.getTestClustersBuildDir(project), "extract"); - } - -} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index 3abc9a6a6177e..56ff501a3885c 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -25,11 +25,14 @@ import org.gradle.api.Project; import org.gradle.api.Task; import org.gradle.api.artifacts.Configuration; +import org.gradle.api.artifacts.component.ComponentArtifactIdentifier; import org.gradle.api.execution.TaskActionListener; import org.gradle.api.execution.TaskExecutionListener; +import org.gradle.api.file.FileTree; import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; import org.gradle.api.plugins.ExtraPropertiesExtension; +import org.gradle.api.tasks.Sync; import org.gradle.api.tasks.TaskState; import java.io.File; @@ -39,6 +42,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -87,6 +91,20 @@ public void apply(Project project) { "Internal helper configuration used by cluster configuration to download " + "ES distributions and plugins." 
); + helperConfiguration.getIncoming().afterResolve(resolvableDependencies -> { + Set nonZipComponents = resolvableDependencies.getArtifacts() + .getArtifacts() + .stream() + .filter(artifact -> artifact.getFile().getName().endsWith(".zip") == false) + .map(artifact -> artifact.getId()) + .collect(Collectors.toSet()); + + if(nonZipComponents.isEmpty() == false) { + throw new IllegalStateException("Dependencies with non-zip artifacts found in configuration '" + + TestClustersPlugin.HELPER_CONFIGURATION_NAME + "': " + nonZipComponents + ); + } + }); // When running in the Daemon it's possible for this to hold references to past usedClusters.clear(); @@ -98,7 +116,15 @@ public void apply(Project project) { // the clusters will look for artifacts there based on the naming conventions. // Tasks that use a cluster will add this as a dependency automatically so it's guaranteed to run early in // the build. - rootProject.getTasks().create(SYNC_ARTIFACTS_TASK_NAME, SyncTestClustersConfiguration.class); + rootProject.getTasks().create(SYNC_ARTIFACTS_TASK_NAME, Sync.class, sync -> { + sync.from((Callable>) () -> + helperConfiguration.getFiles() + .stream() + .map(project::zipTree) + .collect(Collectors.toList()) + ); + sync.into(new File(getTestClustersConfigurationExtractDir(project), "zip")); + }); // When we know what tasks will run, we claim the clusters of those task to differentiate between clusters // that are defined in the build script and the ones that will actually be used in this invocation of gradle @@ -129,7 +155,7 @@ private NamedDomainObjectContainer createTestClustersContaine project.getPath(), name, GradleServicesAdapter.getInstance(project), - SyncTestClustersConfiguration.getTestClustersConfigurationExtractDir(project), + getTestClustersConfigurationExtractDir(project), new File(project.getBuildDir(), "testclusters") ) ); @@ -249,8 +275,8 @@ public void beforeExecute(Task task) {} ); } - static File getTestClustersBuildDir(Project project) { - return new File(project.getRootProject().getBuildDir(), "testclusters"); + static File getTestClustersConfigurationExtractDir(Project project) { + return new File(project.getRootProject().getBuildDir(), "testclusters/extract"); } /** diff --git a/buildSrc/src/main/resources/minimumGradleVersion b/buildSrc/src/main/resources/minimumGradleVersion index 3bff059174b83..804440660c71c 100644 --- a/buildSrc/src/main/resources/minimumGradleVersion +++ b/buildSrc/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -5.1.1 \ No newline at end of file +5.2.1 \ No newline at end of file diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java index 0e1834cd3ac3b..e78e7ec7ca6d3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java @@ -165,10 +165,8 @@ public void testClusterHealthGreen() throws IOException { assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.status(), equalTo(RestStatus.OK)); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); - assertNoIndices(response); } - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35450") public void testClusterHealthYellowClusterLevel() throws IOException { createIndex("index", Settings.EMPTY); createIndex("index2", Settings.EMPTY); @@ -178,15 +176,21 @@ public void testClusterHealthYellowClusterLevel() 
throws IOException { logger.info("Shard stats\n{}", EntityUtils.toString( client().performRequest(new Request("GET", "/_cat/shards")).getEntity())); - assertYellowShards(response); assertThat(response.getIndices().size(), equalTo(0)); } - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35450") public void testClusterHealthYellowIndicesLevel() throws IOException { - createIndex("index", Settings.EMPTY); - createIndex("index2", Settings.EMPTY); - ClusterHealthRequest request = new ClusterHealthRequest(); + String firstIndex = "index"; + String secondIndex = "index2"; + // including another index that we do not assert on, to ensure that we are not + // accidentally asserting on entire cluster state + String ignoredIndex = "tasks"; + createIndex(firstIndex, Settings.EMPTY); + createIndex(secondIndex, Settings.EMPTY); + if (randomBoolean()) { + createIndex(ignoredIndex, Settings.EMPTY); + } + ClusterHealthRequest request = new ClusterHealthRequest(firstIndex, secondIndex); request.timeout("5s"); request.level(ClusterHealthRequest.Level.INDICES); ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync); @@ -212,11 +216,9 @@ private static void assertYellowShards(ClusterHealthResponse response) { assertThat(response.getDelayedUnassignedShards(), equalTo(0)); assertThat(response.getInitializingShards(), equalTo(0)); assertThat(response.getUnassignedShards(), equalTo(2)); - assertThat(response.getActiveShardsPercent(), equalTo(50d)); } - - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35450") + public void testClusterHealthYellowSpecificIndex() throws IOException { createIndex("index", Settings.EMPTY); createIndex("index2", Settings.EMPTY); @@ -236,7 +238,6 @@ public void testClusterHealthYellowSpecificIndex() throws IOException { assertThat(response.getDelayedUnassignedShards(), equalTo(0)); assertThat(response.getInitializingShards(), equalTo(0)); assertThat(response.getUnassignedShards(), equalTo(1)); - assertThat(response.getActiveShardsPercent(), equalTo(50d)); assertThat(response.getIndices().size(), equalTo(1)); Map.Entry index = response.getIndices().entrySet().iterator().next(); assertYellowIndex(index.getKey(), index.getValue(), false); @@ -272,7 +273,19 @@ private static void assertYellowShard(int shardId, ClusterShardHealth shardHealt assertThat(shardHealth.getRelocatingShards(), equalTo(0)); } + private static void assertNoIndices(ClusterHealthResponse response) { + assertThat(response.getIndices(), equalTo(emptyMap())); + assertThat(response.getActivePrimaryShards(), equalTo(0)); + assertThat(response.getNumberOfDataNodes(), equalTo(1)); + assertThat(response.getNumberOfNodes(), equalTo(1)); + assertThat(response.getActiveShards(), equalTo(0)); + assertThat(response.getDelayedUnassignedShards(), equalTo(0)); + assertThat(response.getInitializingShards(), equalTo(0)); + assertThat(response.getUnassignedShards(), equalTo(0)); + } + public void testClusterHealthNotFoundIndex() throws IOException { + createIndex("index", Settings.EMPTY); ClusterHealthRequest request = new ClusterHealthRequest("notexisted-index"); request.timeout("5s"); ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync); @@ -284,15 +297,4 @@ public void testClusterHealthNotFoundIndex() throws IOException { assertNoIndices(response); } - private static void assertNoIndices(ClusterHealthResponse response) { - 
assertThat(response.getIndices(), equalTo(emptyMap())); - assertThat(response.getActivePrimaryShards(), equalTo(0)); - assertThat(response.getNumberOfDataNodes(), equalTo(1)); - assertThat(response.getNumberOfNodes(), equalTo(1)); - assertThat(response.getActiveShards(), equalTo(0)); - assertThat(response.getDelayedUnassignedShards(), equalTo(0)); - assertThat(response.getInitializingShards(), equalTo(0)); - assertThat(response.getUnassignedShards(), equalTo(0)); - assertThat(response.getActiveShardsPercent(), equalTo(100d)); - } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index a7aa517709391..0f152551ddc3e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -1697,7 +1697,7 @@ public void testCRUDIndexTemplateWithTypes() throws Exception { assertTrue(template2.mappings().containsKey("custom_doc_type")); List names = randomBoolean() - ? Arrays.asList("*-1", "template-2") + ? Arrays.asList("*plate-1", "template-2") : Arrays.asList("template-*"); GetIndexTemplatesRequest getBothRequest = new GetIndexTemplatesRequest(names); org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse getBoth = execute( @@ -1780,7 +1780,7 @@ public void testCRUDIndexTemplate() throws Exception { List names = randomBoolean() - ? Arrays.asList("*-1", "template-2") + ? Arrays.asList("*plate-1", "template-2") : Arrays.asList("template-*"); GetIndexTemplatesRequest getBothRequest = new GetIndexTemplatesRequest(names); GetIndexTemplatesResponse getBoth = execute( @@ -1834,7 +1834,7 @@ public void testIndexTemplatesExist() throws Exception { { final List templateNames = randomBoolean() - ? Arrays.asList("*-1", "template-2") + ? 
Arrays.asList("*plate-1", "template-2") : Arrays.asList("template-*"); final IndexTemplatesExistRequest bothRequest = new IndexTemplatesExistRequest(templateNames); diff --git a/docs/build.gradle b/docs/build.gradle index 1083d07b94f46..5b98a62d99640 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -840,7 +840,7 @@ buildRestTests.setups['sensor_prefab_data'] = ''' ''' buildRestTests.setups['sample_job'] = ''' - do: - xpack.ml.put_job: + ml.put_job: job_id: "sample_job" body: > { @@ -894,7 +894,7 @@ buildRestTests.setups['farequote_data'] = buildRestTests.setups['farequote_index ''' buildRestTests.setups['farequote_job'] = buildRestTests.setups['farequote_data'] + ''' - do: - xpack.ml.put_job: + ml.put_job: job_id: "farequote" body: > { @@ -914,7 +914,7 @@ buildRestTests.setups['farequote_job'] = buildRestTests.setups['farequote_data'] ''' buildRestTests.setups['farequote_datafeed'] = buildRestTests.setups['farequote_job'] + ''' - do: - xpack.ml.put_datafeed: + ml.put_datafeed: datafeed_id: "datafeed-farequote" body: > { @@ -978,7 +978,7 @@ buildRestTests.setups['server_metrics_data'] = buildRestTests.setups['server_met ''' buildRestTests.setups['server_metrics_job'] = buildRestTests.setups['server_metrics_data'] + ''' - do: - xpack.ml.put_job: + ml.put_job: job_id: "total-requests" body: > { @@ -1000,7 +1000,7 @@ buildRestTests.setups['server_metrics_job'] = buildRestTests.setups['server_metr ''' buildRestTests.setups['server_metrics_datafeed'] = buildRestTests.setups['server_metrics_job'] + ''' - do: - xpack.ml.put_datafeed: + ml.put_datafeed: datafeed_id: "datafeed-total-requests" body: > { @@ -1010,22 +1010,22 @@ buildRestTests.setups['server_metrics_datafeed'] = buildRestTests.setups['server ''' buildRestTests.setups['server_metrics_openjob'] = buildRestTests.setups['server_metrics_datafeed'] + ''' - do: - xpack.ml.open_job: + ml.open_job: job_id: "total-requests" ''' buildRestTests.setups['server_metrics_startdf'] = buildRestTests.setups['server_metrics_openjob'] + ''' - do: - xpack.ml.start_datafeed: + ml.start_datafeed: datafeed_id: "datafeed-total-requests" ''' buildRestTests.setups['calendar_outages'] = ''' - do: - xpack.ml.put_calendar: + ml.put_calendar: calendar_id: "planned-outages" ''' buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['calendar_outages'] + ''' - do: - xpack.ml.post_calendar_events: + ml.post_calendar_events: calendar_id: "planned-outages" body: > { "description": "event 1", "start_time": "2017-12-01T00:00:00Z", "end_time": "2017-12-02T00:00:00Z", "calendar_id": "planned-outages" } @@ -1034,12 +1034,12 @@ buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['cale ''' buildRestTests.setups['calendar_outages_openjob'] = buildRestTests.setups['server_metrics_openjob'] + ''' - do: - xpack.ml.put_calendar: + ml.put_calendar: calendar_id: "planned-outages" ''' buildRestTests.setups['calendar_outages_addjob'] = buildRestTests.setups['server_metrics_openjob'] + ''' - do: - xpack.ml.put_calendar: + ml.put_calendar: calendar_id: "planned-outages" body: > { @@ -1048,7 +1048,7 @@ buildRestTests.setups['calendar_outages_addjob'] = buildRestTests.setups['server ''' buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['calendar_outages_addjob'] + ''' - do: - xpack.ml.post_calendar_events: + ml.post_calendar_events: calendar_id: "planned-outages" body: > { "events" : [ diff --git a/docs/java-rest/high-level/getting-started.asciidoc b/docs/java-rest/high-level/getting-started.asciidoc index 
685122771392c..de337f7084057 100644 --- a/docs/java-rest/high-level/getting-started.asciidoc +++ b/docs/java-rest/high-level/getting-started.asciidoc @@ -83,7 +83,7 @@ dependencies { The very first releases of any major version (like a beta), might have been built on top of a Lucene Snapshot version. In such a case you will be unable to resolve the Lucene dependencies of the client. -For example, if you want to use the `7.0.0-alpha2` version which depends on Lucene `8.0.0-snapshot-774e9aefbc`, you must +For example, if you want to use the `7.0.0-beta1` version which depends on Lucene `8.0.0-snapshot-83f9835`, you must define the following repository. For Maven: @@ -93,7 +93,7 @@ For Maven: elastic-lucene-snapshots Elastic Lucene Snapshots - http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/774e9aefbc + http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/83f9835 true false @@ -104,7 +104,7 @@ For Gradle: ["source","groovy",subs="attributes"] -------------------------------------------------- maven { - url 'http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/774e9aefbc' + url 'http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/83f9835' } -------------------------------------------------- diff --git a/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc b/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc index 5578ecba88654..8936e5345c3eb 100644 --- a/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc @@ -20,7 +20,7 @@ POST /sales/_search?size=0 "range": { "date_range": { "field": "date", - "format": "MM-yyy", + "format": "MM-yyyy", "ranges": [ { "to": "now-10M/M" }, <1> { "from": "now-10M/M" } <2> diff --git a/docs/reference/analysis/tokenfilters/edgengram-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/edgengram-tokenfilter.asciidoc index be37d24f7dd7c..e460725523cf6 100644 --- a/docs/reference/analysis/tokenfilters/edgengram-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/edgengram-tokenfilter.asciidoc @@ -1,9 +1,9 @@ [[analysis-edgengram-tokenfilter]] === Edge NGram Token Filter -A token filter of type `edgeNGram`. +A token filter of type `edge_ngram`. -The following are settings that can be set for a `edgeNGram` token +The following are settings that can be set for a `edge_ngram` token filter type: [cols="<,<",options="header",] diff --git a/docs/reference/analysis/tokenfilters/ngram-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/ngram-tokenfilter.asciidoc index acc178a2741fa..53bda23d12bf9 100644 --- a/docs/reference/analysis/tokenfilters/ngram-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/ngram-tokenfilter.asciidoc @@ -1,9 +1,9 @@ [[analysis-ngram-tokenfilter]] === NGram Token Filter -A token filter of type `nGram`. +A token filter of type `ngram`. 
-The following are settings that can be set for a `nGram` token filter +The following are settings that can be set for a `ngram` token filter type: [cols="<,<",options="header",] diff --git a/docs/reference/docs/data-replication.asciidoc b/docs/reference/docs/data-replication.asciidoc index 47af258204fdb..84375467726fa 100644 --- a/docs/reference/docs/data-replication.asciidoc +++ b/docs/reference/docs/data-replication.asciidoc @@ -142,8 +142,8 @@ Dirty reads:: An isolated primary can expose writes that will not be acknowledge === The Tip of the Iceberg This document provides a high level overview of how Elasticsearch deals with data. Of course, there is much much more -going on under the hood. Things like primary terms, cluster state publishing and master election all play a role in +going on under the hood. Things like primary terms, cluster state publishing, and master election all play a role in keeping this system behaving correctly. This document also doesn't cover known and important bugs (both closed and open). We recognize that https://github.com/elastic/elasticsearch/issues?q=label%3Aresiliency[GitHub is hard to keep up with]. -To help people stay on top of those and we maintain a dedicated https://www.elastic.co/guide/en/elasticsearch/resiliency/current/index.html[resiliency page] +To help people stay on top of those, we maintain a dedicated https://www.elastic.co/guide/en/elasticsearch/resiliency/current/index.html[resiliency page] on our website. We strongly advise reading it. diff --git a/docs/reference/docs/delete.asciidoc b/docs/reference/docs/delete.asciidoc index 22301b98f1031..b242741abd522 100644 --- a/docs/reference/docs/delete.asciidoc +++ b/docs/reference/docs/delete.asciidoc @@ -88,11 +88,11 @@ DELETE /twitter/_doc/1?routing=kimchy // TEST[continued] The above will delete a tweet with id `1`, but will be routed based on the -user. Note, issuing a delete without the correct routing, will cause the +user. Note that issuing a delete without the correct routing will cause the document to not be deleted. When the `_routing` mapping is set as `required` and no routing value is -specified, the delete api will throw a `RoutingMissingException` and reject +specified, the delete API will throw a `RoutingMissingException` and reject the request. [float] diff --git a/docs/reference/docs/get.asciidoc b/docs/reference/docs/get.asciidoc index 742e258ac65c4..e84df1d5a9689 100644 --- a/docs/reference/docs/get.asciidoc +++ b/docs/reference/docs/get.asciidoc @@ -34,7 +34,7 @@ The result of the above get operation is: -------------------------------------------------- // TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] -The above result includes the `_index`, `_id` and `_version` +The above result includes the `_index`, `_id`, and `_version` of the document we wish to retrieve, including the actual `_source` of the document if it could be found (as indicated by the `found` field in the response). @@ -76,7 +76,7 @@ GET twitter/_doc/0?_source=false // TEST[setup:twitter] If you only need one or two fields from the complete `_source`, you can use the `_source_includes` -& `_source_excludes` parameters to include or filter out that parts you need. This can be especially helpful +and `_source_excludes` parameters to include or filter out the parts you need. This can be especially helpful with large documents where partial retrieval can save on network overhead. 
Both parameters take a comma separated list of fields or wildcard expressions. Example: @@ -138,7 +138,7 @@ PUT twitter/_doc/1 // CONSOLE // TEST[continued] -... and try to retrieve it: +And then try to retrieve it: [source,js] -------------------------------------------------- @@ -236,7 +236,7 @@ You can also use the same source filtering parameters to control which parts of [source,js] -------------------------------------------------- -GET twitter/_source/1/?_source_includes=*.id&_source_excludes=entities' +GET twitter/_source/1/?_source_includes=*.id&_source_excludes=entities -------------------------------------------------- // CONSOLE // TEST[continued] @@ -266,7 +266,7 @@ GET twitter/_doc/2?routing=user1 // TEST[continued] The above will get a tweet with id `2`, but will be routed based on the -user. Note, issuing a get without the correct routing, will cause the +user. Note that issuing a get without the correct routing will cause the document not to be fetched. [float] @@ -307,7 +307,7 @@ indexing). The get operation gets hashed into a specific shard id. It then gets redirected to one of the replicas within that shard id and returns the result. The replicas are the primary shard and its replicas within that -shard id group. This means that the more replicas we will have, the +shard id group. This means that the more replicas we have, the better GET scaling we will have. diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index e8a681567d622..4c687ac0cd23c 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -61,7 +61,7 @@ exist, and applies any <> that are configured. The index operation also creates a dynamic mapping if one does not already exist. By default, new fields and objects will automatically be added to the mapping definition if needed. Check out the <> section -for more information on mapping definitions, and the the +for more information on mapping definitions, and the <> API for information about updating mappings manually. diff --git a/docs/reference/images/sql/rest/console-triple-quotes.png b/docs/reference/images/sql/rest/console-triple-quotes.png new file mode 100644 index 0000000000000..4a13acb986114 Binary files /dev/null and b/docs/reference/images/sql/rest/console-triple-quotes.png differ diff --git a/docs/reference/index-modules/allocation/filtering.asciidoc b/docs/reference/index-modules/allocation/filtering.asciidoc index 9e7a67946a997..0ae331d0e446d 100644 --- a/docs/reference/index-modules/allocation/filtering.asciidoc +++ b/docs/reference/index-modules/allocation/filtering.asciidoc @@ -49,6 +49,7 @@ settings support three types of filters: `include`, `exclude`, and `require`. For example, to tell {es} to allocate shards from the `test` index to either `big` or `medium` nodes, use `index.routing.allocation.include`: + +-- [source,js] ------------------------ PUT test/_settings @@ -58,11 +59,11 @@ PUT test/_settings ------------------------ // CONSOLE // TEST[s/^/PUT test\n/] -+ + If you specify multiple filters, all conditions must be satisfied for shards to be relocated. 
For example, to move the `test` index to `big` nodes in `rack1`, you could specify: -+ + [source,js] ------------------------ PUT test/_settings @@ -73,6 +74,7 @@ PUT test/_settings ------------------------ // CONSOLE // TEST[s/^/PUT test\n/] +-- [float] [[index-allocation-settings]] diff --git a/docs/reference/migration/migrate_8_0.asciidoc b/docs/reference/migration/migrate_8_0.asciidoc index 4477090dc16b6..2ec007076d69b 100644 --- a/docs/reference/migration/migrate_8_0.asciidoc +++ b/docs/reference/migration/migrate_8_0.asciidoc @@ -9,4 +9,24 @@ your application to {es} 8.0. See also <> and <>. -coming[8.0.0] \ No newline at end of file +coming[8.0.0] + +* <> + +[float] +=== Indices created before 7.0 + +Elasticsearch 8.0 can read indices created in version 7.0 or above. An +Elasticsearch 8.0 node will not start in the presence of indices created in a +version of Elasticsearch before 7.0. + +[IMPORTANT] +.Reindex indices from Elasticsearch 6.x or before +========================================= + +Indices created in Elasticsearch 6.x or before will need to be reindexed with +Elasticsearch 7.x in order to be readable by Elasticsearch 8.x. + +========================================= + +include::migrate_8_0/mappings.asciidoc[] \ No newline at end of file diff --git a/docs/reference/migration/migrate_8_0/mappings.asciidoc b/docs/reference/migration/migrate_8_0/mappings.asciidoc new file mode 100644 index 0000000000000..555162351b46a --- /dev/null +++ b/docs/reference/migration/migrate_8_0/mappings.asciidoc @@ -0,0 +1,16 @@ +[float] +[[breaking_80_mappings_changes]] +=== Mapping changes + +[float] +==== The `nGram` and `edgeNGram` token filter names have been removed + +The `nGram` and `edgeNGram` token filter names that have been deprecated since +version 6.4 have been removed. Both token filters should be used by their +alternative names `ngram` and `edge_ngram` instead. + +[float] +==== Limiting the number of completion contexts + +The number of completion contexts within a single completion field +has been limited to 10. 
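As a concrete illustration of the token filter rename described above, here is a minimal sketch (not part of this change) of declaring a filter under its post-6.4 `edge_ngram` name, using the low-level Java REST client; the index and filter names are hypothetical:

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class EdgeNgramRenameSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("PUT", "/autocomplete_index");
            // Declare the filter under its new name; the removed `edgeNGram`
            // spelling is rejected by an Elasticsearch 8.0 node.
            request.setJsonEntity(
                "{\"settings\": {\"analysis\": {\"filter\": {" +
                "  \"autocomplete_filter\": {" +
                "    \"type\": \"edge_ngram\", \"min_gram\": 1, \"max_gram\": 20 }}}}}");
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
--------------------------------------------------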
\ No newline at end of file diff --git a/docs/reference/ml/apis/get-ml-info.asciidoc b/docs/reference/ml/apis/get-ml-info.asciidoc index 67484c0073179..fb9212a59afbc 100644 --- a/docs/reference/ml/apis/get-ml-info.asciidoc +++ b/docs/reference/ml/apis/get-ml-info.asciidoc @@ -58,4 +58,4 @@ This is a possible response: "limits" : { } } ---- -// TESTRESPONSE +// TESTRESPONSE[s/"upgrade_mode": false/"upgrade_mode": $body.upgrade_mode/] diff --git a/docs/reference/modules/cross-cluster-search.asciidoc b/docs/reference/modules/cross-cluster-search.asciidoc index 186c8e8ee3837..658530b372acc 100644 --- a/docs/reference/modules/cross-cluster-search.asciidoc +++ b/docs/reference/modules/cross-cluster-search.asciidoc @@ -150,10 +150,10 @@ will be prefixed with their remote cluster name: "max_score": 1, "hits": [ { - "_index": "cluster_one:twitter", + "_index": "twitter", "_type": "_doc", "_id": "0", - "_score": 1, + "_score": 2, "_source": { "user": "kimchy", "date": "2009-11-15T14:12:12", @@ -162,10 +162,10 @@ will be prefixed with their remote cluster name: } }, { - "_index": "twitter", + "_index": "cluster_one:twitter", "_type": "_doc", "_id": "0", - "_score": 2, + "_score": 1, "_source": { "user": "kimchy", "date": "2009-11-15T14:12:12", @@ -243,10 +243,10 @@ GET /cluster_one:twitter,cluster_two:twitter,twitter/_search <1> "max_score": 1, "hits": [ { - "_index": "cluster_one:twitter", + "_index": "twitter", "_type": "_doc", "_id": "0", - "_score": 1, + "_score": 2, "_source": { "user": "kimchy", "date": "2009-11-15T14:12:12", @@ -255,10 +255,10 @@ GET /cluster_one:twitter,cluster_two:twitter,twitter/_search <1> } }, { - "_index": "twitter", + "_index": "cluster_one:twitter", "_type": "_doc", "_id": "0", - "_score": 2, + "_score": 1, "_source": { "user": "kimchy", "date": "2009-11-15T14:12:12", diff --git a/docs/reference/query-dsl/intervals-query.asciidoc b/docs/reference/query-dsl/intervals-query.asciidoc index 27609e8565902..94a8b007d8457 100644 --- a/docs/reference/query-dsl/intervals-query.asciidoc +++ b/docs/reference/query-dsl/intervals-query.asciidoc @@ -151,8 +151,14 @@ Produces intervals that are contained by an interval from the filter rule Produces intervals that do not contain an interval from the filter rule `not_contained_by`:: Produces intervals that are not contained by an interval from the filter rule +`overlapping`:: +Produces intervals that overlap with an interval from the filter rule `not_overlapping`:: Produces intervals that do not overlap with an interval from the filter rule +`before`:: +Produces intervals that appear before an interval from the filter role +`after`:: +Produces intervals that appear after an interval from the filter role [[interval-script-filter]] ==== Script filters diff --git a/docs/reference/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc index 382b4ab78ff77..e626b491da17d 100644 --- a/docs/reference/rest-api/info.asciidoc +++ b/docs/reference/rest-api/info.asciidoc @@ -68,6 +68,11 @@ Example response: "available" : true, "enabled" : true }, + "data_frame" : { + "description" : "Data Frame for the Elastic Stack", + "available" : true, + "enabled" : true + }, "graph" : { "description" : "Graph Data Exploration for the Elastic Stack", "available" : true, diff --git a/docs/reference/rollup/apis/stop-job.asciidoc b/docs/reference/rollup/apis/stop-job.asciidoc index 50935826f5f53..35162246a5fbb 100644 --- a/docs/reference/rollup/apis/stop-job.asciidoc +++ b/docs/reference/rollup/apis/stop-job.asciidoc @@ -56,6 +56,7 @@ POST 
_rollup/job/sensor/_stop -------------------------------------------------- // CONSOLE // TEST[setup:sensor_started_rollup_job] +// TEST[s/_stop/_stop?wait_for_completion=true&timeout=10s/] Which will return the response: diff --git a/docs/reference/search/request/track-total-hits.asciidoc b/docs/reference/search/request/track-total-hits.asciidoc index c416c777366e4..210f6321816e0 100644 --- a/docs/reference/search/request/track-total-hits.asciidoc +++ b/docs/reference/search/request/track-total-hits.asciidoc @@ -13,7 +13,7 @@ of hits after a certain threshold. When set to `true` the search response will always track the number of hits that match the query accurately (e.g. `total.relation` will always be equal to `"eq"` -when `track_total_hits is set to true). Otherwise the `"total.relation"` returned +when `track_total_hits` is set to true). Otherwise the `"total.relation"` returned in the `"total"` object in the search response determines how the `"total.value"` should be interpreted. A value of `"gte"` means that the `"total.value"` is a lower bound of the total hits that match the query and a value of `"eq"` indicates @@ -178,4 +178,4 @@ GET twitter/_search <1> The total number of hits is unknown. Finally you can force an accurate count by setting `"track_total_hits"` -to `true` in the request. \ No newline at end of file +to `true` in the request. diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index c0b527c06e550..b27e6f0ef0b54 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -43,6 +43,7 @@ PUT music Mapping supports the following parameters: +[horizontal] `analyzer`:: The index analyzer to use, defaults to `simple`. In case you are wondering why we did not opt for the `standard` @@ -70,7 +71,7 @@ Mapping supports the following parameters: Limits the length of a single input, defaults to `50` UTF-16 code points. This limit is only used at index time to reduce the total number of characters per input string in order to prevent massive inputs from - bloating the underlying datastructure. Most usecases won't be influenced + bloating the underlying datastructure. Most use cases won't be influenced by the default value since prefix completions seldom grow beyond prefixes longer than a handful of characters. @@ -97,6 +98,7 @@ PUT music/_doc/1?refresh The following parameters are supported: +[horizontal] `input`:: The input to store, this can be an array of strings or just a string. This field is mandatory. @@ -285,6 +287,7 @@ Which should look like: The basic completion suggester query supports the following parameters: +[horizontal] `field`:: The name of the field on which to run the query (required). `size`:: The number of suggestions to return (defaults to `5`). `skip_duplicates`:: Whether duplicate suggestions should be filtered out (defaults to `false`). @@ -326,13 +329,13 @@ POST music/_search?pretty -------------------------------------------------- // CONSOLE -WARNING: when set to true this option can slow down search because more suggestions +WARNING: When set to true, this option can slow down search because more suggestions need to be visited to find the top N. [[fuzzy]] ==== Fuzzy queries -The completion suggester also supports fuzzy queries - this means, +The completion suggester also supports fuzzy queries -- this means you can have a typo in your search and still get results back. 
[source,js] diff --git a/docs/reference/search/suggesters/context-suggest.asciidoc b/docs/reference/search/suggesters/context-suggest.asciidoc index 63692f0b06f82..77dd5745e159b 100644 --- a/docs/reference/search/suggesters/context-suggest.asciidoc +++ b/docs/reference/search/suggesters/context-suggest.asciidoc @@ -16,6 +16,8 @@ the field mapping. NOTE: It is mandatory to provide a context when indexing and querying a context enabled completion field. +NOTE: The maximum allowed number of completion field context mappings is 10. + The following defines types, each with two context mappings for a completion field: diff --git a/docs/reference/search/suggesters/term-suggest.asciidoc b/docs/reference/search/suggesters/term-suggest.asciidoc index 65b5c3dd9ac8b..f9dd0c9133542 100644 --- a/docs/reference/search/suggesters/term-suggest.asciidoc +++ b/docs/reference/search/suggesters/term-suggest.asciidoc @@ -18,7 +18,7 @@ doesn't take the query into account that is part of request. `field`:: The field to fetch the candidate suggestions from. This is - an required option that either needs to be set globally or per + a required option that either needs to be set globally or per suggestion. `analyzer`:: @@ -54,17 +54,17 @@ doesn't take the query into account that is part of request. [horizontal] `lowercase_terms`:: - Lower cases the suggest text terms after text analysis. + Lowercases the suggest text terms after text analysis. `max_edits`:: The maximum edit distance candidate suggestions can have in order to be considered as a suggestion. Can only be a value - between 1 and 2. Any other value result in an bad request error being + between 1 and 2. Any other value results in a bad request error being thrown. Defaults to 2. `prefix_length`:: The number of minimal prefix characters that must - match in order be a candidate suggestions. Defaults to 1. Increasing + match in order to be a candidate for suggestions. Defaults to 1. Increasing this number improves spellcheck performance. Usually misspellings don't occur in the beginning of terms. (Old name "prefix_len" is deprecated) @@ -85,7 +85,7 @@ doesn't take the query into account that is part of request. `max_inspections`:: A factor that is used to multiply with the - `shards_size` in order to inspect more candidate spell corrections on + `shards_size` in order to inspect more candidate spelling corrections on the shard level. Can improve accuracy at the cost of performance. Defaults to 5. @@ -94,29 +94,29 @@ doesn't take the query into account that is part of request. suggestion should appear in. This can be specified as an absolute number or as a relative percentage of number of documents. This can improve quality by only suggesting high frequency terms. Defaults to 0f and is - not enabled. If a value higher than 1 is specified then the number + not enabled. If a value higher than 1 is specified, then the number cannot be fractional. The shard level document frequencies are used for this option. `max_term_freq`:: - The maximum threshold in number of documents a + The maximum threshold in number of documents in which a suggest text token can exist in order to be included. Can be a relative - percentage number (e.g 0.4) or an absolute number to represent document - frequencies. If an value higher than 1 is specified then fractional can + percentage number (e.g., 0.4) or an absolute number to represent document + frequencies. If a value higher than 1 is specified, then fractional can not be specified. Defaults to 0.01f.
This can be used to exclude high - frequency terms from being spellchecked. High frequency terms are - usually spelled correctly on top of this also improves the spellcheck - performance. The shard level document frequencies are used for this - option. + frequency terms -- which are usually spelled correctly -- from being spellchecked. + This also improves the spellcheck performance. The shard level document frequencies + are used for this option. `string_distance`:: Which string distance implementation to use for comparing how similar suggested terms are. Five possible values can be specified: - `internal` - The default based on damerau_levenshtein but highly optimized + + ** `internal`: The default based on damerau_levenshtein but highly optimized for comparing string distance for terms inside the index. - `damerau_levenshtein` - String distance algorithm based on + ** `damerau_levenshtein`: String distance algorithm based on Damerau-Levenshtein algorithm. - `levenshtein` - String distance algorithm based on Levenshtein edit distance + ** `levenshtein`: String distance algorithm based on Levenshtein edit distance algorithm. - `jaro_winkler` - String distance algorithm based on Jaro-Winkler algorithm. - `ngram` - String distance algorithm based on character n-grams. + ** `jaro_winkler`: String distance algorithm based on Jaro-Winkler algorithm. + ** `ngram`: String distance algorithm based on character n-grams. diff --git a/docs/reference/sql/appendix/syntax-reserved.asciidoc b/docs/reference/sql/appendix/syntax-reserved.asciidoc index 8b0acbee32419..bf1b4c227af75 100644 --- a/docs/reference/sql/appendix/syntax-reserved.asciidoc +++ b/docs/reference/sql/appendix/syntax-reserved.asciidoc @@ -5,7 +5,7 @@ Table with reserved keywords that need to be quoted. Also provide an example to make it more obvious. -The following table lists all of the keywords that are reserved in Presto, +The following table lists all of the keywords that are reserved in {es-sql}, along with their status in the SQL standard. 
These reserved keywords must be quoted (using double quotes) in order to be used as an identifier, for example: @@ -31,43 +31,65 @@ s|SQL-92 |`BETWEEN` |reserved |reserved |`BY` |reserved |reserved |`CAST` |reserved |reserved +|`CATALOG` |reserved |reserved +|`CONVERT` |reserved |reserved +|`CURRENT_DATE` |reserved |reserved +|`CURRENT_TIMESTAMP` |reserved |reserved +|`DAY` |reserved |reserved +|`DAYS` | | |`DESC` |reserved |reserved |`DESCRIBE` |reserved |reserved |`DISTINCT` |reserved |reserved +|`ESCAPE` |reserved |reserved |`EXISTS` |reserved |reserved |`EXPLAIN` |reserved |reserved |`EXTRACT` |reserved |reserved |`FALSE` |reserved |reserved +|`FIRST` |reserved |reserved |`FROM` |reserved |reserved |`FULL` |reserved |reserved |`GROUP` |reserved |reserved |`HAVING` |reserved |reserved +|`HOUR` |reserved |reserved +|`HOURS` | | |`IN` |reserved |reserved |`INNER` |reserved |reserved +|`INTERVAL` |reserved |reserved |`IS` |reserved |reserved |`JOIN` |reserved |reserved |`LEFT` |reserved |reserved |`LIKE` |reserved |reserved |`LIMIT` |reserved |reserved |`MATCH` |reserved |reserved +|`MINUTE` |reserved |reserved +|`MINUTES` | | +|`MONTH` |reserved |reserved |`NATURAL` |reserved |reserved -|`NO` |reserved |reserved |`NOT` |reserved |reserved |`NULL` |reserved |reserved +|`NULLS` | | |`ON` |reserved |reserved |`OR` |reserved |reserved |`ORDER` |reserved |reserved |`OUTER` |reserved |reserved |`RIGHT` |reserved |reserved +|`RLIKE` | | +|`QUERY` | | +|`SECOND` |reserved |reserved +|`SECONDS` | | |`SELECT` |reserved |reserved |`SESSION` | |reserved |`TABLE` |reserved |reserved +|`TABLES` | | |`THEN` |reserved |reserved |`TO` |reserved |reserved |`TRUE` |reserved |reserved +|`TYPE` | | |`USING` |reserved |reserved |`WHEN` |reserved |reserved |`WHERE` |reserved |reserved |`WITH` |reserved |reserved +|`YEAR` |reserved |reserved +|`YEARS` | | |=== diff --git a/docs/reference/sql/endpoints/rest.asciidoc b/docs/reference/sql/endpoints/rest.asciidoc index 1cc42c3071b44..e21071a49dbbc 100644 --- a/docs/reference/sql/endpoints/rest.asciidoc +++ b/docs/reference/sql/endpoints/rest.asciidoc @@ -4,8 +4,8 @@ == SQL REST API The SQL REST API accepts SQL in a JSON document, executes it, -and returns the results. For example: - +and returns the results. +For example: [source,js] -------------------------------------------------- @@ -32,19 +32,68 @@ James S.A. Corey |Leviathan Wakes |561 |2011-06-02T00:00:00.000Z // TESTRESPONSE[s/\|/\\|/ s/\+/\\+/] // TESTRESPONSE[_cat] -While the `text/plain` format is nice for humans, computers prefer something -more structured. You can replace the value of `format` with: -- `json` aka `application/json` -- `yaml` aka `application/yaml` -- `smile` aka `application/smile` -- `cbor` aka `application/cbor` -- `txt` aka `text/plain` -- `csv` aka `text/csv` -- `tsv` aka `text/tab-separated-values` +[[sql-kibana-console]] +.Using Kibana Console +If you are using {kibana-ref}/console-kibana.html[Kibana Console] (which is highly recommended), take advantage of the +triple quotes `"""` when creating the query. This not only automatically escapes double +quotes (`"`) inside the query string but also supports multi-line queries, as shown below: +image:images/sql/rest/console-triple-quotes.png[] + +[[sql-rest-format]] +[float] +=== Response Data Formats + +While the textual format is nice for humans, computers prefer something +more structured.
+ +{es-sql} can return the data in the following formats which can be set +either through the `format` property in the URL or by setting the `Accept` HTTP header: + +NOTE: The URL parameter takes precedence over the `Accept` HTTP header. +If neither is specified then the response is returned in the same format as the request. + +[cols="^m,^4m,^8"] + +|=== +s|format +s|`Accept` HTTP header +s|Description + +3+h| Human Readable + +|csv +|text/csv +|https://en.wikipedia.org/wiki/Comma-separated_values[Comma-separated values] + +|json +|application/json +|https://www.json.org/[JSON] (JavaScript Object Notation) human-readable format + +|tsv +|text/tab-separated-values +|https://en.wikipedia.org/wiki/Tab-separated_values[Tab-separated values] + +|txt +|text/plain +|CLI-like representation + +|yaml +|application/yaml +|https://en.wikipedia.org/wiki/YAML[YAML] (YAML Ain't Markup Language) human-readable format + +3+h| Binary Formats + +|cbor +|application/cbor +|http://cbor.io/[Concise Binary Object Representation] + +|smile +|application/smile +|https://en.wikipedia.org/wiki/Smile_(data_interchange_format)[Smile] binary data format similar to CBOR + +|=== -Alternatively you can set the `Accept` HTTP header to the appropriate media -format. The GET parameter takes precedence over the header. If neither is -specified then the response is returned in the same format as the request. [source,js] -------------------------------------------------- @@ -80,7 +129,11 @@ Which returns: -------------------------------------------------- // TESTRESPONSE[s/sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWWWdrRlVfSS1TbDYtcW9lc1FJNmlYdw==:BAFmBmF1dGhvcgFmBG5hbWUBZgpwYWdlX2NvdW50AWYMcmVsZWFzZV9kYXRl\+v\/\/\/w8=/$body.cursor/] -You can continue to the next page by sending back the `cursor` field. In +[[sql-pagination]] +[float] +=== Paginating through a large response + +Using the example above, one can continue to the next page by sending back the `cursor` field. In case of text format the cursor is returned as `Cursor` http header. [source,js] -------------------------------------------------- @@ -111,7 +164,7 @@ Which looks like: -------------------------------------------------- // TESTRESPONSE[s/sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWODRMaXBUaVlRN21iTlRyWHZWYUdrdw==:BAFmBmF1dGhvcgFmBG5hbWUBZgpwYWdlX2NvdW50AWYMcmVsZWFzZV9kYXRl9f\/\/\/w8=/$body.cursor/] -Note that the `column` object is only part of the first page. +Note that the `columns` object is only part of the first page. You've reached the last page when there is no `cursor` returned in the results. Like Elasticsearch's <>, @@ -145,9 +198,11 @@ Which will like return the [[sql-rest-filtering]] +[float] +=== Filtering using {es} query DSL You can filter the results that SQL will run on using a standard -Elasticsearch query DSL by specifying the query in the filter +{es} query DSL by specifying the query in the filter parameter. [source,js] -------------------------------------------------- @@ -181,10 +236,48 @@ Douglas Adams |The Hitchhiker's Guide to the Galaxy|180 |1979-10-12T // TESTRESPONSE[_cat] [[sql-rest-fields]] -In addition to the `query` and `cursor` fields, the request can -contain `fetch_size` and `time_zone`. `fetch_size` is a hint for how -many results to return in each page. SQL might chose to return more -or fewer results though. `time_zone` is the time zone to use for datetime -functions and datetime parsing. `time_zone` defaults to `utc` and can take -any values documented -http://www.joda.org/joda-time/apidocs/org/joda/time/DateTimeZone.html[here].
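To make the `filter` parameter concrete, a minimal sketch with the low-level Java REST client; the host and the `library` data mirror the examples above, and this is an illustration rather than part of the change:

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class SqlFilterSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("POST", "/_sql");
            request.addParameter("format", "txt"); // any format from the table above
            // The filter element is plain query DSL, applied on top of the SQL query.
            request.setJsonEntity(
                "{\"query\": \"SELECT * FROM library ORDER BY page_count DESC\"," +
                " \"filter\": {\"range\": {\"page_count\": {\"gte\": 100}}}," +
                " \"fetch_size\": 5}");
            System.out.println(EntityUtils.toString(client.performRequest(request).getEntity()));
        }
    }
}
--------------------------------------------------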
+[float] +=== Supported REST parameters + +In addition to `query` and `fetch_size`, a request accepts a number of user-defined fields for specifying +the request time-outs or localization information (such as the time zone). + +The table below lists the supported parameters: + +[cols="^m,^m,^5"] + +|=== + +s|name +s|Default value +s|Description + +|query +|Mandatory +|SQL query to execute + +|fetch_size +|1000 +|The maximum number of rows (or entries) to return in one response + +|filter +|none +|Optional {es} query DSL for additional <>. + +|request_timeout +|90s +|The timeout before the request fails. + +|page_timeout +|45s +|The timeout before a pagination request fails. + +|time_zone +|`Z` (or `UTC`) +|Time zone in ISO 8601 format used for executing the query on the server. +More information available https://docs.oracle.com/javase/8/docs/api/java/time/ZoneId.html[here]. + +|=== + +Do note that most parameters (outside the timeout ones) make sense only during the initial query - any follow-up pagination request only requires the `cursor` parameter as explained in the <> chapter. +That's because the query has already been executed and the calls are simply about returning the found results - thus the parameters are ignored. \ No newline at end of file diff --git a/docs/reference/sql/endpoints/translate.asciidoc b/docs/reference/sql/endpoints/translate.asciidoc index cd7f1cc846e8a..6a347ff614af7 100644 --- a/docs/reference/sql/endpoints/translate.asciidoc +++ b/docs/reference/sql/endpoints/translate.asciidoc @@ -4,7 +4,7 @@ == SQL Translate API The SQL Translate API accepts SQL in a JSON document and translates it -into native Elasticsearch queries. For example: +into native {es} queries. For example: [source,js] -------------------------------------------------- diff --git a/docs/reference/sql/index.asciidoc b/docs/reference/sql/index.asciidoc index 0ef6bd1dca919..dc649e2434da5 100644 --- a/docs/reference/sql/index.asciidoc +++ b/docs/reference/sql/index.asciidoc @@ -12,9 +12,13 @@ [partintro] -- -X-Pack includes a SQL feature to execute SQL against Elasticsearch +X-Pack includes a SQL feature to execute SQL queries against {es} indices and return results in tabular format. +The following chapters aim to cover everything from usage to syntax and drivers. +Experienced users or those in a hurry might want to jump directly to +the list of SQL <> and <>. + <>:: Overview of {es-sql} and its features. <>:: @@ -22,22 +26,19 @@ indices and return results in tabular format. <>:: Language conventions across SQL and {es}. <>:: - Securing {es-sql} and {es}. + Secure {es-sql} and {es}. <>:: - Accepts SQL in a JSON document, executes it, and returns the - results. + Execute SQL in JSON format over REST. <>:: - Accepts SQL in a JSON document and translates it into a native - Elasticsearch query and returns that. + Translate SQL in JSON format to a native {es} query. <>:: - Command-line application that connects to {es} to execute - SQL and print tabular results. + Command-line application for executing SQL against {es}. <>:: - A JDBC driver for {es}. + JDBC driver for {es}. <>:: - An ODBC driver for {es}. + ODBC driver for {es}. <>:: - Documentation for configuring various SQL/BI tools with {es-sql}. + Set up various SQL/BI tools with {es-sql}. <>:: Overview of the {es-sql} language, such as supported data types, commands and syntax.
diff --git a/docs/reference/sql/language/index.asciidoc b/docs/reference/sql/language/index.asciidoc index f63afd6ebd8d6..6ea6a15b3ed64 100644 --- a/docs/reference/sql/language/index.asciidoc +++ b/docs/reference/sql/language/index.asciidoc @@ -3,12 +3,14 @@ [[sql-spec]] == SQL Language -This chapter describes the SQL semantics supported in X-Pack namely: +This chapter describes the supported SQL syntax and semantics, namely: -<>:: Data types +<>:: Lexical structure <>:: Commands +<>:: Data types <>:: Index patterns +include::syntax/lexic/index.asciidoc[] +include::syntax/commands/index.asciidoc[] include::data-types.asciidoc[] -include::syntax/index.asciidoc[] include::index-patterns.asciidoc[] diff --git a/docs/reference/sql/language/syntax/describe-table.asciidoc b/docs/reference/sql/language/syntax/commands/describe-table.asciidoc similarity index 100% rename from docs/reference/sql/language/syntax/describe-table.asciidoc rename to docs/reference/sql/language/syntax/commands/describe-table.asciidoc diff --git a/docs/reference/sql/language/syntax/index.asciidoc b/docs/reference/sql/language/syntax/commands/index.asciidoc similarity index 100% rename from docs/reference/sql/language/syntax/index.asciidoc rename to docs/reference/sql/language/syntax/commands/index.asciidoc diff --git a/docs/reference/sql/language/syntax/select.asciidoc b/docs/reference/sql/language/syntax/commands/select.asciidoc similarity index 100% rename from docs/reference/sql/language/syntax/select.asciidoc rename to docs/reference/sql/language/syntax/commands/select.asciidoc diff --git a/docs/reference/sql/language/syntax/show-columns.asciidoc b/docs/reference/sql/language/syntax/commands/show-columns.asciidoc similarity index 100% rename from docs/reference/sql/language/syntax/show-columns.asciidoc rename to docs/reference/sql/language/syntax/commands/show-columns.asciidoc diff --git a/docs/reference/sql/language/syntax/show-functions.asciidoc b/docs/reference/sql/language/syntax/commands/show-functions.asciidoc similarity index 100% rename from docs/reference/sql/language/syntax/show-functions.asciidoc rename to docs/reference/sql/language/syntax/commands/show-functions.asciidoc diff --git a/docs/reference/sql/language/syntax/show-tables.asciidoc b/docs/reference/sql/language/syntax/commands/show-tables.asciidoc similarity index 100% rename from docs/reference/sql/language/syntax/show-tables.asciidoc rename to docs/reference/sql/language/syntax/commands/show-tables.asciidoc diff --git a/docs/reference/sql/language/syntax/lexic/index.asciidoc b/docs/reference/sql/language/syntax/lexic/index.asciidoc new file mode 100644 index 0000000000000..014193a0469dd --- /dev/null +++ b/docs/reference/sql/language/syntax/lexic/index.asciidoc @@ -0,0 +1,228 @@ +[role="xpack"] +[testenv="basic"] +[[sql-lexical-structure]] +== Lexical Structure + +This section covers the major elements of the SQL lexical structure which, for the most part, resembles that of ANSI SQL itself; hence low-level details are not discussed in depth. + +{es-sql} currently accepts only one _command_ at a time. A command is a sequence of _tokens_ terminated by the end of the input stream. + +A token can be a __key word__, an _identifier_ (_quoted_ or _unquoted_), a _literal_ (or constant) or a special character symbol (typically a delimiter).
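+ +For instance, the following command (purely illustrative - the `library` table and its `name` column are simply assumed here) contains all four kinds of tokens: `SELECT` and `FROM` are key words, `"name"` and `"library"` are quoted identifiers, `'Foo'` is a string literal and the comma `,` is a special character symbol: + +[source, sql] +---- +SELECT "name", 'Foo' FROM "library" +----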
Tokens are typically separated by whitespace (be it space or tab) though in some cases, where there is no ambiguity (typically due to a character symbol), this is not needed - however for readability purposes it should be avoided. + +[[sql-syntax-keywords]] +[float] +=== Key Words + +Take the following example: + +[source, sql] +---- +SELECT * FROM table +---- + +This query has four tokens: `SELECT`, `*`, `FROM` and `table`. The first three, namely `SELECT`, `*` and `FROM`, are __key words__, meaning words that have a fixed meaning in SQL. The token `table` is an _identifier_, meaning it identifies (by name) an entity inside SQL such as a table (in this case), a column, etc... + +As one can see, both key words and identifiers have the _same_ lexical structure and thus one cannot know whether a token is one or the other without knowing the SQL language; the complete list of key words is available in the <>. +Do note that key words are case-insensitive, meaning the previous example can be written as: + +[source, sql] +---- +select * fRoM table; +---- + +Identifiers, however, are not - as {es} is case-sensitive, {es-sql} uses the received value verbatim. + +To help differentiate between the two, throughout the documentation the SQL key words are upper-cased, a convention we find increases readability and thus recommend to others. + +[[sql-syntax-identifiers]] +[float] +=== Identifiers + +Identifiers can be of two types: __quoted__ and __unquoted__: + +[source, sql] +---- +SELECT ip_address FROM "hosts-*" +---- + +This query has two identifiers, `ip_address` and `hosts-*` (an <>). As `ip_address` does not clash with any key words, it can be used verbatim; `hosts-*` on the other hand cannot, as it clashes with `-` (the minus operator) and `*`, hence the double quotes. + +Another example: + +[source, sql] +---- +SELECT "from" FROM "" +---- + +The first identifier, `from`, needs to be quoted as otherwise it clashes with the `FROM` key word (which is case-insensitive and thus can also be written as `from`) while the second identifier, using the {es} <>, would have otherwise confused the parser. + +Hence, in general, *especially* when dealing with user input, it is *highly* recommended to quote identifiers. It adds minimal overhead to your queries and in return offers clarity and disambiguation. + +[[sql-syntax-literals]] +[float] +=== Literals (Constants) + +{es-sql} supports two kinds of __implicitly-typed__ literals: strings and numbers. + +[[sql-syntax-string-literals]] +[float] +==== String Literals + +A string literal is an arbitrary number of characters bounded by single quotes `'`: `'Giant Robot'`. +To include a single quote in the string, escape it using another single quote: `'Captain EO''s Voyage'`. + +NOTE: An escaped single quote is *not* a double quote (`"`), but a single quote `'` _repeated_ (`''`). + +[[sql-syntax-numeric-literals]] +[float] +==== Numeric Literals + +Numeric literals are accepted both in decimal and scientific notation with exponent marker (`e` or `E`), starting either with a digit or a decimal point `.`: + +[source, sql] +---- +1969 -- integer notation +3.14 -- decimal notation +.1234 -- decimal notation starting with decimal point +4E5 -- scientific notation (with exponent marker) +1.2e-3 -- scientific notation with decimal point +---- + +Numeric literals that contain a decimal point are always interpreted as being of type `double`. Those without are considered `integer` if they fit, otherwise their type is `long` (or `BIGINT` in ANSI SQL types).
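+ +To illustrate (the values below are arbitrary), the rules above would type the following literals as indicated in each comment: + +[source, sql] +---- +10 -- fits an integer, thus typed as integer +10000000000 -- too large for an integer, thus typed as long (BIGINT) +10.5 -- contains a decimal point, thus typed as double +----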
+ +[[sql-syntax-generic-literals]] +[float] +==== Generic Literals + +When dealing with a literal of arbitrary type, one creates the object by casting (typically) the string representation to the desired type. This can be achieved through the dedicated <> and <>: + +[source, sql] +---- +123::LONG -- cast 123 to a LONG +CAST('1969-05-13T12:34:56' AS TIMESTAMP) -- cast the given string to datetime +CONVERT('10.0.0.1', IP) -- cast '10.0.0.1' to an IP +---- + +Do note that {es-sql} provides functions that, out of the box, return popular literals (like `E()`) or provide dedicated parsing for certain strings. + +[[sql-syntax-single-vs-double-quotes]] +[float] +=== Single vs Double Quotes + +It is worth pointing out that in SQL, single quotes `'` and double quotes `"` have different meanings and *cannot* be used interchangeably. +Single quotes are used to declare a <> while double quotes are used for <>. + +To wit: + +[source, sql] +---- +SELECT "first_name" <1> + FROM "musicians" <1> + WHERE "last_name" <1> + = 'Carroll' <2> +---- + +<1> Double quotes `"` used for column and table identifiers +<2> Single quotes `'` used for a string literal + +[[sql-syntax-special-chars]] +[float] +=== Special characters + +A few characters that are not alphanumeric have a dedicated meaning different from that of an operator. For completeness, these are specified below: + + +[cols="^m,^15"] + +|=== + +s|Char +s|Description + +|* | The asterisk (or wildcard) is used in some contexts to denote all fields for a table. Can also be used as an argument to some aggregate functions. +|, | Commas are used to enumerate the elements of a list. +|. | Used in numeric constants or to separate identifier qualifiers (catalog, table, column names, etc...). +|()| Parentheses are used for specific SQL commands, function declarations or to enforce precedence. +|=== + +[[sql-syntax-operators]] +[float] +=== Operators + +Most operators in {es-sql} have the same precedence and are left-associative. As this is done at parsing time, parentheses need to be used to enforce a different precedence. + +The following table indicates the supported operators and their precedence (highest to lowest): + +[cols="^2m,^,^3"] + +|=== + +s|Operator/Element +s|Associativity +s|Description + +|. +|left +|qualifier separator + +|:: +|left +|PostgreSQL-style type cast + +|+ - +|right +|unary plus and minus (numeric literal sign) + +|* / % +|left +|multiplication, division, modulo + +|+ - +|left +|addition, subtraction + +|BETWEEN IN LIKE +| +|range containment, string matching + +|< > <= >= = <=> <> != +| +|comparison + +|NOT +|right +|logical negation + +|AND +|left +|logical conjunction + +|OR +|left +|logical disjunction + +|=== + + +[[sql-syntax-comments]] +[float] +=== Comments + +{es-sql} allows comments, which are sequences of characters ignored by the parser. + +Two styles are supported: + +Single Line:: Comments start with a double dash `--` and continue until the end of the line. +Multi line:: Comments start with `/*` and end with `*/` (also known as C-style). + + +[source, sql] +---- +-- single line comment +/* multi + line + comment + that supports /* nested comments */ + */ +---- + diff --git a/docs/reference/upgrade/close-ml.asciidoc b/docs/reference/upgrade/close-ml.asciidoc new file mode 100644 index 0000000000000..c4efddca759c9 --- /dev/null +++ b/docs/reference/upgrade/close-ml.asciidoc @@ -0,0 +1,32 @@ +[testenv="platinum"] + +If your {ml} indices were created earlier than the previous major version, they +must be reindexed.
In those circumstances, there must be no machine learning +jobs running during the upgrade. + +In all other circumstances, there is no requirement to close your {ml} jobs. +There are, however, advantages to doing so. If you choose to leave your jobs +running during the upgrade, they are affected when you stop the {ml} nodes. The +jobs move to another {ml} node and restore the model states. This scenario has +the least disruption to the active {ml} jobs but incurs the highest load on the +cluster. + +To close all {ml} jobs before you upgrade, see +{stack-ov}/stopping-ml.html[Stopping {ml}]. This method persists the model +state at the moment of closure, which means that when you open your jobs after +the upgrade, they use the exact same model. This scenario takes the most time, +however, especially if you have many jobs or jobs with large model states. + +To temporarily halt the tasks associated with your {ml} jobs and {dfeeds} and +prevent new jobs from opening, use the <>: + +[source,js] +-------------------------------------------------- +POST _ml/set_upgrade_mode?enabled=true +-------------------------------------------------- +// CONSOLE + +This method does not persist the absolute latest model state; rather, it uses the +last model state that was automatically saved. By halting the tasks, you avoid +incurring the cost of managing active jobs during the upgrade and it's quicker +than stopping {dfeeds} and closing jobs. \ No newline at end of file diff --git a/docs/reference/upgrade/cluster_restart.asciidoc b/docs/reference/upgrade/cluster_restart.asciidoc index 4c229e373f505..a8552a82bb8d2 100644 --- a/docs/reference/upgrade/cluster_restart.asciidoc +++ b/docs/reference/upgrade/cluster_restart.asciidoc @@ -26,8 +26,11 @@ recovery. include::synced-flush.asciidoc[] -- -. *Stop any machine learning jobs that are running.* See -{xpack-ref}/stopping-ml.html[Stopping Machine Learning]. +. *Stop any machine learning jobs that are running.* ++ +-- +include::close-ml.asciidoc[] +-- . *Shutdown all nodes.* + @@ -132,3 +135,7 @@ GET _cat/recovery -- . *Restart machine learning jobs.* ++ +-- +include::open-ml.asciidoc[] +-- diff --git a/docs/reference/upgrade/open-ml.asciidoc b/docs/reference/upgrade/open-ml.asciidoc new file mode 100644 index 0000000000000..b9b6b772bbe8d --- /dev/null +++ b/docs/reference/upgrade/open-ml.asciidoc @@ -0,0 +1,13 @@ +[testenv="platinum"] +If you closed all {ml} jobs before the upgrade, you must open them. Use {kib} or +the <>. + +Alternatively, if you temporarily halted the tasks associated with your {ml} jobs, +use the <> to return them to active +states: + +[source,js] +-------------------------------------------------- +POST _ml/set_upgrade_mode?enabled=false +-------------------------------------------------- +// CONSOLE diff --git a/docs/reference/upgrade/rolling_upgrade.asciidoc b/docs/reference/upgrade/rolling_upgrade.asciidoc index dff3895ac4c1d..e62bd9348f1ab 100644 --- a/docs/reference/upgrade/rolling_upgrade.asciidoc +++ b/docs/reference/upgrade/rolling_upgrade.asciidoc @@ -43,8 +43,11 @@ include::synced-flush.asciidoc[] -- -. *Stop any machine learning jobs that are running.* See -{xpack-ref}/stopping-ml.html[Stopping Machine Learning]. +. *Stop any machine learning jobs that are running.* ++ +-- +include::close-ml.asciidoc[] +-- . [[upgrade-node]] *Shut down a single node*. + @@ -160,6 +163,11 @@ for each node that needs to be updated. -- .
*Restart machine learning jobs.* ++ +-- +include::open-ml.asciidoc[] +-- + [IMPORTANT] ==================================================== diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 2970024ea74ef..4ace7103b8ce8 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-5.1.1-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-5.2.1-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=53b71812f18cdb2777e9f1b2a0f2038683907c90bdc406bc64d8b400e1fb2c3b +distributionSha256Sum=9dc729f6dbfbbc4df1692665d301e028976dacac296a126f16148941a9cf012e diff --git a/libs/ssl-config/build.gradle b/libs/ssl-config/build.gradle index 8d5b1d18b8c04..0b8eac5486ccb 100644 --- a/libs/ssl-config/build.gradle +++ b/libs/ssl-config/build.gradle @@ -17,6 +17,9 @@ * under the License. */ +apply plugin: 'nebula.maven-base-publish' +apply plugin: 'nebula.maven-scm' + dependencies { compile "org.elasticsearch:elasticsearch-core:${version}" diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index ac439f4bae227..e05305a8ebd39 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -414,14 +414,6 @@ public List getPreConfiguredTokenFilters() { filters.add(PreConfiguredTokenFilter.singleton("dutch_stem", false, input -> new SnowballFilter(input, new DutchStemmer()))); filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, false, input -> new EdgeNGramTokenFilter(input, 1))); - filters.add(PreConfiguredTokenFilter.singletonWithVersion("edgeNGram", false, false, (reader, version) -> { - if (version.onOrAfter(org.elasticsearch.Version.V_6_4_0)) { - deprecationLogger.deprecatedAndMaybeLog("edgeNGram_deprecation", - "The [edgeNGram] token filter name is deprecated and will be removed in a future version. " - + "Please change the filter name to [edge_ngram] instead."); - } - return new EdgeNGramTokenFilter(reader, 1); - })); filters.add(PreConfiguredTokenFilter.singleton("elision", true, input -> new ElisionFilter(input, FrenchAnalyzer.DEFAULT_ARTICLES))); filters.add(PreConfiguredTokenFilter.singleton("french_stem", false, input -> new SnowballFilter(input, new FrenchStemmer()))); @@ -438,14 +430,6 @@ public List getPreConfiguredTokenFilters() { LimitTokenCountFilterFactory.DEFAULT_MAX_TOKEN_COUNT, LimitTokenCountFilterFactory.DEFAULT_CONSUME_ALL_TOKENS))); filters.add(PreConfiguredTokenFilter.singleton("ngram", false, false, reader -> new NGramTokenFilter(reader, 1, 2, false))); - filters.add(PreConfiguredTokenFilter.singletonWithVersion("nGram", false, false, (reader, version) -> { - if (version.onOrAfter(org.elasticsearch.Version.V_6_4_0)) { - deprecationLogger.deprecatedAndMaybeLog("nGram_deprecation", - "The [nGram] token filter name is deprecated and will be removed in a future version. 
" - + "Please change the filter name to [ngram] instead."); - } - return new NGramTokenFilter(reader, 1, 2, false); - })); filters.add(PreConfiguredTokenFilter.singleton("persian_normalization", true, PersianNormalizationFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("porter_stem", false, PorterStemFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("reverse", false, ReverseStringFilter::new)); diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java index 99e882c622085..acb7f2213f641 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java @@ -185,7 +185,6 @@ protected Map> getPreConfiguredTokenFilters() { filters.put("delimited_payload", org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilterFactory.class); filters.put("dutch_stem", SnowballPorterFilterFactory.class); filters.put("edge_ngram", null); - filters.put("edgeNGram", null); filters.put("elision", null); filters.put("french_stem", SnowballPorterFilterFactory.class); filters.put("german_stem", null); @@ -197,7 +196,6 @@ protected Map> getPreConfiguredTokenFilters() { filters.put("length", null); filters.put("limit", LimitTokenCountFilterFactory.class); filters.put("ngram", null); - filters.put("nGram", null); filters.put("persian_normalization", null); filters.put("porter_stem", null); filters.put("reverse", ReverseStringFilterFactory.class); diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java index c52c78ffe27e3..f128c07361c45 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java @@ -20,8 +20,6 @@ package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.analysis.Tokenizer; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; @@ -29,98 +27,14 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; -import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.VersionUtils; import java.io.IOException; -import java.io.StringReader; -import java.util.Map; public class CommonAnalysisPluginTests extends ESTestCase { - /** - * Check that the deprecated name "nGram" issues a deprecation warning for indices created since 6.3.0 - */ - public void testNGramDeprecationWarning() throws IOException { - Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.CURRENT)) - .build(); - - IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); - try 
(CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { - Map tokenFilters = createTestAnalysis(idxSettings, settings, commonAnalysisPlugin).tokenFilter; - TokenFilterFactory tokenFilterFactory = tokenFilters.get("nGram"); - Tokenizer tokenizer = new MockTokenizer(); - tokenizer.setReader(new StringReader("foo bar")); - assertNotNull(tokenFilterFactory.create(tokenizer)); - assertWarnings( - "The [nGram] token filter name is deprecated and will be removed in a future version. " - + "Please change the filter name to [ngram] instead."); - } - } - - /** - * Check that the deprecated name "nGram" does NOT issues a deprecation warning for indices created before 6.4.0 - */ - public void testNGramNoDeprecationWarningPre6_4() throws IOException { - Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put(IndexMetaData.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_3_0)) - .build(); - - IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); - try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { - Map tokenFilters = createTestAnalysis(idxSettings, settings, commonAnalysisPlugin).tokenFilter; - TokenFilterFactory tokenFilterFactory = tokenFilters.get("nGram"); - Tokenizer tokenizer = new MockTokenizer(); - tokenizer.setReader(new StringReader("foo bar")); - assertNotNull(tokenFilterFactory.create(tokenizer)); - } - } - - /** - * Check that the deprecated name "edgeNGram" issues a deprecation warning for indices created since 6.3.0 - */ - public void testEdgeNGramDeprecationWarning() throws IOException { - Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.CURRENT)) - .build(); - - IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); - try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { - Map tokenFilters = createTestAnalysis(idxSettings, settings, commonAnalysisPlugin).tokenFilter; - TokenFilterFactory tokenFilterFactory = tokenFilters.get("edgeNGram"); - Tokenizer tokenizer = new MockTokenizer(); - tokenizer.setReader(new StringReader("foo bar")); - assertNotNull(tokenFilterFactory.create(tokenizer)); - assertWarnings( - "The [edgeNGram] token filter name is deprecated and will be removed in a future version. 
" - + "Please change the filter name to [edge_ngram] instead."); - } - } - - /** - * Check that the deprecated name "edgeNGram" does NOT issues a deprecation warning for indices created before 6.4.0 - */ - public void testEdgeNGramNoDeprecationWarningPre6_4() throws IOException { - Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put(IndexMetaData.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_3_0)) - .build(); - - IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); - try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { - Map tokenFilters = createTestAnalysis(idxSettings, settings, commonAnalysisPlugin).tokenFilter; - TokenFilterFactory tokenFilterFactory = tokenFilters.get("edgeNGram"); - Tokenizer tokenizer = new MockTokenizer(); - tokenizer.setReader(new StringReader("foo bar")); - assertNotNull(tokenFilterFactory.create(tokenizer)); - } - } - - /** * Check that the deprecated analyzer name "standard_html_strip" throws exception for indices created since 7.0.0 */ diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java index e96243efc4254..8f58a074cf102 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java @@ -81,7 +81,7 @@ public void testNgramHighlightingWithBrokenPositions() throws IOException { .put("analysis.tokenizer.autocomplete.max_gram", 20) .put("analysis.tokenizer.autocomplete.min_gram", 1) .put("analysis.tokenizer.autocomplete.token_chars", "letter,digit") - .put("analysis.tokenizer.autocomplete.type", "nGram") + .put("analysis.tokenizer.autocomplete.type", "ngram") .put("analysis.filter.wordDelimiter.type", "word_delimiter") .putList("analysis.filter.wordDelimiter.type_table", "& => ALPHANUM", "| => ALPHANUM", "! => ALPHANUM", diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml index 9a7c158fc4734..4fe5162e68743 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml @@ -23,38 +23,6 @@ - match: { detail.tokenizer.tokens.0.token: Foo Bar! } --- -"nGram": - - do: - indices.analyze: - body: - text: good - explain: true - tokenizer: - type: nGram - min_gram: 2 - max_gram: 2 - - length: { detail.tokenizer.tokens: 3 } - - match: { detail.tokenizer.name: _anonymous_tokenizer } - - match: { detail.tokenizer.tokens.0.token: go } - - match: { detail.tokenizer.tokens.1.token: oo } - - match: { detail.tokenizer.tokens.2.token: od } - ---- -"nGram_exception": - - skip: - version: " - 6.99.99" - reason: only starting from version 7.x this throws an error - - do: - catch: /The difference between max_gram and min_gram in NGram Tokenizer must be less than or equal to[:] \[1\] but was \[2\]\. 
This limit can be set by changing the \[index.max_ngram_diff\] index level setting\./ - indices.analyze: - body: - text: good - explain: true - tokenizer: - type: nGram - min_gram: 2 - max_gram: 4 ---- "simple_pattern": - do: indices.analyze: @@ -133,7 +101,7 @@ text: "foobar" explain: true tokenizer: - type: nGram + type: ngram min_gram: 3 max_gram: 3 - length: { detail.tokenizer.tokens: 4 } @@ -162,15 +130,31 @@ body: text: "foo" explain: true - tokenizer: nGram + tokenizer: ngram - length: { detail.tokenizer.tokens: 5 } - - match: { detail.tokenizer.name: nGram } + - match: { detail.tokenizer.name: ngram } - match: { detail.tokenizer.tokens.0.token: f } - match: { detail.tokenizer.tokens.1.token: fo } - match: { detail.tokenizer.tokens.2.token: o } - match: { detail.tokenizer.tokens.3.token: oo } - match: { detail.tokenizer.tokens.4.token: o } +--- +"ngram_exception": + - skip: + version: " - 6.99.99" + reason: only starting from version 7.x this throws an error + - do: + catch: /The difference between max_gram and min_gram in NGram Tokenizer must be less than or equal to[:] \[1\] but was \[2\]\. This limit can be set by changing the \[index.max_ngram_diff\] index level setting\./ + indices.analyze: + body: + text: good + explain: true + tokenizer: + type: ngram + min_gram: 2 + max_gram: 4 + --- "edge_ngram": - do: @@ -194,7 +178,7 @@ text: "foo" explain: true tokenizer: - type: edgeNGram + type: edge_ngram min_gram: 1 max_gram: 3 - length: { detail.tokenizer.tokens: 3 } @@ -219,9 +203,9 @@ body: text: "foo" explain: true - tokenizer: edgeNGram + tokenizer: edge_ngram - length: { detail.tokenizer.tokens: 2 } - - match: { detail.tokenizer.name: edgeNGram } + - match: { detail.tokenizer.name: edge_ngram } - match: { detail.tokenizer.tokens.0.token: f } - match: { detail.tokenizer.tokens.1.token: fo } diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml index ec00b6d41f1c5..56bbed7044e14 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml @@ -76,7 +76,7 @@ analysis: tokenizer: trigram: - type: nGram + type: ngram min_gram: 3 max_gram: 3 filter: diff --git a/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java b/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java index 358d2cb00ab14..7f4855f8880b7 100644 --- a/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java +++ b/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java @@ -20,7 +20,6 @@ package org.elasticsearch.example.rescore; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.Term; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ScoreDoc; @@ -46,7 +45,6 @@ import java.util.Arrays; import java.util.Iterator; import java.util.Objects; -import java.util.Set; import static java.util.Collections.singletonList; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; @@ -224,9 +222,5 @@ public Explanation explain(int topLevelDocId, IndexSearcher searcher, RescoreCon return Explanation.match(context.factor, "test", 
singletonList(sourceExplanation)); } - @Override - public void extractTerms(IndexSearcher searcher, RescoreContext rescoreContext, Set termsSet) { - // Since we don't use queries there are no terms to extract. - } } } diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 011e20d84fbfd..b34f677e1c15b 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -25,7 +25,6 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.WarningFailureException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.CheckedFunction; @@ -34,11 +33,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.rest.action.document.RestBulkAction; -import org.elasticsearch.rest.action.document.RestGetAction; -import org.elasticsearch.rest.action.document.RestIndexAction; -import org.elasticsearch.rest.action.document.RestUpdateAction; -import org.elasticsearch.rest.action.search.RestExplainAction; import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.yaml.ObjectPath; @@ -62,13 +56,11 @@ import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.rest.BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.startsWith; /** * Tests to run before and after a full cluster restart. This is run twice, @@ -78,21 +70,14 @@ * with {@code tests.is_old_cluster} set to {@code false}. */ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { - private final boolean supportsLenientBooleans = getOldClusterVersion().before(Version.V_6_0_0_alpha1); private String index; - private String type; @Before public void setIndex() throws IOException { index = getTestName().toLowerCase(Locale.ROOT); } - @Before - public void setType() { - type = getOldClusterVersion().before(Version.V_6_7_0) ? 
"doc" : "_doc"; - } - public void testSearch() throws Exception { int count; if (isRunningAgainstOldCluster()) { @@ -106,9 +91,6 @@ public void testSearch() throws Exception { } { mappingsAndSettings.startObject("mappings"); - if (isRunningAgainstAncientCluster()) { - mappingsAndSettings.startObject(type); - } mappingsAndSettings.startObject("properties"); { mappingsAndSettings.startObject("string"); @@ -127,16 +109,12 @@ public void testSearch() throws Exception { mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); - if (isRunningAgainstAncientCluster()) { - mappingsAndSettings.endObject(); - } mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); Request createIndex = new Request("PUT", "/" + index); createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - createIndex.setOptions(allowTypesRemovalWarnings()); client().performRequest(createIndex); count = randomIntBetween(2000, 3000); @@ -151,7 +129,7 @@ public void testSearch() throws Exception { .field("int", randomInt(100)) .field("float", randomFloat()) // be sure to create a "proper" boolean (True, False) for the first document so that automapping is correct - .field("bool", i > 0 && supportsLenientBooleans ? randomLenientBoolean() : randomBoolean()) + .field("bool", i > 0 && randomBoolean()) .field("field.with.dots", randomAlphaOfLength(10)) .field("binary", Base64.getEncoder().encodeToString(randomByteArray)) .endObject() @@ -165,7 +143,7 @@ public void testSearch() throws Exception { assertBasicSearchWorks(count); assertAllSearchWorks(count); assertBasicAggregationWorks(); - assertRealtimeGetWorks(type); + assertRealtimeGetWorks(); assertStoredBinaryFields(count); } @@ -181,9 +159,6 @@ public void testNewReplicasWork() throws Exception { } { mappingsAndSettings.startObject("mappings"); - if (isRunningAgainstAncientCluster()) { - mappingsAndSettings.startObject(type); - } mappingsAndSettings.startObject("properties"); { mappingsAndSettings.startObject("field"); @@ -191,16 +166,12 @@ public void testNewReplicasWork() throws Exception { mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); - if (isRunningAgainstAncientCluster()) { - mappingsAndSettings.endObject(); - } mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); Request createIndex = new Request("PUT", "/" + index); createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - createIndex.setOptions(allowTypesRemovalWarnings()); client().performRequest(createIndex); int numDocs = randomIntBetween(2000, 3000); @@ -289,9 +260,6 @@ public void testShrink() throws IOException { { mappingsAndSettings.startObject("mappings"); { - if (isRunningAgainstAncientCluster()) { - mappingsAndSettings.startObject(type); - } mappingsAndSettings.startObject("properties"); { mappingsAndSettings.startObject("field"); @@ -301,25 +269,19 @@ public void testShrink() throws IOException { mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); - if (isRunningAgainstAncientCluster()) { - mappingsAndSettings.endObject(); - } } mappingsAndSettings.endObject(); - if (isRunningAgainstAncientCluster() == false) { - // the default number of shards is now one so we have to set the number of shards to be more than one explicitly - mappingsAndSettings.startObject("settings"); - { - mappingsAndSettings.field("index.number_of_shards", 5); - } - mappingsAndSettings.endObject(); + + mappingsAndSettings.startObject("settings"); + { + mappingsAndSettings.field("index.number_of_shards", 5); } + mappingsAndSettings.endObject(); } 
mappingsAndSettings.endObject(); Request createIndex = new Request("PUT", "/" + index); createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - createIndex.setOptions(allowTypesRemovalWarnings()); client().performRequest(createIndex); numDocs = randomIntBetween(512, 1024); @@ -370,9 +332,6 @@ public void testShrinkAfterUpgrade() throws IOException { { mappingsAndSettings.startObject("mappings"); { - if (isRunningAgainstAncientCluster()) { - mappingsAndSettings.startObject(type); - } mappingsAndSettings.startObject("properties"); { mappingsAndSettings.startObject("field"); @@ -382,23 +341,17 @@ public void testShrinkAfterUpgrade() throws IOException { mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); - if (isRunningAgainstAncientCluster()) { - mappingsAndSettings.endObject(); - } } mappingsAndSettings.endObject(); - if (isRunningAgainstAncientCluster() == false) { - // the default number of shards is now one so we have to set the number of shards to be more than one explicitly - mappingsAndSettings.startObject("settings"); - mappingsAndSettings.field("index.number_of_shards", 5); - mappingsAndSettings.endObject(); - } + // the default number of shards is now one so we have to set the number of shards to be more than one explicitly + mappingsAndSettings.startObject("settings"); + mappingsAndSettings.field("index.number_of_shards", 5); + mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); Request createIndex = new Request("PUT", "/" + index); createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - createIndex.setOptions(allowTypesRemovalWarnings()); client().performRequest(createIndex); numDocs = randomIntBetween(512, 1024); @@ -474,15 +427,15 @@ public void testRollover() throws IOException { bulk.append("{\"index\":{}}\n"); bulk.append("{\"test\":\"test\"}\n"); } - Request bulkRequest = new Request("POST", "/" + index + "_write/" + type + "/_bulk"); + + Request bulkRequest = new Request("POST", "/" + index + "_write/_bulk"); + bulkRequest.setJsonEntity(bulk.toString()); bulkRequest.addParameter("refresh", ""); - bulkRequest.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE)); assertThat(EntityUtils.toString(client().performRequest(bulkRequest).getEntity()), containsString("\"errors\":false")); if (isRunningAgainstOldCluster()) { Request rolloverRequest = new Request("POST", "/" + index + "_write/_rollover"); - rolloverRequest.setOptions(allowTypesRemovalWarnings()); rolloverRequest.setJsonEntity("{" + " \"conditions\": {" + " \"max_docs\": 5" @@ -552,12 +505,10 @@ void assertAllSearchWorks(int count) throws IOException { // the 'string' field has a boost of 4 in the mappings so it should get a payload boost String stringValue = (String) XContentMapValues.extractValue("_source.string", bestHit); assertNotNull(stringValue); - String type = (String) bestHit.get("_type"); String id = (String) bestHit.get("_id"); - Request explainRequest = new Request("GET", "/" + index + "/" + type + "/" + id + "/_explain"); + Request explainRequest = new Request("GET", "/" + index + "/_explain/" + id); explainRequest.setJsonEntity("{ \"query\": { \"match_all\" : {} }}"); - explainRequest.setOptions(expectWarnings(RestExplainAction.TYPES_DEPRECATION_MESSAGE)); String explanation = toStr(client().performRequest(explainRequest)); assertFalse("Could not find payload boost in explanation\n" + explanation, explanation.contains("payloadBoost")); @@ -597,7 +548,7 @@ void assertBasicAggregationWorks() throws IOException { 
assertTotalHits(termsCount, boolTerms); } - void assertRealtimeGetWorks(final String typeName) throws IOException { + void assertRealtimeGetWorks() throws IOException { Request disableAutoRefresh = new Request("PUT", "/" + index + "/_settings"); disableAutoRefresh.setJsonEntity("{ \"index\": { \"refresh_interval\" : -1 }}"); client().performRequest(disableAutoRefresh); @@ -608,15 +559,12 @@ void assertRealtimeGetWorks(final String typeName) throws IOException { Map hit = (Map) ((List)(XContentMapValues.extractValue("hits.hits", searchResponse))).get(0); String docId = (String) hit.get("_id"); - Request updateRequest = new Request("POST", "/" + index + "/" + typeName + "/" + docId + "/_update"); - updateRequest.setOptions(expectWarnings(RestUpdateAction.TYPES_DEPRECATION_MESSAGE)); + Request updateRequest = new Request("POST", "/" + index + "/_update/" + docId); updateRequest.setJsonEntity("{ \"doc\" : { \"foo\": \"bar\"}}"); client().performRequest(updateRequest); - Request getRequest = new Request("GET", "/" + index + "/" + typeName + "/" + docId); - if (getOldClusterVersion().before(Version.V_6_7_0)) { - getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); - } + Request getRequest = new Request("GET", "/" + index + "/_doc/" + docId); + Map getRsp = entityAsMap(client().performRequest(getRequest)); Map source = (Map) getRsp.get("_source"); assertTrue("doc does not contain 'foo' key: " + source, source.containsKey("foo")); @@ -659,18 +607,14 @@ void assertTotalHits(int expectedTotalHits, Map response) { } int extractTotalHits(Map response) { - if (isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_7_0_0)) { - return (Integer) XContentMapValues.extractValue("hits.total", response); - } else { - return (Integer) XContentMapValues.extractValue("hits.total.value", response); - } + return (Integer) XContentMapValues.extractValue("hits.total.value", response); } /** * Tests that a single document survives. Super basic smoke test. 
*/ public void testSingleDoc() throws IOException { - String docLocation = "/" + index + "/" + type + "/1"; + String docLocation = "/" + index + "/_doc/1"; String doc = "{\"test\": \"test\"}"; if (isRunningAgainstOldCluster()) { @@ -681,9 +625,6 @@ public void testSingleDoc() throws IOException { Request request = new Request("GET", docLocation); - if (getOldClusterVersion().before(Version.V_6_7_0)) { - request.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); - } assertThat(toStr(client().performRequest(request)), containsString(doc)); } @@ -754,10 +695,10 @@ public void testRecovery() throws Exception { i -> jsonBuilder().startObject().field("field", "value").endObject() ); } - saveInfoDocument("should_have_translog", Boolean.toString(shouldHaveTranslog)); + saveInfoDocument(index + "_should_have_translog", Boolean.toString(shouldHaveTranslog)); } else { count = countOfIndexedRandomDocuments(); - shouldHaveTranslog = Booleans.parseBoolean(loadInfoDocument("should_have_translog")); + shouldHaveTranslog = Booleans.parseBoolean(loadInfoDocument(index + "_should_have_translog")); } // Count the documents in the index to make sure we have as many as we put there @@ -867,9 +808,6 @@ public void testSnapshotRestore() throws IOException { } templateBuilder.endObject(); templateBuilder.startObject("mappings"); { - if (isRunningAgainstAncientCluster()) { - templateBuilder.startObject(type); - } { templateBuilder.startObject("_source"); { @@ -877,9 +815,6 @@ public void testSnapshotRestore() throws IOException { } templateBuilder.endObject(); } - if (isRunningAgainstAncientCluster()) { - templateBuilder.endObject(); - } } templateBuilder.endObject(); templateBuilder.startObject("aliases"); { @@ -899,13 +834,6 @@ public void testSnapshotRestore() throws IOException { Request createTemplateRequest = new Request("PUT", "/_template/test_template"); createTemplateRequest.setJsonEntity(Strings.toString(templateBuilder)); - // In 7.0, type names are no longer expected by default in put index template requests. 
- // We therefore use the deprecated typed APIs when running against the current version, but testing with a pre-7 version - if (isRunningAgainstOldCluster() == false && getOldClusterVersion().major < 7) { - createTemplateRequest.addParameter(INCLUDE_TYPE_NAME_PARAMETER, "true"); - } - createTemplateRequest.setOptions(allowTypesRemovalWarnings()); - client().performRequest(createTemplateRequest); if (isRunningAgainstOldCluster()) { @@ -995,10 +923,7 @@ public void testSoftDeletes() throws Exception { int numDocs = between(10, 100); for (int i = 0; i < numDocs; i++) { String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v1").endObject()); - Request request = new Request("POST", "/" + index + "/" + type + "/" + i); - if (isRunningAgainstAncientCluster() == false) { - request.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); - } + Request request = new Request("POST", "/" + index + "/_doc/" + i); request.setJsonEntity(doc); client().performRequest(request); refresh(); @@ -1009,19 +934,19 @@ public void testSoftDeletes() throws Exception { for (int i = 0; i < numDocs; i++) { if (randomBoolean()) { String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v2").endObject()); - Request request = new Request("POST", "/" + index + "/" + type + "/" + i); + Request request = new Request("POST", "/" + index + "/_doc/" + i); request.setJsonEntity(doc); client().performRequest(request); } else if (randomBoolean()) { - client().performRequest(new Request("DELETE", "/" + index + "/" + type + "/" + i)); + client().performRequest(new Request("DELETE", "/" + index + "/_doc/" + i)); liveDocs--; } } refresh(); assertTotalHits(liveDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")))); - saveInfoDocument("doc_count", Integer.toString(liveDocs)); + saveInfoDocument(index + "_doc_count", Integer.toString(liveDocs)); } else { - int liveDocs = Integer.parseInt(loadInfoDocument("doc_count")); + int liveDocs = Integer.parseInt(loadInfoDocument(index + "_doc_count")); assertTotalHits(liveDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")))); } } @@ -1035,27 +960,10 @@ private void checkSnapshot(final String snapshotName, final int count, final Ver assertEquals(singletonList(tookOnVersion.toString()), XContentMapValues.extractValue("snapshots.version", listSnapshotResponse)); // Remove the routing setting and template so we can test restoring them. - try { - Request clearRoutingFromSettings = new Request("PUT", "/_cluster/settings"); - clearRoutingFromSettings.setJsonEntity("{\"persistent\":{\"cluster.routing.allocation.exclude.test_attr\": null}}"); - client().performRequest(clearRoutingFromSettings); - } catch (WarningFailureException e) { - /* - * If this test is executed on the upgraded mode before testRemoteClusterSettingsUpgraded, - * we will hit a warning exception because we put some deprecated settings in that test. - */ - if (isRunningAgainstOldCluster() == false - && getOldClusterVersion().onOrAfter(Version.V_6_1_0) && getOldClusterVersion().before(Version.V_6_5_0)) { - for (String warning : e.getResponse().getWarnings()) { - assertThat(warning, containsString( - "setting was deprecated in Elasticsearch and will be removed in a future release! 
" - + "See the breaking changes documentation for the next major version.")); - assertThat(warning, startsWith("[search.remote.")); - } - } else { - throw e; - } - } + Request clearRoutingFromSettings = new Request("PUT", "/_cluster/settings"); + clearRoutingFromSettings.setJsonEntity("{\"persistent\":{\"cluster.routing.allocation.exclude.test_attr\": null}}"); + client().performRequest(clearRoutingFromSettings); + client().performRequest(new Request("DELETE", "/_template/test_template")); // Restore @@ -1083,10 +991,11 @@ && getOldClusterVersion().onOrAfter(Version.V_6_1_0) && getOldClusterVersion().b bulk.append("{\"index\":{\"_id\":\"").append(count + i).append("\"}}\n"); bulk.append("{\"test\":\"test\"}\n"); } - Request writeToRestoredRequest = new Request("POST", "/restored_" + index + "/" + type + "/_bulk"); + + Request writeToRestoredRequest = new Request("POST", "/restored_" + index + "/_bulk"); + writeToRestoredRequest.addParameter("refresh", "true"); writeToRestoredRequest.setJsonEntity(bulk.toString()); - writeToRestoredRequest.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE)); assertThat(EntityUtils.toString(client().performRequest(writeToRestoredRequest).getEntity()), containsString("\"errors\":false")); // And count to make sure the add worked @@ -1110,28 +1019,13 @@ && getOldClusterVersion().onOrAfter(Version.V_6_1_0) && getOldClusterVersion().b // Check that the template was restored successfully Request getTemplateRequest = new Request("GET", "/_template/test_template"); - // In 7.0, type names are no longer returned by default in get index template requests. - // We therefore use the deprecated typed APIs when running against the current version. - if (isRunningAgainstAncientCluster() == false) { - getTemplateRequest.addParameter(INCLUDE_TYPE_NAME_PARAMETER, "true"); - } - getTemplateRequest.setOptions(allowTypesRemovalWarnings()); - Map getTemplateResponse = entityAsMap(client().performRequest(getTemplateRequest)); Map expectedTemplate = new HashMap<>(); - if (isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_6_0_0_beta1)) { - expectedTemplate.put("template", "evil_*"); - } else { - expectedTemplate.put("index_patterns", singletonList("evil_*")); - } + expectedTemplate.put("index_patterns", singletonList("evil_*")); + expectedTemplate.put("settings", singletonMap("index", singletonMap("number_of_shards", "1"))); - // We don't have the type in the response starting with 7.0, but we won't have it on old cluster after upgrade - // either so look at the response to figure out the correct assertions - if (isTypeInTemplateResponse(getTemplateResponse)) { - expectedTemplate.put("mappings", singletonMap(type, singletonMap("_source", singletonMap("enabled", true)))); - } else { - expectedTemplate.put("mappings", singletonMap("_source", singletonMap("enabled", true))); - } + expectedTemplate.put("mappings", singletonMap("_source", singletonMap("enabled", true))); + expectedTemplate.put("order", 0); Map aliases = new HashMap<>(); @@ -1147,13 +1041,6 @@ && getOldClusterVersion().onOrAfter(Version.V_6_1_0) && getOldClusterVersion().b } } - @SuppressWarnings("unchecked") - private boolean isTypeInTemplateResponse(Map getTemplateResponse) { - return ( (Map) ( - (Map) getTemplateResponse.getOrDefault("test_template", emptyMap()) - ).get("mappings")).get("_source") == null; - } - // TODO tests for upgrades after shrink. We've had trouble with shrink in the past. 
private void indexRandomDocuments( @@ -1165,10 +1052,7 @@ private void indexRandomDocuments( logger.info("Indexing {} random documents", count); for (int i = 0; i < count; i++) { logger.debug("Indexing document [{}]", i); - Request createDocument = new Request("POST", "/" + index + "/" + type + "/" + i); - if (isRunningAgainstAncientCluster() == false) { - createDocument.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE)); - } + Request createDocument = new Request("POST", "/" + index + "/_doc/" + i); createDocument.setJsonEntity(Strings.toString(docSupplier.apply(i))); client().performRequest(createDocument); if (rarely()) { @@ -1180,44 +1064,34 @@ private void indexRandomDocuments( } } if (saveInfo) { - saveInfoDocument("count", Integer.toString(count)); + saveInfoDocument(index + "_count", Integer.toString(count)); } } private int countOfIndexedRandomDocuments() throws IOException { - return Integer.parseInt(loadInfoDocument("count")); + return Integer.parseInt(loadInfoDocument(index + "_count")); } - private void saveInfoDocument(String type, String value) throws IOException { + private void saveInfoDocument(String id, String value) throws IOException { XContentBuilder infoDoc = JsonXContent.contentBuilder().startObject(); infoDoc.field("value", value); infoDoc.endObject(); // Only create the first version so we know how many documents are created when the index is first created - Request request = new Request("PUT", "/info/" + this.type + "/" + index + "_" + type); + Request request = new Request("PUT", "/info/_doc/" + id); request.addParameter("op_type", "create"); request.setJsonEntity(Strings.toString(infoDoc)); - if (isRunningAgainstAncientCluster() == false) { - request.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); - } client().performRequest(request); } - private String loadInfoDocument(String type) throws IOException { - Request request = new Request("GET", "/info/" + this.type + "/" + index + "_" + type); + private String loadInfoDocument(String id) throws IOException { + Request request = new Request("GET", "/info/_doc/" + id); request.addParameter("filter_path", "_source"); - if (isRunningAgainstAncientCluster()) { - request.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); - } String doc = toStr(client().performRequest(request)); Matcher m = Pattern.compile("\"value\":\"(.+)\"").matcher(doc); assertTrue(doc, m.find()); return m.group(1); } - private Object randomLenientBoolean() { - return randomFrom(new Object[] {"off", "no", "0", 0, "false", false, "on", "yes", "1", 1, "true", true}); - } - private void refresh() throws IOException { logger.debug("Refreshing [{}]", index); client().performRequest(new Request("POST", "/" + index + "/_refresh")); diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.java index fb14b89fc621b..9e1e5f93fcd92 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.java @@ -19,84 +19,6 @@ package org.elasticsearch.upgrades; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsResponse; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import 
org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.transport.RemoteClusterService; - -import java.io.IOException; -import java.util.Collections; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.transport.RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_SEEDS; -import static org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE; -import static org.hamcrest.Matchers.equalTo; - public class FullClusterRestartSettingsUpgradeIT extends AbstractFullClusterRestartTestCase { - public void testRemoteClusterSettingsUpgraded() throws IOException { - assumeTrue("skip_unavailable did not exist until 6.1.0", getOldClusterVersion().onOrAfter(Version.V_6_1_0)); - assumeTrue("settings automatically upgraded since 6.5.0", getOldClusterVersion().before(Version.V_6_5_0)); - if (isRunningAgainstOldCluster()) { - final Request putSettingsRequest = new Request("PUT", "/_cluster/settings"); - try (XContentBuilder builder = jsonBuilder()) { - builder.startObject(); - { - builder.startObject("persistent"); - { - builder.field("search.remote.foo.skip_unavailable", true); - builder.field("search.remote.foo.seeds", Collections.singletonList("localhost:9200")); - } - builder.endObject(); - } - builder.endObject(); - putSettingsRequest.setJsonEntity(Strings.toString(builder)); - } - client().performRequest(putSettingsRequest); - - final Request getSettingsRequest = new Request("GET", "/_cluster/settings"); - final Response response = client().performRequest(getSettingsRequest); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, response.getEntity().getContent())) { - final ClusterGetSettingsResponse clusterGetSettingsResponse = ClusterGetSettingsResponse.fromXContent(parser); - final Settings settings = clusterGetSettingsResponse.getPersistentSettings(); - - assertTrue(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings)); - assertTrue(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").get(settings)); - assertTrue(SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings)); - assertThat( - SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").get(settings), - equalTo(Collections.singletonList("localhost:9200"))); - } - - assertSettingDeprecationsAndWarnings(new Setting[]{ - SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo"), - SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo")}); - } else { - final Request getSettingsRequest = new Request("GET", "/_cluster/settings"); - final Response getSettingsResponse = client().performRequest(getSettingsRequest); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, getSettingsResponse.getEntity().getContent())) { - final ClusterGetSettingsResponse clusterGetSettingsResponse = ClusterGetSettingsResponse.fromXContent(parser); - final Settings settings = clusterGetSettingsResponse.getPersistentSettings(); - - assertFalse(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings)); - assertTrue( - settings.toString(), - 
RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings)); - assertTrue(RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").get(settings)); - assertFalse(SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings)); - assertTrue(RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings)); - assertThat( - RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").get(settings), - equalTo(Collections.singletonList("localhost:9200"))); - } - } - } - } diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index fb4e33863cacf..861a774ee17d2 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.upgrades; import org.apache.http.util.EntityUtils; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; @@ -144,7 +143,6 @@ private static void addCandidate(String querySource, QueryBuilder expectedQb) { } public void testQueryBuilderBWC() throws Exception { - final String type = getOldClusterVersion().before(Version.V_7_0_0) ? "doc" : "_doc"; String index = "queries"; if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); @@ -157,9 +155,6 @@ public void testQueryBuilderBWC() throws Exception { } { mappingsAndSettings.startObject("mappings"); - if (isRunningAgainstAncientCluster()) { - mappingsAndSettings.startObject(type); - } mappingsAndSettings.startObject("properties"); { mappingsAndSettings.startObject("query"); @@ -178,9 +173,6 @@ public void testQueryBuilderBWC() throws Exception { } mappingsAndSettings.endObject(); mappingsAndSettings.endObject(); - if (isRunningAgainstAncientCluster()) { - mappingsAndSettings.endObject(); - } } mappingsAndSettings.endObject(); Request request = new Request("PUT", "/" + index); @@ -190,7 +182,7 @@ public void testQueryBuilderBWC() throws Exception { assertEquals(200, rsp.getStatusLine().getStatusCode()); for (int i = 0; i < CANDIDATES.size(); i++) { - request = new Request("PUT", "/" + index + "/" + type + "/" + Integer.toString(i)); + request = new Request("PUT", "/" + index + "/_doc/" + Integer.toString(i)); request.setJsonEntity((String) CANDIDATES.get(i)[0]); rsp = client().performRequest(request); assertEquals(201, rsp.getStatusLine().getStatusCode()); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml index e6e54cbb275e8..1d4f1883ef5f5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml @@ -322,3 +322,68 @@ setup: query: "there" ordered: false - match: { hits.total.value: 1 } + +--- +"Test overlapping": + - skip: + version: " - 7.0.99" + reason: "Implemented in 7.1" + - do: + search: + index: test + body: + query: + intervals: + text: + match: + query: "cold outside" + ordered: true + filter: + overlapping: + match: + query: "baby there" + ordered: false + - match: { 
hits.total.value: 1 } + - match: { hits.hits.0._id: "3" } + +--- +"Test before": + - skip: + version: " - 7.0.99" + reason: "Implemented in 7.1" + - do: + search: + index: test + body: + query: + intervals: + text: + match: + query: "cold" + filter: + before: + match: + query: "outside" + - match: { hits.total.value: 2 } + +--- +"Test after": + - skip: + version: " - 7.0.99" + reason: "Implemented in 7.1" + - do: + search: + index: test + body: + query: + intervals: + text: + match: + query: "cold" + filter: + after: + match: + query: "outside" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "4" } + diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java index 999ea9b20548d..99caa03e53992 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java @@ -39,7 +39,6 @@ import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.profile.SearchProfileShardResults; import org.elasticsearch.search.suggest.Suggest; -import org.elasticsearch.transport.RemoteClusterAware; import java.util.ArrayList; import java.util.Arrays; @@ -368,17 +367,7 @@ public int compareTo(ShardIdAndClusterAlias o) { if (shardIdCompareTo != 0) { return shardIdCompareTo; } - int clusterAliasCompareTo = clusterAlias.compareTo(o.clusterAlias); - if (clusterAliasCompareTo != 0) { - //TODO we may want to fix this, CCS returns remote results before local ones (TransportSearchAction#mergeShardsIterators) - if (clusterAlias.equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { - return 1; - } - if (o.clusterAlias.equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { - return -1; - } - } - return clusterAliasCompareTo; + return clusterAlias.compareTo(o.clusterAlias); } } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchShardIterator.java b/server/src/main/java/org/elasticsearch/action/search/SearchShardIterator.java index be3b5d7a9c2b3..ec27af0970545 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchShardIterator.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchShardIterator.java @@ -21,12 +21,14 @@ import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.cluster.routing.PlainShardIterator; +import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchShardTarget; import java.util.List; +import java.util.Objects; /** * Extension of {@link PlainShardIterator} used in the search api, which also holds the {@link OriginalIndices} @@ -93,4 +95,43 @@ void resetAndSkip() { boolean skip() { return skip; } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (super.equals(o) == false) { + return false; + } + SearchShardIterator that = (SearchShardIterator) o; + return Objects.equals(clusterAlias, that.clusterAlias); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), clusterAlias); + } + + @Override + public int compareTo(ShardIterator o) { + int superCompareTo = super.compareTo(o); + if (superCompareTo != 0 || (o instanceof SearchShardIterator == false)) { + return 
superCompareTo; + } + SearchShardIterator searchShardIterator = (SearchShardIterator)o; + if (clusterAlias == null && searchShardIterator.getClusterAlias() == null) { + return 0; + } + if (clusterAlias == null) { + return -1; + } + if (searchShardIterator.getClusterAlias() == null) { + return 1; + } + return clusterAlias.compareTo(searchShardIterator.getClusterAlias()); + } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 1d2e54ae86d7c..9d936a28846e4 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -159,7 +159,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING, FsDirectoryService.INDEX_LOCK_FACTOR_SETTING, EngineConfig.INDEX_CODEC_SETTING, - EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS, IndexMetaData.SETTING_WAIT_FOR_ACTIVE_SHARDS, IndexSettings.DEFAULT_PIPELINE, diff --git a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java index 543ec9be75d17..d4e6362085238 100644 --- a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java +++ b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java @@ -248,6 +248,18 @@ public void beforeIndexAddedToCluster(Index index, Settings indexSettings) { } } + @Override + public void onStoreCreated(ShardId shardId) { + for (IndexEventListener listener : listeners) { + try { + listener.onStoreCreated(shardId); + } catch (Exception e) { + logger.warn("failed to invoke on store created", e); + throw e; + } + } + } + @Override public void onStoreClosed(ShardId shardId) { for (IndexEventListener listener : listeners) { diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 8bbf048fe1246..51c625d334d55 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -388,6 +388,7 @@ public synchronized IndexShard createShard( DirectoryService directoryService = indexStore.newDirectoryService(path); store = new Store(shardId, this.indexSettings, directoryService.newDirectory(), lock, new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId))); + eventListener.onStoreCreated(shardId); indexShard = new IndexShard( routing, this.indexSettings, diff --git a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 7716cf93ffd6b..7696d545649d5 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -116,16 +116,6 @@ public Supplier retentionLeasesSupplier() { } }, Property.IndexScope, Property.NodeScope); - /** - * Configures an index to optimize documents with auto generated ids for append only. If this setting is updated from false - * to true might not take effect immediately. In other words, disabling the optimization will be immediately applied while - * re-enabling it might not be applied until the engine is in a safe state to do so. 
Depending on the engine implementation a change to - * this setting won't be reflected re-enabled optimization until the engine is restarted or the index is closed and reopened. - * The default is true - */ - public static final Setting INDEX_OPTIMIZE_AUTO_GENERATED_IDS = Setting.boolSetting("index.optimize_auto_generated_id", true, - Property.IndexScope, Property.Dynamic); - private final TranslogConfig translogConfig; /** @@ -349,14 +339,6 @@ public List getExternalRefreshListener() { */ public List getInternalRefreshListener() { return internalRefreshListener;} - - /** - * returns true if the engine is allowed to optimize indexing operations with an auto-generated ID - */ - public boolean isAutoGeneratedIDsOptimizationEnabled() { - return indexSettings.getValue(INDEX_OPTIMIZE_AUTO_GENERATED_IDS); - } - /** * Return the sort order of this index, or null if the index has no sort. */ diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 832df83fe0f5c..2def84f875b17 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -171,9 +171,6 @@ public InternalEngine(EngineConfig engineConfig) { final EngineConfig engineConfig, final BiFunction localCheckpointTrackerSupplier) { super(engineConfig); - if (engineConfig.isAutoGeneratedIDsOptimizationEnabled() == false) { - updateAutoIdTimestamp(Long.MAX_VALUE, true); - } final TranslogDeletionPolicy translogDeletionPolicy = new TranslogDeletionPolicy( engineConfig.getIndexSettings().getTranslogRetentionSize().getBytes(), engineConfig.getIndexSettings().getTranslogRetentionAge().getMillis() @@ -948,6 +945,7 @@ protected final IndexingStrategy planIndexingAsNonPrimary(Index index) throws IO } } } + markSeqNoAsSeen(index.seqNo()); return plan; } @@ -1301,6 +1299,7 @@ protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws delete.seqNo(), delete.version()); } } + markSeqNoAsSeen(delete.seqNo()); return plan; } @@ -1455,6 +1454,7 @@ public void maybePruneDeletes() { public NoOpResult noOp(final NoOp noOp) { NoOpResult noOpResult; try (ReleasableLock ignored = readLock.acquire()) { + markSeqNoAsSeen(noOp.seqNo()); noOpResult = innerNoOp(noOp); } catch (final Exception e) { noOpResult = new NoOpResult(getPrimaryTerm(), noOp.seqNo(), e); @@ -2397,12 +2397,6 @@ public void onSettingsChanged() { mergeScheduler.refreshConfig(); // config().isEnableGcDeletes() or config.getGcDeletesInMillis() may have changed: maybePruneDeletes(); - if (engineConfig.isAutoGeneratedIDsOptimizationEnabled() == false) { - // this is an anti-viral settings you can only opt out for the entire index - // only if a shard starts up again due to relocation or if the index is closed - // the setting will be re-interpreted if it's set to true - updateAutoIdTimestamp(Long.MAX_VALUE, true); - } final TranslogDeletionPolicy translogDeletionPolicy = translog.getDeletionPolicy(); final IndexSettings indexSettings = engineConfig.getIndexSettings(); translogDeletionPolicy.setRetentionAgeInMillis(indexSettings.getTranslogRetentionAge().getMillis()); @@ -2434,6 +2428,13 @@ public void waitForOpsToComplete(long seqNo) throws InterruptedException { localCheckpointTracker.waitForOpsToComplete(seqNo); } + /** + * Marks the given seq_no as seen and advances the max_seq_no of this engine to at least that value. 
+ */ + protected final void markSeqNoAsSeen(long seqNo) { + localCheckpointTracker.advanceMaxSeqNo(seqNo); + } + /** * Checks if the given operation has been processed in this engine or not. * @return true if the given operation was processed; otherwise false. diff --git a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java index df6a291372f64..5fd06633bcfc8 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.index.mapper; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; @@ -31,8 +32,10 @@ import org.apache.lucene.search.suggest.document.RegexCompletionQuery; import org.apache.lucene.search.suggest.document.SuggestField; import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.util.set.Sets; @@ -85,6 +88,11 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapperParser { public static final String CONTENT_TYPE = "completion"; + /** + * Maximum allowed number of completion contexts in a mapping. + */ + static final int COMPLETION_CONTEXTS_LIMIT = 10; + public static class Defaults { public static final MappedFieldType FIELD_TYPE = new CompletionFieldType(); static { @@ -354,6 +362,8 @@ public static class Builder extends FieldMapper.Builder COMPLETION_CONTEXTS_LIMIT) { + if (context.indexCreatedVersion().onOrAfter(Version.V_8_0_0)) { + throw new IllegalArgumentException( + "Limit of completion field contexts [" + COMPLETION_CONTEXTS_LIMIT + "] has been exceeded"); + } else { + deprecationLogger.deprecated("You have defined more than [" + COMPLETION_CONTEXTS_LIMIT + "] completion contexts" + + " in the mapping for index [" + context.indexSettings().get(IndexMetaData.SETTING_INDEX_PROVIDED_NAME) + "]. 
" + + "The maximum allowed number of completion contexts in a mapping will be limited to " + + "[" + COMPLETION_CONTEXTS_LIMIT + "] starting in version [8.0]."); + } + } + } } private int maxInputLength; diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java b/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java index 3fa608db37eaf..dacd843c377ac 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java +++ b/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java @@ -453,8 +453,14 @@ public IntervalsSource filter(IntervalsSource input, QueryShardContext context, return Intervals.notContaining(input, filterSource); case "not_contained_by": return Intervals.notContainedBy(input, filterSource); + case "overlapping": + return Intervals.overlapping(input, filterSource); case "not_overlapping": return Intervals.nonOverlapping(input, filterSource); + case "before": + return Intervals.before(input, filterSource); + case "after": + return Intervals.after(input, filterSource); default: throw new IllegalArgumentException("Unknown filter type [" + type + "]"); } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java index 50f4e311c8d81..a19d9ac4abb94 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java @@ -81,6 +81,15 @@ public synchronized long generateSeqNo() { return nextSeqNo++; } + /** + * Marks the provided sequence number as seen and updates the max_seq_no if needed. + */ + public synchronized void advanceMaxSeqNo(long seqNo) { + if (seqNo >= nextSeqNo) { + nextSeqNo = seqNo + 1; + } + } + /** * Marks the processing of the provided sequence number as completed as updates the checkpoint if possible. 
* diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java index f35e4906131e1..6fa1fd7fb3f3e 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java @@ -115,7 +115,7 @@ public void onFailure(final Exception e) { } @Override - protected Response shardOperation(final T request, final ShardId shardId) throws IOException { + protected Response shardOperation(final T request, final ShardId shardId) { throw new UnsupportedOperationException(); } @@ -136,10 +136,10 @@ protected boolean resolveIndex(final T request) { public static class Add extends Action { public static final Add INSTANCE = new Add(); - public static final String NAME = "indices:admin/seq_no/add_retention_lease"; + public static final String ACTION_NAME = "indices:admin/seq_no/add_retention_lease"; private Add() { - super(NAME); + super(ACTION_NAME); } public static class TransportAction extends TransportRetentionLeaseAction { @@ -153,7 +153,7 @@ public TransportAction( final IndexNameExpressionResolver indexNameExpressionResolver, final IndicesService indicesService) { super( - NAME, + ACTION_NAME, threadPool, clusterService, transportService, @@ -186,10 +186,10 @@ public Response newResponse() { public static class Renew extends Action { public static final Renew INSTANCE = new Renew(); - public static final String NAME = "indices:admin/seq_no/renew_retention_lease"; + public static final String ACTION_NAME = "indices:admin/seq_no/renew_retention_lease"; private Renew() { - super(NAME); + super(ACTION_NAME); } public static class TransportAction extends TransportRetentionLeaseAction { @@ -203,7 +203,7 @@ public TransportAction( final IndexNameExpressionResolver indexNameExpressionResolver, final IndicesService indicesService) { super( - NAME, + ACTION_NAME, threadPool, clusterService, transportService, @@ -232,10 +232,10 @@ public Response newResponse() { public static class Remove extends Action { public static final Remove INSTANCE = new Remove(); - public static final String NAME = "indices:admin/seq_no/remove_retention_lease"; + public static final String ACTION_NAME = "indices:admin/seq_no/remove_retention_lease"; private Remove() { - super(NAME); + super(ACTION_NAME); } public static class TransportAction extends TransportRetentionLeaseAction { @@ -249,7 +249,7 @@ public TransportAction( final IndexNameExpressionResolver indexNameExpressionResolver, final IndicesService indicesService) { super( - NAME, + ACTION_NAME, threadPool, clusterService, transportService, diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseAlreadyExistsException.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseAlreadyExistsException.java index aaa41a7b400c1..ffd5e96e6a526 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseAlreadyExistsException.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseAlreadyExistsException.java @@ -27,7 +27,7 @@ public class RetentionLeaseAlreadyExistsException extends ResourceAlreadyExistsException { - RetentionLeaseAlreadyExistsException(final String id) { + public RetentionLeaseAlreadyExistsException(final String id) { super("retention lease with ID [" + Objects.requireNonNull(id) + "] already exists"); } diff --git 
a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseNotFoundException.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseNotFoundException.java index d975077327fa3..2b13ae6b448e0 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseNotFoundException.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseNotFoundException.java @@ -27,7 +27,7 @@ public class RetentionLeaseNotFoundException extends ResourceNotFoundException { - RetentionLeaseNotFoundException(final String id) { + public RetentionLeaseNotFoundException(final String id) { super("retention lease with ID [" + Objects.requireNonNull(id) + "] not found"); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java b/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java index c0a89e7cf006c..982b42b2c3f66 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java @@ -160,6 +160,13 @@ default void afterIndexShardDeleted(ShardId shardId, Settings indexSettings) { } default void beforeIndexAddedToCluster(Index index, Settings indexSettings) { } + /** + * Called when the given shard's store is created. The store is created before the shard itself. + * + * @param shardId the shard ID the store belongs to + */ + default void onStoreCreated(ShardId shardId) {} + /** * Called when the given shard's store is closed. The store is closed once all resources have been released on the store. * This implies that all index readers are closed and no recoveries are running. diff --git a/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java b/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java index badf981803603..a8b50fcc53895 100644 --- a/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java +++ b/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java @@ -144,6 +144,7 @@ public IndexInput openInput(String name, IOContext context) throws IOException { case "nvd": case "dvd": case "tim": + case "cfs": // we need to do these checks on the outer directory since the inner doesn't know about pending deletes ensureOpen(); ensureCanRead(name); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 64236ae19fa52..d6e5449a80aa8 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -62,6 +62,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.AbstractRefCounted; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -83,7 +84,6 @@ import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.cache.request.ShardRequestCache; import org.elasticsearch.index.engine.CommitStats; -import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.index.engine.NoOpEngine; @@ -126,6 +126,7 @@ import java.io.Closeable;
import java.io.IOException; +import java.io.UncheckedIOException; import java.nio.file.Files; import java.util.ArrayList; import java.util.Collection; @@ -196,6 +197,7 @@ public class IndicesService extends AbstractLifecycleComponent private final MetaStateService metaStateService; private final Collection<Function<IndexSettings, Optional<EngineFactory>>> engineFactoryProviders; private final Map<String, Function<IndexSettings, IndexStore>> indexStoreFactories; + final AbstractRefCounted indicesRefCount; // pkg-private for testing @Override protected void doStart() { @@ -251,6 +253,27 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon } this.indexStoreFactories = indexStoreFactories; + // doClose() is called when shutting down a node, yet there might still be ongoing requests + // that we need to wait for before closing some resources such as the caches. In order to + // avoid closing these resources while ongoing requests are still being processed, we use a + // ref count which will only close them when both this service and all index services are + // actually closed + indicesRefCount = new AbstractRefCounted("indices") { + @Override + protected void closeInternal() { + try { + IOUtils.close( + analysisRegistry, + indexingMemoryController, + indicesFieldDataCache, + cacheCleaner, + indicesRequestCache, + indicesQueryCache); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + }; } @Override @@ -282,14 +305,8 @@ protected void doStop() { } @Override - protected void doClose() { - IOUtils.closeWhileHandlingException( - analysisRegistry, - indexingMemoryController, - indicesFieldDataCache, - cacheCleaner, - indicesRequestCache, - indicesQueryCache); + protected void doClose() throws IOException { + indicesRefCount.decRef(); } /** @@ -457,9 +474,17 @@ public synchronized IndexService createIndex( } List finalListeners = new ArrayList<>(builtInListeners); final IndexEventListener onStoreClose = new IndexEventListener() { + @Override + public void onStoreCreated(ShardId shardId) { + indicesRefCount.incRef(); + } @Override public void onStoreClosed(ShardId shardId) { - indicesQueryCache.onClose(shardId); + try { + indicesRefCount.decRef(); + } finally { + indicesQueryCache.onClose(shardId); + } } }; finalListeners.add(onStoreClose); @@ -495,11 +520,6 @@ private synchronized IndexService createIndexService(final String reason, List builtInListeners, IndexingOperationListener... 
indexingOperationListeners) throws IOException { final IndexSettings idxSettings = new IndexSettings(indexMetaData, settings, indexScopedSettings); - if (idxSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0) - && EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.exists(idxSettings.getSettings())) { - throw new IllegalArgumentException( - "Setting [" + EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.getKey() + "] was removed in version 7.0.0"); - } // we ignore private settings since they are not registered settings indexScopedSettings.validate(indexMetaData.getSettings(), true, true, true); logger.debug("creating Index [{}], shards [{}]/[{}] - reason [{}]", diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java index 7539c1653cce4..bc71cb597b234 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java @@ -81,7 +81,7 @@ public void put(Version version, T model) { @Override public Collection values() { - return Collections.singleton(model); + return model == null ? Collections.emptySet() : Collections.singleton(model); } } diff --git a/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java b/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java index 0b7d8da481c62..3cbc47173c6b4 100644 --- a/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java +++ b/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java @@ -19,14 +19,12 @@ package org.elasticsearch.search.dfs; -import com.carrotsearch.hppc.ObjectHashSet; import com.carrotsearch.hppc.ObjectObjectHashMap; -import com.carrotsearch.hppc.cursors.ObjectCursor; - -import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermStates; import org.apache.lucene.search.CollectionStatistics; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.TermStatistics; import org.elasticsearch.common.collect.HppcMaps; @@ -36,9 +34,8 @@ import org.elasticsearch.tasks.TaskCancelledException; import java.io.IOException; -import java.util.AbstractSet; -import java.util.Collection; -import java.util.Iterator; +import java.util.HashMap; +import java.util.Map; /** * Dfs phase of a search request, used to make scoring 100% accurate by collecting additional info from each shard before the query phase. 
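The DfsPhase javadoc above states the goal: gather the term and field statistics a query needs before the query phase runs. The rewritten execute() in the next hunk achieves this without Weight#extractTerms by wrapping the reader in an IndexSearcher whose termStatistics and collectionStatistics overrides record every lookup that weight creation triggers, so the collected set is exactly what scoring will consult. A self-contained sketch of that recording technique against plain Lucene 8 (the override signatures, null checks, and ScoreMode.COMPLETE follow the patch; the class name and the caller-supplied maps are illustrative):

import java.io.IOException;
import java.util.Map;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermStates;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.TermStatistics;

public final class StatisticsRecordingSearcher {

    // Populates the given maps with the statistics the query would consult while scoring.
    static void collect(IndexReader reader, Query query,
                        Map<Term, TermStatistics> termStats,
                        Map<String, CollectionStatistics> fieldStats) throws IOException {
        IndexSearcher searcher = new IndexSearcher(reader) {
            @Override
            public TermStatistics termStatistics(Term term, TermStates states) throws IOException {
                TermStatistics ts = super.termStatistics(term, states);
                if (ts != null) { // null when the term does not occur in this index
                    termStats.put(term, ts);
                }
                return ts;
            }

            @Override
            public CollectionStatistics collectionStatistics(String field) throws IOException {
                CollectionStatistics cs = super.collectionStatistics(field);
                if (cs != null) { // null when no document has the field
                    fieldStats.put(field, cs);
                }
                return cs;
            }
        };
        // Creating the weight drives the statistics lookups; the weight itself is discarded.
        searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE, 1f);
    }
}

Rescore queries get the same treatment: the new RescoreContext#getQueries() hands each rescorer's queries to createWeight so their terms are counted as well.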
@@ -52,42 +49,46 @@ public void preProcess(SearchContext context) { @Override public void execute(SearchContext context) { - final ObjectHashSet termsSet = new ObjectHashSet<>(); try { - context.searcher().createWeight(context.searcher().rewrite(context.query()), ScoreMode.COMPLETE, 1f) - .extractTerms(new DelegateSet(termsSet)); + ObjectObjectHashMap fieldStatistics = HppcMaps.newNoNullKeysMap(); + Map stats = new HashMap<>(); + IndexSearcher searcher = new IndexSearcher(context.searcher().getIndexReader()) { + @Override + public TermStatistics termStatistics(Term term, TermStates states) throws IOException { + if (context.isCancelled()) { + throw new TaskCancelledException("cancelled"); + } + TermStatistics ts = super.termStatistics(term, states); + if (ts != null) { + stats.put(term, ts); + } + return ts; + } + + @Override + public CollectionStatistics collectionStatistics(String field) throws IOException { + if (context.isCancelled()) { + throw new TaskCancelledException("cancelled"); + } + CollectionStatistics cs = super.collectionStatistics(field); + if (cs != null) { + fieldStatistics.put(field, cs); + } + return cs; + } + }; + + searcher.createWeight(context.searcher().rewrite(context.query()), ScoreMode.COMPLETE, 1); for (RescoreContext rescoreContext : context.rescore()) { - try { - rescoreContext.rescorer().extractTerms(context.searcher(), rescoreContext, new DelegateSet(termsSet)); - } catch (IOException e) { - throw new IllegalStateException("Failed to extract terms", e); + for (Query query : rescoreContext.getQueries()) { + searcher.createWeight(context.searcher().rewrite(query), ScoreMode.COMPLETE, 1); } } - Term[] terms = termsSet.toArray(Term.class); + Term[] terms = stats.keySet().toArray(new Term[0]); TermStatistics[] termStatistics = new TermStatistics[terms.length]; - IndexReaderContext indexReaderContext = context.searcher().getTopReaderContext(); for (int i = 0; i < terms.length; i++) { - if(context.isCancelled()) { - throw new TaskCancelledException("cancelled"); - } - // LUCENE 4 UPGRADE: cache TermStates? 
- TermStates termContext = TermStates.build(indexReaderContext, terms[i], true); - termStatistics[i] = context.searcher().termStatistics(terms[i], termContext); - } - - ObjectObjectHashMap fieldStatistics = HppcMaps.newNoNullKeysMap(); - for (Term term : terms) { - assert term.field() != null : "field is null"; - if (fieldStatistics.containsKey(term.field()) == false) { - final CollectionStatistics collectionStatistics = context.searcher().collectionStatistics(term.field()); - if (collectionStatistics != null) { - fieldStatistics.put(term.field(), collectionStatistics); - } - if(context.isCancelled()) { - throw new TaskCancelledException("cancelled"); - } - } + termStatistics[i] = stats.get(terms[i]); } context.dfsResult().termsStatistics(terms, termStatistics) @@ -95,58 +96,6 @@ public void execute(SearchContext context) { .maxDoc(context.searcher().getIndexReader().maxDoc()); } catch (Exception e) { throw new DfsPhaseExecutionException(context, "Exception during dfs phase", e); - } finally { - termsSet.clear(); // don't hold on to terms - } - } - - // We need to bridge to JCF world, b/c of Query#extractTerms - private static class DelegateSet extends AbstractSet { - - private final ObjectHashSet delegate; - - private DelegateSet(ObjectHashSet delegate) { - this.delegate = delegate; - } - - @Override - public boolean add(Term term) { - return delegate.add(term); - } - - @Override - public boolean addAll(Collection terms) { - boolean result = false; - for (Term term : terms) { - result = delegate.add(term); - } - return result; - } - - @Override - public Iterator iterator() { - final Iterator<ObjectCursor<Term>> iterator = delegate.iterator(); - return new Iterator() { - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public Term next() { - return iterator.next().value; - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - }; - } - - @Override - public int size() { - return delegate.size(); } } diff --git a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java index d70ec62c7af60..6e236d3e8143d 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java @@ -19,19 +19,19 @@ package org.elasticsearch.search.rescore; -import org.apache.lucene.index.Term; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.TopDocs; import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.Comparator; +import java.util.List; import java.util.Set; -import java.util.Collections; + import static java.util.stream.Collectors.toSet; public final class QueryRescorer implements Rescorer { @@ -170,6 +170,11 @@ public void setQuery(Query query) { this.query = query; } + @Override + public List getQueries() { + return Collections.singletonList(query); + } + public Query query() { return query; } @@ -203,10 +208,4 @@ public void setScoreMode(String scoreMode) { } } - @Override - public void extractTerms(IndexSearcher searcher, RescoreContext rescoreContext, Set termsSet) throws IOException { - Query query = ((QueryRescoreContext) rescoreContext).query(); - searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 
1f).extractTerms(termsSet); - } - } diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java b/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java index 2401b9ff32900..4f44af6321791 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java @@ -19,6 +19,10 @@ package org.elasticsearch.search.rescore; +import org.apache.lucene.search.Query; + +import java.util.Collections; +import java.util.List; import java.util.Set; /** @@ -29,7 +33,7 @@ public class RescoreContext { private final int windowSize; private final Rescorer rescorer; - private Set resroredDocs; //doc Ids for which rescoring was applied + private Set rescoredDocs; //doc Ids for which rescoring was applied /** * Build the context. @@ -55,10 +59,17 @@ public int getWindowSize() { } public void setRescoredDocs(Set docIds) { - resroredDocs = docIds; + rescoredDocs = docIds; } public boolean isRescored(int docId) { - return resroredDocs.contains(docId); + return rescoredDocs.contains(docId); + } + + /** + * Returns queries associated with the rescorer + */ + public List getQueries() { + return Collections.emptyList(); } } diff --git a/server/src/main/java/org/elasticsearch/search/rescore/Rescorer.java b/server/src/main/java/org/elasticsearch/search/rescore/Rescorer.java index 023c9340e4b1d..36c77d2df4124 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/Rescorer.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/Rescorer.java @@ -19,14 +19,11 @@ package org.elasticsearch.search.rescore; -import org.apache.lucene.index.Term; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TopDocs; -import org.elasticsearch.action.search.SearchType; import java.io.IOException; -import java.util.Set; /** * A query rescorer interface used to re-rank the Top-K results of a previously @@ -61,10 +58,4 @@ public interface Rescorer { Explanation explain(int topLevelDocId, IndexSearcher searcher, RescoreContext rescoreContext, Explanation sourceExplanation) throws IOException; - /** - * Extracts all terms needed to execute this {@link Rescorer}. 
This method - * is executed in a distributed frequency collection roundtrip for - * {@link SearchType#DFS_QUERY_THEN_FETCH} - */ - void extractTerms(IndexSearcher searcher, RescoreContext rescoreContext, Set termsSet) throws IOException; } diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 2ff5ae1583e37..d5a524105dd01 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -388,28 +388,28 @@ private InetSocketAddress bindToPort(final String name, final InetAddress hostAd PortsRange portsRange = new PortsRange(port); final AtomicReference lastException = new AtomicReference<>(); final AtomicReference boundSocket = new AtomicReference<>(); - boolean success = portsRange.iterate(portNumber -> { - try { - TcpServerChannel channel = bind(name, new InetSocketAddress(hostAddress, portNumber)); - synchronized (serverChannels) { - List list = serverChannels.get(name); - if (list == null) { - list = new ArrayList<>(); - serverChannels.put(name, list); - } - list.add(channel); + closeLock.writeLock().lock(); + try { + if (lifecycle.initialized() == false && lifecycle.started() == false) { + throw new IllegalStateException("transport has been stopped"); + } + boolean success = portsRange.iterate(portNumber -> { + try { + TcpServerChannel channel = bind(name, new InetSocketAddress(hostAddress, portNumber)); + serverChannels.computeIfAbsent(name, k -> new ArrayList<>()).add(channel); boundSocket.set(channel.getLocalAddress()); + } catch (Exception e) { + lastException.set(e); + return false; } - } catch (Exception e) { - lastException.set(e); - return false; + return true; + }); + if (!success) { + throw new BindTransportException("Failed to bind to [" + port + "]", lastException.get()); } - return true; - }); - if (!success) { - throw new BindTransportException("Failed to bind to [" + port + "]", lastException.get()); + } finally { + closeLock.writeLock().unlock(); } - if (logger.isDebugEnabled()) { logger.debug("Bound profile [{}] to address {{}}", name, NetworkAddress.format(boundSocket.get())); } @@ -553,6 +553,7 @@ protected final void doClose() { protected final void doStop() { final CountDownLatch latch = new CountDownLatch(1); // make sure we run it on another thread than a possible IO handler thread + assert threadPool.generic().isShutdown() == false : "Must stop transport before terminating underlying threadpool"; threadPool.generic().execute(() -> { closeLock.writeLock().lock(); try { diff --git a/server/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java b/server/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java index c82f184eff418..177c0870803d7 100644 --- a/server/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java +++ b/server/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java @@ -53,7 +53,7 @@ public void testOriginalIndicesSerialization() throws IOException { } } - private static OriginalIndices randomOriginalIndices() { + public static OriginalIndices randomOriginalIndices() { int numIndices = randomInt(10); String[] indices = new String[numIndices]; for (int j = 0; j < indices.length; j++) { diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java index c32ff7b88f8b9..baedc69d64167 100644 --- 
a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java @@ -632,12 +632,6 @@ public int compare(SearchHit a, SearchHit b) { } int clusterAliasCompareTo = aShard.getClusterAlias().compareTo(bShard.getClusterAlias()); if (clusterAliasCompareTo != 0) { - if (aShard.getClusterAlias().equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { - return 1; - } - if (bShard.getClusterAlias().equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { - return -1; - } return clusterAliasCompareTo; } return Integer.compare(a.docId(), b.docId()); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchShardIteratorTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchShardIteratorTests.java index 09595650932c5..8fdd0838e984c 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchShardIteratorTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchShardIteratorTests.java @@ -20,12 +20,19 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.OriginalIndicesTests; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.routing.GroupShardsIteratorTests; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.hamcrest.Matchers; +import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; +import java.util.List; public class SearchShardIteratorTests extends ESTestCase { @@ -64,4 +71,79 @@ public void testNewSearchShardTarget() { assertEquals(nodeId, searchShardTarget.getNodeId()); assertSame(originalIndices, searchShardTarget.getOriginalIndices()); } + + public void testEqualsAndHashcode() { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(randomSearchShardIterator(), s -> new SearchShardIterator(s.getClusterAlias(), + s.shardId(), s.getShardRoutings(), s.getOriginalIndices()), s -> { + if (randomBoolean()) { + String clusterAlias; + if (s.getClusterAlias() == null) { + clusterAlias = randomAlphaOfLengthBetween(5, 10); + } else { + clusterAlias = randomBoolean() ? 
null : s.getClusterAlias() + randomAlphaOfLength(3); + } + return new SearchShardIterator(clusterAlias, s.shardId(), s.getShardRoutings(), s.getOriginalIndices()); + } else { + ShardId shardId = new ShardId(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLength(10), + randomIntBetween(0, Integer.MAX_VALUE)); + return new SearchShardIterator(s.getClusterAlias(), shardId, s.getShardRoutings(), s.getOriginalIndices()); + } + }); + } + + public void testCompareTo() { + String[] clusters = generateRandomStringArray(2, 10, false, false); + Arrays.sort(clusters); + String[] indices = generateRandomStringArray(3, 10, false, false); + Arrays.sort(indices); + String[] uuids = generateRandomStringArray(3, 10, false, false); + Arrays.sort(uuids); + List shardIterators = new ArrayList<>(); + int numShards = randomIntBetween(1, 5); + for (int i = 0; i < numShards; i++) { + for (String index : indices) { + for (String uuid : uuids) { + ShardId shardId = new ShardId(index, uuid, i); + shardIterators.add(new SearchShardIterator(null, shardId, GroupShardsIteratorTests.randomShardRoutings(shardId), + OriginalIndicesTests.randomOriginalIndices())); + for (String cluster : clusters) { + shardIterators.add(new SearchShardIterator(cluster, shardId, GroupShardsIteratorTests.randomShardRoutings(shardId), + OriginalIndicesTests.randomOriginalIndices())); + } + + } + } + } + for (int i = 0; i < shardIterators.size(); i++) { + SearchShardIterator currentIterator = shardIterators.get(i); + for (int j = i + 1; j < shardIterators.size(); j++) { + SearchShardIterator greaterIterator = shardIterators.get(j); + assertThat(currentIterator, Matchers.lessThan(greaterIterator)); + assertThat(greaterIterator, Matchers.greaterThan(currentIterator)); + assertNotEquals(currentIterator, greaterIterator); + } + for (int j = i - 1; j >= 0; j--) { + SearchShardIterator smallerIterator = shardIterators.get(j); + assertThat(smallerIterator, Matchers.lessThan(currentIterator)); + assertThat(currentIterator, Matchers.greaterThan(smallerIterator)); + assertNotEquals(currentIterator, smallerIterator); + } + } + } + + public void testCompareToEqualItems() { + SearchShardIterator shardIterator1 = randomSearchShardIterator(); + SearchShardIterator shardIterator2 = new SearchShardIterator(shardIterator1.getClusterAlias(), shardIterator1.shardId(), + shardIterator1.getShardRoutings(), shardIterator1.getOriginalIndices()); + assertEquals(shardIterator1, shardIterator2); + assertEquals(0, shardIterator1.compareTo(shardIterator2)); + assertEquals(0, shardIterator2.compareTo(shardIterator1)); + } + + private static SearchShardIterator randomSearchShardIterator() { + String clusterAlias = randomBoolean() ? 
null : randomAlphaOfLengthBetween(5, 10); + ShardId shardId = new ShardId(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLength(10), randomIntBetween(0, Integer.MAX_VALUE)); + return new SearchShardIterator(clusterAlias, shardId, GroupShardsIteratorTests.randomShardRoutings(shardId), + OriginalIndicesTests.randomOriginalIndices()); + } } diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index 9a9524d0ff57e..9fb3358b29f13 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -25,11 +25,13 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.OriginalIndicesTests; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.GroupShardsIteratorTests; import org.elasticsearch.cluster.routing.PlainShardIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; @@ -39,6 +41,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.index.Index; import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.TermsQueryBuilder; @@ -71,6 +74,7 @@ import org.elasticsearch.transport.TransportService; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -85,7 +89,6 @@ import java.util.function.BiFunction; import java.util.function.Function; -import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.awaitLatch; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; @@ -101,92 +104,96 @@ public void tearDown() throws Exception { ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); } + private static SearchShardIterator createSearchShardIterator(int id, Index index, + OriginalIndices originalIndices, String clusterAlias) { + ShardId shardId = new ShardId(index, id); + List shardRoutings = GroupShardsIteratorTests.randomShardRoutings(shardId); + return new SearchShardIterator(clusterAlias, shardId, shardRoutings, originalIndices); + } + public void testMergeShardsIterators() { - List localShardIterators = new ArrayList<>(); - { - ShardId shardId = new ShardId("local_index", "local_index_uuid", 0); - ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, "local_node", true, STARTED); - ShardIterator shardIterator = new PlainShardIterator(shardId, Collections.singletonList(shardRouting)); - localShardIterators.add(shardIterator); + Index[] indices = new Index[randomIntBetween(1, 10)]; + for (int i = 0; i < indices.length; i++) { + if (randomBoolean() && i > 0) { + Index existingIndex = 
indices[randomIntBetween(0, i - 1)]; + indices[i] = new Index(existingIndex.getName(), randomAlphaOfLength(10)); + } else { + indices[i] = new Index(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLength(10)); + } } - { - ShardId shardId2 = new ShardId("local_index_2", "local_index_2_uuid", 1); - ShardRouting shardRouting2 = TestShardRouting.newShardRouting(shardId2, "local_node", true, STARTED); - ShardIterator shardIterator2 = new PlainShardIterator(shardId2, Collections.singletonList(shardRouting2)); - localShardIterators.add(shardIterator2); + Arrays.sort(indices, (o1, o2) -> { + int nameCompareTo = o1.getName().compareTo(o2.getName()); + if (nameCompareTo == 0) { + return o1.getUUID().compareTo(o2.getUUID()); + } + return nameCompareTo; + }); + String[] remoteClusters = new String[randomIntBetween(1, 3)]; + for (int i = 0; i < remoteClusters.length; i++) { + remoteClusters[i] = randomAlphaOfLengthBetween(5, 10); } - GroupShardsIterator localShardsIterator = new GroupShardsIterator<>(localShardIterators); - - OriginalIndices localIndices = new OriginalIndices(new String[]{"local_alias", "local_index_2"}, - SearchRequest.DEFAULT_INDICES_OPTIONS); + Arrays.sort(remoteClusters); - OriginalIndices remoteIndices = new OriginalIndices(new String[]{"remote_alias", "remote_index_2"}, - IndicesOptions.strictExpandOpen()); + List expected = new ArrayList<>(); + String localClusterAlias = randomAlphaOfLengthBetween(5, 10); + OriginalIndices localIndices = OriginalIndicesTests.randomOriginalIndices(); + List localShardIterators = new ArrayList<>(); List remoteShardIterators = new ArrayList<>(); - { - ShardId remoteShardId = new ShardId("remote_index", "remote_index_uuid", 2); - ShardRouting remoteShardRouting = TestShardRouting.newShardRouting(remoteShardId, "remote_node", true, STARTED); - SearchShardIterator remoteShardIterator = new SearchShardIterator("remote", remoteShardId, - Collections.singletonList(remoteShardRouting), remoteIndices); - remoteShardIterators.add(remoteShardIterator); - } - { - ShardId remoteShardId2 = new ShardId("remote_index_2", "remote_index_2_uuid", 3); - ShardRouting remoteShardRouting2 = TestShardRouting.newShardRouting(remoteShardId2, "remote_node", true, STARTED); - SearchShardIterator remoteShardIterator2 = new SearchShardIterator("remote", remoteShardId2, - Collections.singletonList(remoteShardRouting2), remoteIndices); - remoteShardIterators.add(remoteShardIterator2); + int numShards = randomIntBetween(0, 10); + for (int i = 0; i < numShards; i++) { + int numIndices = randomIntBetween(0, indices.length); + for (int j = 0; j < numIndices; j++) { + Index index = indices[j]; + boolean localIndex = randomBoolean(); + if (localIndex) { + SearchShardIterator localIterator = createSearchShardIterator(i, index, localIndices, localClusterAlias); + localShardIterators.add(new PlainShardIterator(localIterator.shardId(), localIterator.getShardRoutings())); + if (rarely()) { + String remoteClusterAlias = randomFrom(remoteClusters); + //simulate scenario where the local cluster is also registered as a remote one + SearchShardIterator remoteIterator = createSearchShardIterator(i, index, + OriginalIndicesTests.randomOriginalIndices(), remoteClusterAlias); + remoteShardIterators.add(remoteIterator); + assert remoteClusterAlias.equals(localClusterAlias) == false; + if (remoteClusterAlias.compareTo(localClusterAlias) < 0) { + expected.add(remoteIterator); + expected.add(localIterator); + } else { + expected.add(localIterator); + expected.add(remoteIterator); + } + } else { + 
expected.add(localIterator); + } + } else if (rarely()) { + int numClusters = randomIntBetween(1, remoteClusters.length); + for (int k = 0; k < numClusters; k++) { + //simulate scenario where the same cluster is registered multiple times with different aliases + String clusterAlias = remoteClusters[k]; + SearchShardIterator iterator = createSearchShardIterator(i, index, OriginalIndicesTests.randomOriginalIndices(), + clusterAlias); + expected.add(iterator); + remoteShardIterators.add(iterator); + } + } else { + SearchShardIterator iterator = createSearchShardIterator(i, index, OriginalIndicesTests.randomOriginalIndices(), + randomFrom(remoteClusters)); + expected.add(iterator); + remoteShardIterators.add(iterator); + } + } } - OriginalIndices remoteIndices2 = new OriginalIndices(new String[]{"remote_index_3"}, IndicesOptions.strictExpand()); - { - ShardId remoteShardId3 = new ShardId("remote_index_3", "remote_index_3_uuid", 4); - ShardRouting remoteShardRouting3 = TestShardRouting.newShardRouting(remoteShardId3, "remote_node", true, STARTED); - SearchShardIterator remoteShardIterator3 = new SearchShardIterator("remote", remoteShardId3, - Collections.singletonList(remoteShardRouting3), remoteIndices2); - remoteShardIterators.add(remoteShardIterator3); - } + Collections.shuffle(localShardIterators, random()); + Collections.shuffle(remoteShardIterators, random()); - String localClusterAlias = randomBoolean() ? null : "local"; - GroupShardsIterator searchShardIterators = TransportSearchAction.mergeShardsIterators(localShardsIterator, - localIndices, localClusterAlias, remoteShardIterators); - - assertEquals(searchShardIterators.size(), 5); - int i = 0; - for (SearchShardIterator searchShardIterator : searchShardIterators) { - switch(i++) { - case 0: - assertEquals("local_index", searchShardIterator.shardId().getIndexName()); - assertEquals(0, searchShardIterator.shardId().getId()); - assertSame(localIndices, searchShardIterator.getOriginalIndices()); - assertEquals(localClusterAlias, searchShardIterator.getClusterAlias()); - break; - case 1: - assertEquals("local_index_2", searchShardIterator.shardId().getIndexName()); - assertEquals(1, searchShardIterator.shardId().getId()); - assertSame(localIndices, searchShardIterator.getOriginalIndices()); - assertEquals(localClusterAlias, searchShardIterator.getClusterAlias()); - break; - case 2: - assertEquals("remote_index", searchShardIterator.shardId().getIndexName()); - assertEquals(2, searchShardIterator.shardId().getId()); - assertSame(remoteIndices, searchShardIterator.getOriginalIndices()); - assertEquals("remote", searchShardIterator.getClusterAlias()); - break; - case 3: - assertEquals("remote_index_2", searchShardIterator.shardId().getIndexName()); - assertEquals(3, searchShardIterator.shardId().getId()); - assertSame(remoteIndices, searchShardIterator.getOriginalIndices()); - assertEquals("remote", searchShardIterator.getClusterAlias()); - break; - case 4: - assertEquals("remote_index_3", searchShardIterator.shardId().getIndexName()); - assertEquals(4, searchShardIterator.shardId().getId()); - assertSame(remoteIndices2, searchShardIterator.getOriginalIndices()); - assertEquals("remote", searchShardIterator.getClusterAlias()); - break; - } + GroupShardsIterator groupShardsIterator = TransportSearchAction.mergeShardsIterators( + new GroupShardsIterator<>(localShardIterators), localIndices, localClusterAlias, remoteShardIterators); + List result = new ArrayList<>(); + for (SearchShardIterator searchShardIterator : groupShardsIterator) { + 
result.add(searchShardIterator); } + assertEquals(expected, result); } public void testProcessRemoteShards() { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/GroupShardsIteratorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/GroupShardsIteratorTests.java index 7012b7af68c78..f7fe59e501b33 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/GroupShardsIteratorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/GroupShardsIteratorTests.java @@ -20,6 +20,8 @@ package org.elasticsearch.cluster.routing; import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.action.OriginalIndicesTests; +import org.elasticsearch.action.search.SearchShardIterator; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; @@ -29,20 +31,44 @@ import java.util.Collections; import java.util.List; +import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; + public class GroupShardsIteratorTests extends ESTestCase { + public static List randomShardRoutings(ShardId shardId) { + return randomShardRoutings(shardId, randomIntBetween(0, 2)); + } + + private static List randomShardRoutings(ShardId shardId, int numReplicas) { + List shardRoutings = new ArrayList<>(); + shardRoutings.add(TestShardRouting.newShardRouting(shardId, randomAlphaOfLengthBetween(5, 10), true, STARTED)); + for (int j = 0; j < numReplicas; j++) { + shardRoutings.add(TestShardRouting.newShardRouting(shardId, randomAlphaOfLengthBetween(5, 10), false, STARTED)); + } + return shardRoutings; + } + public void testSize() { List list = new ArrayList<>(); Index index = new Index("foo", "na"); - - list.add(new PlainShardIterator(new ShardId(index, 0), Arrays.asList(newRouting(index, 0, true), newRouting(index, 0, true), - newRouting(index, 0, true)))); + { + ShardId shardId = new ShardId(index, 0); + list.add(new PlainShardIterator(shardId, randomShardRoutings(shardId, 2))); + } list.add(new PlainShardIterator(new ShardId(index, 1), Collections.emptyList())); - list.add(new PlainShardIterator(new ShardId(index, 2), Arrays.asList(newRouting(index, 2, true)))); + { + ShardId shardId = new ShardId(index, 2); + list.add(new PlainShardIterator(shardId, randomShardRoutings(shardId, 0))); + } index = new Index("foo_1", "na"); - - list.add(new PlainShardIterator(new ShardId(index, 0), Arrays.asList(newRouting(index, 0, true)))); - list.add(new PlainShardIterator(new ShardId(index, 1), Arrays.asList(newRouting(index, 1, true)))); + { + ShardId shardId = new ShardId(index, 0); + list.add(new PlainShardIterator(shardId, randomShardRoutings(shardId, 0))); + } + { + ShardId shardId = new ShardId(index, 1); + list.add(new PlainShardIterator(shardId, randomShardRoutings(shardId, 0))); + } GroupShardsIterator iter = new GroupShardsIterator<>(list); assertEquals(7, iter.totalSizeWith1ForEmpty()); assertEquals(5, iter.size()); @@ -52,21 +78,35 @@ public void testSize() { public void testIterate() { List list = new ArrayList<>(); Index index = new Index("foo", "na"); - - list.add(new PlainShardIterator(new ShardId(index, 0), Arrays.asList(newRouting(index, 0, true), newRouting(index, 0, true), - newRouting(index, 0, true)))); + { + ShardId shardId = new ShardId(index, 0); + list.add(new PlainShardIterator(shardId, randomShardRoutings(shardId))); + } list.add(new PlainShardIterator(new ShardId(index, 1), Collections.emptyList())); - list.add(new PlainShardIterator(new ShardId(index, 2), 
Arrays.asList(newRouting(index, 2, true)))); - - list.add(new PlainShardIterator(new ShardId(index, 0), Arrays.asList(newRouting(index, 0, true)))); - list.add(new PlainShardIterator(new ShardId(index, 1), Arrays.asList(newRouting(index, 1, true)))); - + { + ShardId shardId = new ShardId(index, 2); + list.add(new PlainShardIterator(shardId, randomShardRoutings(shardId))); + } + { + ShardId shardId = new ShardId(index, 0); + list.add(new PlainShardIterator(shardId, randomShardRoutings(shardId))); + } + { + ShardId shardId = new ShardId(index, 1); + list.add(new PlainShardIterator(shardId, randomShardRoutings(shardId))); + } index = new Index("foo_2", "na"); - list.add(new PlainShardIterator(new ShardId(index, 0), Arrays.asList(newRouting(index, 0, true)))); - list.add(new PlainShardIterator(new ShardId(index, 1), Arrays.asList(newRouting(index, 1, true)))); + { + ShardId shardId = new ShardId(index, 0); + list.add(new PlainShardIterator(shardId, randomShardRoutings(shardId))); + } + { + ShardId shardId = new ShardId(index, 1); + list.add(new PlainShardIterator(shardId, randomShardRoutings(shardId))); + } Collections.shuffle(list, random()); - ArrayList actualIterators = new ArrayList<>(); + List actualIterators = new ArrayList<>(); GroupShardsIterator iter = new GroupShardsIterator<>(list); for (ShardIterator shardsIterator : iter) { actualIterators.add(shardsIterator); @@ -75,13 +115,39 @@ public void testIterate() { assertEquals(actualIterators, list); } - public ShardRouting newRouting(Index index, int id, boolean started) { - ShardRouting shardRouting = ShardRouting.newUnassigned(new ShardId(index, id), true, - RecoverySource.EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); - shardRouting = ShardRoutingHelper.initialize(shardRouting, "some node"); - if (started) { - shardRouting = ShardRoutingHelper.moveToStarted(shardRouting); + public void testOrderingWithSearchShardIterators() { + String[] indices = generateRandomStringArray(10, 10, false, false); + Arrays.sort(indices); + String[] uuids = generateRandomStringArray(5, 10, false, false); + Arrays.sort(uuids); + String[] clusters = generateRandomStringArray(5, 10, false, false); + Arrays.sort(clusters); + + List expected = new ArrayList<>(); + int numShards = randomIntBetween(1, 10); + for (int i = 0; i < numShards; i++) { + for (String index : indices) { + for (String uuid : uuids) { + ShardId shardId = new ShardId(index, uuid, i); + SearchShardIterator shardIterator = new SearchShardIterator(null, shardId, + GroupShardsIteratorTests.randomShardRoutings(shardId), OriginalIndicesTests.randomOriginalIndices()); + expected.add(shardIterator); + for (String cluster : clusters) { + SearchShardIterator remoteIterator = new SearchShardIterator(cluster, shardId, + GroupShardsIteratorTests.randomShardRoutings(shardId), OriginalIndicesTests.randomOriginalIndices()); + expected.add(remoteIterator); + } + } + } + } + + List shuffled = new ArrayList<>(expected); + Collections.shuffle(shuffled, random()); + List actualIterators = new ArrayList<>(); + GroupShardsIterator iter = new GroupShardsIterator<>(shuffled); + for (SearchShardIterator searchShardIterator : iter) { + actualIterators.add(searchShardIterator); } - return shardRouting; + assertEquals(expected, actualIterators); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PlainShardIteratorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/PlainShardIteratorTests.java index c92da8e0a8fa0..b07045dfd8d3d 
100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PlainShardIteratorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PlainShardIteratorTests.java @@ -19,26 +19,83 @@ package org.elasticsearch.cluster.routing; -import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.hamcrest.Matchers; import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; public class PlainShardIteratorTests extends ESTestCase { - public void testEquals() { - Index index = new Index("a", "b"); - ShardId shardId = new ShardId(index, 1); - ShardId shardId2 = new ShardId(index, 2); - PlainShardIterator iterator1 = new PlainShardIterator(shardId, new ArrayList<>()); - PlainShardIterator iterator2 = new PlainShardIterator(shardId, new ArrayList<>()); - PlainShardIterator iterator3 = new PlainShardIterator(shardId2, new ArrayList<>()); - String s = "Some other random object"; - assertEquals(iterator1, iterator1); - assertEquals(iterator1, iterator2); - assertNotEquals(iterator1, null); - assertNotEquals(iterator1, s); - assertNotEquals(iterator1, iterator3); + public void testEqualsAndHashCode() { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(randomPlainShardIterator(), + i -> new PlainShardIterator(i.shardId(), i.getShardRoutings()), + i -> { + ShardId shardId; + switch(randomIntBetween(0, 2)) { + case 0: + shardId = new ShardId(i.shardId().getIndex(), i.shardId().getId() + randomIntBetween(1, 1000)); + break; + case 1: + shardId = new ShardId(i.shardId().getIndexName(), + i.shardId().getIndex().getUUID() + randomAlphaOfLengthBetween(1, 3), i.shardId().getId()); + break; + case 2: + shardId = new ShardId(i.shardId().getIndexName() + randomAlphaOfLengthBetween(1, 3), + i.shardId().getIndex().getUUID(), i.shardId().getId()); + break; + default: + throw new UnsupportedOperationException(); + } + return new PlainShardIterator(shardId, i.getShardRoutings()); + }); + } + + public void testCompareTo() { + String[] indices = generateRandomStringArray(3, 10, false, false); + Arrays.sort(indices); + String[] uuids = generateRandomStringArray(3, 10, false, false); + Arrays.sort(uuids); + List shardIterators = new ArrayList<>(); + int numShards = randomIntBetween(1, 5); + for (int i = 0; i < numShards; i++) { + for (String index : indices) { + for (String uuid : uuids) { + ShardId shardId = new ShardId(index, uuid, i); + shardIterators.add(new PlainShardIterator(shardId, GroupShardsIteratorTests.randomShardRoutings(shardId))); + } + } + } + for (int i = 0; i < shardIterators.size(); i++) { + PlainShardIterator currentIterator = shardIterators.get(i); + for (int j = i + 1; j < shardIterators.size(); j++) { + PlainShardIterator greaterIterator = shardIterators.get(j); + assertThat(currentIterator, Matchers.lessThan(greaterIterator)); + assertThat(greaterIterator, Matchers.greaterThan(currentIterator)); + assertNotEquals(currentIterator, greaterIterator); + } + for (int j = i - 1; j >= 0; j--) { + PlainShardIterator smallerIterator = shardIterators.get(j); + assertThat(smallerIterator, Matchers.lessThan(currentIterator)); + assertThat(currentIterator, Matchers.greaterThan(smallerIterator)); + assertNotEquals(currentIterator, smallerIterator); + } + } + } + + public void testCompareToEqualItems() { + PlainShardIterator shardIterator1 = randomPlainShardIterator(); + PlainShardIterator shardIterator2 = new 
PlainShardIterator(shardIterator1.shardId(), shardIterator1.getShardRoutings()); + assertEquals(shardIterator1, shardIterator2); + assertEquals(0, shardIterator1.compareTo(shardIterator2)); + assertEquals(0, shardIterator2.compareTo(shardIterator1)); + } + + private static PlainShardIterator randomPlainShardIterator() { + ShardId shardId = new ShardId(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLength(10), randomIntBetween(1, Integer.MAX_VALUE)); + return new PlainShardIterator(shardId, GroupShardsIteratorTests.randomShardRoutings(shardId)); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index 9a7e25d29bb08..f4b834e4d29a6 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -46,6 +46,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect; @@ -84,7 +85,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { // disruption tests need MockTransportService - return Arrays.asList(MockTransportService.TestPlugin.class); + return Arrays.asList(MockTransportService.TestPlugin.class, InternalSettingsPlugin.class); } public void testBulkWeirdScenario() throws Exception { @@ -92,7 +93,9 @@ public void testBulkWeirdScenario() throws Exception { internalCluster().startDataOnlyNodes(2); assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder() - .put("index.number_of_shards", 1).put("index.number_of_replicas", 1)).get()); + .put("index.number_of_shards", 1).put("index.number_of_replicas", 1) + .put("index.global_checkpoint_sync.interval", "1s")) + .get()); ensureGreen(); BulkResponse bulkResponse = client().prepareBulk() diff --git a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java index 97ba76b822020..bd89ceb64e6df 100644 --- a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java +++ b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java @@ -30,8 +30,10 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.IndexService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.Bridge; @@ -65,6 +67,13 @@ protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(DEFAULT_SETTINGS).build(); } + @Override + public Settings indexSettings() { + return Settings.builder().put(super.indexSettings()) + // sync global checkpoint quickly so we can verify seq_no_stats aligned between all copies after tests. 
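The 1s global checkpoint sync interval being added here exists so that, once indexing stops, every copy of a shard converges on the same sequence-number stats. A minimal sketch of the invariant this makes checkable, assuming SeqNoStats-style accessors (the helper class and method below are illustrative, not part of the test framework):

import static org.junit.Assert.assertEquals;

import java.util.List;

import org.elasticsearch.index.seqno.SeqNoStats;

final class SeqNoStatsAlignment {
    // After the 1s sync interval fires with no new writes, all copies of a shard
    // should agree on max_seq_no and on the global checkpoint.
    static void assertAligned(List<SeqNoStats> copies) {
        SeqNoStats first = copies.get(0);
        for (SeqNoStats copy : copies) {
            assertEquals(first.getMaxSeqNo(), copy.getMaxSeqNo());
            assertEquals(first.getGlobalCheckpoint(), copy.getGlobalCheckpoint());
        }
    }
}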
+ .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s").build(); + } + @Override protected int numberOfShards() { return 3; @@ -128,7 +137,7 @@ List startCluster(int numberOfNodes) { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); + return Arrays.asList(MockTransportService.TestPlugin.class, InternalSettingsPlugin.class); } ClusterState getNodeClusterState(String node) { diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index f23665d201206..d9ed5cd2c719e 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -5653,4 +5653,42 @@ public void testStoreHonorsLuceneVersion() throws IOException { } } } + + public void testMaxSeqNoInCommitUserData() throws Exception { + AtomicBoolean running = new AtomicBoolean(true); + Thread rollTranslog = new Thread(() -> { + while (running.get() && engine.getTranslog().currentFileGeneration() < 500) { + engine.rollTranslogGeneration(); // make adding operations to translog slower + } + }); + rollTranslog.start(); + + Thread indexing = new Thread(() -> { + long seqNo = 0; + while (running.get() && seqNo <= 1000) { + try { + String id = Long.toString(between(1, 50)); + if (randomBoolean()) { + ParsedDocument doc = testParsedDocument(id, null, testDocumentWithTextField(), SOURCE, null); + engine.index(replicaIndexForDoc(doc, 1L, seqNo, false)); + } else { + engine.delete(replicaDeleteForDoc(id, 1L, seqNo, 0L)); + } + seqNo++; + } catch (IOException e) { + throw new AssertionError(e); + } + } + }); + indexing.start(); + + int numCommits = between(5, 20); + for (int i = 0; i < numCommits; i++) { + engine.flush(false, true); + } + running.set(false); + indexing.join(); + rollTranslog.join(); + assertMaxSeqNoInCommitUserData(engine); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java index 7354af17043eb..7d16c01e9daaf 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java @@ -908,6 +908,28 @@ public void testEmptyName() throws IOException { assertThat(e.getMessage(), containsString("name cannot be empty string")); } + public void testLimitOfContextMappings() throws Throwable { + final String index = "test"; + XContentBuilder mappingBuilder = XContentFactory.jsonBuilder().startObject().startObject("properties") + .startObject("suggest").field("type", "completion").startArray("contexts"); + for (int i = 0; i < CompletionFieldMapper.COMPLETION_CONTEXTS_LIMIT + 1; i++) { + mappingBuilder.startObject(); + mappingBuilder.field("name", Integer.toString(i)); + mappingBuilder.field("type", "category"); + mappingBuilder.endObject(); + } + + mappingBuilder.endArray().endObject().endObject().endObject(); + String mappings = Strings.toString(mappingBuilder); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { + createIndex(index).mapperService().documentMapperParser().parse("type1", new CompressedXContent(mappings)); + }); + assertTrue(e.getMessage(), + e.getMessage().contains("Limit of completion field contexts [" + + 
CompletionFieldMapper.COMPLETION_CONTEXTS_LIMIT + "] has been exceeded")); + } + private Matcher suggestField(String value) { return Matchers.allOf(hasProperty(IndexableField::stringValue, equalTo(value)), Matchers.instanceOf(SuggestField.class)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java index 31864abc2e459..7c0e554439078 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java @@ -24,7 +24,6 @@ import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; -import org.elasticsearch.Version; import org.elasticsearch.common.geo.builders.PointBuilder; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.geo.GeoPoint; @@ -63,7 +62,6 @@ public static class Builder extends FieldMapper.Builder private BooleanFieldMapper.Builder boolBuilder = new BooleanFieldMapper.Builder(Names.FIELD_BOOL); private GeoPointFieldMapper.Builder latLonPointBuilder = new GeoPointFieldMapper.Builder(Names.FIELD_POINT); private GeoShapeFieldMapper.Builder shapeBuilder = new GeoShapeFieldMapper.Builder(Names.FIELD_SHAPE); - private LegacyGeoShapeFieldMapper.Builder legacyShapeBuilder = new LegacyGeoShapeFieldMapper.Builder(Names.FIELD_SHAPE); private Mapper.Builder stringBuilder; private String generatedValue; private String mapperName; @@ -87,9 +85,7 @@ public ExternalMapper build(BuilderContext context) { BinaryFieldMapper binMapper = binBuilder.build(context); BooleanFieldMapper boolMapper = boolBuilder.build(context); GeoPointFieldMapper pointMapper = latLonPointBuilder.build(context); - BaseGeoShapeFieldMapper shapeMapper = (context.indexCreatedVersion().before(Version.V_6_6_0)) - ? 
legacyShapeBuilder.build(context) - : shapeBuilder.build(context); + BaseGeoShapeFieldMapper shapeMapper = shapeBuilder.build(context); FieldMapper stringMapper = (FieldMapper)stringBuilder.build(context); context.path().remove(); diff --git a/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java index 11f704e3b1209..6c33d582452b3 100644 --- a/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java @@ -53,7 +53,8 @@ public void testUnknownField() throws IOException { } private static final String[] filters = new String[]{ - "containing", "contained_by", "not_containing", "not_contained_by", "not_overlapping" + "containing", "contained_by", "not_containing", "not_contained_by", + "overlapping", "not_overlapping", "before", "after" }; private IntervalsSourceProvider.IntervalFilter createRandomFilter() { diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index 5b861e7d52bd5..c053c52bc70e0 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -121,6 +121,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoSearchHits; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -465,18 +466,22 @@ public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { final FlushStats initialStats = shard.flushStats(); client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); check = () -> { + assertFalse(shard.shouldPeriodicallyFlush()); final FlushStats currentStats = shard.flushStats(); String msg = String.format(Locale.ROOT, "flush stats: total=[%d vs %d], periodic=[%d vs %d]", initialStats.getTotal(), currentStats.getTotal(), initialStats.getPeriodic(), currentStats.getPeriodic()); - assertThat(msg, currentStats.getPeriodic(), equalTo(initialStats.getPeriodic() + 1)); - assertThat(msg, currentStats.getTotal(), equalTo(initialStats.getTotal() + 1)); + assertThat(msg, currentStats.getPeriodic(), + either(equalTo(initialStats.getPeriodic() + 1)).or(equalTo(initialStats.getPeriodic() + 2))); + assertThat(msg, currentStats.getTotal(), + either(equalTo(initialStats.getTotal() + 1)).or(equalTo(initialStats.getTotal() + 2))); }; } else { final long generation = getTranslog(shard).currentFileGeneration(); client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); - check = () -> assertEquals( - generation + 1, - getTranslog(shard).currentFileGeneration()); + check = () -> { + assertFalse(shard.shouldRollTranslogGeneration()); + assertEquals(generation + 1, getTranslog(shard).currentFileGeneration()); + }; } assertBusy(check); running.set(false); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceCloseTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceCloseTests.java new file mode 100644 index 0000000000000..9990d7b082e5c --- /dev/null +++ 
b/server/src/test/java/org/elasticsearch/indices/IndicesServiceCloseTests.java @@ -0,0 +1,124 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; +import org.elasticsearch.node.MockNode; +import org.elasticsearch.node.Node; +import org.elasticsearch.node.NodeValidationException; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.MockHttpTransport; +import org.elasticsearch.transport.nio.MockNioTransportPlugin; + +import java.nio.file.Path; +import java.util.Arrays; + +import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; +import static org.elasticsearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +public class IndicesServiceCloseTests extends ESTestCase { + + private Node startNode() throws NodeValidationException { + final Path tempDir = createTempDir(); + String nodeName = "node_s_0"; + Settings settings = Settings.builder() + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName("single-node-cluster", random().nextLong())) + .put(Environment.PATH_HOME_SETTING.getKey(), tempDir) + .put(Environment.PATH_REPO_SETTING.getKey(), tempDir.resolve("repo")) + .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), createTempDir().getParent()) + .put(Node.NODE_NAME_SETTING.getKey(), nodeName) + .put(ScriptService.SCRIPT_MAX_COMPILATIONS_RATE.getKey(), "1000/1m") + .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created + .put("transport.type", getTestTransportType()) + .put(Node.NODE_DATA_SETTING.getKey(), true) + .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) + // default the watermarks low values to prevent tests from failing on nodes without enough disk space + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b") + 
.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b") + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "1b") + // turning on the real memory circuit breaker leads to spurious test failures. As we have no full control over heap usage, we + turn it off for these tests. + .put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), false) + .putList(DISCOVERY_SEED_HOSTS_SETTING.getKey()) // empty list disables a port scan for other nodes + .putList(INITIAL_MASTER_NODES_SETTING.getKey(), nodeName) + .build(); + + Node node = new MockNode(settings, Arrays.asList(MockNioTransportPlugin.class, MockHttpTransport.TestPlugin.class), true); + node.start(); + return node; + } + + public void testCloseEmptyIndicesService() throws Exception { + Node node = startNode(); + IndicesService indicesService = node.injector().getInstance(IndicesService.class); + assertEquals(1, indicesService.indicesRefCount.refCount()); + node.close(); + assertEquals(0, indicesService.indicesRefCount.refCount()); + } + + public void testCloseNonEmptyIndicesService() throws Exception { + Node node = startNode(); + IndicesService indicesService = node.injector().getInstance(IndicesService.class); + assertEquals(1, indicesService.indicesRefCount.refCount()); + + assertAcked(node.client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0))); + + assertEquals(2, indicesService.indicesRefCount.refCount()); + + node.close(); + assertEquals(0, indicesService.indicesRefCount.refCount()); + } + + public void testCloseWhileOngoingRequest() throws Exception { + Node node = startNode(); + IndicesService indicesService = node.injector().getInstance(IndicesService.class); + assertEquals(1, indicesService.indicesRefCount.refCount()); + + assertAcked(node.client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0))); + + assertEquals(2, indicesService.indicesRefCount.refCount()); + + IndexService indexService = indicesService.iterator().next(); + IndexShard shard = indexService.getShard(0); + shard.store().incRef(); + + node.close(); + assertEquals(1, indicesService.indicesRefCount.refCount()); + + shard.store().decRef(); + assertEquals(0, indicesService.indicesRefCount.refCount()); + } + +} diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index a540cd203ef6b..769d7a6c6866a 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -650,48 +650,4 @@ public static ClusterState createClusterForShardLimitTest(int nodesInCluster, in .build(); } - public void testOptimizeAutoGeneratedIdsSettingRemoval() throws Exception { - final IndicesService indicesService = getIndicesService(); - - final Index index = new Index("foo-index", UUIDs.randomBase64UUID()); - Settings.Builder builder = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_7_0_0) - .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); - IndexMetaData indexMetaData = new IndexMetaData.Builder(index.getName()) - .settings(builder.build()) - .numberOfShards(1) - .numberOfReplicas(0) - .build(); - IndexService indexService = indicesService.createIndex(indexMetaData,
Collections.emptyList()); - assertNotNull(indexService); - - final Index index2 = new Index("bar-index", UUIDs.randomBase64UUID()); - Settings.Builder builder2 = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_7_0_0) - .put(IndexMetaData.SETTING_INDEX_UUID, index2.getUUID()) - .put(EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.getKey(), randomBoolean()); - IndexMetaData indexMetaData2 = new IndexMetaData.Builder(index2.getName()) - .settings(builder2.build()) - .numberOfShards(1) - .numberOfReplicas(0) - .build(); - IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, - () -> indicesService.createIndex(indexMetaData2, Collections.emptyList())); - assertEquals("Setting [" + EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.getKey() + "] was removed in version 7.0.0", - ex.getMessage()); - - Version version = randomFrom(Version.V_6_0_0_rc1, Version.V_6_0_0, Version.V_6_2_0, Version.V_6_3_0, Version.V_6_4_0); - builder = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, version) - .put(IndexMetaData.SETTING_INDEX_UUID, index2.getUUID()) - .put(EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.getKey(), randomBoolean()); - IndexMetaData indexMetaData3 = new IndexMetaData.Builder(index2.getName()) - .settings(builder.build()) - .numberOfShards(1) - .numberOfReplicas(0) - .build(); - IndexService indexService2 = indicesService.createIndex(indexMetaData3, Collections.emptyList()); - assertNotNull(indexService2); - } - } diff --git a/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java index fb455f37d76f3..c2d35279bdff4 100644 --- a/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -110,6 +110,13 @@ protected void beforeIndexDeletion() throws Exception { internalCluster().assertSameDocIdsOnShards(); } + @Override + public Settings indexSettings() { + return Settings.builder().put(super.indexSettings()) + // sync global checkpoint quickly so we can verify seq_no_stats aligned between all copies after tests. 
+ .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s").build(); + } + public void testSimpleRelocationNoIndexing() { logger.info("--> starting [node1] ..."); final String node_1 = internalCluster().startNode(); @@ -279,8 +286,7 @@ public void testRelocationWhileRefreshing() throws Exception { .put("index.number_of_shards", 1) .put("index.number_of_replicas", numberOfReplicas) .put("index.refresh_interval", -1) // we want to control refreshes - .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms")) - .get(); + ).get(); for (int i = 1; i < numberOfNodes; i++) { logger.info("--> starting [node_{}] ...", i); @@ -465,8 +471,7 @@ public void testIndexAndRelocateConcurrently() throws ExecutionException, Interr final Settings.Builder settings = Settings.builder() .put("index.routing.allocation.exclude.color", "blue") .put(indexSettings()) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(halfNodes - 1)) - .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms"); + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(halfNodes - 1)); assertAcked(prepareCreate("test", settings)); assertAllShardsOnNodes("test", redNodes); int numDocs = randomIntBetween(100, 150); @@ -518,8 +523,8 @@ public void testRelocateWhileWaitingForRefresh() { prepareCreate("test", Settings.builder() .put("index.number_of_shards", 1) .put("index.number_of_replicas", 0) - .put("index.refresh_interval", -1) // we want to control refreshes - ).get(); + // we want to control refreshes + .put("index.refresh_interval", -1)).get(); logger.info("--> index 10 docs"); for (int i = 0; i < 10; i++) { diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 11fa1f5dc196d..f1a091e337e87 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -27,6 +27,8 @@ import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.StoredField; import org.apache.lucene.document.TextField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReader; @@ -126,6 +128,7 @@ import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA; import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; public abstract class EngineTestCase extends ESTestCase { @@ -254,18 +257,20 @@ public EngineConfig copy(EngineConfig config, MergePolicy mergePolicy) { @After public void tearDown() throws Exception { super.tearDown(); - if (engine != null && engine.isClosed.get() == false) { - engine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); - assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, createMapperService("test")); - } - if (replicaEngine != null && replicaEngine.isClosed.get() == false) { - replicaEngine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); - assertConsistentHistoryBetweenTranslogAndLuceneIndex(replicaEngine, createMapperService("test")); + try { + if (engine != null && engine.isClosed.get() == false) 
{ + engine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); + assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, createMapperService("test")); + assertMaxSeqNoInCommitUserData(engine); + } + if (replicaEngine != null && replicaEngine.isClosed.get() == false) { + replicaEngine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); + assertConsistentHistoryBetweenTranslogAndLuceneIndex(replicaEngine, createMapperService("test")); + assertMaxSeqNoInCommitUserData(replicaEngine); + } + } finally { + IOUtils.close(replicaEngine, storeReplica, engine, store, () -> terminate(threadPool)); } - IOUtils.close( - replicaEngine, storeReplica, - engine, store); - terminate(threadPool); } @@ -1075,6 +1080,21 @@ public static void assertConsistentHistoryBetweenTranslogAndLuceneIndex(Engine e } } + /** + * Asserts that the max_seq_no stored in the commit's user_data is never smaller than seq_no of any document in the commit. + */ + public static void assertMaxSeqNoInCommitUserData(Engine engine) throws Exception { + List commits = DirectoryReader.listCommits(engine.store.directory()); + for (IndexCommit commit : commits) { + try (DirectoryReader reader = DirectoryReader.open(commit)) { + AtomicLong maxSeqNoFromDocs = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + Lucene.scanSeqNosInReader(reader, 0, Long.MAX_VALUE, n -> maxSeqNoFromDocs.set(Math.max(n, maxSeqNoFromDocs.get()))); + assertThat(Long.parseLong(commit.getUserData().get(SequenceNumbers.MAX_SEQ_NO)), + greaterThanOrEqualTo(maxSeqNoFromDocs.get())); + } + } + } + public static MapperService createMapperService(String type) throws IOException { IndexMetaData indexMetaData = IndexMetaData.builder("test") .settings(Settings.builder() diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index c396cdfe84570..37fc1c748c189 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -267,9 +267,7 @@ public synchronized int startReplicas(int numOfReplicasToStart) throws IOExcepti } public void startPrimary() throws IOException { - final DiscoveryNode pNode = getDiscoveryNode(primary.routingEntry().currentNodeId()); - primary.markAsRecovering("store", new RecoveryState(primary.routingEntry(), pNode, null)); - primary.recoverFromStore(); + recoverPrimary(primary); HashSet activeIds = new HashSet<>(); activeIds.addAll(activeIds()); activeIds.add(primary.routingEntry().allocationId().getId()); @@ -302,6 +300,11 @@ assert shardRoutings().stream() updateAllocationIDsOnPrimary(); } + protected synchronized void recoverPrimary(IndexShard primary) { + final DiscoveryNode pNode = getDiscoveryNode(primary.routingEntry().currentNodeId()); + primary.markAsRecovering("store", new RecoveryState(primary.routingEntry(), pNode, null)); + primary.recoverFromStore(); + } public synchronized IndexShard addReplicaWithExistingPath(final ShardPath shardPath, final String nodeId) throws IOException { final ShardRouting shardRouting = TestShardRouting.newShardRouting( diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index 343614081c48c..9ac74c29c1ef3 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -120,7 +120,7 @@ public void setUp() throws Exception { @Override public void tearDown() throws Exception { - logger.info("[{}#{}]: cleaning up after test", getTestClass().getSimpleName(), getTestName()); + logger.trace("[{}#{}]: cleaning up after test", getTestClass().getSimpleName(), getTestName()); super.tearDown(); assertAcked(client().admin().indices().prepareDelete("*").get()); MetaData metaData = client().admin().cluster().prepareState().get().getState().getMetaData(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 00900271a2569..35902f80f8469 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -458,6 +458,15 @@ protected boolean preserveILMPoliciesUponCompletion() { } private void wipeCluster() throws Exception { + + // Cleanup rollup before deleting indices. A rollup job might have bulks in-flight, + // so we need to fully shut them down first otherwise a job might stall waiting + // for a bulk to finish against a non-existing index (and then fail tests) + if (hasXPack && false == preserveRollupJobsUponCompletion()) { + wipeRollupJobs(); + waitForPendingRollupTasks(); + } + if (preserveIndicesUponCompletion() == false) { // wipe indices try { @@ -505,11 +514,6 @@ private void wipeCluster() throws Exception { wipeClusterSettings(); } - if (hasXPack && false == preserveRollupJobsUponCompletion()) { - wipeRollupJobs(); - waitForPendingRollupTasks(); - } - if (hasXPack && false == preserveILMPoliciesUponCompletion()) { deleteAllPolicies(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java index dbda656656f01..b1337172a5679 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -40,8 +40,6 @@ import java.util.List; import java.util.Map; -import static org.elasticsearch.rest.BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER; - /** * Execution context passed across the REST tests. * Holds the REST client used to communicate with elasticsearch. @@ -98,10 +96,6 @@ public ClientYamlTestResponse callApi(String apiName, Map params } } - if (esVersion().before(Version.V_7_0_0)) { - adaptRequestForOlderVersion(apiName, bodies, requestParams); - } - HttpEntity entity = createEntity(bodies, requestHeaders); try { response = callApiInternal(apiName, requestParams, entity, requestHeaders, nodeSelector); @@ -117,64 +111,6 @@ public ClientYamlTestResponse callApi(String apiName, Map params } } - /** - * To allow tests to run against a mixed 7.x/6.x cluster, we make certain modifications to the - * request related to types. - * - * Specifically, we generally use typeless index creation and document writes in test set-up code. - * This functionality is supported in 7.x, but is not supported in 6.x (or is not the default - * behavior). Here we modify the request so that it will work against a 6.x node. 
- */ - private void adaptRequestForOlderVersion(String apiName, - List> bodies, - Map requestParams) { - // For index creations, we specify 'include_type_name=false' if it is not explicitly set. This - // allows us to omit the parameter in the test description, while still being able to communicate - // with 6.x nodes where include_type_name defaults to 'true'. - if (apiName.equals("indices.create") && requestParams.containsKey(INCLUDE_TYPE_NAME_PARAMETER) == false) { - requestParams.put(INCLUDE_TYPE_NAME_PARAMETER, "false"); - } - - // We add the type to the document API requests if it's not already included. - if ((apiName.equals("index") || apiName.equals("update") || apiName.equals("delete") || apiName.equals("get")) - && requestParams.containsKey("type") == false) { - requestParams.put("type", "_doc"); - } - - // We also add the type to the bulk API requests if it's not already included. The type can either - // be on the request parameters or in the action metadata in the body of the request so we need to - // be sensitive to both scenarios. - if (apiName.equals("bulk") && requestParams.containsKey("type") == false) { - if (requestParams.containsKey("index")) { - requestParams.put("type", "_doc"); - } else { - for (int i = 0; i < bodies.size(); i++) { - Map body = bodies.get(i); - Map actionMetadata; - if (body.containsKey("index")) { - actionMetadata = (Map) body.get("index"); - i++; - } else if (body.containsKey("create")) { - actionMetadata = (Map) body.get("create"); - i++; - } else if (body.containsKey("update")) { - actionMetadata = (Map) body.get("update"); - i++; - } else if (body.containsKey("delete")) { - actionMetadata = (Map) body.get("delete"); - } else { - // action metadata is malformed so leave it malformed since - // the test is probably testing for malformed action metadata - continue; - } - if (actionMetadata.containsKey("_type") == false) { - actionMetadata.put("_type", "_doc"); - } - } - } - } - } - private HttpEntity createEntity(List> bodies, Map headers) throws IOException { if (bodies.isEmpty()) { return null; diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index c63e726ea2a0a..dd2da0d928b76 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -292,17 +292,7 @@ void checkWarningHeaders(final List warningHeaders, final Version master final boolean matches = matcher.matches(); if (matches) { final String message = matcher.group(1); - // noinspection StatementWithEmptyBody - if (masterVersion.before(Version.V_7_0_0) - && message.equals("the default number of shards will change from [5] to [1] in 7.0.0; " - + "if you wish to continue using the default of [5] shards, " - + "you must manage this on the create index request or with an index template")) { - /* - * This warning header will come back in the vast majority of our tests that create an index when running against an - * older master. Rather than rewrite our tests to assert this warning header, we assume that it is expected. - */ - } else // noinspection StatementWithEmptyBody - if (message.startsWith("[types removal]")) { + if (message.startsWith("[types removal]")) { /* * We skip warnings related to types deprecation so that we can continue to run the many * mixed-version tests that used typed APIs. 
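With the 7.0-specific default-shard-count allowance gone, the only warning headers DoSection still waves through are the "[types removal]" deprecation messages. A minimal sketch of that retained filtering rule, assuming the messages have already been extracted from their Warning headers (the class below is illustrative, not DoSection's actual shape):

import java.util.List;
import java.util.stream.Collectors;

final class WarningFilter {
    // Keep every warning message except the types-removal deprecations,
    // which mixed-version tests exercising typed APIs are expected to trigger.
    static List<String> unexpected(List<String> warningMessages) {
        return warningMessages.stream()
            .filter(message -> message.startsWith("[types removal]") == false)
            .collect(Collectors.toList());
    }
}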
diff --git a/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java b/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java index 18215b6797ec1..84f9b4dfd149d 100644 --- a/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java @@ -33,10 +33,6 @@ public final boolean isRunningAgainstOldCluster() { private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); - public final boolean isRunningAgainstAncientCluster() { - return isRunningAgainstOldCluster() && oldClusterVersion.before(Version.V_7_0_0); - } - public final Version getOldClusterVersion() { return oldClusterVersion; } diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 518628e9fd0fb..f64f70459a1d1 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -97,7 +97,7 @@ buildRestTests.docs = fileTree(projectDir) { Map setups = buildRestTests.setups setups['my_inactive_watch'] = ''' - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" active: false body: > @@ -216,7 +216,7 @@ setups['library'] = ''' ''' setups['sample_job'] = ''' - do: - xpack.ml.put_job: + ml.put_job: job_id: "sample_job" body: > { @@ -270,7 +270,7 @@ setups['farequote_data'] = setups['farequote_index'] + ''' ''' setups['farequote_job'] = setups['farequote_data'] + ''' - do: - xpack.ml.put_job: + ml.put_job: job_id: "farequote" body: > { @@ -290,7 +290,7 @@ setups['farequote_job'] = setups['farequote_data'] + ''' ''' setups['farequote_datafeed'] = setups['farequote_job'] + ''' - do: - xpack.ml.put_datafeed: + ml.put_datafeed: datafeed_id: "datafeed-farequote" body: > { @@ -300,7 +300,7 @@ setups['farequote_datafeed'] = setups['farequote_job'] + ''' ''' setups['ml_filter_safe_domains'] = ''' - do: - xpack.ml.put_filter: + ml.put_filter: filter_id: "safe_domains" body: > { @@ -364,7 +364,7 @@ setups['server_metrics_data'] = setups['server_metrics_index'] + ''' ''' setups['server_metrics_job'] = setups['server_metrics_data'] + ''' - do: - xpack.ml.put_job: + ml.put_job: job_id: "total-requests" body: > { @@ -386,7 +386,7 @@ setups['server_metrics_job'] = setups['server_metrics_data'] + ''' ''' setups['server_metrics_datafeed'] = setups['server_metrics_job'] + ''' - do: - xpack.ml.put_datafeed: + ml.put_datafeed: datafeed_id: "datafeed-total-requests" body: > { @@ -396,22 +396,22 @@ setups['server_metrics_datafeed'] = setups['server_metrics_job'] + ''' ''' setups['server_metrics_openjob'] = setups['server_metrics_datafeed'] + ''' - do: - xpack.ml.open_job: + ml.open_job: job_id: "total-requests" ''' setups['server_metrics_startdf'] = setups['server_metrics_openjob'] + ''' - do: - xpack.ml.start_datafeed: + ml.start_datafeed: datafeed_id: "datafeed-total-requests" ''' setups['calendar_outages'] = ''' - do: - xpack.ml.put_calendar: + ml.put_calendar: calendar_id: "planned-outages" ''' setups['calendar_outages_addevent'] = setups['calendar_outages'] + ''' - do: - xpack.ml.post_calendar_events: + ml.post_calendar_events: calendar_id: "planned-outages" body: > { "description": "event 1", "start_time": "2017-12-01T00:00:00Z", "end_time": "2017-12-02T00:00:00Z", "calendar_id": "planned-outages" } @@ -420,12 +420,12 @@ setups['calendar_outages_addevent'] = setups['calendar_outages'] + ''' ''' setups['calendar_outages_openjob'] = setups['server_metrics_openjob'] 
+ ''' - do: - xpack.ml.put_calendar: + ml.put_calendar: calendar_id: "planned-outages" ''' setups['calendar_outages_addjob'] = setups['server_metrics_openjob'] + ''' - do: - xpack.ml.put_calendar: + ml.put_calendar: calendar_id: "planned-outages" body: > { @@ -434,7 +434,7 @@ setups['calendar_outages_addjob'] = setups['server_metrics_openjob'] + ''' ''' setups['calendar_outages_addevent'] = setups['calendar_outages_addjob'] + ''' - do: - xpack.ml.post_calendar_events: + ml.post_calendar_events: calendar_id: "planned-outages" body: > { "events" : [ @@ -473,7 +473,7 @@ setups['sensor_rollup_job'] = ''' node: type: keyword - do: - xpack.rollup.put_job: + rollup.put_job: id: "sensor" body: > { @@ -541,7 +541,7 @@ setups['sensor_started_rollup_job'] = ''' {"timestamp": 1516297294000, "temperature": 202, "voltage": 4.0, "node": "c"} - do: - xpack.rollup.put_job: + rollup.put_job: id: "sensor" body: > { @@ -571,7 +571,7 @@ setups['sensor_started_rollup_job'] = ''' ] } - do: - xpack.rollup.start_job: + rollup.start_job: id: "sensor" ''' diff --git a/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc index 89b79b5680056..1349e8def05d9 100644 --- a/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc @@ -5,7 +5,7 @@ Put watch ++++ -The PUT watch API either registers a new watch in {watcher} or update an +The PUT watch API either registers a new watch in {watcher} or updates an existing one. [float] @@ -21,13 +21,13 @@ the `.watches` index and its trigger is immediately registered with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler is the trigger engine. -IMPORTANT: Putting a watch must be done via this API only. Do not put a watch - directly to the `.watches` index using the Elasticsearch Index API. - If {es} {security-features} are enabled, make sure no `write` - privileges are granted to anyone over the `.watches` index. +IMPORTANT: You must use {kib} or this API to create a watch. Do not put a watch + directly to the `.watches` index using the Elasticsearch index API. + If {es} {security-features} are enabled, do not give users `write` + privileges on the `.watches` index. When adding a watch you can also define its initial -{xpack-ref}/how-watcher-works.html#watch-active-state[active state]. You do that +{stack-ov}/how-watcher-works.html#watch-active-state[active state]. You do that by setting the `active` parameter. [float] @@ -52,16 +52,16 @@ A watch has the following fields: |====== | Name | Description -| `trigger` | The {xpack-ref}/trigger.html[trigger] that defines when +| `trigger` | The {stack-ov}/trigger.html[trigger] that defines when the watch should run. -| `input` | The {xpack-ref}/input.html[input] that defines the input +| `input` | The {stack-ov}/input.html[input] that defines the input that loads the data for the watch. -| `condition` | The {xpack-ref}/condition.html[condition] that defines if +| `condition` | The {stack-ov}/condition.html[condition] that defines if the actions should be run. -| `actions` | The list of {xpack-ref}/actions.html[actions] that will be +| `actions` | The list of {stack-ov}/actions.html[actions] that will be run if the condition matches | `metadata` | Metadata json that will be copied into the history entries. @@ -75,7 +75,7 @@ A watch has the following fields: ==== Authorization You must have `manage_watcher` cluster privileges to use this API. 
For more -information, see {xpack-ref}/security-privileges.html[Security Privileges]. +information, see {stack-ov}/security-privileges.html[Security Privileges]. [float] ==== Security Integration @@ -148,7 +148,7 @@ PUT _watcher/watch/my-watch // CONSOLE When you add a watch you can also define its initial -{xpack-ref}/how-watcher-works.html#watch-active-state[active state]. You do that +{stack-ov}/how-watcher-works.html#watch-active-state[active state]. You do that by setting the `active` parameter. The following command adds a watch and sets it to be inactive by default: diff --git a/x-pack/docs/src/test/java/org/elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java b/x-pack/docs/src/test/java/org/elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java index e57f57174a883..ba3516d4f2e8d 100644 --- a/x-pack/docs/src/test/java/org/elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java +++ b/x-pack/docs/src/test/java/org/elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java @@ -79,13 +79,13 @@ public void reenableWatcher() throws Exception { if (isWatcherTest()) { assertBusy(() -> { ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); switch (state) { case "stopped": ClientYamlTestResponse startResponse = - getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.start", emptyMap(), emptyList(), emptyMap()); boolean isAcknowledged = (boolean) startResponse.evaluate("acknowledged"); assertThat(isAcknowledged, is(true)); throw new AssertionError("waiting until stopped state reached started state"); diff --git a/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java b/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java index 656328d5ead9e..4891c51049b62 100644 --- a/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java +++ b/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java @@ -28,6 +28,7 @@ import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class ESCCRRestTestCase extends ESRestTestCase { @@ -139,8 +140,9 @@ protected static void verifyCcrMonitoring(final String expectedLeaderIndex, fina throw new AssertionError("error while searching", e); } - int numberOfOperationsReceived = 0; - int numberOfOperationsIndexed = 0; + int followerMaxSeqNo = 0; + int followerMappingVersion = 0; + int followerSettingsVersion = 0; List hits = (List) XContentMapValues.extractValue("hits.hits", response); assertThat(hits.size(), greaterThanOrEqualTo(1)); @@ -153,16 +155,20 @@ protected static void verifyCcrMonitoring(final String expectedLeaderIndex, fina final String followerIndex = (String) XContentMapValues.extractValue("_source.ccr_stats.follower_index", hit); assertThat(followerIndex, equalTo(expectedFollowerIndex)); - int foundNumberOfOperationsReceived = - (int) XContentMapValues.extractValue("_source.ccr_stats.operations_read", hit); - numberOfOperationsReceived = Math.max(numberOfOperationsReceived, 
foundNumberOfOperationsReceived); - int foundNumberOfOperationsIndexed = - (int) XContentMapValues.extractValue("_source.ccr_stats.operations_written", hit); - numberOfOperationsIndexed = Math.max(numberOfOperationsIndexed, foundNumberOfOperationsIndexed); + int foundFollowerMaxSeqNo = + (int) XContentMapValues.extractValue("_source.ccr_stats.follower_max_seq_no", hit); + followerMaxSeqNo = Math.max(followerMaxSeqNo, foundFollowerMaxSeqNo); + int foundFollowerMappingVersion = + (int) XContentMapValues.extractValue("_source.ccr_stats.follower_mapping_version", hit); + followerMappingVersion = Math.max(followerMappingVersion, foundFollowerMappingVersion); + int foundFollowerSettingsVersion = + (int) XContentMapValues.extractValue("_source.ccr_stats.follower_settings_version", hit); + followerSettingsVersion = Math.max(followerSettingsVersion, foundFollowerSettingsVersion); } - assertThat(numberOfOperationsReceived, greaterThanOrEqualTo(1)); - assertThat(numberOfOperationsIndexed, greaterThanOrEqualTo(1)); + assertThat(followerMaxSeqNo, greaterThan(0)); + assertThat(followerMappingVersion, greaterThan(0)); + assertThat(followerSettingsVersion, greaterThan(0)); } protected static void verifyAutoFollowMonitoring() throws IOException { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index d4c73c1c6e503..0e6854652aa16 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -254,7 +254,7 @@ public List getNamedWriteables() { return Arrays.asList( // Persistent action requests new NamedWriteableRegistry.Entry(PersistentTaskParams.class, ShardFollowTask.NAME, - ShardFollowTask::new), + ShardFollowTask::readFrom), // Task statuses new NamedWriteableRegistry.Entry(Task.Status.class, ShardFollowNodeTaskStatus.STATUS_PARSER_NAME, diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRetentionLeases.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRetentionLeases.java new file mode 100644 index 0000000000000..122fbdb969aa7 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRetentionLeases.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.index.Index; + +import java.util.Locale; + +public class CcrRetentionLeases { + + /** + * The retention lease ID used by followers. 
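+ * For example (values hypothetical), with local cluster name {@code cluster_a}, follower index
+ * {@code follower} (UUID {@code f_uuid}), remote cluster alias {@code leader_alias}, and leader
+ * index {@code leader} (UUID {@code l_uuid}), the resulting ID is
+ * {@code cluster_a/follower/f_uuid-following-leader_alias/leader/l_uuid}, per the format string below.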
+ * + * @param localClusterName the local cluster name + * @param followerIndex the follower index + * @param remoteClusterAlias the remote cluster alias + * @param leaderIndex the leader index + * @return the retention lease ID + */ + public static String retentionLeaseId( + final String localClusterName, + final Index followerIndex, + final String remoteClusterAlias, + final Index leaderIndex) { + return String.format( + Locale.ROOT, + "%s/%s/%s-following-%s/%s/%s", + localClusterName, + followerIndex.getName(), + followerIndex.getUUID(), + remoteClusterAlias, + leaderIndex.getName(), + leaderIndex.getUUID()); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index 03e936ca8c2ea..1bbc62b45c3b3 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -526,7 +526,7 @@ private void followLeaderIndex(String autoFollowPattenName, request.getParameters().setMaxWriteBufferCount(pattern.getMaxWriteBufferCount()); request.getParameters().setMaxWriteBufferSize(pattern.getMaxWriteBufferSize()); request.getParameters().setMaxRetryDelay(pattern.getMaxRetryDelay()); - request.getParameters().setReadPollTimeout(pattern.getPollTimeout()); + request.getParameters().setReadPollTimeout(pattern.getReadPollTimeout()); // Execute if the create and follow api call succeeds: Runnable successHandler = () -> { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java index 796c26022e1a9..a82670a52a0c4 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java @@ -14,11 +14,11 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.ccr.action.ImmutableFollowParameters; import java.io.IOException; import java.util.Arrays; @@ -28,18 +28,7 @@ import java.util.Objects; import java.util.Set; -import static org.elasticsearch.xpack.core.ccr.action.FollowParameters.MAX_READ_REQUEST_OPERATION_COUNT; -import static org.elasticsearch.xpack.core.ccr.action.FollowParameters.MAX_READ_REQUEST_SIZE; -import static org.elasticsearch.xpack.core.ccr.action.FollowParameters.MAX_OUTSTANDING_READ_REQUESTS; -import static org.elasticsearch.xpack.core.ccr.action.FollowParameters.MAX_WRITE_REQUEST_OPERATION_COUNT; -import static org.elasticsearch.xpack.core.ccr.action.FollowParameters.MAX_WRITE_REQUEST_SIZE; -import static org.elasticsearch.xpack.core.ccr.action.FollowParameters.MAX_OUTSTANDING_WRITE_REQUESTS; -import static org.elasticsearch.xpack.core.ccr.action.FollowParameters.MAX_WRITE_BUFFER_COUNT; -import static org.elasticsearch.xpack.core.ccr.action.FollowParameters.MAX_WRITE_BUFFER_SIZE; -import static 
org.elasticsearch.xpack.core.ccr.action.FollowParameters.MAX_RETRY_DELAY; -import static org.elasticsearch.xpack.core.ccr.action.FollowParameters.READ_POLL_TIMEOUT; - -public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { +public class ShardFollowTask extends ImmutableFollowParameters implements XPackPlugin.XPackPersistentTaskParams { public static final String NAME = "xpack/ccr/shard_follow_task"; @@ -60,8 +49,8 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { private static ConstructingObjectParser<ShardFollowTask, Void> PARSER = new ConstructingObjectParser<>(NAME, (a) -> new ShardFollowTask((String) a[0], new ShardId((String) a[1], (String) a[2], (int) a[3]), new ShardId((String) a[4], (String) a[5], (int) a[6]), - (int) a[7], (ByteSizeValue) a[8], (int) a[9], (int) a[10], (ByteSizeValue) a[11], (int) a[12], - (int) a[13], (ByteSizeValue) a[14], (TimeValue) a[15], (TimeValue) a[16], (Map<String, String>) a[17])); + (Integer) a[7], (Integer) a[8], (Integer) a[9], (Integer) a[10], (ByteSizeValue) a[11], (ByteSizeValue) a[12], + (Integer) a[13], (ByteSizeValue) a[14], (TimeValue) a[15], (TimeValue) a[16], (Map<String, String>) a[17])); static { PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), REMOTE_CLUSTER_FIELD); @@ -71,48 +60,13 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { PARSER.declareString(ConstructingObjectParser.constructorArg(), LEADER_SHARD_INDEX_FIELD); PARSER.declareString(ConstructingObjectParser.constructorArg(), LEADER_SHARD_INDEX_UUID_FIELD); PARSER.declareInt(ConstructingObjectParser.constructorArg(), LEADER_SHARD_SHARDID_FIELD); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_READ_REQUEST_OPERATION_COUNT); - PARSER.declareField( - ConstructingObjectParser.constructorArg(), - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_READ_REQUEST_SIZE.getPreferredName()), - MAX_READ_REQUEST_SIZE, - ObjectParser.ValueType.STRING); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_OUTSTANDING_READ_REQUESTS); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_WRITE_REQUEST_OPERATION_COUNT); - PARSER.declareField( - ConstructingObjectParser.constructorArg(), - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_REQUEST_SIZE.getPreferredName()), - MAX_WRITE_REQUEST_SIZE, - ObjectParser.ValueType.STRING); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_OUTSTANDING_WRITE_REQUESTS); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_WRITE_BUFFER_COUNT); - PARSER.declareField( - ConstructingObjectParser.constructorArg(), - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_BUFFER_SIZE.getPreferredName()), - MAX_WRITE_BUFFER_SIZE, - ObjectParser.ValueType.STRING); - PARSER.declareField(ConstructingObjectParser.constructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_RETRY_DELAY.getPreferredName()), - MAX_RETRY_DELAY, ObjectParser.ValueType.STRING); - PARSER.declareField(ConstructingObjectParser.constructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), READ_POLL_TIMEOUT.getPreferredName()), - READ_POLL_TIMEOUT, ObjectParser.ValueType.STRING); + ImmutableFollowParameters.initParser(PARSER); PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> p.mapStrings(), HEADERS); } private final String remoteCluster; private final ShardId followShardId; private final ShardId leaderShardId; - private final int maxReadRequestOperationCount; - private final ByteSizeValue
maxReadRequestSize; - private final int maxOutstandingReadRequests; - private final int maxWriteRequestOperationCount; - private final ByteSizeValue maxWriteRequestSize; - private final int maxOutstandingWriteRequests; - private final int maxWriteBufferCount; - private final ByteSizeValue maxWriteBufferSize; - private final TimeValue maxRetryDelay; - private final TimeValue readPollTimeout; private final Map headers; ShardFollowTask( @@ -120,47 +74,43 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { final ShardId followShardId, final ShardId leaderShardId, final int maxReadRequestOperationCount, - final ByteSizeValue maxReadRequestSize, - final int maxOutstandingReadRequests, final int maxWriteRequestOperationCount, - final ByteSizeValue maxWriteRequestSize, + final int maxOutstandingReadRequests, final int maxOutstandingWriteRequests, + final ByteSizeValue maxReadRequestSize, + final ByteSizeValue maxWriteRequestSize, final int maxWriteBufferCount, final ByteSizeValue maxWriteBufferSize, final TimeValue maxRetryDelay, final TimeValue readPollTimeout, final Map headers) { + super(maxReadRequestOperationCount, maxWriteRequestOperationCount, maxOutstandingReadRequests, maxOutstandingWriteRequests, + maxReadRequestSize, maxWriteRequestSize, maxWriteBufferCount, maxWriteBufferSize, maxRetryDelay, readPollTimeout); this.remoteCluster = remoteCluster; this.followShardId = followShardId; this.leaderShardId = leaderShardId; - this.maxReadRequestOperationCount = maxReadRequestOperationCount; - this.maxReadRequestSize = maxReadRequestSize; - this.maxOutstandingReadRequests = maxOutstandingReadRequests; - this.maxWriteRequestOperationCount = maxWriteRequestOperationCount; - this.maxWriteRequestSize = maxWriteRequestSize; - this.maxOutstandingWriteRequests = maxOutstandingWriteRequests; - this.maxWriteBufferCount = maxWriteBufferCount; - this.maxWriteBufferSize = maxWriteBufferSize; - this.maxRetryDelay = maxRetryDelay; - this.readPollTimeout = readPollTimeout; this.headers = headers != null ? 
Collections.unmodifiableMap(headers) : Collections.emptyMap(); } - public ShardFollowTask(StreamInput in) throws IOException { - this.remoteCluster = in.readString(); - this.followShardId = ShardId.readShardId(in); - this.leaderShardId = ShardId.readShardId(in); - this.maxReadRequestOperationCount = in.readVInt(); - this.maxReadRequestSize = new ByteSizeValue(in); - this.maxOutstandingReadRequests = in.readVInt(); - this.maxWriteRequestOperationCount = in.readVInt(); - this.maxWriteRequestSize = new ByteSizeValue(in); - this.maxOutstandingWriteRequests = in.readVInt(); - this.maxWriteBufferCount = in.readVInt(); - this.maxWriteBufferSize = new ByteSizeValue(in); - this.maxRetryDelay = in.readTimeValue(); - this.readPollTimeout = in.readTimeValue(); - this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); + public static ShardFollowTask readFrom(StreamInput in) throws IOException { + String remoteCluster = in.readString(); + ShardId followShardId = ShardId.readShardId(in); + ShardId leaderShardId = ShardId.readShardId(in); + // TODO: use ImmutableFollowParameters(StreamInput) constructor + int maxReadRequestOperationCount = in.readVInt(); + ByteSizeValue maxReadRequestSize = new ByteSizeValue(in); + int maxOutstandingReadRequests = in.readVInt(); + int maxWriteRequestOperationCount = in.readVInt(); + ByteSizeValue maxWriteRequestSize = new ByteSizeValue(in); + int maxOutstandingWriteRequests = in.readVInt(); + int maxWriteBufferCount = in.readVInt(); + ByteSizeValue maxWriteBufferSize = new ByteSizeValue(in); + TimeValue maxRetryDelay = in.readTimeValue(); + TimeValue readPollTimeout = in.readTimeValue(); + Map headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); + return new ShardFollowTask(remoteCluster, followShardId, leaderShardId, maxReadRequestOperationCount, + maxWriteRequestOperationCount, maxOutstandingReadRequests, maxOutstandingWriteRequests, maxReadRequestSize, + maxWriteRequestSize, maxWriteBufferCount, maxWriteBufferSize, maxRetryDelay, readPollTimeout, headers); } public String getRemoteCluster() { @@ -175,50 +125,6 @@ public ShardId getLeaderShardId() { return leaderShardId; } - public int getMaxReadRequestOperationCount() { - return maxReadRequestOperationCount; - } - - public int getMaxOutstandingReadRequests() { - return maxOutstandingReadRequests; - } - - public int getMaxWriteRequestOperationCount() { - return maxWriteRequestOperationCount; - } - - public ByteSizeValue getMaxWriteRequestSize() { - return maxWriteRequestSize; - } - - public int getMaxOutstandingWriteRequests() { - return maxOutstandingWriteRequests; - } - - public int getMaxWriteBufferCount() { - return maxWriteBufferCount; - } - - public ByteSizeValue getMaxWriteBufferSize() { - return maxWriteBufferSize; - } - - public ByteSizeValue getMaxReadRequestSize() { - return maxReadRequestSize; - } - - public TimeValue getMaxRetryDelay() { - return maxRetryDelay; - } - - public TimeValue getReadPollTimeout() { - return readPollTimeout; - } - - public String getTaskId() { - return followShardId.getIndex().getUUID() + "-" + followShardId.getId(); - } - public Map getHeaders() { return headers; } @@ -233,16 +139,17 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(remoteCluster); followShardId.writeTo(out); leaderShardId.writeTo(out); - out.writeVLong(maxReadRequestOperationCount); - maxReadRequestSize.writeTo(out); - out.writeVInt(maxOutstandingReadRequests); - 
out.writeVLong(maxWriteRequestOperationCount); - maxWriteRequestSize.writeTo(out); - out.writeVInt(maxOutstandingWriteRequests); - out.writeVInt(maxWriteBufferCount); - maxWriteBufferSize.writeTo(out); - out.writeTimeValue(maxRetryDelay); - out.writeTimeValue(readPollTimeout); + // TODO: use super.writeTo() + out.writeVLong(getMaxReadRequestOperationCount()); + getMaxReadRequestSize().writeTo(out); + out.writeVInt(getMaxOutstandingReadRequests()); + out.writeVLong(getMaxWriteRequestOperationCount()); + getMaxWriteRequestSize().writeTo(out); + out.writeVInt(getMaxOutstandingWriteRequests()); + out.writeVInt(getMaxWriteBufferCount()); + getMaxWriteBufferSize().writeTo(out); + out.writeTimeValue(getMaxRetryDelay()); + out.writeTimeValue(getReadPollTimeout()); out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); } @@ -260,16 +167,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(LEADER_SHARD_INDEX_FIELD.getPreferredName(), leaderShardId.getIndex().getName()); builder.field(LEADER_SHARD_INDEX_UUID_FIELD.getPreferredName(), leaderShardId.getIndex().getUUID()); builder.field(LEADER_SHARD_SHARDID_FIELD.getPreferredName(), leaderShardId.id()); - builder.field(MAX_READ_REQUEST_OPERATION_COUNT.getPreferredName(), maxReadRequestOperationCount); - builder.field(MAX_READ_REQUEST_SIZE.getPreferredName(), maxReadRequestSize.getStringRep()); - builder.field(MAX_OUTSTANDING_READ_REQUESTS.getPreferredName(), maxOutstandingReadRequests); - builder.field(MAX_WRITE_REQUEST_OPERATION_COUNT.getPreferredName(), maxWriteRequestOperationCount); - builder.field(MAX_WRITE_REQUEST_SIZE.getPreferredName(), maxWriteRequestSize.getStringRep()); - builder.field(MAX_OUTSTANDING_WRITE_REQUESTS.getPreferredName(), maxOutstandingWriteRequests); - builder.field(MAX_WRITE_BUFFER_COUNT.getPreferredName(), maxWriteBufferCount); - builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize.getStringRep()); - builder.field(MAX_RETRY_DELAY.getPreferredName(), maxRetryDelay.getStringRep()); - builder.field(READ_POLL_TIMEOUT.getPreferredName(), readPollTimeout.getStringRep()); + toXContentFragment(builder); builder.field(HEADERS.getPreferredName(), headers); return builder.endObject(); } @@ -278,39 +176,21 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; ShardFollowTask that = (ShardFollowTask) o; return Objects.equals(remoteCluster, that.remoteCluster) && Objects.equals(followShardId, that.followShardId) && Objects.equals(leaderShardId, that.leaderShardId) && - maxReadRequestOperationCount == that.maxReadRequestOperationCount && - maxReadRequestSize.equals(that.maxReadRequestSize) && - maxOutstandingReadRequests == that.maxOutstandingReadRequests && - maxWriteRequestOperationCount == that.maxWriteRequestOperationCount && - maxWriteRequestSize.equals(that.maxWriteRequestSize) && - maxOutstandingWriteRequests == that.maxOutstandingWriteRequests && - maxWriteBufferCount == that.maxWriteBufferCount && - maxWriteBufferSize.equals(that.maxWriteBufferSize) && - Objects.equals(maxRetryDelay, that.maxRetryDelay) && - Objects.equals(readPollTimeout, that.readPollTimeout) && Objects.equals(headers, that.headers); } @Override public int hashCode() { return Objects.hash( + super.hashCode(), remoteCluster, followShardId, leaderShardId, - maxReadRequestOperationCount, - 
maxReadRequestSize, - maxOutstandingReadRequests, - maxWriteRequestOperationCount, - maxWriteRequestSize, - maxOutstandingWriteRequests, - maxWriteBufferCount, - maxWriteBufferSize, - maxRetryDelay, - readPollTimeout, headers ); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java index d5127cbb74d4b..a218ec2dcaa7c 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java @@ -159,11 +159,11 @@ static ClusterState innerPut(PutAutoFollowPatternAction.Request request, request.getLeaderIndexPatterns(), request.getFollowIndexNamePattern(), request.getParameters().getMaxReadRequestOperationCount(), - request.getParameters().getMaxReadRequestSize(), - request.getParameters().getMaxOutstandingReadRequests(), request.getParameters().getMaxWriteRequestOperationCount(), - request.getParameters().getMaxWriteRequestSize(), + request.getParameters().getMaxOutstandingReadRequests(), request.getParameters().getMaxOutstandingWriteRequests(), + request.getParameters().getMaxReadRequestSize(), + request.getParameters().getMaxWriteRequestSize(), request.getParameters().getMaxWriteBufferCount(), request.getParameters().getMaxWriteBufferSize(), request.getParameters().getMaxRetryDelay(), diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java index 3eacabe78b606..f59f23f5dd366 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java @@ -324,11 +324,11 @@ private static ShardFollowTask createShardFollowTask( new ShardId(followIndexMetadata.getIndex(), shardId), new ShardId(leaderIndexMetadata.getIndex(), shardId), maxReadRequestOperationCount, - maxReadRequestSize, - maxOutstandingReadRequests, maxWriteRequestOperationCount, - maxWriteRequestSize, + maxOutstandingReadRequests, maxOutstandingWriteRequests, + maxReadRequestSize, + maxWriteRequestSize, maxWriteBufferCount, maxWriteBufferSize, maxRetryDelay, diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java index c779b491d581e..e0b0734912b97 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java @@ -68,6 +68,7 @@ private void preFlight(final Operation operation) { @Override protected InternalEngine.IndexingStrategy indexingStrategyForOperation(final Index index) throws IOException { preFlight(index); + markSeqNoAsSeen(index.seqNo()); // NOTES: refer Engine#getMaxSeqNoOfUpdatesOrDeletes for the explanation of the optimization using sequence numbers. 
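// A simplified sketch of the bookkeeping behind markSeqNoAsSeen, which this change invokes for both
// the index and the delete paths so that the following engine's local checkpoint advances even when
// operations arrive out of order (illustrative only; the production LocalCheckpointTracker is
// bitset-based and thread-safe):
//
//     long checkpoint = -1;                       // highest seqNo below which there are no gaps
//     final Set<Long> pending = new HashSet<>();  // seqNos seen above the checkpoint
//
//     void markSeqNoAsSeen(final long seqNo) {
//         pending.add(seqNo);
//         while (pending.remove(checkpoint + 1)) {
//             checkpoint++;                       // advance across any contiguous run
//         }
//     }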
final long maxSeqNoOfUpdatesOrDeletes = getMaxSeqNoOfUpdatesOrDeletes(); assert maxSeqNoOfUpdatesOrDeletes != SequenceNumbers.UNASSIGNED_SEQ_NO : "max_seq_no_of_updates is not initialized"; @@ -103,6 +104,7 @@ protected InternalEngine.IndexingStrategy indexingStrategyForOperation(final Ind @Override protected InternalEngine.DeletionStrategy deletionStrategyForOperation(final Delete delete) throws IOException { preFlight(delete); + markSeqNoAsSeen(delete.seqNo()); if (delete.origin() == Operation.Origin.PRIMARY && hasBeenProcessedBefore(delete)) { // See the comment in #indexingStrategyForOperation for the explanation why we can safely skip this operation. final AlreadyProcessedFollowingEngineException error = new AlreadyProcessedFollowingEngineException( diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index 88f4e974beae1..41cce3f5b0b06 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -8,8 +8,12 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.IndexCommit; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -19,6 +23,7 @@ import org.elasticsearch.action.support.ListenerTimeouts; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -30,13 +35,18 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.metrics.CounterMetric; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.Index; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.seqno.LocalCheckpointTracker; +import org.elasticsearch.index.seqno.RetentionLeaseActions; +import org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException; +import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardRecoveryException; import org.elasticsearch.index.shard.ShardId; @@ -56,6 +66,7 @@ import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotShardFailure; import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.ccr.CcrLicenseChecker; @@ 
-76,12 +87,16 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.LongConsumer; import java.util.function.Supplier; +import static org.elasticsearch.index.seqno.RetentionLeaseActions.RETAIN_ALL; import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; +import static org.elasticsearch.xpack.ccr.CcrRetentionLeases.retentionLeaseId; /** @@ -90,6 +105,8 @@ */ public class CcrRepository extends AbstractLifecycleComponent implements Repository { + private static final Logger logger = LogManager.getLogger(CcrRepository.class); + public static final String LATEST = "_latest_"; public static final String TYPE = "_ccr_"; public static final String NAME_PREFIX = "_ccr_"; @@ -98,6 +115,7 @@ public class CcrRepository extends AbstractLifecycleComponent implements Reposit private final RepositoryMetaData metadata; private final CcrSettings ccrSettings; + private final String localClusterName; private final String remoteClusterAlias; private final Client client; private final CcrLicenseChecker ccrLicenseChecker; @@ -109,6 +127,7 @@ public CcrRepository(RepositoryMetaData metadata, Client client, CcrLicenseCheck CcrSettings ccrSettings, ThreadPool threadPool) { this.metadata = metadata; this.ccrSettings = ccrSettings; + this.localClusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings).value(); assert metadata.name().startsWith(NAME_PREFIX) : "CcrRepository metadata.name() must start with: " + NAME_PREFIX; this.remoteClusterAlias = Strings.split(metadata.name(), NAME_PREFIX)[1]; this.ccrLicenseChecker = ccrLicenseChecker; @@ -136,10 +155,14 @@ public RepositoryMetaData getMetadata() { return metadata; } + private Client getRemoteClusterClient() { + return client.getRemoteClusterClient(remoteClusterAlias); + } + @Override public SnapshotInfo getSnapshotInfo(SnapshotId snapshotId) { assert SNAPSHOT_ID.equals(snapshotId) : "RemoteClusterRepository only supports " + SNAPSHOT_ID + " as the SnapshotId"; - Client remoteClient = client.getRemoteClusterClient(remoteClusterAlias); + Client remoteClient = getRemoteClusterClient(); ClusterStateResponse response = remoteClient.admin().cluster().prepareState().clear().setMetaData(true).setNodes(true) .get(ccrSettings.getRecoveryActionTimeout()); ImmutableOpenMap indicesMap = response.getState().metaData().indices(); @@ -152,7 +175,7 @@ public SnapshotInfo getSnapshotInfo(SnapshotId snapshotId) { @Override public MetaData getSnapshotGlobalMetaData(SnapshotId snapshotId) { assert SNAPSHOT_ID.equals(snapshotId) : "RemoteClusterRepository only supports " + SNAPSHOT_ID + " as the SnapshotId"; - Client remoteClient = client.getRemoteClusterClient(remoteClusterAlias); + Client remoteClient = getRemoteClusterClient(); // We set a single dummy index name to avoid fetching all the index data ClusterStateRequest clusterStateRequest = CcrRequests.metaDataRequest("dummy_index_name"); ClusterStateResponse clusterState = remoteClient.admin().cluster().state(clusterStateRequest) @@ -164,7 +187,7 @@ public MetaData getSnapshotGlobalMetaData(SnapshotId snapshotId) { public IndexMetaData getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId index) throws IOException { assert SNAPSHOT_ID.equals(snapshotId) : "RemoteClusterRepository only supports " + SNAPSHOT_ID + " as the SnapshotId"; String leaderIndex = index.getName(); - Client remoteClient = 
client.getRemoteClusterClient(remoteClusterAlias); + Client remoteClient = getRemoteClusterClient(); ClusterStateRequest clusterStateRequest = CcrRequests.metaDataRequest(leaderIndex); ClusterStateResponse clusterState = remoteClient.admin().cluster().state(clusterStateRequest) @@ -203,7 +226,7 @@ public IndexMetaData getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId ind @Override public RepositoryData getRepositoryData() { - Client remoteClient = client.getRemoteClusterClient(remoteClusterAlias); + Client remoteClient = getRemoteClusterClient(); ClusterStateResponse response = remoteClient.admin().cluster().prepareState().clear().setMetaData(true) .get(ccrSettings.getRecoveryActionTimeout()); MetaData remoteMetaData = response.getState().getMetaData(); @@ -280,33 +303,167 @@ public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, public void restoreShard(IndexShard indexShard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId, RecoveryState recoveryState) { // TODO: Add timeouts to network calls / the restore process. + createEmptyStore(indexShard, shardId); + + final Map ccrMetaData = indexShard.indexSettings().getIndexMetaData().getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY); + final String leaderIndexName = ccrMetaData.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_NAME_KEY); + final String leaderUUID = ccrMetaData.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY); + final Index leaderIndex = new Index(leaderIndexName, leaderUUID); + final ShardId leaderShardId = new ShardId(leaderIndex, shardId.getId()); + + final Client remoteClient = getRemoteClusterClient(); + + final String retentionLeaseId = + retentionLeaseId(localClusterName, indexShard.shardId().getIndex(), remoteClusterAlias, leaderIndex); + + acquireRetentionLeaseOnLeader(shardId, retentionLeaseId, leaderShardId, remoteClient); + + // schedule renewals to run during the restore + final Scheduler.Cancellable renewable = threadPool.scheduleWithFixedDelay( + () -> { + logger.trace("{} background renewal of retention lease [{}] during restore", shardId, retentionLeaseId); + final ThreadContext threadContext = threadPool.getThreadContext(); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + // we have to execute under the system context so that if security is enabled the renewal is authorized + threadContext.markAsSystemContext(); + asyncRenewRetentionLease( + leaderShardId, + retentionLeaseId, + remoteClient, + ActionListener.wrap( + r -> {}, + e -> { + assert e instanceof ElasticsearchSecurityException == false : e; + logger.warn(new ParameterizedMessage( + "{} background renewal of retention lease [{}] failed during restore", + shardId, + retentionLeaseId), + e); + })); + } + }, + RETENTION_LEASE_RENEW_INTERVAL_SETTING.get(indexShard.indexSettings().getSettings()), + Ccr.CCR_THREAD_POOL_NAME); + + // TODO: There should be some local timeout. And if the remote cluster returns an unknown session + // response, we should be able to retry by creating a new session. 
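// The renewal runnable scheduled above leans on a small but important pattern: stash the calling
// thread's context, mark the fresh context as system, and rely on try-with-resources to restore the
// original context. In isolation (same names as in this change):
//
//     final ThreadContext threadContext = threadPool.getThreadContext();
//     try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
//         threadContext.markAsSystemContext(); // requests sent from here are authorized as the system user
//         // ... send the renewal request ...
//     } // the stored context is restored here, even if the renewal throws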
+ try (RestoreSession restoreSession = openSession(metadata.name(), remoteClient, leaderShardId, indexShard, recoveryState)) { + restoreSession.restoreFiles(); + updateMappings(remoteClient, leaderIndex, restoreSession.mappingVersion, client, indexShard.routingEntry().index()); + } catch (Exception e) { + throw new IndexShardRestoreFailedException(indexShard.shardId(), "failed to restore snapshot [" + snapshotId + "]", e); + } finally { + logger.trace("{} canceling background renewal of retention lease [{}] at the end of restore", shardId, retentionLeaseId); + renewable.cancel(); + } + } + + private void createEmptyStore(final IndexShard indexShard, final ShardId shardId) { final Store store = indexShard.store(); store.incRef(); try { store.createEmpty(indexShard.indexSettings().getIndexMetaData().getCreationVersion().luceneVersion); - } catch (EngineException | IOException e) { + } catch (final EngineException | IOException e) { throw new IndexShardRecoveryException(shardId, "failed to create empty store", e); } finally { store.decRef(); } + } - Map ccrMetaData = indexShard.indexSettings().getIndexMetaData().getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY); - String leaderUUID = ccrMetaData.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY); - Index leaderIndex = new Index(shardId.getIndexName(), leaderUUID); - ShardId leaderShardId = new ShardId(leaderIndex, shardId.getId()); + void acquireRetentionLeaseOnLeader( + final ShardId shardId, + final String retentionLeaseId, + final ShardId leaderShardId, + final Client remoteClient) { + logger.trace( + () -> new ParameterizedMessage("{} requesting leader to add retention lease [{}]", shardId, retentionLeaseId)); + final Optional maybeAddAlready = + syncAddRetentionLease(leaderShardId, retentionLeaseId, remoteClient); + maybeAddAlready.ifPresent(addAlready -> { + logger.trace(() -> new ParameterizedMessage( + "{} retention lease [{}] already exists, requesting a renewal", + shardId, + retentionLeaseId), + addAlready); + final Optional maybeRenewNotFound = + syncRenewRetentionLease(leaderShardId, retentionLeaseId, remoteClient); + maybeRenewNotFound.ifPresent(renewNotFound -> { + logger.trace(() -> new ParameterizedMessage( + "{} retention lease [{}] not found while attempting to renew, requesting a final add", + shardId, + retentionLeaseId), + renewNotFound); + final Optional maybeFallbackAddAlready = + syncAddRetentionLease(leaderShardId, retentionLeaseId, remoteClient); + maybeFallbackAddAlready.ifPresent(fallbackAddAlready -> { + /* + * At this point we tried to add the lease and the retention lease already existed. By the time we tried to renew the + * lease, it expired or was removed. We tried to add the lease again and it already exists? Bail. + */ + assert false : fallbackAddAlready; + throw fallbackAddAlready; + }); + }); + }); + } - Client remoteClient = client.getRemoteClusterClient(remoteClusterAlias); - // TODO: There should be some local timeout. And if the remote cluster returns an unknown session - // response, we should be able to retry by creating a new session. 
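// The add/renew ladder in acquireRetentionLeaseOnLeader above, flattened into a sketch (the helper
// methods and exception types are the ones introduced in this change):
//
//     Optional<RetentionLeaseAlreadyExistsException> addError =
//             syncAddRetentionLease(leaderShardId, retentionLeaseId, remoteClient);
//     if (addError.isPresent()) {                  // lease already present: a previous follower added it
//         Optional<RetentionLeaseNotFoundException> renewError =
//                 syncRenewRetentionLease(leaderShardId, retentionLeaseId, remoteClient);
//         if (renewError.isPresent()) {            // expired between the add and the renew: add once more
//             syncAddRetentionLease(leaderShardId, retentionLeaseId, remoteClient)
//                     .ifPresent(e -> { throw e; }); // exists again after a fresh add: give up loudly
//         }
//     }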
- String name = metadata.name(); - try (RestoreSession restoreSession = openSession(name, remoteClient, leaderShardId, indexShard, recoveryState)) { - restoreSession.restoreFiles(); - updateMappings(remoteClient, leaderIndex, restoreSession.mappingVersion, client, indexShard.routingEntry().index()); - } catch (Exception e) { - throw new IndexShardRestoreFailedException(indexShard.shardId(), "failed to restore snapshot [" + snapshotId + "]", e); + private Optional syncAddRetentionLease( + final ShardId leaderShardId, + final String retentionLeaseId, + final Client remoteClient) { + try { + final PlainActionFuture response = new PlainActionFuture<>(); + asyncAddRetentionLease(leaderShardId, retentionLeaseId, remoteClient, response); + response.actionGet(ccrSettings.getRecoveryActionTimeout()); + return Optional.empty(); + } catch (final RetentionLeaseAlreadyExistsException e) { + return Optional.of(e); + } + } + + private void asyncAddRetentionLease( + final ShardId leaderShardId, + final String retentionLeaseId, + final Client remoteClient, + final ActionListener listener) { + final RetentionLeaseActions.AddRequest request = + new RetentionLeaseActions.AddRequest(leaderShardId, retentionLeaseId, RETAIN_ALL, "ccr"); + remoteClient.execute(RetentionLeaseActions.Add.INSTANCE, request, listener); + } + + private Optional syncRenewRetentionLease( + final ShardId leaderShardId, + final String retentionLeaseId, + final Client remoteClient) { + try { + final PlainActionFuture response = new PlainActionFuture<>(); + asyncRenewRetentionLease(leaderShardId, retentionLeaseId, remoteClient, response); + response.actionGet(ccrSettings.getRecoveryActionTimeout()); + return Optional.empty(); + } catch (final RetentionLeaseNotFoundException e) { + return Optional.of(e); } } + private void asyncRenewRetentionLease( + final ShardId leaderShardId, + final String retentionLeaseId, + final Client remoteClient, + final ActionListener listener) { + final RetentionLeaseActions.RenewRequest request = + new RetentionLeaseActions.RenewRequest(leaderShardId, retentionLeaseId, RETAIN_ALL, "ccr"); + remoteClient.execute(RetentionLeaseActions.Renew.INSTANCE, request, listener); + } + + // this setting is intentionally not registered, it is only used in tests + public static final Setting RETENTION_LEASE_RENEW_INTERVAL_SETTING = + Setting.timeSetting( + "index.ccr.retention_lease.renew_interval", + new TimeValue(5, TimeUnit.MINUTES), + new TimeValue(0, TimeUnit.MILLISECONDS), + Setting.Property.Dynamic, + Setting.Property.IndexScope); + @Override public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId leaderShardId) { throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); @@ -330,7 +487,7 @@ private void updateMappings(Client leaderClient, Index leaderIndex, long leaderM } } - private RestoreSession openSession(String repositoryName, Client remoteClient, ShardId leaderShardId, IndexShard indexShard, + RestoreSession openSession(String repositoryName, Client remoteClient, ShardId leaderShardId, IndexShard indexShard, RecoveryState recoveryState) { String sessionUUID = UUIDs.randomBase64UUID(); PutCcrRestoreSessionAction.PutCcrRestoreSessionResponse response = remoteClient.execute(PutCcrRestoreSessionAction.INSTANCE, diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index 81b90a3ff60b0..d219ddefa066b 100644 --- 
a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack; import org.apache.lucene.store.AlreadyClosedException; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; @@ -22,8 +23,11 @@ import org.elasticsearch.analysis.common.CommonAnalysisPlugin; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; +import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -35,6 +39,7 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -43,6 +48,7 @@ import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.DocIdSeqNoAndTerm; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.seqno.SeqNoStats; @@ -58,10 +64,14 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.snapshots.RestoreInfo; +import org.elasticsearch.snapshots.RestoreService; +import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.MockHttpTransport; import org.elasticsearch.test.NodeConfigurationSource; @@ -74,6 +84,7 @@ import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; import org.elasticsearch.xpack.core.ccr.action.PauseFollowAction; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; @@ -99,10 +110,12 @@ import java.util.function.BooleanSupplier; import java.util.function.Function; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING; import static org.elasticsearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; +import static org.elasticsearch.snapshots.RestoreService.restoreInProgress; 
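// The hunks below add an overridable nodePlugins() hook to CcrIntegTestCase and merge its result
// into the plugin list used for every node of both test clusters. A hypothetical subclass would use
// it like this (sketch; the chosen plugin is just an example):
//
//     public class MyCcrIT extends CcrIntegTestCase {
//         @Override
//         protected Collection<Class<? extends Plugin>> nodePlugins() {
//             return Collections.singletonList(InternalSettingsPlugin.class);
//         }
//     }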
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.empty; @@ -114,6 +127,10 @@ public abstract class CcrIntegTestCase extends ESTestCase { private static ClusterGroup clusterGroup; + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Collections.emptyList(); + } + @Before public final void startClusters() throws Exception { if (clusterGroup != null && reuseClusters()) { @@ -125,7 +142,7 @@ public final void startClusters() throws Exception { stopClusters(); Collection<Class<? extends Plugin>> mockPlugins = Arrays.asList(ESIntegTestCase.TestSeedPlugin.class, MockHttpTransport.TestPlugin.class, MockTransportService.TestPlugin.class, - MockNioTransportPlugin.class); + MockNioTransportPlugin.class, InternalSettingsPlugin.class); InternalTestCluster leaderCluster = new InternalTestCluster(randomLong(), createTempDir(), true, true, numberOfNodesPerCluster(), numberOfNodesPerCluster(), "leader_cluster", createNodeConfigurationSource(null), 0, "leader", mockPlugins, @@ -224,7 +241,10 @@ public Path nodeConfigPath(int nodeOrdinal) { @Override public Collection<Class<? extends Plugin>> nodePlugins() { - return Arrays.asList(LocalStateCcr.class, CommonAnalysisPlugin.class); + return Stream.concat( + Stream.of(LocalStateCcr.class, CommonAnalysisPlugin.class), + CcrIntegTestCase.this.nodePlugins().stream()) + .collect(Collectors.toList()); } @Override @@ -357,13 +377,18 @@ protected void ensureEmptyWriteBuffers() throws Exception { protected void pauseFollow(String... indices) throws Exception { for (String index : indices) { final PauseFollowAction.Request unfollowRequest = new PauseFollowAction.Request(index); - followerClient().execute(PauseFollowAction.INSTANCE, unfollowRequest).get(); + assertAcked(followerClient().execute(PauseFollowAction.INSTANCE, unfollowRequest).actionGet()); } ensureNoCcrTasks(); } protected void ensureNoCcrTasks() throws Exception { assertBusy(() -> { + CcrStatsAction.Response statsResponse = + followerClient().execute(CcrStatsAction.INSTANCE, new CcrStatsAction.Request()).actionGet(); + assertThat("Follow stats not empty: " + Strings.toString(statsResponse.getFollowStats()), + statsResponse.getFollowStats().getStatsResponses(), empty()); + final ClusterState clusterState = followerClient().admin().cluster().prepareState().get().getState(); final PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); assertThat(tasks.tasks(), empty()); @@ -390,6 +415,7 @@ protected String getIndexSettings(final int numberOfShards, final int numberOfRe builder.startObject("settings"); { builder.field(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), 0); + builder.field(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s"); builder.field("index.number_of_shards", numberOfShards); builder.field("index.number_of_replicas", numberOfReplicas); for (final Map.Entry<String, String> additionalSetting : additionalIndexSettings.entrySet()) { @@ -639,6 +665,61 @@ public long waitForDocs(final long numDocs, int maxWaitTime, TimeUnit maxWaitTim return lastKnownCount.get(); } + protected ActionListener<RestoreService.RestoreCompletionResponse> waitForRestore( + final ClusterService clusterService, + final ActionListener<RestoreInfo> listener) { + return new ActionListener<RestoreService.RestoreCompletionResponse>() { + + @Override + public void onResponse(RestoreService.RestoreCompletionResponse restoreCompletionResponse) { + if (restoreCompletionResponse.getRestoreInfo() == null) { + final Snapshot snapshot =
restoreCompletionResponse.getSnapshot(); + final String uuid = restoreCompletionResponse.getUuid(); + + final ClusterStateListener clusterStateListener = new ClusterStateListener() { + + @Override + public void clusterChanged(ClusterChangedEvent changedEvent) { + final RestoreInProgress.Entry prevEntry = restoreInProgress(changedEvent.previousState(), uuid); + final RestoreInProgress.Entry newEntry = restoreInProgress(changedEvent.state(), uuid); + if (prevEntry == null) { + /* + * When there is a master failure after a restore has been started, this listener might not be registered + * on the current master and as such it might miss some intermediary cluster states due to batching. + * Clean up the listener in that case and acknowledge completion of restore operation to client. + */ + clusterService.removeListener(this); + listener.onResponse(null); + } else if (newEntry == null) { + clusterService.removeListener(this); + ImmutableOpenMap shards = prevEntry.shards(); + RestoreInfo ri = new RestoreInfo(prevEntry.snapshot().getSnapshotId().getName(), + prevEntry.indices(), + shards.size(), + shards.size() - RestoreService.failedShards(shards)); + logger.debug("restore of [{}] completed", snapshot); + listener.onResponse(ri); + } else { + // restore not completed yet, wait for next cluster state update + } + } + + }; + + clusterService.addListener(clusterStateListener); + } else { + listener.onResponse(restoreCompletionResponse.getRestoreInfo()); + } + } + + @Override + public void onFailure(Exception t) { + listener.onFailure(t); + } + + }; + } + static void removeCCRRelatedMetadataFromClusterState(ClusterService clusterService) throws Exception { CountDownLatch latch = new CountDownLatch(1); clusterService.submitStateUpdateTask("remove-ccr-related-metadata", new ClusterStateUpdateTask() { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowMetadataTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowMetadataTests.java index 5dab22500a600..26182781233e2 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowMetadataTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowMetadataTests.java @@ -45,11 +45,11 @@ protected AutoFollowMetadata createTestInstance() { leaderPatterns, randomAlphaOfLength(4), randomIntBetween(0, Integer.MAX_VALUE), - new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(0, Integer.MAX_VALUE), randomIntBetween(0, Integer.MAX_VALUE), - new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(0, Integer.MAX_VALUE), + new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), + new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(0, Integer.MAX_VALUE), new ByteSizeValue(randomNonNegativeLong()), TimeValue.timeValueMillis(500), diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java index 45adec46a21a6..5357f9f01b229 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java @@ -16,26 +16,20 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.cluster.ClusterChangedEvent; -import 
org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.snapshots.RestoreInfo; import org.elasticsearch.snapshots.RestoreService; -import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.TransportActionProxy; import org.elasticsearch.transport.TransportService; @@ -55,7 +49,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import static java.util.Collections.singletonMap; -import static org.elasticsearch.snapshots.RestoreService.restoreInProgress; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -453,51 +446,4 @@ private void assertExpectedDocument(String followerIndex, final int value) { assertThat(getResponse.getSource().get("f"), equalTo(value)); } - private ActionListener waitForRestore(ClusterService clusterService, - ActionListener listener) { - return new ActionListener() { - @Override - public void onResponse(RestoreService.RestoreCompletionResponse restoreCompletionResponse) { - if (restoreCompletionResponse.getRestoreInfo() == null) { - final Snapshot snapshot = restoreCompletionResponse.getSnapshot(); - final String uuid = restoreCompletionResponse.getUuid(); - - ClusterStateListener clusterStateListener = new ClusterStateListener() { - @Override - public void clusterChanged(ClusterChangedEvent changedEvent) { - final RestoreInProgress.Entry prevEntry = restoreInProgress(changedEvent.previousState(), uuid); - final RestoreInProgress.Entry newEntry = restoreInProgress(changedEvent.state(), uuid); - if (prevEntry == null) { - // When there is a master failure after a restore has been started, this listener might not be registered - // on the current master and as such it might miss some intermediary cluster states due to batching. - // Clean up listener in that case and acknowledge completion of restore operation to client. 
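// This duplicate of waitForRestore is being removed because the helper now lives in
// CcrIntegTestCase (see above); call sites keep the same shape. Usage sketch, mirroring
// CcrRetentionLeaseIT further below:
//
//     final PlainActionFuture<RestoreInfo> future = PlainActionFuture.newFuture();
//     restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future));
//     final RestoreInfo restoreInfo = future.actionGet(); // resolves once the restore leaves the cluster state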
- clusterService.removeListener(this); - listener.onResponse(null); - } else if (newEntry == null) { - clusterService.removeListener(this); - ImmutableOpenMap shards = prevEntry.shards(); - RestoreInfo ri = new RestoreInfo(prevEntry.snapshot().getSnapshotId().getName(), - prevEntry.indices(), - shards.size(), - shards.size() - RestoreService.failedShards(shards)); - logger.debug("restore of [{}] completed", snapshot); - listener.onResponse(ri); - } else { - // restore not completed yet, wait for next cluster state update - } - } - }; - - clusterService.addListener(clusterStateListener); - } else { - listener.onResponse(restoreCompletionResponse.getRestoreInfo()); - } - } - - @Override - public void onFailure(Exception t) { - listener.onFailure(t); - } - }; - } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java new file mode 100644 index 0000000000000..c42887e6b52f1 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java @@ -0,0 +1,408 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.seqno.RetentionLease; +import org.elasticsearch.index.seqno.RetentionLeases; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.snapshots.RestoreInfo; +import org.elasticsearch.snapshots.RestoreService; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.CcrIntegTestCase; +import org.elasticsearch.xpack.ccr.action.repositories.ClearCcrRestoreSessionAction; +import org.elasticsearch.xpack.ccr.repository.CcrRepository; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import 
java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.ccr.CcrRetentionLeases.retentionLeaseId; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; + +public class CcrRetentionLeaseIT extends CcrIntegTestCase { + + public static final class RetentionLeaseRenewIntervalSettingPlugin extends Plugin { + + @Override + public List<Setting<?>> getSettings() { + return Collections.singletonList(CcrRepository.RETENTION_LEASE_RENEW_INTERVAL_SETTING); + } + + } + + public static final class RetentionLeaseSyncIntervalSettingPlugin extends Plugin { + + @Override + public List<Setting<?>> getSettings() { + return Collections.singletonList(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING); + } + + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Stream.concat( + super.nodePlugins().stream(), + Stream.of(RetentionLeaseRenewIntervalSettingPlugin.class, RetentionLeaseSyncIntervalSettingPlugin.class)) + .collect(Collectors.toList()); + } + + private final IndicesOptions indicesOptions = IndicesOptions.strictSingleIndexNoExpandForbidClosed(); + + private RestoreSnapshotRequest setUpRestoreSnapshotRequest( + final String leaderIndex, + final int numberOfShards, + final int numberOfReplicas, + final String followerIndex, + final int numberOfDocuments) throws IOException { + final ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(); + final String chunkSize = new ByteSizeValue(randomFrom(4, 128, 1024), ByteSizeUnit.KB).getStringRep(); + settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.RECOVERY_CHUNK_SIZE.getKey(), chunkSize)); + assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + + final String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster"; + + final Map<String, String> additionalSettings = new HashMap<>(); + additionalSettings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"); + additionalSettings.put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(200).getStringRep()); + final String leaderIndexSettings = getIndexSettings(numberOfShards, numberOfReplicas, additionalSettings); + assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON)); + ensureLeaderGreen(leaderIndex); + + logger.info("indexing [{}] docs", numberOfDocuments); + for (int i = 0; i < numberOfDocuments; i++) { + final String source = String.format(Locale.ROOT, "{\"f\":%d}", i); + leaderClient().prepareIndex(leaderIndex, "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get(); + if (rarely()) { + leaderClient().admin().indices().prepareFlush(leaderIndex).setForce(true).setWaitIfOngoing(true).get(); + } + } + + leaderClient().admin().indices().prepareFlush(leaderIndex).setForce(true).setWaitIfOngoing(true).get(); + + final Settings.Builder settingsBuilder = Settings.builder() + .put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followerIndex) + .put(CcrRepository.RETENTION_LEASE_RENEW_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(200)) + .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); + return new RestoreSnapshotRequest(leaderClusterRepoName, 
CcrRepository.LATEST) + .indexSettings(settingsBuilder) + .indices(leaderIndex) + .indicesOptions(indicesOptions) + .renamePattern("^(.*)$") + .renameReplacement(followerIndex) + .masterNodeTimeout(new TimeValue(1L, TimeUnit.HOURS)); + } + + public void testRetentionLeaseIsTakenAtTheStartOfRecovery() throws Exception { + final String leaderIndex = "leader"; + final int numberOfShards = randomIntBetween(1, 3); + final int numberOfReplicas = between(0, 1); + final String followerIndex = "follower"; + final int numberOfDocuments = scaledRandomIntBetween(1, 8192); + final RestoreSnapshotRequest restoreRequest = + setUpRestoreSnapshotRequest(leaderIndex, numberOfShards, numberOfReplicas, followerIndex, numberOfDocuments); + final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class); + final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class); + + final PlainActionFuture future = PlainActionFuture.newFuture(); + restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future)); + + final ClusterStateResponse leaderIndexClusterState = + leaderClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(leaderIndex).get(); + final String leaderUUID = leaderIndexClusterState.getState().metaData().index(leaderIndex).getIndexUUID(); + + // ensure that a retention lease has been put in place on each shard + assertBusy(() -> { + final IndicesStatsResponse stats = + leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); + assertNotNull(stats.getShards()); + assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); + final List shardStats = getShardStats(stats); + for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { + final RetentionLeases currentRetentionLeases = shardStats.get(i).getRetentionLeaseStats().retentionLeases(); + assertThat(currentRetentionLeases.leases(), hasSize(1)); + final ClusterStateResponse followerIndexClusterState = + followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); + final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); + final RetentionLease retentionLease = + currentRetentionLeases.leases().iterator().next(); + assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, followerUUID, leaderIndex, leaderUUID))); + } + }); + + final RestoreInfo restoreInfo = future.actionGet(); + + assertEquals(restoreInfo.totalShards(), restoreInfo.successfulShards()); + assertEquals(0, restoreInfo.failedShards()); + for (int i = 0; i < numberOfDocuments; ++i) { + assertExpectedDocument(followerIndex, i); + } + + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39011") + public void testRetentionLeaseIsRenewedDuringRecovery() throws Exception { + final String leaderIndex = "leader"; + final int numberOfShards = randomIntBetween(1, 3); + final int numberOfReplicas = between(0, 1); + final String followerIndex = "follower"; + final int numberOfDocuments = scaledRandomIntBetween(1, 8192); + final RestoreSnapshotRequest restoreRequest = + setUpRestoreSnapshotRequest(leaderIndex, numberOfShards, numberOfReplicas, followerIndex, numberOfDocuments); + final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class); + final ClusterService clusterService = 
getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class); + + final CountDownLatch latch = new CountDownLatch(1); + + // block the recovery from completing; this ensures the background sync is still running + final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); + for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getDataNodes().values()) { + final MockTransportService senderTransportService = + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + final ClusterStateResponse leaderClusterState = leaderClient().admin().cluster().prepareState().clear().setNodes(true).get(); + for (final ObjectCursor receiverNode : leaderClusterState.getState().nodes().getDataNodes().values()) { + final MockTransportService receiverTransportService = + (MockTransportService) getLeaderCluster().getInstance(TransportService.class, receiverNode.value.getName()); + senderTransportService.addSendBehavior(receiverTransportService, + (connection, requestId, action, request, options) -> { + if (ClearCcrRestoreSessionAction.NAME.equals(action)) { + try { + latch.await(); + } catch (final InterruptedException e) { + fail(e.toString()); + } + } + connection.sendRequest(requestId, action, request, options); + }); + + } + + } + + final PlainActionFuture future = PlainActionFuture.newFuture(); + restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future)); + + final ClusterStateResponse leaderIndexClusterState = + leaderClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(leaderIndex).get(); + final String leaderUUID = leaderIndexClusterState.getState().metaData().index(leaderIndex).getIndexUUID(); + + try { + // ensure that a retention lease has been put in place on each shard, and grab a copy of them + final List retentionLeases = new ArrayList<>(); + assertBusy(() -> { + retentionLeases.clear(); + final IndicesStatsResponse stats = + leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); + assertNotNull(stats.getShards()); + assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); + final List shardStats = getShardStats(stats); + for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { + final RetentionLeases currentRetentionLeases = shardStats.get(i).getRetentionLeaseStats().retentionLeases(); + assertThat(currentRetentionLeases.leases(), hasSize(1)); + final ClusterStateResponse followerIndexClusterState = + followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); + final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); + final RetentionLease retentionLease = + currentRetentionLeases.leases().iterator().next(); + assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, followerUUID, leaderIndex, leaderUUID))); + retentionLeases.add(currentRetentionLeases); + } + }); + + // now ensure that the retention leases are being renewed + assertBusy(() -> { + final IndicesStatsResponse stats = + leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); + assertNotNull(stats.getShards()); + assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); + final List shardStats = getShardStats(stats); + for (int i = 0; 
i < numberOfShards * (1 + numberOfReplicas); i++) { + final RetentionLeases currentRetentionLeases = shardStats.get(i).getRetentionLeaseStats().retentionLeases(); + assertThat(currentRetentionLeases.leases(), hasSize(1)); + final ClusterStateResponse followerIndexClusterState = + followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); + final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); + final RetentionLease retentionLease = + currentRetentionLeases.leases().iterator().next(); + assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, followerUUID, leaderIndex, leaderUUID))); + // we assert that retention leases are being renewed by an increase in the timestamp + assertThat(retentionLease.timestamp(), greaterThan(retentionLeases.get(i).leases().iterator().next().timestamp())); + } + }); + latch.countDown(); + } finally { + for (final ObjectCursor<DiscoveryNode> senderNode : followerClusterState.getState().nodes().getDataNodes().values()) { + final MockTransportService senderTransportService = + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + senderTransportService.clearAllRules(); + } + } + + final RestoreInfo restoreInfo = future.actionGet(); + + assertEquals(restoreInfo.totalShards(), restoreInfo.successfulShards()); + assertEquals(0, restoreInfo.failedShards()); + for (int i = 0; i < numberOfDocuments; i++) { + assertExpectedDocument(followerIndex, i); + } + + } + + public void testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes() throws Exception { + final String leaderIndex = "leader"; + final int numberOfShards = randomIntBetween(1, 3); + final int numberOfReplicas = between(0, 1); + final String followerIndex = "follower"; + final int numberOfDocuments = scaledRandomIntBetween(1, 8192); + final RestoreSnapshotRequest restoreRequest = + setUpRestoreSnapshotRequest(leaderIndex, numberOfShards, numberOfReplicas, followerIndex, numberOfDocuments); + final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class); + final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class); + + final PlainActionFuture<RestoreInfo> future = PlainActionFuture.newFuture(); + restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future)); + + final RestoreInfo restoreInfo = future.actionGet(); + final long start = System.nanoTime(); + + final ClusterStateResponse leaderIndexClusterState = + leaderClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(leaderIndex).get(); + final String leaderUUID = leaderIndexClusterState.getState().metaData().index(leaderIndex).getIndexUUID(); + + // sample the leases after recovery + final List<RetentionLeases> retentionLeases = new ArrayList<>(); + assertBusy(() -> { + retentionLeases.clear(); + final IndicesStatsResponse stats = + leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); + assertNotNull(stats.getShards()); + assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); + final List<ShardStats> shardStats = getShardStats(stats); + for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { + final RetentionLeases currentRetentionLeases = shardStats.get(i).getRetentionLeaseStats().retentionLeases(); + assertThat(currentRetentionLeases.leases(), hasSize(1)); + final 
ClusterStateResponse followerIndexClusterState = + followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); + final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); + final RetentionLease retentionLease = + currentRetentionLeases.leases().iterator().next(); + final String expectedRetentionLeaseId = retentionLeaseId( + getFollowerCluster().getClusterName(), + new Index(followerIndex, followerUUID), + getLeaderCluster().getClusterName(), + new Index(leaderIndex, leaderUUID)); + assertThat(retentionLease.id(), equalTo(expectedRetentionLeaseId)); + retentionLeases.add(currentRetentionLeases); + } + }); + + final long end = System.nanoTime(); + Thread.sleep(Math.max(0, randomIntBetween(2, 4) * 200 - TimeUnit.NANOSECONDS.toMillis(end - start))); + + // now ensure that the retention leases are the same + assertBusy(() -> { + final IndicesStatsResponse stats = + leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); + assertNotNull(stats.getShards()); + assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); + final List<ShardStats> shardStats = getShardStats(stats); + for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { + final RetentionLeases currentRetentionLeases = shardStats.get(i).getRetentionLeaseStats().retentionLeases(); + assertThat(currentRetentionLeases.leases(), hasSize(1)); + final ClusterStateResponse followerIndexClusterState = + followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); + final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); + final RetentionLease retentionLease = + currentRetentionLeases.leases().iterator().next(); + assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, followerUUID, leaderIndex, leaderUUID))); + // we assert that the retention leases have not been renewed by checking that the timestamps are unchanged + assertThat(retentionLease.timestamp(), equalTo(retentionLeases.get(i).leases().iterator().next().timestamp())); + } + }); + + assertEquals(restoreInfo.totalShards(), restoreInfo.successfulShards()); + assertEquals(0, restoreInfo.failedShards()); + for (int i = 0; i < numberOfDocuments; ++i) { + assertExpectedDocument(followerIndex, i); + } + } + + /** + * Extract the shard stats from an indices stats response, with the stats ordered by shard ID and, within a shard, by the + * primary flag. This is to have a consistent ordering when comparing two responses. 
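+ * An equivalent comparator, spelled out for illustration only (this name is hypothetical, not part of this change): + * <pre>{@code + * Comparator<ShardStats> byShardThenPrimary = Comparator + * .comparingInt((ShardStats s) -> s.getShardRouting().shardId().id()) + * .thenComparing(s -> s.getShardRouting().primary()); + * }</pre>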
+ * + * @param stats the indices stats + * @return the shard stats in sorted order with (shard ID, primary) as the sort key + */ + private List getShardStats(final IndicesStatsResponse stats) { + return Arrays.stream(stats.getShards()) + .sorted((s, t) -> { + if (s.getShardRouting().shardId().id() == t.getShardRouting().shardId().id()) { + return Boolean.compare(s.getShardRouting().primary(), t.getShardRouting().primary()); + } else { + return Integer.compare(s.getShardRouting().shardId().id(), t.getShardRouting().shardId().id()); + } + }) + .collect(Collectors.toList()); + } + + private String getRetentionLeaseId(String followerIndex, String followerUUID, String leaderIndex, String leaderUUID) { + return retentionLeaseId( + getFollowerCluster().getClusterName(), + new Index(followerIndex, followerUUID), + getLeaderCluster().getClusterName(), + new Index(leaderIndex, leaderUUID)); + } + + private void assertExpectedDocument(final String followerIndex, final int value) { + final GetResponse getResponse = followerClient().prepareGet(followerIndex, "doc", Integer.toString(value)).get(); + assertTrue("doc with id [" + value + "] is missing", getResponse.isExists()); + assertTrue((getResponse.getSource().containsKey("f"))); + assertThat(getResponse.getSource().get("f"), equalTo(value)); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java index 1f1c6cd5c64e3..e6662f3770d24 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java @@ -149,6 +149,7 @@ public void testFollowStatsApiResourceNotFound() throws Exception { assertAcked(client().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request("follower1")).actionGet()); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/38779") public void testFollowStatsApiIncludeShardFollowStatsWithRemovedFollowerIndex() throws Exception { final String leaderIndexSettings = getIndexSettings(1, 0, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java index d2df76e047c21..fa23065b18761 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java @@ -54,6 +54,7 @@ protected boolean reuseClusters() { return false; } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/38633") public void testFailOverOnFollower() throws Exception { final String leaderIndex = "leader_test_failover"; final String followerIndex = "follower_test_failover"; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index ade96b4614171..5a8a7feb34716 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ccr; +import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import 
org.elasticsearch.ExceptionsHelper; @@ -15,6 +16,7 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest; @@ -43,6 +45,8 @@ import org.elasticsearch.cluster.health.ClusterShardHealth; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.bytes.BytesReference; @@ -53,8 +57,10 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.seqno.RetentionLeaseActions; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.rest.RestStatus; @@ -87,11 +93,13 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BooleanSupplier; +import java.util.function.Consumer; import java.util.stream.Collectors; import static java.util.Collections.singletonMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.ccr.CcrRetentionLeases.retentionLeaseId; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -103,6 +111,7 @@ public class IndexFollowingIT extends CcrIntegTestCase { + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/38949") public void testFollowIndex() throws Exception { final int numberOfPrimaryShards = randomIntBetween(1, 3); int numberOfReplicas = between(0, 1); @@ -209,6 +218,7 @@ public void testFollowIndex() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/38949") public void testFollowIndexWithConcurrentMappingChanges() throws Exception { final int numberOfPrimaryShards = randomIntBetween(1, 3); final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1), @@ -981,9 +991,70 @@ public void testMustCloseIndexAndPauseToRestartWithPutFollowing() throws Excepti } public void testIndexFallBehind() throws Exception { + runFallBehindTest( + () -> { + // we have to remove the retention leases on the leader shards to ensure the follower falls behind + final ClusterStateResponse followerIndexClusterState = + followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices("index2").get(); + final String followerUUID = followerIndexClusterState.getState().metaData().index("index2").getIndexUUID(); + final ClusterStateResponse 
leaderIndexClusterState = + leaderClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices("index1").get(); + final String leaderUUID = leaderIndexClusterState.getState().metaData().index("index1").getIndexUUID(); + + final RoutingTable leaderRoutingTable = leaderClient() + .admin() + .cluster() + .prepareState() + .clear() + .setIndices("index1") + .setRoutingTable(true) + .get() + .getState() + .routingTable(); + + final String retentionLeaseId = retentionLeaseId( + getFollowerCluster().getClusterName(), + new Index("index2", followerUUID), + getLeaderCluster().getClusterName(), + new Index("index1", leaderUUID)); + + for (final ObjectCursor<IndexShardRoutingTable> shardRoutingTable + : leaderRoutingTable.index("index1").shards().values()) { + final ShardId shardId = shardRoutingTable.value.shardId(); + leaderClient().execute( + RetentionLeaseActions.Remove.INSTANCE, + new RetentionLeaseActions.RemoveRequest(shardId, retentionLeaseId)) + .get(); + } + }, + exceptions -> assertThat(exceptions.size(), greaterThan(0))); + } + + public void testIndexDoesNotFallBehind() throws Exception { + runFallBehindTest( + () -> {}, + exceptions -> assertThat(exceptions.size(), equalTo(0))); + } + + /** + * Runs a fall-behind test. In this test, we construct a situation where a follower is paused. While the follower is paused, we index + * more documents that cause soft deletes on the leader, flush them, and run a force merge. This sets up a situation where those + * operations are not guaranteed to still be available. With retention leases in place, we would actually expect the operations to be + * retained. After pausing the follower, the specified callback is executed; this gives a test an opportunity to set up its + * preconditions. For example, a test might remove all the retention leases on the leader so that the follower falls behind when it is + * resumed, because the operations are no longer held on the leader. The specified exceptions callback is invoked after resuming the + * follower, giving a test an opportunity to assert on the resource-not-found exceptions (either present or absent). 
+ * + * @param afterPausingFollower the callback to run after pausing the follower + * @param exceptionConsumer the callback to run on a collection of resource not found exceptions after resuming the follower + * @throws Exception if a checked exception is thrown during the test + */ + private void runFallBehindTest( + final CheckedRunnable afterPausingFollower, + final Consumer> exceptionConsumer) throws Exception { final int numberOfPrimaryShards = randomIntBetween(1, 3); final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1), - singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); ensureLeaderYellow("index1"); @@ -1007,6 +1078,8 @@ public void testIndexFallBehind() throws Exception { pauseFollow("index2"); + afterPausingFollower.run(); + for (int i = 0; i < numDocs; i++) { final String source = String.format(Locale.ROOT, "{\"f\":%d}", i * 2); leaderClient().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get(); @@ -1023,20 +1096,19 @@ public void testIndexFallBehind() throws Exception { assertBusy(() -> { List statuses = getFollowTaskStatuses("index2"); Set exceptions = statuses.stream() - .map(ShardFollowNodeTaskStatus::getFatalException) - .filter(Objects::nonNull) - .map(ExceptionsHelper::unwrapCause) - .filter(e -> e instanceof ResourceNotFoundException) - .map(e -> (ResourceNotFoundException) e) - .filter(e -> e.getMetadataKeys().contains("es.requested_operations_missing")) - .collect(Collectors.toSet()); - assertThat(exceptions.size(), greaterThan(0)); + .map(ShardFollowNodeTaskStatus::getFatalException) + .filter(Objects::nonNull) + .map(ExceptionsHelper::unwrapCause) + .filter(e -> e instanceof ResourceNotFoundException) + .map(e -> (ResourceNotFoundException) e) + .filter(e -> e.getMetadataKeys().contains("es.requested_operations_missing")) + .collect(Collectors.toSet()); + exceptionConsumer.accept(exceptions); }); followerClient().admin().indices().prepareClose("index2").get(); pauseFollow("index2"); - final PutFollowAction.Request followRequest2 = putFollow("index1", "index2"); PutFollowAction.Response response2 = followerClient().execute(PutFollowAction.INSTANCE, followRequest2).get(); assertTrue(response2.isFollowIndexCreated()); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/GetAutoFollowPatternResponseTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/GetAutoFollowPatternResponseTests.java index 7130c830baa01..55582815ce5e6 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/GetAutoFollowPatternResponseTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/GetAutoFollowPatternResponseTests.java @@ -34,11 +34,11 @@ protected GetAutoFollowPatternAction.Response createTestInstance() { Collections.singletonList(randomAlphaOfLength(4)), randomAlphaOfLength(4), randomIntBetween(0, Integer.MAX_VALUE), - new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(0, Integer.MAX_VALUE), randomIntBetween(0, Integer.MAX_VALUE), - new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(0, Integer.MAX_VALUE), + new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), + new ByteSizeValue(randomNonNegativeLong(), 
ByteSizeUnit.BYTES), randomIntBetween(0, Integer.MAX_VALUE), new ByteSizeValue(randomNonNegativeLong()), TimeValue.timeValueMillis(500), diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java index 629127c454cef..88412aa8fd3b4 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java @@ -82,11 +82,11 @@ private ShardFollowNodeTask createShardFollowTask(int concurrency, TestRun testR new ShardId("follow_index", "", 0), new ShardId("leader_index", "", 0), testRun.maxOperationCount, - TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, - concurrency, testRun.maxOperationCount, - TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, concurrency, + concurrency, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, 10240, new ByteSizeValue(512, ByteSizeUnit.MB), TimeValue.timeValueMillis(10), diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java index a7d07b6066732..178f09c86835a 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java @@ -960,11 +960,11 @@ private ShardFollowNodeTask createShardFollowTask(ShardFollowTaskParams params) params.followShardId, params.leaderShardId, params.maxReadRequestOperationCount, - params.maxReadRequestSize, - params.maxOutstandingReadRequests, params.maxWriteRequestOperationCount, - params.maxWriteRequestSize, + params.maxOutstandingReadRequests, params.maxOutstandingWriteRequests, + params.maxReadRequestSize, + params.maxWriteRequestSize, params.maxWriteBufferCount, params.maxWriteBufferSize, params.maxRetryDelay, diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index 8b05b618ba407..0f82bfab10287 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -7,6 +7,7 @@ import com.carrotsearch.hppc.LongHashSet; import com.carrotsearch.hppc.LongSet; +import org.apache.lucene.store.IOContext; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -17,9 +18,14 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingHelper; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lease.Releasable; +import 
org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -31,9 +37,16 @@ import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; +import org.elasticsearch.index.shard.RestoreOnlyRepository; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.recovery.RecoveryTarget; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.snapshots.Snapshot; +import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.ccr.CcrSettings; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsRequest; @@ -57,6 +70,8 @@ import java.util.function.LongConsumer; import java.util.stream.Collectors; +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -64,233 +79,241 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTestCase { public void testSimpleCcrReplication() throws Exception { - try (ReplicationGroup leaderGroup = createGroup(randomInt(2)); - ReplicationGroup followerGroup = createFollowGroup(randomInt(2))) { + try (ReplicationGroup leaderGroup = createLeaderGroup(randomInt(2))) { leaderGroup.startAll(); - int docCount = leaderGroup.appendDocs(randomInt(64)); - leaderGroup.assertAllEqual(docCount); - followerGroup.startAll(); - ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); - final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); - final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); - shardFollowTask.start( + try (ReplicationGroup followerGroup = createFollowGroup(leaderGroup, randomInt(2))) { + int docCount = leaderGroup.appendDocs(randomInt(64)); + leaderGroup.assertAllEqual(docCount); + followerGroup.startAll(); + ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); + final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); + final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); + shardFollowTask.start( followerGroup.getPrimary().getHistoryUUID(), leaderSeqNoStats.getGlobalCheckpoint(), leaderSeqNoStats.getMaxSeqNo(), followerSeqNoStats.getGlobalCheckpoint(), followerSeqNoStats.getMaxSeqNo()); - docCount += leaderGroup.appendDocs(randomInt(128)); - leaderGroup.syncGlobalCheckpoint(); - leaderGroup.assertAllEqual(docCount); - Set indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); - assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); - followerGroup.assertAllEqual(indexedDocIds.size()); - }); - for (IndexShard shard : followerGroup) { - assertThat(((FollowingEngine) (getEngine(shard))).getNumberOfOptimizedIndexing(), equalTo((long) docCount)); - } - // Deletes should be replicated to the follower - List deleteDocIds = randomSubsetOf(indexedDocIds); - for (String deleteId : deleteDocIds) { 
- BulkItemResponse resp = leaderGroup.delete(new DeleteRequest(index.getName(), "type", deleteId)); - assertThat(resp.getResponse().getResult(), equalTo(DocWriteResponse.Result.DELETED)); + docCount += leaderGroup.appendDocs(randomInt(128)); + leaderGroup.syncGlobalCheckpoint(); + leaderGroup.assertAllEqual(docCount); + Set indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); + assertBusy(() -> { + assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + followerGroup.assertAllEqual(indexedDocIds.size()); + }); + for (IndexShard shard : followerGroup) { + assertThat(((FollowingEngine) (getEngine(shard))).getNumberOfOptimizedIndexing(), equalTo((long) docCount)); + } + // Deletes should be replicated to the follower + List deleteDocIds = randomSubsetOf(indexedDocIds); + for (String deleteId : deleteDocIds) { + BulkItemResponse resp = leaderGroup.delete(new DeleteRequest(index.getName(), "type", deleteId)); + assertThat(resp.getResponse().getResult(), equalTo(DocWriteResponse.Result.DELETED)); + } + leaderGroup.syncGlobalCheckpoint(); + assertBusy(() -> { + assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + followerGroup.assertAllEqual(indexedDocIds.size() - deleteDocIds.size()); + }); + shardFollowTask.markAsCompleted(); + assertConsistentHistoryBetweenLeaderAndFollower(leaderGroup, followerGroup, true); } - leaderGroup.syncGlobalCheckpoint(); - assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); - followerGroup.assertAllEqual(indexedDocIds.size() - deleteDocIds.size()); - }); - shardFollowTask.markAsCompleted(); - assertConsistentHistoryBetweenLeaderAndFollower(leaderGroup, followerGroup, true); } } public void testAddRemoveShardOnLeader() throws Exception { - try (ReplicationGroup leaderGroup = createGroup(1 + randomInt(1)); - ReplicationGroup followerGroup = createFollowGroup(randomInt(2))) { + try (ReplicationGroup leaderGroup = createLeaderGroup(1 + randomInt(1))) { leaderGroup.startAll(); - followerGroup.startAll(); - ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); - final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); - final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); - shardFollowTask.start( + try (ReplicationGroup followerGroup = createFollowGroup(leaderGroup, randomInt(2))) { + followerGroup.startAll(); + ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); + final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); + final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); + shardFollowTask.start( followerGroup.getPrimary().getHistoryUUID(), leaderSeqNoStats.getGlobalCheckpoint(), leaderSeqNoStats.getMaxSeqNo(), followerSeqNoStats.getGlobalCheckpoint(), followerSeqNoStats.getMaxSeqNo()); - int batches = between(0, 10); - int docCount = 0; - boolean hasPromotion = false; - for (int i = 0; i < batches; i++) { - docCount += leaderGroup.indexDocs(between(1, 5)); - if (leaderGroup.getReplicas().isEmpty() == false && randomInt(100) < 5) { - IndexShard closingReplica = randomFrom(leaderGroup.getReplicas()); - leaderGroup.removeReplica(closingReplica); - closingReplica.close("test", false); - closingReplica.store().close(); - } else if (leaderGroup.getReplicas().isEmpty() == false && rarely()) { - 
IndexShard newPrimary = randomFrom(leaderGroup.getReplicas()); - leaderGroup.promoteReplicaToPrimary(newPrimary).get(); - hasPromotion = true; - } else if (randomInt(100) < 5) { - leaderGroup.addReplica(); - leaderGroup.startReplicas(1); + int batches = between(0, 10); + int docCount = 0; + boolean hasPromotion = false; + for (int i = 0; i < batches; i++) { + docCount += leaderGroup.indexDocs(between(1, 5)); + if (leaderGroup.getReplicas().isEmpty() == false && randomInt(100) < 5) { + IndexShard closingReplica = randomFrom(leaderGroup.getReplicas()); + leaderGroup.removeReplica(closingReplica); + closingReplica.close("test", false); + closingReplica.store().close(); + } else if (leaderGroup.getReplicas().isEmpty() == false && rarely()) { + IndexShard newPrimary = randomFrom(leaderGroup.getReplicas()); + leaderGroup.promoteReplicaToPrimary(newPrimary).get(); + hasPromotion = true; + } else if (randomInt(100) < 5) { + leaderGroup.addReplica(); + leaderGroup.startReplicas(1); + } + leaderGroup.syncGlobalCheckpoint(); } - leaderGroup.syncGlobalCheckpoint(); + leaderGroup.assertAllEqual(docCount); + assertThat(shardFollowTask.getFailure(), nullValue()); + int expectedDoc = docCount; + assertBusy(() -> followerGroup.assertAllEqual(expectedDoc)); + shardFollowTask.markAsCompleted(); + assertConsistentHistoryBetweenLeaderAndFollower(leaderGroup, followerGroup, hasPromotion == false); } - leaderGroup.assertAllEqual(docCount); - assertThat(shardFollowTask.getFailure(), nullValue()); - int expectedDoc = docCount; - assertBusy(() -> followerGroup.assertAllEqual(expectedDoc)); - shardFollowTask.markAsCompleted(); - assertConsistentHistoryBetweenLeaderAndFollower(leaderGroup, followerGroup, hasPromotion == false); } } public void testChangeLeaderHistoryUUID() throws Exception { - try (ReplicationGroup leaderGroup = createGroup(0); - ReplicationGroup followerGroup = createFollowGroup(0)) { - leaderGroup.startAll(); - int docCount = leaderGroup.appendDocs(randomInt(64)); - leaderGroup.assertAllEqual(docCount); - followerGroup.startAll(); - ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); - final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); - final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); - shardFollowTask.start( - followerGroup.getPrimary().getHistoryUUID(), - leaderSeqNoStats.getGlobalCheckpoint(), - leaderSeqNoStats.getMaxSeqNo(), - followerSeqNoStats.getGlobalCheckpoint(), - followerSeqNoStats.getMaxSeqNo()); - leaderGroup.syncGlobalCheckpoint(); - leaderGroup.assertAllEqual(docCount); - Set indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); - assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); - followerGroup.assertAllEqual(indexedDocIds.size()); - }); - - String oldHistoryUUID = leaderGroup.getPrimary().getHistoryUUID(); - leaderGroup.reinitPrimaryShard(); - leaderGroup.getPrimary().store().bootstrapNewHistory(); - recoverShardFromStore(leaderGroup.getPrimary()); - String newHistoryUUID = leaderGroup.getPrimary().getHistoryUUID(); - - // force the global checkpoint on the leader to advance - leaderGroup.appendDocs(64); - - assertBusy(() -> { - assertThat(shardFollowTask.isStopped(), is(true)); - ElasticsearchException failure = shardFollowTask.getStatus().getFatalException(); - assertThat(failure.getRootCause().getMessage(), equalTo("unexpected history uuid, expected [" + oldHistoryUUID + - "], actual [" + 
newHistoryUUID + "]")); - }); + try (ReplicationGroup leaderGroup = createLeaderGroup(0)) { + try (ReplicationGroup followerGroup = createFollowGroup(leaderGroup, 0)) { + leaderGroup.startAll(); + int docCount = leaderGroup.appendDocs(randomInt(64)); + leaderGroup.assertAllEqual(docCount); + followerGroup.startAll(); + ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); + final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); + final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); + shardFollowTask.start( + followerGroup.getPrimary().getHistoryUUID(), + leaderSeqNoStats.getGlobalCheckpoint(), + leaderSeqNoStats.getMaxSeqNo(), + followerSeqNoStats.getGlobalCheckpoint(), + followerSeqNoStats.getMaxSeqNo()); + leaderGroup.syncGlobalCheckpoint(); + leaderGroup.assertAllEqual(docCount); + Set indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); + assertBusy(() -> { + assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + followerGroup.assertAllEqual(indexedDocIds.size()); + }); + + String oldHistoryUUID = leaderGroup.getPrimary().getHistoryUUID(); + leaderGroup.reinitPrimaryShard(); + leaderGroup.getPrimary().store().bootstrapNewHistory(); + recoverShardFromStore(leaderGroup.getPrimary()); + String newHistoryUUID = leaderGroup.getPrimary().getHistoryUUID(); + + // force the global checkpoint on the leader to advance + leaderGroup.appendDocs(64); + + assertBusy(() -> { + assertThat(shardFollowTask.isStopped(), is(true)); + ElasticsearchException failure = shardFollowTask.getStatus().getFatalException(); + assertThat(failure.getRootCause().getMessage(), equalTo("unexpected history uuid, expected [" + oldHistoryUUID + + "], actual [" + newHistoryUUID + "]")); + }); + } } } public void testChangeFollowerHistoryUUID() throws Exception { - try (ReplicationGroup leaderGroup = createGroup(0); - ReplicationGroup followerGroup = createFollowGroup(0)) { + try (ReplicationGroup leaderGroup = createLeaderGroup(0)) { leaderGroup.startAll(); - int docCount = leaderGroup.appendDocs(randomInt(64)); - leaderGroup.assertAllEqual(docCount); - followerGroup.startAll(); - ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); - final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); - final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); - shardFollowTask.start( - followerGroup.getPrimary().getHistoryUUID(), - leaderSeqNoStats.getGlobalCheckpoint(), - leaderSeqNoStats.getMaxSeqNo(), - followerSeqNoStats.getGlobalCheckpoint(), - followerSeqNoStats.getMaxSeqNo()); - leaderGroup.syncGlobalCheckpoint(); - leaderGroup.assertAllEqual(docCount); - Set indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); - assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); - followerGroup.assertAllEqual(indexedDocIds.size()); - }); - - String oldHistoryUUID = followerGroup.getPrimary().getHistoryUUID(); - followerGroup.reinitPrimaryShard(); - followerGroup.getPrimary().store().bootstrapNewHistory(); - recoverShardFromStore(followerGroup.getPrimary()); - String newHistoryUUID = followerGroup.getPrimary().getHistoryUUID(); - - // force the global checkpoint on the leader to advance - leaderGroup.appendDocs(64); - - assertBusy(() -> { - assertThat(shardFollowTask.isStopped(), is(true)); - ElasticsearchException failure = 
shardFollowTask.getStatus().getFatalException(); - assertThat(failure.getRootCause().getMessage(), equalTo("unexpected history uuid, expected [" + oldHistoryUUID + - "], actual [" + newHistoryUUID + "], shard is likely restored from snapshot or force allocated")); - }); + try(ReplicationGroup followerGroup = createFollowGroup(leaderGroup, 0)) { + int docCount = leaderGroup.appendDocs(randomInt(64)); + leaderGroup.assertAllEqual(docCount); + followerGroup.startAll(); + ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); + final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); + final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); + shardFollowTask.start( + followerGroup.getPrimary().getHistoryUUID(), + leaderSeqNoStats.getGlobalCheckpoint(), + leaderSeqNoStats.getMaxSeqNo(), + followerSeqNoStats.getGlobalCheckpoint(), + followerSeqNoStats.getMaxSeqNo()); + leaderGroup.syncGlobalCheckpoint(); + leaderGroup.assertAllEqual(docCount); + Set indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); + assertBusy(() -> { + assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + followerGroup.assertAllEqual(indexedDocIds.size()); + }); + + String oldHistoryUUID = followerGroup.getPrimary().getHistoryUUID(); + followerGroup.reinitPrimaryShard(); + followerGroup.getPrimary().store().bootstrapNewHistory(); + recoverShardFromStore(followerGroup.getPrimary()); + String newHistoryUUID = followerGroup.getPrimary().getHistoryUUID(); + + // force the global checkpoint on the leader to advance + leaderGroup.appendDocs(64); + + assertBusy(() -> { + assertThat(shardFollowTask.isStopped(), is(true)); + ElasticsearchException failure = shardFollowTask.getStatus().getFatalException(); + assertThat(failure.getRootCause().getMessage(), equalTo("unexpected history uuid, expected [" + oldHistoryUUID + + "], actual [" + newHistoryUUID + "], shard is likely restored from snapshot or force allocated")); + }); + } } } public void testRetryBulkShardOperations() throws Exception { - try (ReplicationGroup leaderGroup = createGroup(between(0, 1)); - ReplicationGroup followerGroup = createFollowGroup(between(1, 3))) { + try (ReplicationGroup leaderGroup = createLeaderGroup(between(0, 1))) { leaderGroup.startAll(); - followerGroup.startAll(); - leaderGroup.appendDocs(between(10, 100)); - leaderGroup.refresh("test"); - for (int numNoOps = between(1, 10), i = 0; i < numNoOps; i++) { - long seqNo = leaderGroup.getPrimary().seqNoStats().getMaxSeqNo() + 1; - Engine.NoOp noOp = new Engine.NoOp(seqNo, leaderGroup.getPrimary().getOperationPrimaryTerm(), - Engine.Operation.Origin.REPLICA, threadPool.relativeTimeInMillis(), "test-" + i); - for (IndexShard shard : leaderGroup) { - getEngine(shard).noOp(noOp); + try(ReplicationGroup followerGroup = createFollowGroup(leaderGroup, between(1, 3))) { + followerGroup.startAll(); + leaderGroup.appendDocs(between(10, 100)); + leaderGroup.refresh("test"); + for (int numNoOps = between(1, 10), i = 0; i < numNoOps; i++) { + long seqNo = leaderGroup.getPrimary().seqNoStats().getMaxSeqNo() + 1; + Engine.NoOp noOp = new Engine.NoOp(seqNo, leaderGroup.getPrimary().getOperationPrimaryTerm(), + Engine.Operation.Origin.REPLICA, threadPool.relativeTimeInMillis(), "test-" + i); + for (IndexShard shard : leaderGroup) { + getEngine(shard).noOp(noOp); + } } - } - for (String deleteId : randomSubsetOf(IndexShardTestCase.getShardDocUIDs(leaderGroup.getPrimary()))) { 
- BulkItemResponse resp = leaderGroup.delete(new DeleteRequest("test", "type", deleteId)); - assertThat(resp.getFailure(), nullValue()); - } - leaderGroup.syncGlobalCheckpoint(); - IndexShard leadingPrimary = leaderGroup.getPrimary(); - // Simulates some bulk requests are completed on the primary and replicated to some (but all) replicas of the follower - // but the primary of the follower crashed before these requests completed. - for (int numBulks = between(1, 5), i = 0; i < numBulks; i++) { - long fromSeqNo = randomLongBetween(0, leadingPrimary.getGlobalCheckpoint()); - long toSeqNo = randomLongBetween(fromSeqNo, leadingPrimary.getGlobalCheckpoint()); - int numOps = Math.toIntExact(toSeqNo + 1 - fromSeqNo); - Translog.Operation[] ops = ShardChangesAction.getOperations(leadingPrimary, leadingPrimary.getGlobalCheckpoint(), - fromSeqNo, numOps, leadingPrimary.getHistoryUUID(), new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES)); - - IndexShard followingPrimary = followerGroup.getPrimary(); - TransportWriteAction.WritePrimaryResult primaryResult = - TransportBulkShardOperationsAction.shardOperationOnPrimary(followingPrimary.shardId(), - followingPrimary.getHistoryUUID(), Arrays.asList(ops), leadingPrimary.getMaxSeqNoOfUpdatesOrDeletes(), - followingPrimary, logger); - for (IndexShard replica : randomSubsetOf(followerGroup.getReplicas())) { - final PlainActionFuture permitFuture = new PlainActionFuture<>(); - replica.acquireReplicaOperationPermit(followingPrimary.getOperationPrimaryTerm(), - followingPrimary.getGlobalCheckpoint(), followingPrimary.getMaxSeqNoOfUpdatesOrDeletes(), - permitFuture, ThreadPool.Names.SAME, primaryResult); - try (Releasable ignored = permitFuture.get()) { - TransportBulkShardOperationsAction.shardOperationOnReplica(primaryResult.replicaRequest(), replica, logger); + for (String deleteId : randomSubsetOf(IndexShardTestCase.getShardDocUIDs(leaderGroup.getPrimary()))) { + BulkItemResponse resp = leaderGroup.delete(new DeleteRequest("test", "type", deleteId)); + assertThat(resp.getFailure(), nullValue()); + } + leaderGroup.syncGlobalCheckpoint(); + IndexShard leadingPrimary = leaderGroup.getPrimary(); + // Simulates that some bulk requests completed on the primary and were replicated to some (but not all) replicas of the follower, + // but the primary of the follower crashed before these requests completed. 
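+ // Note: the sampled sequence-number ranges below are inclusive, hence numOps = toSeqNo + 1 - fromSeqNo. The test + // relies on replay being idempotent: operations are keyed by sequence number, so ranges that overlap what a + // replica has already applied do not create divergent history.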
+ for (int numBulks = between(1, 5), i = 0; i < numBulks; i++) { + long fromSeqNo = randomLongBetween(0, leadingPrimary.getGlobalCheckpoint()); + long toSeqNo = randomLongBetween(fromSeqNo, leadingPrimary.getGlobalCheckpoint()); + int numOps = Math.toIntExact(toSeqNo + 1 - fromSeqNo); + Translog.Operation[] ops = ShardChangesAction.getOperations(leadingPrimary, leadingPrimary.getGlobalCheckpoint(), + fromSeqNo, numOps, leadingPrimary.getHistoryUUID(), new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES)); + + IndexShard followingPrimary = followerGroup.getPrimary(); + TransportWriteAction.WritePrimaryResult primaryResult = + TransportBulkShardOperationsAction.shardOperationOnPrimary(followingPrimary.shardId(), + followingPrimary.getHistoryUUID(), Arrays.asList(ops), leadingPrimary.getMaxSeqNoOfUpdatesOrDeletes(), + followingPrimary, logger); + for (IndexShard replica : randomSubsetOf(followerGroup.getReplicas())) { + final PlainActionFuture permitFuture = new PlainActionFuture<>(); + replica.acquireReplicaOperationPermit(followingPrimary.getOperationPrimaryTerm(), + followingPrimary.getGlobalCheckpoint(), followingPrimary.getMaxSeqNoOfUpdatesOrDeletes(), + permitFuture, ThreadPool.Names.SAME, primaryResult); + try (Releasable ignored = permitFuture.get()) { + TransportBulkShardOperationsAction.shardOperationOnReplica(primaryResult.replicaRequest(), replica, logger); + } } } - } - // A follow-task retries these requests while the primary-replica resync is happening on the follower. - followerGroup.promoteReplicaToPrimary(randomFrom(followerGroup.getReplicas())); - ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); - SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); - shardFollowTask.start(followerGroup.getPrimary().getHistoryUUID(), leadingPrimary.getGlobalCheckpoint(), - leadingPrimary.getMaxSeqNoOfUpdatesOrDeletes(), followerSeqNoStats.getGlobalCheckpoint(), followerSeqNoStats.getMaxSeqNo()); - try { - assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leadingPrimary.getGlobalCheckpoint())); - assertConsistentHistoryBetweenLeaderAndFollower(leaderGroup, followerGroup, true); - }); - } finally { - shardFollowTask.markAsCompleted(); + // A follow-task retries these requests while the primary-replica resync is happening on the follower. 
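+ // The assertBusy below retries the assertion until it passes or a deadline elapses, roughly: + // long deadline = System.nanoTime() + timeUnit.toNanos(timeout); + // while (true) { + // try { assertion.run(); return; } + // catch (AssertionError e) { if (System.nanoTime() >= deadline) throw e; Thread.sleep(backoffMillis); } + // } + // (a simplified sketch of the ESTestCase helper; backoffMillis and the backoff policy are assumptions)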
+ followerGroup.promoteReplicaToPrimary(randomFrom(followerGroup.getReplicas())); + ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); + SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); + shardFollowTask.start(followerGroup.getPrimary().getHistoryUUID(), + leadingPrimary.getGlobalCheckpoint(), + leadingPrimary.getMaxSeqNoOfUpdatesOrDeletes(), + followerSeqNoStats.getGlobalCheckpoint(), + followerSeqNoStats.getMaxSeqNo()); + try { + assertBusy(() -> { + assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leadingPrimary.getGlobalCheckpoint())); + assertConsistentHistoryBetweenLeaderAndFollower(leaderGroup, followerGroup, true); + }); + } finally { + shardFollowTask.markAsCompleted(); + } } } } @@ -303,7 +326,17 @@ public void testAddNewFollowingReplica() throws Exception { operations.add(new Translog.Index("type", Integer.toString(i), i, primaryTerm, 0, source, null, -1)); } Future recoveryFuture = null; - try (ReplicationGroup group = createFollowGroup(between(0, 1))) { + Settings settings = Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(between(1, 1000), ByteSizeUnit.KB)) + .build(); + IndexMetaData indexMetaData = buildIndexMetaData(between(0, 1), settings, indexMapping); + try (ReplicationGroup group = new ReplicationGroup(indexMetaData) { + @Override + protected EngineFactory getEngineFactory(ShardRouting routing) { + return new FollowingEngineFactory(); + } + }) { group.startAll(); while (operations.isEmpty() == false) { List bulkOps = randomSubsetOf(between(1, operations.size()), operations); @@ -330,35 +363,79 @@ public void testAddNewFollowingReplica() throws Exception { } } - @Override - protected ReplicationGroup createGroup(int replicas, Settings settings) throws IOException { - Settings newSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, replicas) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + public void testSimpleRemoteRecovery() throws Exception { + try (ReplicationGroup leader = createLeaderGroup(between(0, 1))) { + leader.startAll(); + leader.appendDocs(between(0, 100)); + leader.flush(); + leader.syncGlobalCheckpoint(); + try (ReplicationGroup follower = createFollowGroup(leader, 0)) { + follower.startAll(); + ShardFollowNodeTask followTask = createShardFollowTask(leader, follower); + followTask.start( + follower.getPrimary().getHistoryUUID(), + leader.getPrimary().getGlobalCheckpoint(), + leader.getPrimary().seqNoStats().getMaxSeqNo(), + follower.getPrimary().getGlobalCheckpoint(), + follower.getPrimary().seqNoStats().getMaxSeqNo() + ); + leader.appendDocs(between(0, 100)); + if (randomBoolean()) { + follower.recoverReplica(follower.addReplica()); + } + assertBusy(() -> assertConsistentHistoryBetweenLeaderAndFollower(leader, follower, false)); + followTask.markAsCompleted(); + } + } + } + + private ReplicationGroup createLeaderGroup(int replicas) throws IOException { + Settings settings = Settings.builder() .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 10000) - .put(settings) .build(); - if (CcrSettings.CCR_FOLLOWING_INDEX_SETTING.get(newSettings)) { - IndexMetaData metaData = buildIndexMetaData(replicas, newSettings, 
indexMapping); - return new ReplicationGroup(metaData) { - - @Override - protected EngineFactory getEngineFactory(ShardRouting routing) { - return new FollowingEngineFactory(); - } - }; - } else { - return super.createGroup(replicas, newSettings); - } + return createGroup(replicas, settings); } - private ReplicationGroup createFollowGroup(int replicas) throws IOException { - Settings.Builder settingsBuilder = Settings.builder(); - settingsBuilder.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) + private ReplicationGroup createFollowGroup(ReplicationGroup leaderGroup, int replicas) throws IOException { + Settings settings = Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(between(1, 1000), ByteSizeUnit.KB)); - return createGroup(replicas, settingsBuilder.build()); + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(between(1, 1000), ByteSizeUnit.KB)) + .build(); + IndexMetaData indexMetaData = buildIndexMetaData(replicas, settings, indexMapping); + return new ReplicationGroup(indexMetaData) { + @Override + protected EngineFactory getEngineFactory(ShardRouting routing) { + return new FollowingEngineFactory(); + } + @Override + protected synchronized void recoverPrimary(IndexShard primary) { + DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); + Snapshot snapshot = new Snapshot("foo", new SnapshotId("bar", UUIDs.randomBase64UUID())); + ShardRouting routing = ShardRoutingHelper.newWithRestoreSource(primary.routingEntry(), + new RecoverySource.SnapshotRecoverySource(UUIDs.randomBase64UUID(), snapshot, Version.CURRENT, "test")); + primary.markAsRecovering("remote recovery from leader", new RecoveryState(routing, localNode, null)); + primary.restoreFromRepository(new RestoreOnlyRepository(index.getName()) { + @Override + public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, + IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { + try { + IndexShard leader = leaderGroup.getPrimary(); + Lucene.cleanLuceneIndex(primary.store().directory()); + try (Engine.IndexCommitRef sourceCommit = leader.acquireSafeIndexCommit()) { + Store.MetadataSnapshot sourceSnapshot = leader.store().getMetadata(sourceCommit.getIndexCommit()); + for (StoreFileMetaData md : sourceSnapshot) { + primary.store().directory().copyFrom( + leader.store().directory(), md.name(), md.name(), IOContext.DEFAULT); + } + } + } catch (Exception ex) { + throw new AssertionError(ex); + } + } + }); + } + }; } private ShardFollowNodeTask createShardFollowTask(ReplicationGroup leaderGroup, ReplicationGroup followerGroup) { @@ -367,11 +444,11 @@ private ShardFollowNodeTask createShardFollowTask(ReplicationGroup leaderGroup, new ShardId("follow_index", "", 0), new ShardId("leader_index", "", 0), between(1, 64), - new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), - between(1, 8), between(1, 64), - new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), + between(1, 8), between(1, 4), + new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), + new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), 10240, new ByteSizeValue(512, ByteSizeUnit.MB), TimeValue.timeValueMillis(10), @@ -483,7 +560,7 @@ private void assertConsistentHistoryBetweenLeaderAndFollower(ReplicationGroup le final List> 
docAndSeqNosOnLeader = getDocIdAndSeqNos(leader.getPrimary()).stream() .map(d -> Tuple.tuple(d.getId(), d.getSeqNo())).collect(Collectors.toList()); final Set> operationsOnLeader = new HashSet<>(); - try (Translog.Snapshot snapshot = leader.getPrimary().getHistoryOperations("test", 0)) { + try (Translog.Snapshot snapshot = leader.getPrimary().newChangesSnapshot("test", 0, Long.MAX_VALUE, false)) { Translog.Operation op; while ((op = snapshot.next()) != null) { operationsOnLeader.add(Tuple.tuple(op.seqNo(), op.opType())); @@ -497,13 +574,13 @@ private void assertConsistentHistoryBetweenLeaderAndFollower(ReplicationGroup le .map(d -> Tuple.tuple(d.getId(), d.getSeqNo())).collect(Collectors.toList()); assertThat(docAndSeqNosOnFollower, equalTo(docAndSeqNosOnLeader)); final Set> operationsOnFollower = new HashSet<>(); - try (Translog.Snapshot snapshot = followingShard.getHistoryOperations("test", 0)) { + try (Translog.Snapshot snapshot = followingShard.newChangesSnapshot("test", 0, Long.MAX_VALUE, false)) { Translog.Operation op; while ((op = snapshot.next()) != null) { operationsOnFollower.add(Tuple.tuple(op.seqNo(), op.opType())); } } - assertThat(operationsOnFollower, equalTo(operationsOnLeader)); + assertThat(followingShard.routingEntry().toString(), operationsOnFollower, equalTo(operationsOnLeader)); } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java index 1dfe4a9897075..94b27a2850d5b 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java @@ -30,11 +30,11 @@ protected ShardFollowTask createTestInstance() { new ShardId(randomAlphaOfLength(4), randomAlphaOfLength(4), randomInt(5)), new ShardId(randomAlphaOfLength(4), randomAlphaOfLength(4), randomInt(5)), randomIntBetween(1, Integer.MAX_VALUE), - new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(1, Integer.MAX_VALUE), randomIntBetween(1, Integer.MAX_VALUE), - new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(1, Integer.MAX_VALUE), + new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), + new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(1, Integer.MAX_VALUE), new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), TimeValue.parseTimeValue(randomTimeValue(), ""), @@ -45,6 +45,6 @@ protected ShardFollowTask createTestInstance() { @Override protected Writeable.Reader instanceReader() { - return ShardFollowTask::new; + return ShardFollowTask::readFrom; } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java index b8f570e4ef4f6..b5f369f52472c 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java @@ -50,11 +50,11 @@ static ShardFollowTask createShardFollowTask(String followerIndex) { new ShardId(followerIndex, "", 0), new ShardId("leader_index", "", 0), 1024, - TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, - 1, 1024, - TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, 1, + 1, + 
TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, 10240, new ByteSizeValue(512, ByteSizeUnit.MB), TimeValue.timeValueMillis(10), diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java index 57bc30210fa74..ef1fa1a80259e 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.MapperTestUtils; -import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; @@ -233,7 +232,6 @@ public void testDynamicIndexSettingsAreClassified() { replicatedSettings.add(MapperService.INDEX_MAPPER_DYNAMIC_SETTING); replicatedSettings.add(IndexSettings.MAX_NGRAM_DIFF_SETTING); replicatedSettings.add(IndexSettings.MAX_SHINGLE_DIFF_SETTING); - replicatedSettings.add(EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS); for (Setting setting : IndexScopedSettings.BUILT_IN_INDEX_SETTINGS) { if (setting.isDynamic()) { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java index 93987a7306f45..5b6bac6491398 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java @@ -83,11 +83,11 @@ public void testUnfollowRunningShardFollowTasks() { new ShardId("follow_index", "", 0), new ShardId("leader_index", "", 0), 1024, - TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, - 1, 1024, - TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, 1, + 1, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, 10240, new ByteSizeValue(512, ByteSizeUnit.MB), TimeValue.timeValueMillis(10), diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java index 67d31ff39007f..69fa23bd3fbcd 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java @@ -59,6 +59,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.index.engine.EngineTestCase.getDocIds; +import static org.elasticsearch.index.engine.EngineTestCase.getTranslog; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -659,4 +660,49 @@ public void testVerifyShardBeforeIndexClosingIsNoOp() throws IOException { } }); } + + public void testMaxSeqNoInCommitUserData() throws Exception { + final Settings settings = Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0) + 
.put("index.version.created", Version.CURRENT).put("index.xpack.ccr.following_index", true)
+            .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build();
+        final IndexMetaData indexMetaData = IndexMetaData.builder(index.getName()).settings(settings).build();
+        final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings);
+        try (Store store = createStore(shardId, indexSettings, newDirectory())) {
+            final EngineConfig engineConfig = engineConfig(shardId, indexSettings, threadPool, store, logger, xContentRegistry());
+            try (FollowingEngine engine = createEngine(store, engineConfig)) {
+                AtomicBoolean running = new AtomicBoolean(true);
+                Thread rollTranslog = new Thread(() -> {
+                    while (running.get() && getTranslog(engine).currentFileGeneration() < 500) {
+                        engine.rollTranslogGeneration(); // make adding operations to translog slower
+                    }
+                });
+                rollTranslog.start();
+
+                Thread indexing = new Thread(() -> {
+                    List<Engine.Operation> ops = EngineTestCase.generateSingleDocHistory(true, VersionType.EXTERNAL, 2, 50, 500, "id");
+                    engine.advanceMaxSeqNoOfUpdatesOrDeletes(ops.stream().mapToLong(Engine.Operation::seqNo).max().getAsLong());
+                    for (Engine.Operation op : ops) {
+                        if (running.get() == false) {
+                            return;
+                        }
+                        try {
+                            EngineTestCase.applyOperation(engine, op);
+                        } catch (IOException e) {
+                            throw new AssertionError(e);
+                        }
+                    }
+                });
+                indexing.start();
+
+                int numCommits = between(5, 20);
+                for (int i = 0; i < numCommits; i++) {
+                    engine.flush(false, true);
+                }
+                running.set(false);
+                indexing.join();
+                rollTranslog.join();
+                EngineTestCase.assertMaxSeqNoInCommitUserData(engine);
+            }
+        }
+    }
 }
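The test above flushes concurrently with indexing and translog rolling, then asserts that every Lucene commit's user data carries a consistent max_seq_no entry. Conceptually, the property can be observed directly on the last commit; a minimal sketch of that check (not the actual assertMaxSeqNoInCommitUserData implementation) is below, assuming an open engine from this test, java.util.Map imported, and the user-data keys from SequenceNumbers:

    // Sketch: the commit user data records a max_seq_no at least as large as the local checkpoint.
    try (Engine.IndexCommitRef commitRef = engine.acquireLastIndexCommit(false)) {
        final Map<String, String> userData = commitRef.getIndexCommit().getUserData();
        final long maxSeqNo = Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO));
        final long localCheckpoint = Long.parseLong(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY));
        assertThat(maxSeqNo, greaterThanOrEqualTo(localCheckpoint));
    }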
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRepositoryRetentionLeaseTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRepositoryRetentionLeaseTests.java
new file mode 100644
index 0000000000000..2e382f739300b
--- /dev/null
+++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRepositoryRetentionLeaseTests.java
@@ -0,0 +1,191 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.ccr.repository;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.RepositoryMetaData;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.seqno.RetentionLeaseActions;
+import org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException;
+import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.xpack.ccr.CcrLicenseChecker;
+import org.elasticsearch.xpack.ccr.CcrSettings;
+import org.mockito.ArgumentCaptor;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static org.elasticsearch.index.seqno.RetentionLeaseActions.RETAIN_ALL;
+import static org.elasticsearch.xpack.ccr.CcrRetentionLeases.retentionLeaseId;
+import static org.hamcrest.Matchers.equalTo;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.same;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
+
+public class CcrRepositoryRetentionLeaseTests extends ESTestCase {
+
+    public void testWhenRetentionLeaseAlreadyExistsWeTryToRenewIt() {
+        final RepositoryMetaData repositoryMetaData = mock(RepositoryMetaData.class);
+        when(repositoryMetaData.name()).thenReturn(CcrRepository.NAME_PREFIX);
+        final Set<Setting<?>> settings =
+                Stream.concat(
+                        ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.stream(),
+                        CcrSettings.getSettings().stream().filter(Setting::hasNodeScope))
+                        .collect(Collectors.toSet());
+
+        final CcrRepository repository = new CcrRepository(
+                repositoryMetaData,
+                mock(Client.class),
+                new CcrLicenseChecker(() -> true, () -> true),
+                Settings.EMPTY,
+                new CcrSettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, settings)),
+                mock(ThreadPool.class));
+
+        final ShardId followerShardId = new ShardId(new Index("follower-index-name", "follower-index-uuid"), 0);
+        final ShardId leaderShardId = new ShardId(new Index("leader-index-name", "leader-index-uuid"), 0);
+
+        final String retentionLeaseId =
+                retentionLeaseId("local-cluster", followerShardId.getIndex(), "remote-cluster", leaderShardId.getIndex());
+
+        // simulate that the retention lease already exists on the leader, and verify that we attempt to renew it
+        final Client remoteClient = mock(Client.class);
+        final ArgumentCaptor<RetentionLeaseActions.AddRequest> addRequestCaptor =
+                ArgumentCaptor.forClass(RetentionLeaseActions.AddRequest.class);
+        doAnswer(
+                invocationOnMock -> {
+                    @SuppressWarnings("unchecked") final ActionListener<RetentionLeaseActions.Response> listener =
+                            (ActionListener<RetentionLeaseActions.Response>) invocationOnMock.getArguments()[2];
+                    listener.onFailure(new RetentionLeaseAlreadyExistsException(retentionLeaseId));
+                    return null;
+                })
+                .when(remoteClient)
+                .execute(same(RetentionLeaseActions.Add.INSTANCE), addRequestCaptor.capture(), any());
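For context, the behavior this stubbing drives is the repository's add-then-renew fallback when taking out a retention lease on the leader shard. A simplified, synchronous sketch of that flow follows; the real acquireRetentionLeaseOnLeader is listener-based, and the request constructor shapes here are inferred from the assertions in this test rather than taken from the production code:

    // Sketch: try to add a lease retaining all operations; if one already exists, renew it instead.
    void acquireLeaseSketch(ShardId leaderShardId, String retentionLeaseId, Client remoteClient) {
        try {
            remoteClient.execute(RetentionLeaseActions.Add.INSTANCE,
                new RetentionLeaseActions.AddRequest(leaderShardId, retentionLeaseId, RETAIN_ALL, "ccr")).actionGet();
        } catch (RetentionLeaseAlreadyExistsException e) {
            // a lease left over from an earlier follower; keep it alive rather than failing
            remoteClient.execute(RetentionLeaseActions.Renew.INSTANCE,
                new RetentionLeaseActions.RenewRequest(leaderShardId, retentionLeaseId, RETAIN_ALL, "ccr")).actionGet();
        }
    }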
+        final ArgumentCaptor<RetentionLeaseActions.RenewRequest> renewRequestCaptor =
+                ArgumentCaptor.forClass(RetentionLeaseActions.RenewRequest.class);
+        doAnswer(
+                invocationOnMock -> {
+                    @SuppressWarnings("unchecked") final ActionListener<RetentionLeaseActions.Response> listener =
+                            (ActionListener<RetentionLeaseActions.Response>) invocationOnMock.getArguments()[2];
+                    listener.onResponse(new RetentionLeaseActions.Response());
+                    return null;
+                })
+                .when(remoteClient)
+                .execute(same(RetentionLeaseActions.Renew.INSTANCE), renewRequestCaptor.capture(), any());
+
+        repository.acquireRetentionLeaseOnLeader(followerShardId, retentionLeaseId, leaderShardId, remoteClient);
+
+        verify(remoteClient).execute(same(RetentionLeaseActions.Add.INSTANCE), any(RetentionLeaseActions.AddRequest.class), any());
+        assertThat(addRequestCaptor.getValue().getShardId(), equalTo(leaderShardId));
+        assertThat(addRequestCaptor.getValue().getId(), equalTo(retentionLeaseId));
+        assertThat(addRequestCaptor.getValue().getRetainingSequenceNumber(), equalTo(RETAIN_ALL));
+        assertThat(addRequestCaptor.getValue().getSource(), equalTo("ccr"));
+
+        verify(remoteClient).execute(same(RetentionLeaseActions.Renew.INSTANCE), any(RetentionLeaseActions.RenewRequest.class), any());
+        assertThat(renewRequestCaptor.getValue().getShardId(), equalTo(leaderShardId));
+        assertThat(renewRequestCaptor.getValue().getId(), equalTo(retentionLeaseId));
+        assertThat(renewRequestCaptor.getValue().getRetainingSequenceNumber(), equalTo(RETAIN_ALL));
+        assertThat(renewRequestCaptor.getValue().getSource(), equalTo("ccr"));
+
+        verifyNoMoreInteractions(remoteClient);
+    }
+
+    public void testWhenRetentionLeaseExpiresBeforeWeCanRenewIt() {
+        final RepositoryMetaData repositoryMetaData = mock(RepositoryMetaData.class);
+        when(repositoryMetaData.name()).thenReturn(CcrRepository.NAME_PREFIX);
+        final Set<Setting<?>> settings =
+                Stream.concat(
+                        ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.stream(),
+                        CcrSettings.getSettings().stream().filter(Setting::hasNodeScope))
+                        .collect(Collectors.toSet());
+
+        final CcrRepository repository = new CcrRepository(
+                repositoryMetaData,
+                mock(Client.class),
+                new CcrLicenseChecker(() -> true, () -> true),
+                Settings.EMPTY,
+                new CcrSettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, settings)),
+                mock(ThreadPool.class));
+
+        final ShardId followerShardId = new ShardId(new Index("follower-index-name", "follower-index-uuid"), 0);
+        final ShardId leaderShardId = new ShardId(new Index("leader-index-name", "leader-index-uuid"), 0);
+
+        final String retentionLeaseId =
+                retentionLeaseId("local-cluster", followerShardId.getIndex(), "remote-cluster", leaderShardId.getIndex());
+
+        // simulate that the retention lease already exists on the leader, expires before we renew, and verify that we attempt to add it
+        final Client remoteClient = mock(Client.class);
+        final ArgumentCaptor<RetentionLeaseActions.AddRequest> addRequestCaptor =
+                ArgumentCaptor.forClass(RetentionLeaseActions.AddRequest.class);
+        final PlainActionFuture<RetentionLeaseActions.Response> response = new PlainActionFuture<>();
+        response.onResponse(new RetentionLeaseActions.Response());
+        doAnswer(
+                new Answer<Void>() {
+
+                    final AtomicBoolean firstInvocation = new AtomicBoolean(true);
+
+                    @Override
+                    public Void answer(final InvocationOnMock invocationOnMock) {
+                        @SuppressWarnings("unchecked") final ActionListener<RetentionLeaseActions.Response> listener =
+                                (ActionListener<RetentionLeaseActions.Response>) invocationOnMock.getArguments()[2];
+                        if (firstInvocation.compareAndSet(true, false)) {
+                            listener.onFailure(new RetentionLeaseAlreadyExistsException(retentionLeaseId));
+                        } else {
+                            listener.onResponse(new RetentionLeaseActions.Response());
+                        }
+                        return null;
+                    }
+
+                })
+                .when(remoteClient).execute(same(RetentionLeaseActions.Add.INSTANCE), addRequestCaptor.capture(), any());
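The stubbing above makes the first Add attempt fail with "already exists" and any later attempt succeed, which models a lease that expires in the window between the failed add and the failed renew. That first-call-fails-then-succeeds pattern generalizes; a sketch of a reusable helper (hypothetical, not part of this change) might look like:

    // Sketch: a Mockito Answer that fails the first invocation and responds on subsequent ones.
    static <T> Answer<Void> failFirstThenRespond(Exception failure, T response) {
        final AtomicBoolean firstInvocation = new AtomicBoolean(true);
        return invocation -> {
            @SuppressWarnings("unchecked")
            ActionListener<T> listener = (ActionListener<T>) invocation.getArguments()[2];
            if (firstInvocation.compareAndSet(true, false)) {
                listener.onFailure(failure);   // first call: simulate the conflicting lease
            } else {
                listener.onResponse(response); // later calls: succeed
            }
            return null;
        };
    }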
+        final ArgumentCaptor<RetentionLeaseActions.RenewRequest> renewRequestCaptor =
+                ArgumentCaptor.forClass(RetentionLeaseActions.RenewRequest.class);
+        doAnswer(
+                invocationOnMock -> {
+                    @SuppressWarnings("unchecked") final ActionListener<RetentionLeaseActions.Response> listener =
+                            (ActionListener<RetentionLeaseActions.Response>) invocationOnMock.getArguments()[2];
+                    listener.onFailure(new RetentionLeaseNotFoundException(retentionLeaseId));
+                    return null;
+                }
+        ).when(remoteClient)
+                .execute(same(RetentionLeaseActions.Renew.INSTANCE), renewRequestCaptor.capture(), any());
+
+        repository.acquireRetentionLeaseOnLeader(followerShardId, retentionLeaseId, leaderShardId, remoteClient);
+
+        verify(remoteClient, times(2))
+                .execute(same(RetentionLeaseActions.Add.INSTANCE), any(RetentionLeaseActions.AddRequest.class), any());
+        assertThat(addRequestCaptor.getValue().getShardId(), equalTo(leaderShardId));
+        assertThat(addRequestCaptor.getValue().getId(), equalTo(retentionLeaseId));
+        assertThat(addRequestCaptor.getValue().getRetainingSequenceNumber(), equalTo(RETAIN_ALL));
+        assertThat(addRequestCaptor.getValue().getSource(), equalTo("ccr"));
+
+        verify(remoteClient).execute(same(RetentionLeaseActions.Renew.INSTANCE), any(RetentionLeaseActions.RenewRequest.class), any());
+        assertThat(renewRequestCaptor.getValue().getShardId(), equalTo(leaderShardId));
+        assertThat(renewRequestCaptor.getValue().getId(), equalTo(retentionLeaseId));
+        assertThat(renewRequestCaptor.getValue().getRetainingSequenceNumber(), equalTo(RETAIN_ALL));
+        assertThat(renewRequestCaptor.getValue().getSource(), equalTo("ccr"));
+
+        verifyNoMoreInteractions(remoteClient);
+    }
+
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java
index 7cb04a9e57a4b..a5da656b886ce 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java
@@ -553,6 +553,15 @@ public static boolean isMachineLearningAllowedForOperationMode(final OperationMo
         return isPlatinumOrTrialOperationMode(operationMode);
     }
 
+    /**
+     * Data Frame is always available as long as there is a valid license
+     *
+     * @return true if the license is active
+     */
+    public synchronized boolean isDataFrameAllowed() {
+        return status.active;
+    }
+
     /**
      * Rollup is always available as long as there is a valid license
      *
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java
index a6874a188534a..306a22253242f 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java
@@ -50,6 +50,7 @@ public final class ClientHelper {
     public static final String DEPRECATION_ORIGIN = "deprecation";
     public static final String PERSISTENT_TASK_ORIGIN = "persistent_tasks";
     public static final String ROLLUP_ORIGIN = "rollup";
+    public static final String DATA_FRAME_ORIGIN = "data_frame";
 
     private ClientHelper() {}
 
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java
index 6b1fcb67950e9..bc2e99a6cd049 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java
+++
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -42,6 +42,7 @@ import org.elasticsearch.xpack.core.beats.BeatsFeatureSetUsage; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.CCRFeatureSet; +import org.elasticsearch.xpack.core.dataframe.DataFrameFeatureSetUsage; import org.elasticsearch.xpack.core.deprecation.DeprecationInfoAction; import org.elasticsearch.xpack.core.graph.GraphFeatureSetUsage; import org.elasticsearch.xpack.core.graph.action.GraphExploreAction; @@ -439,8 +440,9 @@ public List getNamedWriteables() { new NamedWriteableRegistry.Entry(LifecycleAction.class, DeleteAction.NAME, DeleteAction::new), new NamedWriteableRegistry.Entry(LifecycleAction.class, FreezeAction.NAME, FreezeAction::new), new NamedWriteableRegistry.Entry(LifecycleAction.class, SetPriorityAction.NAME, SetPriorityAction::new), - new NamedWriteableRegistry.Entry(LifecycleAction.class, UnfollowAction.NAME, UnfollowAction::new) - ); + new NamedWriteableRegistry.Entry(LifecycleAction.class, UnfollowAction.NAME, UnfollowAction::new), + // Data Frame + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.DATA_FRAME, DataFrameFeatureSetUsage::new)); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java index 0c763032e22ca..a5baf4d4f9382 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java @@ -35,6 +35,8 @@ public final class XPackField { public static final String INDEX_LIFECYCLE = "ilm"; /** Name constant for the CCR feature. */ public static final String CCR = "ccr"; + /** Name constant for the data frame feature. */ + public static final String DATA_FRAME = "data_frame"; private XPackField() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java index dd8b1d5bb4681..0eeb173b8b84e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java @@ -44,6 +44,10 @@ private XPackSettings() { */ public static final Setting CCR_ENABLED_SETTING = Setting.boolSetting("xpack.ccr.enabled", true, Property.NodeScope); + /** Setting for enabling or disabling data frame. Defaults to true. */ + public static final Setting DATA_FRAME_ENABLED = Setting.boolSetting("xpack.data_frame.enabled", true, + Setting.Property.NodeScope); + /** Setting for enabling or disabling security. Defaults to true. 
*/ public static final Setting SECURITY_ENABLED = Setting.boolSetting("xpack.security.enabled", true, Setting.Property.NodeScope); @@ -209,6 +213,7 @@ public static List> getAllSettings() { settings.add(ROLLUP_ENABLED); settings.add(PASSWORD_HASHING_ALGORITHM); settings.add(INDEX_LIFECYCLE_ENABLED); + settings.add(DATA_FRAME_ENABLED); return Collections.unmodifiableList(settings); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java index 14fba91b3f522..a8758ed6c2d5a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java @@ -12,16 +12,15 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.ccr.action.ImmutableFollowParameters; import java.io.IOException; import java.util.Collections; @@ -32,17 +31,6 @@ import java.util.Objects; import java.util.stream.Collectors; -import static org.elasticsearch.xpack.core.ccr.action.FollowParameters.MAX_READ_REQUEST_OPERATION_COUNT; -import static org.elasticsearch.xpack.core.ccr.action.FollowParameters.MAX_READ_REQUEST_SIZE; -import static org.elasticsearch.xpack.core.ccr.action.FollowParameters.MAX_OUTSTANDING_READ_REQUESTS; -import static org.elasticsearch.xpack.core.ccr.action.FollowParameters.MAX_WRITE_REQUEST_OPERATION_COUNT; -import static org.elasticsearch.xpack.core.ccr.action.FollowParameters.MAX_WRITE_REQUEST_SIZE; -import static org.elasticsearch.xpack.core.ccr.action.FollowParameters.MAX_OUTSTANDING_WRITE_REQUESTS; -import static org.elasticsearch.xpack.core.ccr.action.FollowParameters.MAX_WRITE_BUFFER_COUNT; -import static org.elasticsearch.xpack.core.ccr.action.FollowParameters.MAX_WRITE_BUFFER_SIZE; -import static org.elasticsearch.xpack.core.ccr.action.FollowParameters.MAX_RETRY_DELAY; -import static org.elasticsearch.xpack.core.ccr.action.FollowParameters.READ_POLL_TIMEOUT; - /** * Custom metadata that contains auto follow patterns and what leader indices an auto follow pattern has already followed. 
*/ @@ -101,7 +89,7 @@ public AutoFollowMetadata(Map patterns, public AutoFollowMetadata(StreamInput in) throws IOException { this( - in.readMap(StreamInput::readString, AutoFollowPattern::new), + in.readMap(StreamInput::readString, AutoFollowPattern::readFrom), in.readMapOfLists(StreamInput::readString, StreamInput::readString), in.readMap(StreamInput::readString, valIn -> valIn.readMap(StreamInput::readString, StreamInput::readString)) ); @@ -186,7 +174,7 @@ public int hashCode() { return Objects.hash(patterns, followedLeaderIndexUUIDs, headers); } - public static class AutoFollowPattern implements Writeable, ToXContentObject { + public static class AutoFollowPattern extends ImmutableFollowParameters implements ToXContentObject { public static final ParseField REMOTE_CLUSTER_FIELD = new ParseField("remote_cluster"); public static final ParseField LEADER_PATTERNS_FIELD = new ParseField("leader_index_patterns"); @@ -196,96 +184,50 @@ public static class AutoFollowPattern implements Writeable, ToXContentObject { private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("auto_follow_pattern", args -> new AutoFollowPattern((String) args[0], (List) args[1], (String) args[2], (Integer) args[3], - (ByteSizeValue) args[4], (Integer) args[5], (Integer) args[6], (ByteSizeValue) args[7], (Integer) args[8], + (Integer) args[4], (Integer) args[5], (Integer) args[6], (ByteSizeValue) args[7], (ByteSizeValue) args[8], (Integer) args[9], (ByteSizeValue) args[10], (TimeValue) args[11], (TimeValue) args[12])); static { PARSER.declareString(ConstructingObjectParser.constructorArg(), REMOTE_CLUSTER_FIELD); PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), LEADER_PATTERNS_FIELD); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FOLLOW_PATTERN_FIELD); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_READ_REQUEST_OPERATION_COUNT); - PARSER.declareField( - ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_READ_REQUEST_SIZE.getPreferredName()), - MAX_READ_REQUEST_SIZE, - ObjectParser.ValueType.STRING); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_OUTSTANDING_READ_REQUESTS); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_WRITE_REQUEST_OPERATION_COUNT); - PARSER.declareField( - ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_REQUEST_SIZE.getPreferredName()), - MAX_WRITE_REQUEST_SIZE, - ObjectParser.ValueType.STRING); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_OUTSTANDING_WRITE_REQUESTS); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_WRITE_BUFFER_COUNT); - PARSER.declareField( - ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_BUFFER_SIZE.getPreferredName()), - MAX_WRITE_BUFFER_SIZE, - ObjectParser.ValueType.STRING); - PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_RETRY_DELAY.getPreferredName()), - MAX_RETRY_DELAY, ObjectParser.ValueType.STRING); - PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), READ_POLL_TIMEOUT.getPreferredName()), - READ_POLL_TIMEOUT, ObjectParser.ValueType.STRING); + ImmutableFollowParameters.initParser(PARSER); } private final String remoteCluster; 
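The static initializer above now registers all ten optional follow-parameter fields through a single ImmutableFollowParameters.initParser call instead of declaring each field inline. For reference, a parser for some other hypothetical subclass would be wired the same way; MyParams is an invented name here, assumed to extend ImmutableFollowParameters with its parameter values as constructor args [0..9] in the order initParser declares them:

    // Sketch: reuse the shared field declarations for a hypothetical ImmutableFollowParameters subclass.
    ConstructingObjectParser<MyParams, Void> parser = new ConstructingObjectParser<>(
        "my_params",
        args -> new MyParams((Integer) args[0], (Integer) args[1], (Integer) args[2], (Integer) args[3],
            (ByteSizeValue) args[4], (ByteSizeValue) args[5], (Integer) args[6], (ByteSizeValue) args[7],
            (TimeValue) args[8], (TimeValue) args[9]));
    ImmutableFollowParameters.initParser(parser);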
private final List leaderIndexPatterns; private final String followIndexPattern; - private final Integer maxReadRequestOperationCount; - private final ByteSizeValue maxReadRequestSize; - private final Integer maxOutstandingReadRequests; - private final Integer maxWriteRequestOperationCount; - private final ByteSizeValue maxWriteRequestSize; - private final Integer maxOutstandingWriteRequests; - private final Integer maxWriteBufferCount; - private final ByteSizeValue maxWriteBufferSize; - private final TimeValue maxRetryDelay; - private final TimeValue pollTimeout; public AutoFollowPattern(String remoteCluster, List leaderIndexPatterns, String followIndexPattern, Integer maxReadRequestOperationCount, - ByteSizeValue maxReadRequestSize, - Integer maxOutstandingReadRequests, Integer maxWriteRequestOperationCount, - ByteSizeValue maxWriteRequestSize, + Integer maxOutstandingReadRequests, Integer maxOutstandingWriteRequests, + ByteSizeValue maxReadRequestSize, + ByteSizeValue maxWriteRequestSize, Integer maxWriteBufferCount, - ByteSizeValue maxWriteBufferSize, TimeValue maxRetryDelay, + ByteSizeValue maxWriteBufferSize, + TimeValue maxRetryDelay, TimeValue pollTimeout) { + super(maxReadRequestOperationCount, maxWriteRequestOperationCount, maxOutstandingReadRequests, maxOutstandingWriteRequests, + maxReadRequestSize, maxWriteRequestSize, maxWriteBufferCount, maxWriteBufferSize, maxRetryDelay, pollTimeout); this.remoteCluster = remoteCluster; this.leaderIndexPatterns = leaderIndexPatterns; this.followIndexPattern = followIndexPattern; - this.maxReadRequestOperationCount = maxReadRequestOperationCount; - this.maxReadRequestSize = maxReadRequestSize; - this.maxOutstandingReadRequests = maxOutstandingReadRequests; - this.maxWriteRequestOperationCount = maxWriteRequestOperationCount; - this.maxWriteRequestSize = maxWriteRequestSize; - this.maxOutstandingWriteRequests = maxOutstandingWriteRequests; - this.maxWriteBufferCount = maxWriteBufferCount; - this.maxWriteBufferSize = maxWriteBufferSize; - this.maxRetryDelay = maxRetryDelay; - this.pollTimeout = pollTimeout; } - public AutoFollowPattern(StreamInput in) throws IOException { - remoteCluster = in.readString(); - leaderIndexPatterns = in.readStringList(); - followIndexPattern = in.readOptionalString(); - maxReadRequestOperationCount = in.readOptionalVInt(); - maxReadRequestSize = in.readOptionalWriteable(ByteSizeValue::new); - maxOutstandingReadRequests = in.readOptionalVInt(); - maxWriteRequestOperationCount = in.readOptionalVInt(); - maxWriteRequestSize = in.readOptionalWriteable(ByteSizeValue::new); - maxOutstandingWriteRequests = in.readOptionalVInt(); - maxWriteBufferCount = in.readOptionalVInt(); - maxWriteBufferSize = in.readOptionalWriteable(ByteSizeValue::new); - maxRetryDelay = in.readOptionalTimeValue(); - pollTimeout = in.readOptionalTimeValue(); + public static AutoFollowPattern readFrom(StreamInput in) throws IOException { + return new AutoFollowPattern(in.readString(), in.readStringList(), in.readOptionalString(), in); + } + + private AutoFollowPattern(String remoteCluster, List leaderIndexPatterns, + String followIndexPattern, StreamInput in) throws IOException { + super(in); + this.remoteCluster = remoteCluster; + this.leaderIndexPatterns = leaderIndexPatterns; + this.followIndexPattern = followIndexPattern; } public boolean match(String indexName) { @@ -308,61 +250,12 @@ public String getFollowIndexPattern() { return followIndexPattern; } - public Integer getMaxReadRequestOperationCount() { - return maxReadRequestOperationCount; - 
} - - public Integer getMaxOutstandingReadRequests() { - return maxOutstandingReadRequests; - } - - public ByteSizeValue getMaxReadRequestSize() { - return maxReadRequestSize; - } - - public Integer getMaxWriteRequestOperationCount() { - return maxWriteRequestOperationCount; - } - - public ByteSizeValue getMaxWriteRequestSize() { - return maxWriteRequestSize; - } - - public Integer getMaxOutstandingWriteRequests() { - return maxOutstandingWriteRequests; - } - - public Integer getMaxWriteBufferCount() { - return maxWriteBufferCount; - } - - public ByteSizeValue getMaxWriteBufferSize() { - return maxWriteBufferSize; - } - - public TimeValue getMaxRetryDelay() { - return maxRetryDelay; - } - - public TimeValue getPollTimeout() { - return pollTimeout; - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(remoteCluster); out.writeStringCollection(leaderIndexPatterns); out.writeOptionalString(followIndexPattern); - out.writeOptionalVInt(maxReadRequestOperationCount); - out.writeOptionalWriteable(maxReadRequestSize); - out.writeOptionalVInt(maxOutstandingReadRequests); - out.writeOptionalVInt(maxWriteRequestOperationCount); - out.writeOptionalWriteable(maxWriteRequestSize); - out.writeOptionalVInt(maxOutstandingWriteRequests); - out.writeOptionalVInt(maxWriteBufferCount); - out.writeOptionalWriteable(maxWriteBufferSize); - out.writeOptionalTimeValue(maxRetryDelay); - out.writeOptionalTimeValue(pollTimeout); + super.writeTo(out); } @Override @@ -372,36 +265,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (followIndexPattern != null) { builder.field(FOLLOW_PATTERN_FIELD.getPreferredName(), followIndexPattern); } - if (maxReadRequestOperationCount != null) { - builder.field(MAX_READ_REQUEST_OPERATION_COUNT.getPreferredName(), maxReadRequestOperationCount); - } - if (maxReadRequestSize != null) { - builder.field(MAX_READ_REQUEST_SIZE.getPreferredName(), maxReadRequestSize.getStringRep()); - } - if (maxOutstandingReadRequests != null) { - builder.field(MAX_OUTSTANDING_READ_REQUESTS.getPreferredName(), maxOutstandingReadRequests); - } - if (maxWriteRequestOperationCount != null) { - builder.field(MAX_WRITE_REQUEST_OPERATION_COUNT.getPreferredName(), maxWriteRequestOperationCount); - } - if (maxWriteRequestSize != null) { - builder.field(MAX_WRITE_REQUEST_SIZE.getPreferredName(), maxWriteRequestSize.getStringRep()); - } - if (maxOutstandingWriteRequests != null) { - builder.field(MAX_OUTSTANDING_WRITE_REQUESTS.getPreferredName(), maxOutstandingWriteRequests); - } - if (maxWriteBufferCount != null){ - builder.field(MAX_WRITE_BUFFER_COUNT.getPreferredName(), maxWriteBufferCount); - } - if (maxWriteBufferSize != null) { - builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize.getStringRep()); - } - if (maxRetryDelay != null) { - builder.field(MAX_RETRY_DELAY.getPreferredName(), maxRetryDelay); - } - if (pollTimeout != null) { - builder.field(READ_POLL_TIMEOUT.getPreferredName(), pollTimeout); - } + toXContentFragment(builder); return builder; } @@ -414,38 +278,16 @@ public boolean isFragment() { public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - AutoFollowPattern that = (AutoFollowPattern) o; - return Objects.equals(remoteCluster, that.remoteCluster) && - Objects.equals(leaderIndexPatterns, that.leaderIndexPatterns) && - Objects.equals(followIndexPattern, that.followIndexPattern) && - Objects.equals(maxReadRequestOperationCount, 
that.maxReadRequestOperationCount) && - Objects.equals(maxReadRequestSize, that.maxReadRequestSize) && - Objects.equals(maxOutstandingReadRequests, that.maxOutstandingReadRequests) && - Objects.equals(maxWriteRequestOperationCount, that.maxWriteRequestOperationCount) && - Objects.equals(maxWriteRequestSize, that.maxWriteRequestSize) && - Objects.equals(maxOutstandingWriteRequests, that.maxOutstandingWriteRequests) && - Objects.equals(maxWriteBufferCount, that.maxWriteBufferCount) && - Objects.equals(maxWriteBufferSize, that.maxWriteBufferSize) && - Objects.equals(maxRetryDelay, that.maxRetryDelay) && - Objects.equals(pollTimeout, that.pollTimeout); + if (!super.equals(o)) return false; + AutoFollowPattern pattern = (AutoFollowPattern) o; + return remoteCluster.equals(pattern.remoteCluster) && + leaderIndexPatterns.equals(pattern.leaderIndexPatterns) && + followIndexPattern.equals(pattern.followIndexPattern); } @Override public int hashCode() { - return Objects.hash( - remoteCluster, - leaderIndexPatterns, - followIndexPattern, - maxReadRequestOperationCount, - maxReadRequestSize, - maxOutstandingReadRequests, - maxWriteRequestOperationCount, - maxWriteRequestSize, - maxOutstandingWriteRequests, - maxWriteBufferCount, - maxWriteBufferSize, - maxRetryDelay, - pollTimeout); + return Objects.hash(super.hashCode(), remoteCluster, leaderIndexPatterns, followIndexPattern); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java index 098ba6dba6935..cd37692da43a3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java @@ -99,7 +99,7 @@ public Map getAutoFollowPatterns() { public Response(StreamInput in) throws IOException { super.readFrom(in); - autoFollowPatterns = in.readMap(StreamInput::readString, AutoFollowPattern::new); + autoFollowPatterns = in.readMap(StreamInput::readString, AutoFollowPattern::readFrom); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ImmutableFollowParameters.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ImmutableFollowParameters.java new file mode 100644 index 0000000000000..76d7f1c51f4da --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ImmutableFollowParameters.java @@ -0,0 +1,213 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ccr.action; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public class ImmutableFollowParameters implements Writeable { + + private final Integer maxReadRequestOperationCount; + private final Integer maxWriteRequestOperationCount; + private final Integer maxOutstandingReadRequests; + private final Integer maxOutstandingWriteRequests; + private final ByteSizeValue maxReadRequestSize; + private final ByteSizeValue maxWriteRequestSize; + private final Integer maxWriteBufferCount; + private final ByteSizeValue maxWriteBufferSize; + private final TimeValue maxRetryDelay; + private final TimeValue readPollTimeout; + + public ImmutableFollowParameters(Integer maxReadRequestOperationCount, Integer maxWriteRequestOperationCount, + Integer maxOutstandingReadRequests, Integer maxOutstandingWriteRequests, + ByteSizeValue maxReadRequestSize, ByteSizeValue maxWriteRequestSize, + Integer maxWriteBufferCount, ByteSizeValue maxWriteBufferSize, + TimeValue maxRetryDelay, TimeValue readPollTimeout) { + this.maxReadRequestOperationCount = maxReadRequestOperationCount; + this.maxWriteRequestOperationCount = maxWriteRequestOperationCount; + this.maxOutstandingReadRequests = maxOutstandingReadRequests; + this.maxOutstandingWriteRequests = maxOutstandingWriteRequests; + this.maxReadRequestSize = maxReadRequestSize; + this.maxWriteRequestSize = maxWriteRequestSize; + this.maxWriteBufferCount = maxWriteBufferCount; + this.maxWriteBufferSize = maxWriteBufferSize; + this.maxRetryDelay = maxRetryDelay; + this.readPollTimeout = readPollTimeout; + } + + public Integer getMaxReadRequestOperationCount() { + return maxReadRequestOperationCount; + } + + public ByteSizeValue getMaxReadRequestSize() { + return maxReadRequestSize; + } + + public Integer getMaxOutstandingReadRequests() { + return maxOutstandingReadRequests; + } + + public Integer getMaxWriteRequestOperationCount() { + return maxWriteRequestOperationCount; + } + + public ByteSizeValue getMaxWriteRequestSize() { + return maxWriteRequestSize; + } + + public Integer getMaxOutstandingWriteRequests() { + return maxOutstandingWriteRequests; + } + + public Integer getMaxWriteBufferCount() { + return maxWriteBufferCount; + } + + public ByteSizeValue getMaxWriteBufferSize() { + return maxWriteBufferSize; + } + + public TimeValue getMaxRetryDelay() { + return maxRetryDelay; + } + + public TimeValue getReadPollTimeout() { + return readPollTimeout; + } + + public ImmutableFollowParameters(StreamInput in) throws IOException { + maxReadRequestOperationCount = in.readOptionalVInt(); + maxReadRequestSize = in.readOptionalWriteable(ByteSizeValue::new); + maxOutstandingReadRequests = in.readOptionalVInt(); + maxWriteRequestOperationCount = in.readOptionalVInt(); + maxWriteRequestSize = in.readOptionalWriteable(ByteSizeValue::new); + maxOutstandingWriteRequests = in.readOptionalVInt(); + maxWriteBufferCount = in.readOptionalVInt(); + maxWriteBufferSize = in.readOptionalWriteable(ByteSizeValue::new); + maxRetryDelay = in.readOptionalTimeValue(); + readPollTimeout = 
in.readOptionalTimeValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalVInt(maxReadRequestOperationCount); + out.writeOptionalWriteable(maxReadRequestSize); + out.writeOptionalVInt(maxOutstandingReadRequests); + out.writeOptionalVInt(maxWriteRequestOperationCount); + out.writeOptionalWriteable(maxWriteRequestSize); + out.writeOptionalVInt(maxOutstandingWriteRequests); + out.writeOptionalVInt(maxWriteBufferCount); + out.writeOptionalWriteable(maxWriteBufferSize); + out.writeOptionalTimeValue(maxRetryDelay); + out.writeOptionalTimeValue(readPollTimeout); + + } + + protected XContentBuilder toXContentFragment(final XContentBuilder builder) throws IOException { + if (maxReadRequestOperationCount != null) { + builder.field(FollowParameters.MAX_READ_REQUEST_OPERATION_COUNT.getPreferredName(), maxReadRequestOperationCount); + } + if (maxWriteRequestOperationCount != null) { + builder.field(FollowParameters.MAX_WRITE_REQUEST_OPERATION_COUNT.getPreferredName(), maxWriteRequestOperationCount); + } + if (maxOutstandingReadRequests != null) { + builder.field(FollowParameters.MAX_OUTSTANDING_READ_REQUESTS.getPreferredName(), maxOutstandingReadRequests); + } + if (maxOutstandingWriteRequests != null) { + builder.field(FollowParameters.MAX_OUTSTANDING_WRITE_REQUESTS.getPreferredName(), maxOutstandingWriteRequests); + } + if (maxReadRequestSize != null) { + builder.field(FollowParameters.MAX_READ_REQUEST_SIZE.getPreferredName(), maxReadRequestSize.getStringRep()); + } + if (maxWriteRequestSize != null) { + builder.field(FollowParameters.MAX_WRITE_REQUEST_SIZE.getPreferredName(), maxWriteRequestSize.getStringRep()); + } + if (maxWriteBufferCount != null) { + builder.field(FollowParameters.MAX_WRITE_BUFFER_COUNT.getPreferredName(), maxWriteBufferCount); + } + if (maxWriteBufferSize != null) { + builder.field(FollowParameters.MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize.getStringRep()); + } + if (maxRetryDelay != null) { + builder.field(FollowParameters.MAX_RETRY_DELAY.getPreferredName(), maxRetryDelay.getStringRep()); + } + if (readPollTimeout != null) { + builder.field(FollowParameters.READ_POLL_TIMEOUT.getPreferredName(), readPollTimeout.getStringRep()); + } + return builder; + } + + public static
<P extends ImmutableFollowParameters>
void initParser(ConstructingObjectParser parser) { + parser.declareInt(ConstructingObjectParser.optionalConstructorArg(), FollowParameters.MAX_READ_REQUEST_OPERATION_COUNT); + parser.declareInt(ConstructingObjectParser.optionalConstructorArg(), FollowParameters.MAX_WRITE_REQUEST_OPERATION_COUNT); + parser.declareInt(ConstructingObjectParser.optionalConstructorArg(), FollowParameters.MAX_OUTSTANDING_READ_REQUESTS); + parser.declareInt(ConstructingObjectParser.optionalConstructorArg(), FollowParameters.MAX_OUTSTANDING_WRITE_REQUESTS); + parser.declareField( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowParameters.MAX_READ_REQUEST_SIZE.getPreferredName()), + FollowParameters.MAX_READ_REQUEST_SIZE, + ObjectParser.ValueType.STRING); + parser.declareField( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowParameters.MAX_WRITE_REQUEST_SIZE.getPreferredName()), + FollowParameters.MAX_WRITE_REQUEST_SIZE, + ObjectParser.ValueType.STRING); + parser.declareInt(ConstructingObjectParser.optionalConstructorArg(), FollowParameters.MAX_WRITE_BUFFER_COUNT); + parser.declareField( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowParameters.MAX_WRITE_BUFFER_SIZE.getPreferredName()), + FollowParameters.MAX_WRITE_BUFFER_SIZE, + ObjectParser.ValueType.STRING); + parser.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), FollowParameters.MAX_RETRY_DELAY.getPreferredName()), + FollowParameters.MAX_RETRY_DELAY, ObjectParser.ValueType.STRING); + parser.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), FollowParameters.READ_POLL_TIMEOUT.getPreferredName()), + FollowParameters.READ_POLL_TIMEOUT, ObjectParser.ValueType.STRING); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o instanceof ImmutableFollowParameters == false) return false; + ImmutableFollowParameters that = (ImmutableFollowParameters) o; + return Objects.equals(maxReadRequestOperationCount, that.maxReadRequestOperationCount) && + Objects.equals(maxWriteRequestOperationCount, that.maxWriteRequestOperationCount) && + Objects.equals(maxOutstandingReadRequests, that.maxOutstandingReadRequests) && + Objects.equals(maxOutstandingWriteRequests, that.maxOutstandingWriteRequests) && + Objects.equals(maxReadRequestSize, that.maxReadRequestSize) && + Objects.equals(maxWriteRequestSize, that.maxWriteRequestSize) && + Objects.equals(maxWriteBufferCount, that.maxWriteBufferCount) && + Objects.equals(maxWriteBufferSize, that.maxWriteBufferSize) && + Objects.equals(maxRetryDelay, that.maxRetryDelay) && + Objects.equals(readPollTimeout, that.readPollTimeout); + } + + @Override + public int hashCode() { + return Objects.hash( + maxReadRequestOperationCount, + maxWriteRequestOperationCount, + maxOutstandingReadRequests, + maxOutstandingWriteRequests, + maxReadRequestSize, + maxWriteRequestSize, + maxWriteBufferCount, + maxWriteBufferSize, + maxRetryDelay, + readPollTimeout + ); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameFeatureSetUsage.java new file mode 100644 index 0000000000000..48cdcd37572c9 --- /dev/null +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameFeatureSetUsage.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.dataframe; + +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.XPackFeatureSet.Usage; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.dataframe.transform.DataFrameIndexerTransformStats; + +import java.io.IOException; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; + +public class DataFrameFeatureSetUsage extends Usage { + + private final Map transformCountByState; + private final DataFrameIndexerTransformStats accumulatedStats; + + public DataFrameFeatureSetUsage(StreamInput in) throws IOException { + super(in); + this.transformCountByState = in.readMap(StreamInput::readString, StreamInput::readLong); + this.accumulatedStats = new DataFrameIndexerTransformStats(in); + } + + public DataFrameFeatureSetUsage(boolean available, boolean enabled, Map transformCountByState, + DataFrameIndexerTransformStats accumulatedStats) { + super(XPackField.DATA_FRAME, available, enabled); + this.transformCountByState = Objects.requireNonNull(transformCountByState); + this.accumulatedStats = Objects.requireNonNull(accumulatedStats); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(transformCountByState, StreamOutput::writeString, StreamOutput::writeLong); + accumulatedStats.writeTo(out); + } + + @Override + protected void innerXContent(XContentBuilder builder, Params params) throws IOException { + super.innerXContent(builder, params); + if (transformCountByState.isEmpty() == false) { + builder.startObject(DataFrameField.TRANSFORMS.getPreferredName()); + long all = 0L; + for (Entry entry : transformCountByState.entrySet()) { + builder.field(entry.getKey(), entry.getValue()); + all+=entry.getValue(); + } + builder.field(MetaData.ALL, all); + builder.endObject(); + + // if there are no transforms, do not show any stats + builder.field(DataFrameField.STATS_FIELD.getPreferredName(), accumulatedStats); + } + } + + @Override + public int hashCode() { + return Objects.hash(enabled, available, transformCountByState, accumulatedStats); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + DataFrameFeatureSetUsage other = (DataFrameFeatureSetUsage) obj; + return Objects.equals(name, other.name) && available == other.available && enabled == other.enabled + && Objects.equals(transformCountByState, other.transformCountByState) + && Objects.equals(accumulatedStats, other.accumulatedStats); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java new file mode 100644 index 0000000000000..9749cd915b54e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java @@ -0,0 +1,46 @@ +/* + * 
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.dataframe; + +import org.elasticsearch.common.ParseField; + +/* + * Utility class to hold common fields and strings for data frame. + */ +public final class DataFrameField { + + // common parse fields + public static final ParseField AGGREGATIONS = new ParseField("aggregations"); + public static final ParseField AGGS = new ParseField("aggs"); + public static final ParseField ID = new ParseField("id"); + public static final ParseField TRANSFORMS = new ParseField("transforms"); + public static final ParseField COUNT = new ParseField("count"); + public static final ParseField GROUP_BY = new ParseField("group_by"); + public static final ParseField TIMEOUT = new ParseField("timeout"); + public static final ParseField WAIT_FOR_COMPLETION = new ParseField("wait_for_completion"); + public static final ParseField STATS_FIELD = new ParseField("stats"); + + // common strings + public static final String TASK_NAME = "data_frame/transforms"; + public static final String REST_BASE_PATH = "/_data_frame/"; + public static final String REST_BASE_PATH_TRANSFORMS_BY_ID = REST_BASE_PATH + "transforms/{id}/"; + + // note: this is used to match tasks + public static final String PERSISTENT_TASK_DESCRIPTION_PREFIX = "data_frame_"; + + // strings for meta information + public static final String META_FIELDNAME = "_data_frame"; + public static final String CREATION_DATE_MILLIS = "creation_date_in_millis"; + public static final String VERSION = "version"; + public static final String CREATED = "created"; + public static final String CREATED_BY = "created_by"; + public static final String TRANSFORM = "transform"; + public static final String DATA_FRAME_SIGNATURE = "data-frame-transform"; + + private DataFrameField() { + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java new file mode 100644 index 0000000000000..a395dcdb3dfd9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.dataframe; + +import java.text.MessageFormat; +import java.util.Locale; + +public class DataFrameMessages { + + public static final String REST_STOP_TRANSFORM_WAIT_FOR_COMPLETION_TIMEOUT = + "Timed out after [{0}] while waiting for data frame transform [{1}] to stop"; + public static final String REST_STOP_TRANSFORM_WAIT_FOR_COMPLETION_INTERRUPT = + "Interrupted while waiting for data frame transform [{0}] to stop"; + public static final String REST_PUT_DATA_FRAME_TRANSFORM_EXISTS = "Transform with id [{0}] already exists"; + public static final String REST_DATA_FRAME_UNKNOWN_TRANSFORM = "Transform with id [{0}] could not be found"; + public static final String REST_PUT_DATA_FRAME_FAILED_TO_VALIDATE_DATA_FRAME_CONFIGURATION = + "Failed to validate data frame configuration"; + public static final String REST_PUT_DATA_FRAME_FAILED_PERSIST_TRANSFORM_CONFIGURATION = "Failed to persist data frame configuration"; + public static final String REST_PUT_DATA_FRAME_FAILED_TO_DEDUCE_TARGET_MAPPINGS = "Failed to deduce target mappings"; + public static final String REST_PUT_DATA_FRAME_FAILED_TO_CREATE_TARGET_INDEX = "Failed to create target index"; + public static final String REST_PUT_DATA_FRAME_FAILED_TO_START_PERSISTENT_TASK = + "Failed to start persistent task, configuration has been cleaned up: [{0}]"; + public static final String REST_DATA_FRAME_FAILED_TO_SERIALIZE_TRANSFORM = "Failed to serialise transform [{0}]"; + + public static final String FAILED_TO_CREATE_DESTINATION_INDEX = "Could not create destination index [{0}] for transform[{1}]"; + public static final String FAILED_TO_LOAD_TRANSFORM_CONFIGURATION = + "Failed to load data frame transform configuration for transform [{0}]"; + public static final String FAILED_TO_PARSE_TRANSFORM_CONFIGURATION = + "Failed to parse transform configuration for data frame transform [{0}]"; + public static final String DATA_FRAME_TRANSFORM_CONFIGURATION_NO_TRANSFORM = + "Data frame transform configuration must specify exactly 1 function"; + public static final String DATA_FRAME_TRANSFORM_CONFIGURATION_PIVOT_NO_GROUP_BY = + "Data frame pivot transform configuration must specify at least 1 group_by"; + public static final String DATA_FRAME_TRANSFORM_CONFIGURATION_PIVOT_NO_AGGREGATION = + "Data frame pivot transform configuration must specify at least 1 aggregation"; + public static final String DATA_FRAME_TRANSFORM_PIVOT_FAILED_TO_CREATE_COMPOSITE_AGGREGATION = + "Failed to create composite aggregation from pivot function"; + public static final String DATA_FRAME_TRANSFORM_CONFIGURATION_INVALID = + "Data frame transform configuration [{0}] has invalid elements"; + + public static final String LOG_DATA_FRAME_TRANSFORM_CONFIGURATION_BAD_QUERY = + "Failed to parse query for data frame transform"; + public static final String LOG_DATA_FRAME_TRANSFORM_CONFIGURATION_BAD_GROUP_BY = + "Failed to parse group_by for data frame pivot transform"; + public static final String LOG_DATA_FRAME_TRANSFORM_CONFIGURATION_BAD_AGGREGATION = + "Failed to parse aggregation for data frame pivot transform"; + + private DataFrameMessages() { + } + + /** + * Returns the message parameter + * + * @param message Should be one of the statics defined in this class + */ + public static String getMessage(String message) { + return message; + } + + /** + * Format the message with the supplied arguments + * + * @param message Should be one of the statics defined in this class + * @param args MessageFormat arguments. 
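+     * Arguments are zero-indexed: {0} in the message is replaced by args[0]. Note that
+     * MessageFormat treats a single quote as an escape character, so a literal quote
+     * inside a message must be written as ''.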
See {@linkplain MessageFormat#format(Object)}
+     */
+    public static String getMessage(String message, Object... args) {
+        return new MessageFormat(message, Locale.ROOT).format(args);
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transform/DataFrameIndexerTransformStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transform/DataFrameIndexerTransformStats.java
new file mode 100644
index 0000000000000..d6778e8656452
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transform/DataFrameIndexerTransformStats.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.dataframe.transform;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.xpack.core.indexing.IndexerJobStats;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+
+public class DataFrameIndexerTransformStats extends IndexerJobStats {
+    private static final ParseField NUM_PAGES = new ParseField("pages_processed");
+    private static final ParseField NUM_INPUT_DOCUMENTS = new ParseField("documents_processed");
+    private static final ParseField NUM_OUTPUT_DOCUMENTS = new ParseField("documents_indexed");
+    private static final ParseField NUM_INVOCATIONS = new ParseField("trigger_count");
+    private static final ParseField INDEX_TIME_IN_MS = new ParseField("index_time_in_ms");
+    private static final ParseField SEARCH_TIME_IN_MS = new ParseField("search_time_in_ms");
+    private static final ParseField INDEX_TOTAL = new ParseField("index_total");
+    private static final ParseField SEARCH_TOTAL = new ParseField("search_total");
+    private static final ParseField SEARCH_FAILURES = new ParseField("search_failures");
+    private static final ParseField INDEX_FAILURES = new ParseField("index_failures");
+
+    public static final ConstructingObjectParser<DataFrameIndexerTransformStats, Void> PARSER = new ConstructingObjectParser<>(
+            NAME.getPreferredName(), args -> new DataFrameIndexerTransformStats((long) args[0], (long) args[1], (long) args[2],
+            (long) args[3], (long) args[4], (long) args[5], (long) args[6], (long) args[7], (long) args[8], (long) args[9]));
+
+    static {
+        PARSER.declareLong(constructorArg(), NUM_PAGES);
+        PARSER.declareLong(constructorArg(), NUM_INPUT_DOCUMENTS);
+        PARSER.declareLong(constructorArg(), NUM_OUTPUT_DOCUMENTS);
+        PARSER.declareLong(constructorArg(), NUM_INVOCATIONS);
+        PARSER.declareLong(constructorArg(), INDEX_TIME_IN_MS);
+        PARSER.declareLong(constructorArg(), SEARCH_TIME_IN_MS);
+        PARSER.declareLong(constructorArg(), INDEX_TOTAL);
+        PARSER.declareLong(constructorArg(), SEARCH_TOTAL);
+        PARSER.declareLong(constructorArg(), INDEX_FAILURES);
+        PARSER.declareLong(constructorArg(), SEARCH_FAILURES);
+    }
+
+    public DataFrameIndexerTransformStats() {
+        super();
+    }
+
+    public DataFrameIndexerTransformStats(long numPages, long numInputDocuments, long numOuputDocuments, long numInvocations,
+            long indexTime, long searchTime, long indexTotal, long searchTotal, long indexFailures, long searchFailures) {
+        super(numPages,
numInputDocuments, numOuputDocuments, numInvocations, indexTime, searchTime, indexTotal, searchTotal, indexFailures, + searchFailures); + } + + public DataFrameIndexerTransformStats(StreamInput in) throws IOException { + super(in); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(NUM_PAGES.getPreferredName(), numPages); + builder.field(NUM_INPUT_DOCUMENTS.getPreferredName(), numInputDocuments); + builder.field(NUM_OUTPUT_DOCUMENTS.getPreferredName(), numOuputDocuments); + builder.field(NUM_INVOCATIONS.getPreferredName(), numInvocations); + builder.field(INDEX_TIME_IN_MS.getPreferredName(), indexTime); + builder.field(INDEX_TOTAL.getPreferredName(), indexTotal); + builder.field(INDEX_FAILURES.getPreferredName(), indexFailures); + builder.field(SEARCH_TIME_IN_MS.getPreferredName(), searchTime); + builder.field(SEARCH_TOTAL.getPreferredName(), searchTotal); + builder.field(SEARCH_FAILURES.getPreferredName(), searchFailures); + builder.endObject(); + return builder; + } + + public DataFrameIndexerTransformStats merge(DataFrameIndexerTransformStats other) { + numPages += other.numPages; + numInputDocuments += other.numInputDocuments; + numOuputDocuments += other.numOuputDocuments; + numInvocations += other.numInvocations; + indexTime += other.indexTime; + searchTime += other.searchTime; + indexTotal += other.indexTotal; + searchTotal += other.searchTotal; + indexFailures += other.indexFailures; + searchFailures += other.searchFailures; + + return this; + } + + public static DataFrameIndexerTransformStats fromXContent(XContentParser parser) { + try { + return PARSER.parse(parser, null); + } catch (IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transform/DataFrameTransformState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transform/DataFrameTransformState.java new file mode 100644 index 0000000000000..2338ec1f4dafb --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transform/DataFrameTransformState.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.dataframe.transform; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.persistent.PersistentTaskState; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.indexing.IndexerState; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.SortedMap; +import java.util.TreeMap; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public class DataFrameTransformState implements Task.Status, PersistentTaskState { + public static final String NAME = DataFrameField.TASK_NAME; + + private final IndexerState state; + private final long generation; + + @Nullable + private final SortedMap currentPosition; + + private static final ParseField STATE = new ParseField("transform_state"); + private static final ParseField CURRENT_POSITION = new ParseField("current_position"); + private static final ParseField GENERATION = new ParseField("generation"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + args -> new DataFrameTransformState((IndexerState) args[0], (HashMap) args[1], (long) args[2])); + + static { + PARSER.declareField(constructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return IndexerState.fromString(p.text()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + + }, STATE, ObjectParser.ValueType.STRING); + PARSER.declareField(optionalConstructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.START_OBJECT) { + return p.map(); + } + if (p.currentToken() == XContentParser.Token.VALUE_NULL) { + return null; + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, CURRENT_POSITION, ObjectParser.ValueType.VALUE_OBJECT_ARRAY); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), GENERATION); + } + + public DataFrameTransformState(IndexerState state, @Nullable Map position, long generation) { + this.state = state; + this.currentPosition = position == null ? null : Collections.unmodifiableSortedMap(new TreeMap<>(position)); + this.generation = generation; + } + + public DataFrameTransformState(StreamInput in) throws IOException { + state = IndexerState.fromStream(in); + currentPosition = in.readBoolean() ? 
Collections.unmodifiableSortedMap(new TreeMap<>(in.readMap())) : null; + generation = in.readLong(); + } + + public IndexerState getIndexerState() { + return state; + } + + public Map getPosition() { + return currentPosition; + } + + public long getGeneration() { + return generation; + } + + public static DataFrameTransformState fromXContent(XContentParser parser) { + try { + return PARSER.parse(parser, null); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(STATE.getPreferredName(), state.value()); + if (currentPosition != null) { + builder.field(CURRENT_POSITION.getPreferredName(), currentPosition); + } + builder.field(GENERATION.getPreferredName(), generation); + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + state.writeTo(out); + out.writeBoolean(currentPosition != null); + if (currentPosition != null) { + out.writeMap(currentPosition); + } + out.writeLong(generation); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + DataFrameTransformState that = (DataFrameTransformState) other; + + return Objects.equals(this.state, that.state) && Objects.equals(this.currentPosition, that.currentPosition) + && this.generation == that.generation; + } + + @Override + public int hashCode() { + return Objects.hash(state, currentPosition, generation); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java index ec3305a963c55..523a810174912 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.security.authz.privilege; +import org.elasticsearch.index.seqno.RetentionLeaseActions; import org.elasticsearch.index.seqno.RetentionLeaseBackgroundSyncAction; import org.elasticsearch.index.seqno.RetentionLeaseSyncAction; import org.elasticsearch.transport.TransportActionProxy; @@ -29,6 +30,8 @@ public final class SystemPrivilege extends Privilege { "indices:admin/seq_no/global_checkpoint_sync*", // needed for global checkpoint syncs RetentionLeaseSyncAction.ACTION_NAME + "*", // needed for retention lease syncs RetentionLeaseBackgroundSyncAction.ACTION_NAME + "*", // needed for background retention lease syncs + RetentionLeaseActions.Add.ACTION_NAME + "*", // needed for CCR to add retention leases + RetentionLeaseActions.Renew.ACTION_NAME + "*", // needed for CCR to renew retention leases "indices:admin/settings/update" // needed for DiskThresholdMonitor.markIndicesReadOnly ); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchResponse.java index 188c49963151f..6d12bb9d5f811 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchResponse.java @@ -38,7 +38,7 @@ public WatchStatus getStatus() { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - status = in.readBoolean() ? WatchStatus.read(in) : null; + status = in.readBoolean() ? new WatchStatus(in) : null; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchResponse.java index 0c92fc046722a..bf43bbbe3c54d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchResponse.java @@ -38,7 +38,7 @@ public WatchStatus getStatus() { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - status = in.readBoolean() ? WatchStatus.read(in) : null; + status = in.readBoolean() ? new WatchStatus(in) : null; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchResponse.java index c612bc0e9ef55..18ec33f5dfb0d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchResponse.java @@ -92,7 +92,7 @@ public void readFrom(StreamInput in) throws IOException { id = in.readString(); found = in.readBoolean(); if (found) { - status = WatchStatus.read(in); + status = new WatchStatus(in); source = XContentSource.readFrom(in); version = in.readZLong(); seqNo = in.readZLong(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java index df63022aa5734..0f4361aa87b06 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -36,7 +37,7 @@ import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.writeDate; import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.writeOptionalDate; -public class WatchStatus implements ToXContentObject, Streamable { +public class WatchStatus implements ToXContentObject, Streamable, Writeable { public static final String INCLUDE_STATE = "include_state"; @@ -49,8 +50,26 @@ public class WatchStatus implements ToXContentObject, Streamable { @Nullable private Map headers; private Map 
actions; - // for serialization - private WatchStatus() { + public WatchStatus(StreamInput in) throws IOException { + version = in.readLong(); + lastChecked = readOptionalDate(in); + lastMetCondition = readOptionalDate(in); + int count = in.readInt(); + Map actions = new HashMap<>(count); + for (int i = 0; i < count; i++) { + actions.put(in.readString(), ActionStatus.readFrom(in)); + } + this.actions = unmodifiableMap(actions); + state = new State(in.readBoolean(), Instant.ofEpochMilli(in.readLong()).atZone(ZoneOffset.UTC)); + boolean executionStateExists = in.readBoolean(); + if (executionStateExists) { + executionState = ExecutionState.resolve(in.readString()); + } + if (in.readBoolean()) { + headers = in.readMap(StreamInput::readString, StreamInput::readString); + } else { + headers = Collections.emptyMap(); + } } public WatchStatus(ZonedDateTime now, Map actions) { @@ -222,31 +241,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public void readFrom(StreamInput in) throws IOException { - version = in.readLong(); - lastChecked = readOptionalDate(in); - lastMetCondition = readOptionalDate(in); - int count = in.readInt(); - Map actions = new HashMap<>(count); - for (int i = 0; i < count; i++) { - actions.put(in.readString(), ActionStatus.readFrom(in)); - } - this.actions = unmodifiableMap(actions); - state = new State(in.readBoolean(), Instant.ofEpochMilli(in.readLong()).atZone(ZoneOffset.UTC)); - boolean executionStateExists = in.readBoolean(); - if (executionStateExists) { - executionState = ExecutionState.resolve(in.readString()); - } - if (in.readBoolean()) { - headers = in.readMap(StreamInput::readString, StreamInput::readString); - } else { - headers = Collections.emptyMap(); - } - } - - public static WatchStatus read(StreamInput in) throws IOException { - WatchStatus status = new WatchStatus(); - status.readFrom(in); - return status; + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/DataFrameFeatureSetUsageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/DataFrameFeatureSetUsageTests.java new file mode 100644 index 0000000000000..f9b741d335587 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/DataFrameFeatureSetUsageTests.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.dataframe; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.dataframe.transform.DataFrameIndexerTransformStatsTests; +import org.elasticsearch.xpack.core.indexing.IndexerState; + +import java.util.HashMap; +import java.util.Map; + +public class DataFrameFeatureSetUsageTests extends AbstractWireSerializingTestCase { + + @Override + protected DataFrameFeatureSetUsage createTestInstance() { + Map transformCountByState = new HashMap<>(); + + if (randomBoolean()) { + transformCountByState.put(randomFrom(IndexerState.values()).toString(), randomLong()); + } + + return new DataFrameFeatureSetUsage(randomBoolean(), randomBoolean(), transformCountByState, + DataFrameIndexerTransformStatsTests.randomStats()); + } + + @Override + protected Reader instanceReader() { + return DataFrameFeatureSetUsage::new; + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessagesTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessagesTests.java new file mode 100644 index 0000000000000..fc67dc8ce64e8 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessagesTests.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.dataframe; + +import org.elasticsearch.test.ESTestCase; + +import java.lang.reflect.Field; +import java.text.MessageFormat; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +public class DataFrameMessagesTests extends ESTestCase { + + public void testGetMessage_WithFormatStrings() { + String formattedMessage = DataFrameMessages.getMessage(DataFrameMessages.REST_STOP_TRANSFORM_WAIT_FOR_COMPLETION_TIMEOUT, "30s", + "my_transform"); + assertEquals("Timed out after [30s] while waiting for data frame transform [my_transform] to stop", formattedMessage); + } + + public void testMessageProperFormat() throws IllegalArgumentException, IllegalAccessException { + Field[] declaredFields = DataFrameMessages.class.getFields(); + int checkedMessages = 0; + + for (Field field : declaredFields) { + int modifiers = field.getModifiers(); + if (java.lang.reflect.Modifier.isStatic(modifiers) && java.lang.reflect.Modifier.isFinal(modifiers) + && field.getType().isAssignableFrom(String.class)) { + + assertSingleMessage((String) field.get(DataFrameMessages.class)); + ++checkedMessages; + } + } + assertTrue(checkedMessages > 0); + logger.info("Checked {} messages", checkedMessages); + } + + public void testAssertSingleMessage() { + expectThrows(RuntimeException.class, () -> innerAssertSingleMessage("missing zero position {1} {1}")); + expectThrows(RuntimeException.class, () -> innerAssertSingleMessage("incomplete {}")); + expectThrows(RuntimeException.class, () -> innerAssertSingleMessage("count from 1 {1}")); + } + + private void assertSingleMessage(String message) { + // for testing the test method, we can not assert directly, but wrap it with an exception, which also + // nicely encapsulate parsing errors thrown by MessageFormat itself + try { + innerAssertSingleMessage(message); + } catch (Exception e) { + fail(e.getMessage()); + } + } + + 
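+    // formats the message with one random string per placeholder and checks that every
+    // argument appears in the result; a missing argument means the format string is broken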
private void innerAssertSingleMessage(String message) {
+        MessageFormat messageWithNoArguments = new MessageFormat(message, Locale.ROOT);
+        int numberOfArguments = messageWithNoArguments.getFormats().length;
+
+        List<String> args = new ArrayList<>();
+        for (int i = 0; i < numberOfArguments; ++i) {
+            args.add(randomAlphaOfLength(5));
+        }
+
+        String properFormattedMessage = new MessageFormat(message, Locale.ROOT).format(args.toArray(new String[0]));
+        for (String arg : args) {
+            if (properFormattedMessage.contains(arg) == false) {
+                throw new RuntimeException("Message check: [" + message + "] failed, missing argument");
+            }
+        }
+    }
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transform/DataFrameIndexerTransformStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transform/DataFrameIndexerTransformStatsTests.java
new file mode 100644
index 0000000000000..2a762584b88d6
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transform/DataFrameIndexerTransformStatsTests.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.dataframe.transform;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractSerializingTestCase;
+
+import java.io.IOException;
+
+public class DataFrameIndexerTransformStatsTests extends AbstractSerializingTestCase<DataFrameIndexerTransformStats> {
+    @Override
+    protected DataFrameIndexerTransformStats createTestInstance() {
+        return randomStats();
+    }
+
+    @Override
+    protected Writeable.Reader<DataFrameIndexerTransformStats> instanceReader() {
+        return DataFrameIndexerTransformStats::new;
+    }
+
+    @Override
+    protected DataFrameIndexerTransformStats doParseInstance(XContentParser parser) {
+        return DataFrameIndexerTransformStats.fromXContent(parser);
+    }
+
+    public static DataFrameIndexerTransformStats randomStats() {
+        return new DataFrameIndexerTransformStats(randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L),
+            randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L),
+            randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L));
+    }
+
+    public void testMerge() throws IOException {
+        DataFrameIndexerTransformStats emptyStats = new DataFrameIndexerTransformStats();
+        DataFrameIndexerTransformStats randomStats = randomStats();
+
+        assertEquals(randomStats, emptyStats.merge(randomStats));
+        assertEquals(randomStats, randomStats.merge(emptyStats));
+
+        DataFrameIndexerTransformStats randomStatsClone = copyInstance(randomStats);
+
+        DataFrameIndexerTransformStats tripleRandomStats = new DataFrameIndexerTransformStats(3 * randomStats.getNumPages(),
+            3 * randomStats.getNumDocuments(), 3 * randomStats.getOutputDocuments(), 3 * randomStats.getNumInvocations(),
+            3 * randomStats.getIndexTime(), 3 * randomStats.getSearchTime(), 3 * randomStats.getIndexTotal(),
+            3 * randomStats.getSearchTotal(), 3 * randomStats.getIndexFailures(), 3 * randomStats.getSearchFailures());
+
+        assertEquals(tripleRandomStats, randomStats.merge(randomStatsClone).merge(randomStatsClone));
+    }
+}
diff --git
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transform/DataFrameTransformStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transform/DataFrameTransformStateTests.java new file mode 100644 index 0000000000000..df5a377d57b02 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transform/DataFrameTransformStateTests.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.dataframe.transform; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.dataframe.transform.DataFrameTransformState; +import org.elasticsearch.xpack.core.indexing.IndexerState; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class DataFrameTransformStateTests extends AbstractSerializingTestCase { + + public static DataFrameTransformState randomDataFrameTransformState() { + return new DataFrameTransformState(randomFrom(IndexerState.values()), randomPosition(), randomLongBetween(0,10)); + } + + @Override + protected DataFrameTransformState doParseInstance(XContentParser parser) throws IOException { + return DataFrameTransformState.fromXContent(parser); + } + + @Override + protected DataFrameTransformState createTestInstance() { + return randomDataFrameTransformState(); + } + + @Override + protected Reader instanceReader() { + return DataFrameTransformState::new; + } + + private static Map randomPosition() { + if (randomBoolean()) { + return null; + } + int numFields = randomIntBetween(1, 5); + Map position = new HashMap<>(); + for (int i = 0; i < numFields; i++) { + Object value; + if (randomBoolean()) { + value = randomLong(); + } else { + value = randomAlphaOfLengthBetween(1, 10); + } + position.put(randomAlphaOfLengthBetween(3, 10), value); + } + return position; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java index 906b00ccab0fc..46db9e83f7740 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java @@ -133,6 +133,10 @@ public void testSystem() throws Exception { assertThat(predicate.test("indices:admin/seq_no/retention_lease_background_sync"), is(true)); assertThat(predicate.test("indices:admin/seq_no/retention_lease_background_sync[p]"), is(true)); assertThat(predicate.test("indices:admin/seq_no/retention_lease_background_sync[r]"), is(true)); + assertThat(predicate.test("indices:admin/seq_no/add_retention_lease"), is(true)); + assertThat(predicate.test("indices:admin/seq_no/add_retention_lease[s]"), is(true)); + assertThat(predicate.test("indices:admin/seq_no/renew_retention_lease"), is(true)); + assertThat(predicate.test("indices:admin/seq_no/renew_retention_lease[s]"), is(true)); assertThat(predicate.test("indices:admin/settings/update"), is(true)); 
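+        // any other settings action must stay outside the system privilege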
assertThat(predicate.test("indices:admin/settings/foo"), is(false)); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java index 28d0ccd682f52..24804129bde6c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java @@ -30,12 +30,6 @@ public final class XPackRestTestHelper { - public static final List ML_PRE_V660_TEMPLATES = Collections.unmodifiableList( - Arrays.asList(AuditorField.NOTIFICATIONS_INDEX, - MlMetaIndex.INDEX_NAME, - AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX, - AnomalyDetectorsIndex.jobResultsIndexPrefix())); - public static final List ML_POST_V660_TEMPLATES = Collections.unmodifiableList( Arrays.asList(AuditorField.NOTIFICATIONS_INDEX, MlMetaIndex.INDEX_NAME, diff --git a/x-pack/plugin/data-frame/build.gradle b/x-pack/plugin/data-frame/build.gradle new file mode 100644 index 0000000000000..bff8118bfc425 --- /dev/null +++ b/x-pack/plugin/data-frame/build.gradle @@ -0,0 +1,37 @@ +evaluationDependsOn(xpackModule('core')) + +apply plugin: 'elasticsearch.esplugin' +esplugin { + name 'data-frame' + description 'A plugin to build data frames' + classname 'org.elasticsearch.xpack.dataframe.DataFrame' + extendedPlugins = ['x-pack-core'] +} + +compileJava.options.compilerArgs << "-Xlint:-rawtypes" +compileTestJava.options.compilerArgs << "-Xlint:-rawtypes" + +dependencies { + compileOnly "org.elasticsearch:elasticsearch:${version}" + + compileOnly project(path: xpackModule('core'), configuration: 'default') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') +} + +run { + plugin xpackModule('core') +} + +// xpack modules are installed in real clusters as the meta plugin, so +// installing them as individual plugins for integ tests doesn't make sense, +// so we disable integ tests +integTest.enabled = false + +// add all sub-projects of the qa sub-project +gradle.projectsEvaluated { + project.subprojects + .find { it.path == project.path + ":qa" } + .subprojects + .findAll { it.path.startsWith(project.path + ":qa") } + .each { check.dependsOn it.check } +} diff --git a/x-pack/plugin/data-frame/qa/build.gradle b/x-pack/plugin/data-frame/qa/build.gradle new file mode 100644 index 0000000000000..4f0103c9c1232 --- /dev/null +++ b/x-pack/plugin/data-frame/qa/build.gradle @@ -0,0 +1,14 @@ +/* Remove assemble on all qa projects because we don't need to publish + * artifacts for them. 
*/ +gradle.projectsEvaluated { + subprojects { + Task assemble = project.tasks.findByName('assemble') + if (assemble) { + assemble.enabled = false + } + Task dependenciesInfo = project.tasks.findByName('dependenciesInfo') + if (dependenciesInfo) { + dependenciesInfo.enabled = false + } + } +} diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/build.gradle b/x-pack/plugin/data-frame/qa/single-node-tests/build.gradle new file mode 100644 index 0000000000000..fccd98313dc07 --- /dev/null +++ b/x-pack/plugin/data-frame/qa/single-node-tests/build.gradle @@ -0,0 +1,12 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" + testCompile project(path: xpackModule('data-frame'), configuration: 'runtime') +} + +integTestCluster { + setting 'xpack.security.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' +} diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java new file mode 100644 index 0000000000000..439aa3098908c --- /dev/null +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.integration; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformConfig; + +import java.io.IOException; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; + +public class DataFrameConfigurationIndexIT extends DataFrameRestTestCase { + + /** + * Tests the corner case that for some reason a transform configuration still exists in the index but + * the persistent task disappeared + * + * test note: {@link DataFrameRestTestCase} checks for an empty index as part of the test case cleanup, + * so we do not need to check that the document has been deleted in this place + */ + public void testDeleteConfigurationLeftOver() throws IOException { + String fakeTransformName = randomAlphaOfLengthBetween(5, 20); + + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.field(DataFrameField.ID.getPreferredName(), fakeTransformName); + } + builder.endObject(); + final StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); + Request req = new Request("PUT", + DataFrameInternalIndex.INDEX_NAME + "/_doc/" + DataFrameTransformConfig.documentId(fakeTransformName)); + req.setEntity(entity); + 
client().performRequest(req); + } + + Request deleteRequest = new Request("DELETE", DATAFRAME_ENDPOINT + fakeTransformName); + Response deleteResponse = client().performRequest(deleteRequest); + assertOK(deleteResponse); + assertTrue((boolean)XContentMapValues.extractValue("acknowledged", entityAsMap(deleteResponse))); + + // delete again, should fail + expectThrows(ResponseException.class,() -> client().performRequest(deleteRequest)); + } +} diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java new file mode 100644 index 0000000000000..d278c78842c39 --- /dev/null +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.integration; + +import org.elasticsearch.Version; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.junit.Before; + +import java.io.IOException; +import java.util.Map; + +public class DataFrameMetaDataIT extends DataFrameRestTestCase { + + private boolean indicesCreated = false; + + // preserve indices in order to reuse source indices in several test cases + @Override + protected boolean preserveIndicesUponCompletion() { + return true; + } + + @Before + public void createIndexes() throws IOException { + + // it's not possible to run it as @BeforeClass as clients aren't initialized then, so we need this little hack + if (indicesCreated) { + return; + } + + createReviewsIndex(); + indicesCreated = true; + } + + public void testMetaData() throws IOException { + long testStarted = System.currentTimeMillis(); + createPivotReviewsTransform("test_meta", "pivot_reviews", null); + + Response mappingResponse = client().performRequest(new Request("GET", "pivot_reviews/_mapping")); + + Map mappingAsMap = entityAsMap(mappingResponse); + assertEquals(Version.CURRENT.toString(), + XContentMapValues.extractValue("pivot_reviews.mappings._meta._data_frame.version.created", mappingAsMap)); + assertTrue((Long) XContentMapValues.extractValue("pivot_reviews.mappings._meta._data_frame.creation_date_in_millis", + mappingAsMap) < System.currentTimeMillis()); + assertTrue((Long) XContentMapValues.extractValue("pivot_reviews.mappings._meta._data_frame.creation_date_in_millis", + mappingAsMap) > testStarted); + assertEquals("test_meta", + XContentMapValues.extractValue("pivot_reviews.mappings._meta._data_frame.transform", mappingAsMap)); + assertEquals("data-frame-transform", + XContentMapValues.extractValue("pivot_reviews.mappings._meta.created_by", mappingAsMap)); + } + +} diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java new file mode 100644 index 0000000000000..eb8203e1dd2e2 --- /dev/null +++ 
b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -0,0 +1,263 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.integration; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.equalTo; + +public class DataFramePivotRestIT extends DataFrameRestTestCase { + + private static boolean indicesCreated = false; + + // preserve indices in order to reuse source indices in several test cases + @Override + protected boolean preserveIndicesUponCompletion() { + return true; + } + + @Before + public void createIndexes() throws IOException { + + // it's not possible to run it as @BeforeClass as clients aren't initialized then, so we need this little hack + if (indicesCreated) { + return; + } + + createReviewsIndex(); + indicesCreated = true; + } + + public void testSimplePivot() throws Exception { + String transformId = "simplePivot"; + String dataFrameIndex = "pivot_reviews"; + + createPivotReviewsTransform(transformId, dataFrameIndex, null); + + startAndWaitForTransform(transformId, dataFrameIndex); + + // we expect 27 documents as there shall be 27 user_id's + Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + assertEquals(27, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); + + // get and check some users + assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_0", 3.776978417); + assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_5", 3.72); + assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_11", 3.846153846); + assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_20", 3.769230769); + assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_26", 3.918918918); + } + + public void testSimplePivotWithQuery() throws Exception { + String transformId = "simplePivotWithQuery"; + String dataFrameIndex = "pivot_reviews_user_id_above_20"; + String query = "\"match\": {\"user_id\": \"user_26\"}"; + + createPivotReviewsTransform(transformId, dataFrameIndex, query); + + startAndWaitForTransform(transformId, dataFrameIndex); + + // we expect only 1 document due to the query + Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + assertEquals(1, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); + assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_26", 3.918918918); + } + + public void testHistogramPivot() throws Exception { + String transformId = "simpleHistogramPivot"; + String dataFrameIndex = "pivot_reviews_via_histogram"; + + final Request createDataframeTransformRequest = new Request("PUT", DATAFRAME_ENDPOINT + transformId); + + String config = "{" + + " \"source\": \"reviews\"," + + " \"dest\": \"" + dataFrameIndex + "\","; + + + config += " \"pivot\": {" + + " \"group_by\": {" + + " \"every_2\": {" + + " \"histogram\": {" + + " \"interval\": 2,\"field\":\"stars\"" + + " } } }," + + " \"aggregations\": {" + + " \"avg_rating\": {" + + " \"avg\": {" + + " \"field\": \"stars\"" + + " } } } }" + + 
"}"; + + + createDataframeTransformRequest.setJsonEntity(config); + Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); + assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + assertTrue(indexExists(dataFrameIndex)); + + startAndWaitForTransform(transformId, dataFrameIndex); + + // we expect 3 documents as there shall be 5 unique star values and we are bucketing every 2 starting at 0 + Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + assertEquals(3, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); + assertOnePivotValue(dataFrameIndex + "/_search?q=every_2:0.0", 1.0); + } + + public void testBiggerPivot() throws Exception { + String transformId = "biggerPivot"; + String dataFrameIndex = "bigger_pivot_reviews"; + + final Request createDataframeTransformRequest = new Request("PUT", DATAFRAME_ENDPOINT + transformId); + + String config = "{" + + " \"source\": \"reviews\"," + + " \"dest\": \"" + dataFrameIndex + "\","; + + + config += " \"pivot\": {" + + " \"group_by\": {" + + " \"reviewer\": {" + + " \"terms\": {" + + " \"field\": \"user_id\"" + + " } } }," + + " \"aggregations\": {" + + " \"avg_rating\": {" + + " \"avg\": {" + + " \"field\": \"stars\"" + + " } }," + + " \"sum_rating\": {" + + " \"sum\": {" + + " \"field\": \"stars\"" + + " } }," + + " \"cardinality_business\": {" + + " \"cardinality\": {" + + " \"field\": \"business_id\"" + + " } }," + + " \"min_rating\": {" + + " \"min\": {" + + " \"field\": \"stars\"" + + " } }," + + " \"max_rating\": {" + + " \"max\": {" + + " \"field\": \"stars\"" + + " } }," + + " \"count\": {" + + " \"value_count\": {" + + " \"field\": \"business_id\"" + + " } }" + + " } }" + + "}"; + + createDataframeTransformRequest.setJsonEntity(config); + Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); + assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + assertTrue(indexExists(dataFrameIndex)); + + startAndWaitForTransform(transformId, dataFrameIndex); + + // we expect 27 documents as there shall be 27 user_id's + Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + assertEquals(27, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); + + // get and check some users + Map searchResult = getAsMap(dataFrameIndex + "/_search?q=reviewer:user_4"); + + assertEquals(1, XContentMapValues.extractValue("hits.total.value", searchResult)); + Number actual = (Number) ((List) XContentMapValues.extractValue("hits.hits._source.avg_rating", searchResult)).get(0); + assertEquals(3.878048780, actual.doubleValue(), 0.000001); + actual = (Number) ((List) XContentMapValues.extractValue("hits.hits._source.sum_rating", searchResult)).get(0); + assertEquals(159, actual.longValue()); + actual = (Number) ((List) XContentMapValues.extractValue("hits.hits._source.cardinality_business", searchResult)).get(0); + assertEquals(6, actual.longValue()); + actual = (Number) ((List) XContentMapValues.extractValue("hits.hits._source.min_rating", searchResult)).get(0); + assertEquals(1, actual.longValue()); + actual = (Number) ((List) XContentMapValues.extractValue("hits.hits._source.max_rating", searchResult)).get(0); + assertEquals(5, actual.longValue()); + actual = (Number) ((List) XContentMapValues.extractValue("hits.hits._source.count", searchResult)).get(0); + assertEquals(41, actual.longValue()); + } + + public void testDateHistogramPivot() throws Exception 
{
+        String transformId = "simpleDateHistogramPivot";
+        String dataFrameIndex = "pivot_reviews_via_date_histogram";
+
+        final Request createDataframeTransformRequest = new Request("PUT", DATAFRAME_ENDPOINT + transformId);
+
+        String config = "{"
+            + " \"source\": \"reviews\","
+            + " \"dest\": \"" + dataFrameIndex + "\",";
+
+        // note: "dd" is day-of-month; "DD" would be day-of-year, which only coincidentally matches in January
+        config += " \"pivot\": {"
+            + "   \"group_by\": {"
+            + "     \"by_day\": {"
+            + "       \"date_histogram\": {"
+            + "         \"interval\": \"1d\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-dd\""
+            + " } } },"
+            + "   \"aggregations\": {"
+            + "     \"avg_rating\": {"
+            + "       \"avg\": {"
+            + "         \"field\": \"stars\""
+            + " } } } }"
+            + "}";
+
+        createDataframeTransformRequest.setJsonEntity(config);
+        Map<String, Object> createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest));
+        assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE));
+        assertTrue(indexExists(dataFrameIndex));
+
+        startAndWaitForTransform(transformId, dataFrameIndex);
+
+        // we expect 21 documents as there are 21 days worth of docs
+        Map<String, Object> indexStats = getAsMap(dataFrameIndex + "/_stats");
+        assertEquals(21, XContentMapValues.extractValue("_all.total.docs.count", indexStats));
+        assertOnePivotValue(dataFrameIndex + "/_search?q=by_day:2017-01-15", 3.82);
+    }
+
+    private void startAndWaitForTransform(String transformId, String dataFrameIndex) throws Exception {
+        // start the transform
+        final Request startTransformRequest = new Request("POST", DATAFRAME_ENDPOINT + transformId + "/_start");
+        Map<String, Object> startTransformResponse = entityAsMap(client().performRequest(startTransformRequest));
+        assertThat(startTransformResponse.get("started"), equalTo(Boolean.TRUE));
+
+        // wait until the dataframe has been created and all data is available
+        waitForDataFrameGeneration(transformId);
+        refreshIndex(dataFrameIndex);
+    }
+
+    private void waitForDataFrameGeneration(String transformId) throws Exception {
+        assertBusy(() -> {
+            long generation = getDataFrameGeneration(transformId);
+            assertEquals(1, generation);
+        }, 30, TimeUnit.SECONDS);
+    }
+
+    private static int getDataFrameGeneration(String transformId) throws IOException {
+        Response statsResponse = client().performRequest(new Request("GET", DATAFRAME_ENDPOINT + transformId + "/_stats"));
+
+        Map<?, ?> transformStatsAsMap = (Map<?, ?>) ((List<?>) entityAsMap(statsResponse).get("transforms")).get(0);
+        return (int) XContentMapValues.extractValue("state.generation", transformStatsAsMap);
+    }
+
+    private void refreshIndex(String index) throws IOException {
+        assertOK(client().performRequest(new Request("POST", index + "/_refresh")));
+    }
+
+    private void assertOnePivotValue(String query, double expected) throws IOException {
+        Map<String, Object> searchResult = getAsMap(query);
+
+        assertEquals(1, XContentMapValues.extractValue("hits.total.value", searchResult));
+        double actual = (double) ((List<?>) XContentMapValues.extractValue("hits.hits._source.avg_rating", searchResult)).get(0);
+        assertEquals(expected, actual, 0.000001);
+    }
+}
diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java
new file mode 100644
index 0000000000000..bd6812ae4896d
--- /dev/null
+++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java
@@ -0,0 +1,224 @@
+/*
+ * Copyright
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.integration; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; +import org.junit.AfterClass; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; + +public abstract class DataFrameRestTestCase extends ESRestTestCase { + + protected static final String DATAFRAME_ENDPOINT = DataFrameField.REST_BASE_PATH + "transforms/"; + + /** + * Create a simple dataset for testing with reviewers, ratings and businesses + */ + protected void createReviewsIndex() throws IOException { + int[] distributionTable = {5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 2, 1, 1, 1}; + + final int numDocs = 1000; + + // create mapping + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.startObject("mappings") + .startObject("properties") + .startObject("timestamp") + .field("type", "date") + .endObject() + .startObject("user_id") + .field("type", "keyword") + .endObject() + .startObject("business_id") + .field("type", "keyword") + .endObject() + .startObject("stars") + .field("type", "integer") + .endObject() + .endObject() + .endObject(); + } + builder.endObject(); + final StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); + Request req = new Request("PUT", "reviews"); + req.setEntity(entity); + client().performRequest(req); + } + + // create index + final StringBuilder bulk = new StringBuilder(); + int day = 10; + for (int i = 0; i < numDocs; i++) { + bulk.append("{\"index\":{\"_index\":\"reviews\"}}\n"); + long user = Math.round(Math.pow(i * 31 % 1000, distributionTable[i % distributionTable.length]) % 27); + int stars = distributionTable[(i * 33) % distributionTable.length]; + long business = Math.round(Math.pow(user * stars, distributionTable[i % distributionTable.length]) % 13); + int hour = randomIntBetween(10, 20); + int min = randomIntBetween(30, 59); + int sec = randomIntBetween(30, 59); + + String date_string = "2017-01-" + day + "T" + hour + ":" + min + ":" + sec + "Z"; + bulk.append("{\"user_id\":\"") + .append("user_") + .append(user) + .append("\",\"business_id\":\"") + .append("business_") + .append(business) + .append("\",\"stars\":") + .append(stars) + .append(",\"timestamp\":\"") + .append(date_string) + .append("\"}\n"); + + if (i % 50 == 0) { + bulk.append("\r\n"); + final Request bulkRequest = new Request("POST", "/_bulk"); + bulkRequest.addParameter("refresh", "true"); + bulkRequest.setJsonEntity(bulk.toString()); + client().performRequest(bulkRequest); + // clear the builder + bulk.setLength(0); + day += 1; + } + } + 
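+        // flush the remaining documents of the last, possibly partial, bulk batch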
bulk.append("\r\n"); + + final Request bulkRequest = new Request("POST", "/_bulk"); + bulkRequest.addParameter("refresh", "true"); + bulkRequest.setJsonEntity(bulk.toString()); + client().performRequest(bulkRequest); + } + + protected void createPivotReviewsTransform(String transformId, String dataFrameIndex, String query) throws IOException { + final Request createDataframeTransformRequest = new Request("PUT", DATAFRAME_ENDPOINT + transformId); + + String config = "{" + + " \"source\": \"reviews\"," + + " \"dest\": \"" + dataFrameIndex + "\","; + + if (query != null) { + config += "\"query\": {" + + query + + "},"; + } + + config += " \"pivot\": {" + + " \"group_by\": {" + + " \"reviewer\": {" + + " \"terms\": {" + + " \"field\": \"user_id\"" + + " } } }," + + " \"aggregations\": {" + + " \"avg_rating\": {" + + " \"avg\": {" + + " \"field\": \"stars\"" + + " } } } }" + + "}"; + + createDataframeTransformRequest.setJsonEntity(config); + Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); + assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + assertTrue(indexExists(dataFrameIndex)); + } + + @SuppressWarnings("unchecked") + private static List> getDataFrameTransforms() throws IOException { + Response response = adminClient().performRequest(new Request("GET", DATAFRAME_ENDPOINT + "_all")); + Map transforms = entityAsMap(response); + List> transformConfigs = (List>) XContentMapValues.extractValue("transforms", transforms); + + return transformConfigs == null ? Collections.emptyList() : transformConfigs; + } + + protected static String getDataFrameIndexerState(String transformId) throws IOException { + Response statsResponse = client().performRequest(new Request("GET", DATAFRAME_ENDPOINT + transformId + "/_stats")); + + Map transformStatsAsMap = (Map) ((List) entityAsMap(statsResponse).get("transforms")).get(0); + return (String) XContentMapValues.extractValue("state.transform_state", transformStatsAsMap); + } + + @AfterClass + public static void removeIndices() throws Exception { + wipeDataFrameTransforms(); + waitForPendingDataFrameTasks(); + // we might have disabled wiping indices, but now its time to get rid of them + // note: can not use super.cleanUpCluster() as this method must be static + wipeIndices(); + } + + protected static void wipeDataFrameTransforms() throws IOException, InterruptedException { + List> transformConfigs = getDataFrameTransforms(); + + for (Map transformConfig : transformConfigs) { + String transformId = (String) transformConfig.get("id"); + Request request = new Request("POST", DATAFRAME_ENDPOINT + transformId + "/_stop"); + request.addParameter("wait_for_completion", "true"); + request.addParameter("timeout", "10s"); + request.addParameter("ignore", "404"); + adminClient().performRequest(request); + assertEquals("stopped", getDataFrameIndexerState(transformId)); + } + + for (Map transformConfig : transformConfigs) { + String transformId = (String) transformConfig.get("id"); + Request request = new Request("DELETE", DATAFRAME_ENDPOINT + transformId); + request.addParameter("ignore", "404"); // Ignore 404s because they imply someone was racing us to delete this + adminClient().performRequest(request); + } + + // transforms should be all gone + transformConfigs = getDataFrameTransforms(); + assertTrue(transformConfigs.isEmpty()); + + // the configuration index should be empty + Request request = new Request("GET", DataFrameInternalIndex.INDEX_NAME + "/_search"); + try { + 
Response searchResponse = adminClient().performRequest(request); + Map<String, Object> searchResult = entityAsMap(searchResponse); + + assertEquals(0, XContentMapValues.extractValue("hits.total.value", searchResult)); + } catch (ResponseException e) { + // 404 here just means we had no data frame transforms, true for some tests + if (e.getResponse().getStatusLine().getStatusCode() != 404) { + throw e; + } + } + } + + protected static void waitForPendingDataFrameTasks() throws Exception { + waitForPendingTasks(adminClient(), taskName -> taskName.startsWith(DataFrameField.TASK_NAME) == false); + } + + protected static void wipeIndices() throws IOException { + try { + adminClient().performRequest(new Request("DELETE", "*")); + } catch (ResponseException e) { + // 404 here just means we had no indices + if (e.getResponse().getStatusLine().getStatusCode() != 404) { + throw e; + } + } + } +} diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java new file mode 100644 index 0000000000000..c5436049c1abe --- /dev/null +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.integration; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.junit.Before; + +import java.io.IOException; +import java.util.Map; + +public class DataFrameUsageIT extends DataFrameRestTestCase { + private boolean indicesCreated = false; + + // preserve indices in order to reuse source indices in several test cases + @Override + protected boolean preserveIndicesUponCompletion() { + return true; + } + + @Before + public void createIndexes() throws IOException { + + // it's not possible to run it as @BeforeClass as clients aren't initialized then, so we need this little hack + if (indicesCreated) { + return; + } + + createReviewsIndex(); + indicesCreated = true; + } + + public void testUsage() throws IOException { + Response usageResponse = client().performRequest(new Request("GET", "_xpack/usage")); + + Map<String, Object> usageAsMap = entityAsMap(usageResponse); + assertTrue((boolean) XContentMapValues.extractValue("data_frame.available", usageAsMap)); + assertTrue((boolean) XContentMapValues.extractValue("data_frame.enabled", usageAsMap)); + // no transforms, no stats + assertEquals(null, XContentMapValues.extractValue("data_frame.transforms", usageAsMap)); + assertEquals(null, XContentMapValues.extractValue("data_frame.stats", usageAsMap)); + + // create a transform + createPivotReviewsTransform("test_usage", "pivot_reviews", null); + + usageResponse = client().performRequest(new Request("GET", "_xpack/usage")); + + usageAsMap = entityAsMap(usageResponse); + // we should see some stats + assertEquals(1, XContentMapValues.extractValue("data_frame.transforms._all", usageAsMap)); + assertEquals(0, XContentMapValues.extractValue("data_frame.stats.index_failures", usageAsMap)); + } +}
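The helpers above are enough to drive a transform end to end over REST. The following is an illustrative sketch only (not part of this change), assuming a subclass of DataFrameRestTestCase; the transform id is hypothetical and the _start path is inferred by analogy with the _stop and _stats calls above:

public void testPivotEndToEnd() throws Exception {
    createReviewsIndex();
    createPivotReviewsTransform("reviews_pivot", "pivot_reviews", null);
    // start the transform; the path is assumed by analogy with the _stop call above
    client().performRequest(new Request("POST", DATAFRAME_ENDPOINT + "reviews_pivot/_start"));
    // poll the stats endpoint until the indexer reports it has started
    assertBusy(() -> assertEquals("started", getDataFrameIndexerState("reviews_pivot")));
}

diff --git 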
a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java new file mode 100644 index 0000000000000..4ef39d630f06c --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java @@ -0,0 +1,226 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.settings.SettingsModule; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.persistent.PersistentTaskParams; +import org.elasticsearch.persistent.PersistentTaskState; +import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.PersistentTaskPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ExecutorBuilder; +import org.elasticsearch.threadpool.FixedExecutorBuilder; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.dataframe.transform.DataFrameTransformState; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; +import org.elasticsearch.xpack.dataframe.action.DeleteDataFrameTransformAction; +import org.elasticsearch.xpack.dataframe.action.GetDataFrameTransformsAction; +import org.elasticsearch.xpack.dataframe.action.GetDataFrameTransformsStatsAction; +import org.elasticsearch.xpack.dataframe.action.PutDataFrameTransformAction; +import org.elasticsearch.xpack.dataframe.action.StartDataFrameTransformAction; +import org.elasticsearch.xpack.dataframe.action.StopDataFrameTransformAction; +import org.elasticsearch.xpack.dataframe.action.TransportDeleteDataFrameTransformAction; +import org.elasticsearch.xpack.dataframe.action.TransportGetDataFrameTransformsAction; +import 
org.elasticsearch.xpack.dataframe.action.TransportGetDataFrameTransformsStatsAction; +import org.elasticsearch.xpack.dataframe.action.TransportPutDataFrameTransformAction; +import org.elasticsearch.xpack.dataframe.action.TransportStartDataFrameTransformAction; +import org.elasticsearch.xpack.dataframe.action.TransportStopDataFrameTransformAction; +import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; +import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; +import org.elasticsearch.xpack.dataframe.rest.action.RestDeleteDataFrameTransformAction; +import org.elasticsearch.xpack.dataframe.rest.action.RestGetDataFrameTransformsAction; +import org.elasticsearch.xpack.dataframe.rest.action.RestGetDataFrameTransformsStatsAction; +import org.elasticsearch.xpack.dataframe.rest.action.RestPutDataFrameTransformAction; +import org.elasticsearch.xpack.dataframe.rest.action.RestStartDataFrameTransformAction; +import org.elasticsearch.xpack.dataframe.rest.action.RestStopDataFrameTransformAction; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransform; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformPersistentTasksExecutor; + +import java.io.IOException; +import java.time.Clock; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Supplier; +import java.util.function.UnaryOperator; + +import static java.util.Collections.emptyList; + +public class DataFrame extends Plugin implements ActionPlugin, PersistentTaskPlugin { + + public static final String NAME = "data_frame"; + public static final String TASK_THREAD_POOL_NAME = "data_frame_indexing"; + + // list of headers that will be stored when a transform is created + public static final Set<String> HEADER_FILTERS = new HashSet<>( + Arrays.asList("es-security-runas-user", "_xpack_security_authentication")); + + private static final Logger logger = LogManager.getLogger(XPackPlugin.class); + + private final boolean enabled; + private final Settings settings; + private final boolean transportClientMode; + private final SetOnce<DataFrameTransformsConfigManager> dataFrameTransformsConfigManager = new SetOnce<>(); + + public DataFrame(Settings settings) { + this.settings = settings; + + this.enabled = XPackSettings.DATA_FRAME_ENABLED.get(settings); + this.transportClientMode = XPackPlugin.transportClientMode(settings); + } + + @Override + public Collection<Module> createGuiceModules() { + List<Module> modules = new ArrayList<>(); + + if (transportClientMode) { + return modules; + } + + modules.add(b -> XPackPlugin.bindFeatureSet(b, DataFrameFeatureSet.class)); + return modules; + } + + protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } + + @Override + public List<RestHandler> getRestHandlers(final Settings settings, final RestController restController, + final ClusterSettings clusterSettings, final IndexScopedSettings indexScopedSettings, final SettingsFilter settingsFilter, + final IndexNameExpressionResolver indexNameExpressionResolver, final Supplier<DiscoveryNodes> nodesInCluster) { + + if (!enabled) { + return emptyList(); + } + + return Arrays.asList( + new RestPutDataFrameTransformAction(settings, restController), + new RestStartDataFrameTransformAction(settings, restController), + new RestStopDataFrameTransformAction(settings, restController), + new RestDeleteDataFrameTransformAction(settings, restController), + new 
RestGetDataFrameTransformsAction(settings, restController), + new RestGetDataFrameTransformsStatsAction(settings, restController) + ); + } + + @Override + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { + if (!enabled) { + return emptyList(); + } + + return Arrays.asList( + new ActionHandler<>(PutDataFrameTransformAction.INSTANCE, TransportPutDataFrameTransformAction.class), + new ActionHandler<>(StartDataFrameTransformAction.INSTANCE, TransportStartDataFrameTransformAction.class), + new ActionHandler<>(StopDataFrameTransformAction.INSTANCE, TransportStopDataFrameTransformAction.class), + new ActionHandler<>(DeleteDataFrameTransformAction.INSTANCE, TransportDeleteDataFrameTransformAction.class), + new ActionHandler<>(GetDataFrameTransformsAction.INSTANCE, TransportGetDataFrameTransformsAction.class), + new ActionHandler<>(GetDataFrameTransformsStatsAction.INSTANCE, TransportGetDataFrameTransformsStatsAction.class) + ); + } + + @Override + public List<ExecutorBuilder<?>> getExecutorBuilders(Settings settings) { + if (false == enabled || transportClientMode) { + return emptyList(); + } + + FixedExecutorBuilder indexing = new FixedExecutorBuilder(settings, TASK_THREAD_POOL_NAME, 4, 4, + "data_frame.task_thread_pool"); + + return Collections.singletonList(indexing); + } + + @Override + public Collection<Object> createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, ScriptService scriptService, NamedXContentRegistry xContentRegistry, + Environment environment, NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { + if (enabled == false || transportClientMode) { + return emptyList(); + } + + dataFrameTransformsConfigManager.set(new DataFrameTransformsConfigManager(client, xContentRegistry)); + + return Collections.singletonList(dataFrameTransformsConfigManager.get()); + } + + @Override + public UnaryOperator<Map<String, IndexTemplateMetaData>> getIndexTemplateMetaDataUpgrader() { + return templates -> { + try { + templates.put(DataFrameInternalIndex.INDEX_TEMPLATE_NAME, DataFrameInternalIndex.getIndexTemplateMetaData()); + } catch (IOException e) { + logger.error("Error creating data frame index template", e); + } + return templates; + }; + } + + @Override + public List<PersistentTasksExecutor<?>> getPersistentTasksExecutor(ClusterService clusterService, ThreadPool threadPool, + Client client, SettingsModule settingsModule) { + if (enabled == false || transportClientMode) { + return emptyList(); + } + + SchedulerEngine schedulerEngine = new SchedulerEngine(settings, Clock.systemUTC()); + + // the transforms config manager should have been created + assert dataFrameTransformsConfigManager.get() != null; + return Collections.singletonList( + new DataFrameTransformPersistentTasksExecutor(client, dataFrameTransformsConfigManager.get(), schedulerEngine, threadPool)); + } + + @Override + public List<NamedXContentRegistry.Entry> getNamedXContent() { + if (enabled == false) { + return emptyList(); + } + return Arrays.asList( + new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(DataFrameField.TASK_NAME), + DataFrameTransform::fromXContent), + new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(DataFrameTransformState.NAME), + DataFrameTransformState::fromXContent), + new NamedXContentRegistry.Entry(PersistentTaskState.class, new ParseField(DataFrameTransformState.NAME), + DataFrameTransformState::fromXContent) + ); + } +}
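The FixedExecutorBuilder registered above gives transform indexing a dedicated bounded pool (4 threads, queue size 4). A hedged sketch of how task code would hand work to it, assuming access to the node's ThreadPool instance:

// dispatch indexing work onto the pool registered above; once all 4 threads
// are busy and 4 more tasks are queued, further submissions are rejected
threadPool.executor(DataFrame.TASK_THREAD_POOL_NAME).execute(() -> {
    // run one iteration of the transform indexer here (placeholder)
});

diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSet.java 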
b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSet.java new file mode 100644 index 0000000000000..9be80024975d8 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSet.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.dataframe.DataFrameFeatureSetUsage; +import org.elasticsearch.xpack.core.dataframe.transform.DataFrameIndexerTransformStats; +import org.elasticsearch.xpack.dataframe.action.GetDataFrameTransformsStatsAction; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +public class DataFrameFeatureSet implements XPackFeatureSet { + + private final boolean enabled; + private final Client client; + private final XPackLicenseState licenseState; + + @Inject + public DataFrameFeatureSet(Settings settings, Client client, @Nullable XPackLicenseState licenseState) { + this.enabled = XPackSettings.DATA_FRAME_ENABLED.get(settings); + this.client = Objects.requireNonNull(client); + this.licenseState = licenseState; + } + + @Override + public String name() { + return XPackField.DATA_FRAME; + } + + @Override + public String description() { + return "Data Frame for the Elastic Stack"; + } + + @Override + public boolean available() { + return licenseState != null && licenseState.isDataFrameAllowed(); + } + + @Override + public boolean enabled() { + return enabled; + } + + @Override + public Map<String, Object> nativeCodeInfo() { + return null; + } + + @Override + public void usage(ActionListener<XPackFeatureSet.Usage> listener) { + if (enabled == false) { + listener.onResponse( + new DataFrameFeatureSetUsage(available(), enabled(), Collections.emptyMap(), new DataFrameIndexerTransformStats())); + return; + } + + GetDataFrameTransformsStatsAction.Request transformStatsRequest = new GetDataFrameTransformsStatsAction.Request(MetaData.ALL); + + client.execute(GetDataFrameTransformsStatsAction.INSTANCE, transformStatsRequest, ActionListener.wrap(transformStatsResponse -> { + Map<String, Long> transformsCountByState = new HashMap<>(); + DataFrameIndexerTransformStats accumulatedStats = new DataFrameIndexerTransformStats(); + + transformStatsResponse.getTransformsStateAndStats().stream().forEach(singleResult -> { + transformsCountByState.merge(singleResult.getTransformState().getIndexerState().value(), 1L, Long::sum); + accumulatedStats.merge(singleResult.getTransformStats()); + }); + + listener.onResponse(new DataFrameFeatureSetUsage(available(), enabled(), transformsCountByState, accumulatedStats)); + }, listener::onFailure)); + } +}
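The usage callback above folds per-transform indexer states into a single count map with Map.merge. A minimal sketch of that accumulation (the state values are illustrative):

Map<String, Long> transformsCountByState = new HashMap<>();
transformsCountByState.merge("started", 1L, Long::sum); // {started=1}
transformsCountByState.merge("stopped", 1L, Long::sum); // {started=1, stopped=1}
transformsCountByState.merge("started", 1L, Long::sum); // {started=2, stopped=1}

diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/DataFrameTransformStateAndStats.java 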
b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/DataFrameTransformStateAndStats.java new file mode 100644 index 0000000000000..1b8a7dfbd6805 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/DataFrameTransformStateAndStats.java @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.dataframe.transform.DataFrameIndexerTransformStats; +import org.elasticsearch.xpack.core.dataframe.transform.DataFrameTransformState; + +import java.io.IOException; +import java.util.Objects; + +public class DataFrameTransformStateAndStats implements Writeable, ToXContentObject { + + public static final ParseField STATE_FIELD = new ParseField("state"); + + private final String id; + private final DataFrameTransformState transformState; + private final DataFrameIndexerTransformStats transformStats; + + public static final ConstructingObjectParser<DataFrameTransformStateAndStats, Void> PARSER = new ConstructingObjectParser<>( + GetDataFrameTransformsAction.NAME, + a -> new DataFrameTransformStateAndStats((String) a[0], (DataFrameTransformState) a[1], (DataFrameIndexerTransformStats) a[2])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), DataFrameField.ID); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), DataFrameTransformState.PARSER::apply, STATE_FIELD); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> DataFrameIndexerTransformStats.fromXContent(p), + DataFrameField.STATS_FIELD); + } + + public DataFrameTransformStateAndStats(String id, DataFrameTransformState state, DataFrameIndexerTransformStats stats) { + this.id = Objects.requireNonNull(id); + this.transformState = Objects.requireNonNull(state); + this.transformStats = Objects.requireNonNull(stats); + } + + public DataFrameTransformStateAndStats(StreamInput in) throws IOException { + this.id = in.readString(); + this.transformState = new DataFrameTransformState(in); + this.transformStats = new DataFrameIndexerTransformStats(in); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(DataFrameField.ID.getPreferredName(), id); + builder.field(STATE_FIELD.getPreferredName(), transformState); + builder.field(DataFrameField.STATS_FIELD.getPreferredName(), transformStats); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(id); + transformState.writeTo(out); + transformStats.writeTo(out); + } + + @Override + public int hashCode() { + return Objects.hash(id, transformState, transformStats); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || 
getClass() != other.getClass()) { + return false; + } + + DataFrameTransformStateAndStats that = (DataFrameTransformStateAndStats) other; + + return Objects.equals(this.id, that.id) && Objects.equals(this.transformState, that.transformState) + && Objects.equals(this.transformStats, that.transformStats); + } + + public String getId() { + return id; + } + + public DataFrameIndexerTransformStats getTransformStats() { + return transformStats; + } + + public DataFrameTransformState getTransformState() { + return transformState; + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/DeleteDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/DeleteDataFrameTransformAction.java new file mode 100644 index 0000000000000..06a8c5cf47d07 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/DeleteDataFrameTransformAction.java @@ -0,0 +1,175 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.support.tasks.BaseTasksRequest; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +public class DeleteDataFrameTransformAction extends Action<DeleteDataFrameTransformAction.Response> { + + public static final DeleteDataFrameTransformAction INSTANCE = new DeleteDataFrameTransformAction(); + public static final String NAME = "cluster:admin/data_frame/delete"; + + private DeleteDataFrameTransformAction() { + super(NAME); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends BaseTasksRequest<Request> implements ToXContentFragment { + private String id; + + public Request(String id) { + this.id = ExceptionsHelper.requireNonNull(id, DataFrameField.ID.getPreferredName()); + } + + public Request() { + } + + public Request(StreamInput in) throws IOException { + super(in); + id = in.readString(); + } + + public String getId() { + return id; + } + + @Override + public boolean match(Task task) { + return task.getDescription().equals(DataFrameField.PERSISTENT_TASK_DESCRIPTION_PREFIX + id); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public 
XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(DataFrameField.ID.getPreferredName(), id); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(id, other.id); + } + } + + public static class RequestBuilder + extends ActionRequestBuilder<Request, Response> { + + protected RequestBuilder(ElasticsearchClient client, DeleteDataFrameTransformAction action) { + super(client, action, new DeleteDataFrameTransformAction.Request()); + } + } + + public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { + private boolean acknowledged; + public Response(StreamInput in) throws IOException { + super(Collections.emptyList(), Collections.emptyList()); + readFrom(in); + } + + public Response(boolean acknowledged, List<TaskOperationFailure> taskFailures, List<FailedNodeException> nodeFailures) { + super(taskFailures, nodeFailures); + this.acknowledged = acknowledged; + } + + public Response(boolean acknowledged) { + this(acknowledged, Collections.emptyList(), Collections.emptyList()); + } + + public Response() { + this(false, Collections.emptyList(), Collections.emptyList()); + } + + public boolean isDeleted() { + return acknowledged; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + acknowledged = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(acknowledged); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + toXContentCommon(builder, params); + builder.field("acknowledged", acknowledged); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + DeleteDataFrameTransformAction.Response response = (DeleteDataFrameTransformAction.Response) o; + return super.equals(o) && acknowledged == response.acknowledged; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), acknowledged); + } + } +}
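Transport-layer callers would drive this action roughly as follows; the client and logger variables and the transform id are hypothetical, and the listener wiring mirrors the ActionListener.wrap style used elsewhere in this change:

// sketch: delete a stopped transform through the action defined above
DeleteDataFrameTransformAction.Request deleteRequest = new DeleteDataFrameTransformAction.Request("reviews_pivot");
client.execute(DeleteDataFrameTransformAction.INSTANCE, deleteRequest, ActionListener.wrap(
        response -> logger.info("deleted: {}", response.isDeleted()),
        e -> logger.error("delete failed", e)));

diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/GetDataFrameTransformsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/GetDataFrameTransformsAction.java new file mode 100644 index 0000000000000..5e10454207c21 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/GetDataFrameTransformsAction.java @@ -0,0 +1,223 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 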
+ */ + +package org.elasticsearch.xpack.dataframe.action; + +import org.apache.logging.log4j.LogManager; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.support.tasks.BaseTasksRequest; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformConfig; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +public class GetDataFrameTransformsAction extends Action<GetDataFrameTransformsAction.Response> { + + public static final GetDataFrameTransformsAction INSTANCE = new GetDataFrameTransformsAction(); + public static final String NAME = "cluster:monitor/data_frame/get"; + + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( + LogManager.getLogger(GetDataFrameTransformsAction.class)); + + private GetDataFrameTransformsAction() { + super(NAME); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends BaseTasksRequest<Request> implements ToXContent { + private String id; + + public Request(String id) { + if (Strings.isNullOrEmpty(id) || id.equals("*")) { + this.id = MetaData.ALL; + } else { + this.id = id; + } + } + + public Request() {} + + public Request(StreamInput in) throws IOException { + super(in); + id = in.readString(); + } + + @Override + public boolean match(Task task) { + // If we are retrieving all the transforms, the task description does not contain the id + if (id.equals(MetaData.ALL)) { + return task.getDescription().startsWith(DataFrameField.PERSISTENT_TASK_DESCRIPTION_PREFIX); + } + // Otherwise find the task by ID + return task.getDescription().equals(DataFrameField.PERSISTENT_TASK_DESCRIPTION_PREFIX + id); + } + + public String getId() { + return id; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(DataFrameField.ID.getPreferredName(), id); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(id, other.id); + } + } + + public static class RequestBuilder extends ActionRequestBuilder<Request, Response> { + + protected 
RequestBuilder(ElasticsearchClient client, GetDataFrameTransformsAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { + + public static final String INVALID_TRANSFORMS_DEPRECATION_WARNING = "Found [{}] invalid transforms"; + private static final ParseField INVALID_TRANSFORMS = new ParseField("invalid_transforms"); + + private List<DataFrameTransformConfig> transformConfigurations; + + public Response(List<DataFrameTransformConfig> transformConfigs) { + super(Collections.emptyList(), Collections.emptyList()); + this.transformConfigurations = transformConfigs; + } + + public Response(List<DataFrameTransformConfig> transformConfigs, List<TaskOperationFailure> taskFailures, + List<FailedNodeException> nodeFailures) { + super(taskFailures, nodeFailures); + this.transformConfigurations = transformConfigs; + } + + public Response() { + super(Collections.emptyList(), Collections.emptyList()); + } + + public Response(StreamInput in) throws IOException { + super(Collections.emptyList(), Collections.emptyList()); + readFrom(in); + } + + public List<DataFrameTransformConfig> getTransformConfigurations() { + return transformConfigurations; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + transformConfigurations = in.readList(DataFrameTransformConfig::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeList(transformConfigurations); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + List<String> invalidTransforms = new ArrayList<>(); + builder.startObject(); + builder.field(DataFrameField.COUNT.getPreferredName(), transformConfigurations.size()); + // XContentBuilder does not support passing the params object for Iterables + builder.field(DataFrameField.TRANSFORMS.getPreferredName()); + builder.startArray(); + for (DataFrameTransformConfig configResponse : transformConfigurations) { + configResponse.toXContent(builder, params); + if (configResponse.isValid() == false) { + invalidTransforms.add(configResponse.getId()); + } + } + builder.endArray(); + if (invalidTransforms.isEmpty() == false) { + builder.startObject(INVALID_TRANSFORMS.getPreferredName()); + builder.field(DataFrameField.COUNT.getPreferredName(), invalidTransforms.size()); + builder.field(DataFrameField.TRANSFORMS.getPreferredName(), invalidTransforms); + builder.endObject(); + deprecationLogger.deprecated(INVALID_TRANSFORMS_DEPRECATION_WARNING, invalidTransforms.size()); + } + + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(transformConfigurations); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + final Response that = (Response) other; + return Objects.equals(this.transformConfigurations, that.transformConfigurations); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } +}
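Over REST this surfaces as a plain GET. The response carries count and transforms, and only when stored configurations no longer parse does it add the invalid_transforms section plus a deprecation warning. A hedged sketch reusing the test base class above:

// sketch: fetch all transform configurations, as the test base class does
Response response = adminClient().performRequest(new Request("GET", DATAFRAME_ENDPOINT + "_all"));
Map<String, Object> body = entityAsMap(response);
// e.g. {"count": 1, "transforms": [ ... ]} and, for broken configs only,
// {"invalid_transforms": {"count": 1, "transforms": ["bad_id"]}}

diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/GetDataFrameTransformsStatsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/GetDataFrameTransformsStatsAction.java new file mode 100644 index 0000000000000..0dff8923dfeaa --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/GetDataFrameTransformsStatsAction.java @@ -0,0 +1,193 @@ +/* + * Copyright Elasticsearch B.V. 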
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.support.tasks.BaseTasksRequest; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +public class GetDataFrameTransformsStatsAction extends Action<GetDataFrameTransformsStatsAction.Response> { + + public static final GetDataFrameTransformsStatsAction INSTANCE = new GetDataFrameTransformsStatsAction(); + public static final String NAME = "cluster:monitor/data_frame_stats/get"; + public GetDataFrameTransformsStatsAction() { + super(NAME); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends BaseTasksRequest<Request> implements ToXContent { + private String id; + + public Request(String id) { + if (Strings.isNullOrEmpty(id) || id.equals("*")) { + this.id = MetaData.ALL; + } else { + this.id = id; + } + } + + public Request() {} + + public Request(StreamInput in) throws IOException { + super(in); + id = in.readString(); + } + + @Override + public boolean match(Task task) { + // If we are retrieving all the transforms, the task description does not contain the id + if (id.equals(MetaData.ALL)) { + return task.getDescription().startsWith(DataFrameField.PERSISTENT_TASK_DESCRIPTION_PREFIX); + } + // Otherwise find the task by ID + return task.getDescription().equals(DataFrameField.PERSISTENT_TASK_DESCRIPTION_PREFIX + id); + } + + public String getId() { + return id; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(DataFrameField.ID.getPreferredName(), id); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(id, other.id); + } + } + + public static class RequestBuilder extends ActionRequestBuilder<Request, Response> { + + protected RequestBuilder(ElasticsearchClient client, GetDataFrameTransformsStatsAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends 
BaseTasksResponse implements Writeable, ToXContentObject { + private List<DataFrameTransformStateAndStats> transformsStateAndStats; + + public Response(List<DataFrameTransformStateAndStats> transformsStateAndStats) { + super(Collections.emptyList(), Collections.emptyList()); + this.transformsStateAndStats = transformsStateAndStats; + } + + public Response(List<DataFrameTransformStateAndStats> transformsStateAndStats, List<TaskOperationFailure> taskFailures, + List<FailedNodeException> nodeFailures) { + super(taskFailures, nodeFailures); + this.transformsStateAndStats = transformsStateAndStats; + } + + public Response() { + super(Collections.emptyList(), Collections.emptyList()); + this.transformsStateAndStats = Collections.emptyList(); + } + + public Response(StreamInput in) throws IOException { + super(Collections.emptyList(), Collections.emptyList()); + readFrom(in); + } + + public List<DataFrameTransformStateAndStats> getTransformsStateAndStats() { + return transformsStateAndStats; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + transformsStateAndStats = in.readList(DataFrameTransformStateAndStats::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeList(transformsStateAndStats); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(DataFrameField.COUNT.getPreferredName(), transformsStateAndStats.size()); + builder.field(DataFrameField.TRANSFORMS.getPreferredName(), transformsStateAndStats); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(transformsStateAndStats); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + final Response that = (Response) other; + return Objects.equals(this.transformsStateAndStats, that.transformsStateAndStats); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } +}
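The stats variant returns per-transform state and stats rather than configurations; getDataFrameIndexerState in the test base class above already consumes it. A hedged sketch with a hypothetical transform id:

Response statsResponse = client().performRequest(
        new Request("GET", DATAFRAME_ENDPOINT + "reviews_pivot/_stats"));
Map<?, ?> firstTransform = (Map<?, ?>) ((List<?>) entityAsMap(statsResponse).get("transforms")).get(0);
String indexerState = (String) XContentMapValues.extractValue("state.transform_state", firstTransform);

diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/PutDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/PutDataFrameTransformAction.java new file mode 100644 index 0000000000000..71546111ff487 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/PutDataFrameTransformAction.java @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 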
+ */ + +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformConfig; + +import java.io.IOException; +import java.util.Objects; + +public class PutDataFrameTransformAction extends Action<PutDataFrameTransformAction.Response> { + + public static final PutDataFrameTransformAction INSTANCE = new PutDataFrameTransformAction(); + public static final String NAME = "cluster:admin/data_frame/put"; + + private PutDataFrameTransformAction() { + super(NAME); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends AcknowledgedRequest<Request> implements ToXContentObject { + + private DataFrameTransformConfig config; + + public Request(DataFrameTransformConfig config) { + this.setConfig(config); + } + + public Request() { + + } + + public static Request fromXContent(final XContentParser parser, final String id) throws IOException { + return new Request(DataFrameTransformConfig.fromXContent(parser, id, false)); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return this.config.toXContent(builder, params); + } + + public DataFrameTransformConfig getConfig() { + return config; + } + + public void setConfig(DataFrameTransformConfig config) { + this.config = config; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.config = new DataFrameTransformConfig(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + this.config.writeTo(out); + } + + @Override + public int hashCode() { + return Objects.hash(config); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(config, other.config); + } + } + + public static class RequestBuilder extends MasterNodeOperationRequestBuilder<Request, Response, RequestBuilder> { + + protected RequestBuilder(ElasticsearchClient client, PutDataFrameTransformAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends AcknowledgedResponse { + public Response() { + super(); + } + + public Response(boolean acknowledged) { + super(acknowledged); + } + } +}
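The REST round trip for this action is the PUT issued by createPivotReviewsTransform in the test base class above. A trimmed, hedged sketch of that request (index and transform names are the test's):

Request put = new Request("PUT", DATAFRAME_ENDPOINT + "reviews_pivot");
put.setJsonEntity("{"
        + " \"source\": \"reviews\","
        + " \"dest\": \"pivot_reviews\","
        + " \"pivot\": {"
        + "   \"group_by\": { \"reviewer\": { \"terms\": { \"field\": \"user_id\" } } },"
        + "   \"aggregations\": { \"avg_rating\": { \"avg\": { \"field\": \"stars\" } } } }"
        + "}");
client().performRequest(put);

diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/StartDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/StartDataFrameTransformAction.java new file mode 100644 index 0000000000000..9a2e227f9e083 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/StartDataFrameTransformAction.java @@ -0,0 +1,162 @@ +/* + * 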
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.tasks.BaseTasksRequest; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Collections; +import java.util.Objects; + +public class StartDataFrameTransformAction extends Action<StartDataFrameTransformAction.Response> { + + public static final StartDataFrameTransformAction INSTANCE = new StartDataFrameTransformAction(); + public static final String NAME = "cluster:admin/data_frame/start"; + + private StartDataFrameTransformAction() { + super(NAME); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends BaseTasksRequest<Request> implements ToXContent { + private String id; + + public Request(String id) { + this.id = ExceptionsHelper.requireNonNull(id, DataFrameField.ID.getPreferredName()); + } + + public Request() { + } + + public Request(StreamInput in) throws IOException { + super(in); + id = in.readString(); + } + + public String getId() { + return id; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(DataFrameField.ID.getPreferredName(), id); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(id, other.id); + } + } + + public static class RequestBuilder extends ActionRequestBuilder<Request, Response> { + + protected RequestBuilder(ElasticsearchClient client, StartDataFrameTransformAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { + private boolean started; + + public Response() { + super(Collections.emptyList(), Collections.emptyList()); + } + + public Response(StreamInput in) throws IOException { + super(Collections.emptyList(), Collections.emptyList()); + readFrom(in); + } + + public Response(boolean started) { + super(Collections.emptyList(), Collections.emptyList()); + this.started = started; + } + + public boolean isStarted() { + return started; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + started = in.readBoolean(); + } + 
+ @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(started); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("started", started); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + Response response = (Response) obj; + return started == response.started; + } + + @Override + public int hashCode() { + return Objects.hash(started); + } + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/StopDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/StopDataFrameTransformAction.java new file mode 100644 index 0000000000000..9fe0a75a120d1 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/StopDataFrameTransformAction.java @@ -0,0 +1,200 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.tasks.BaseTasksRequest; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Collections; +import java.util.Objects; +import java.util.concurrent.TimeUnit; + +public class StopDataFrameTransformAction extends Action<StopDataFrameTransformAction.Response> { + + public static final StopDataFrameTransformAction INSTANCE = new StopDataFrameTransformAction(); + public static final String NAME = "cluster:admin/data_frame/stop"; + + public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + + private StopDataFrameTransformAction() { + super(NAME); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends BaseTasksRequest<Request> implements ToXContent { + private String id; + private final boolean waitForCompletion; + + public Request(String id, boolean waitForCompletion, @Nullable TimeValue timeout) { + this.id = ExceptionsHelper.requireNonNull(id, DataFrameField.ID.getPreferredName()); + this.waitForCompletion = waitForCompletion; + + // use the timeout value already present in BaseTasksRequest + this.setTimeout(timeout == null ? 
DEFAULT_TIMEOUT : timeout); + } + + public Request() { + this(null, false, null); + } + + public Request(StreamInput in) throws IOException { + super(in); + id = in.readString(); + waitForCompletion = in.readBoolean(); + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public boolean waitForCompletion() { + return waitForCompletion; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + out.writeBoolean(waitForCompletion); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(DataFrameField.ID.getPreferredName(), id); + builder.field(DataFrameField.WAIT_FOR_COMPLETION.getPreferredName(), waitForCompletion); + if (this.getTimeout() != null) { + builder.field(DataFrameField.TIMEOUT.getPreferredName(), this.getTimeout()); + } + return builder; + } + + @Override + public int hashCode() { + // the base class does not implement hashCode, therefore we need to hash timeout ourselves + return Objects.hash(id, waitForCompletion, this.getTimeout()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + + // the base class does not implement equals, therefore we need to compare timeout ourselves + if (Objects.equals(this.getTimeout(), other.getTimeout()) == false) { + return false; + } + + return Objects.equals(id, other.id) && Objects.equals(waitForCompletion, other.waitForCompletion); + } + + @Override + public boolean match(Task task) { + String expectedDescription = DataFrameField.PERSISTENT_TASK_DESCRIPTION_PREFIX + id; + + return task.getDescription().equals(expectedDescription); + } + } + + public static class RequestBuilder extends ActionRequestBuilder<Request, Response> { + + protected RequestBuilder(ElasticsearchClient client, StopDataFrameTransformAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { + + private boolean stopped; + + public Response() { + super(Collections.emptyList(), Collections.emptyList()); + } + + public Response(StreamInput in) throws IOException { + super(Collections.emptyList(), Collections.emptyList()); + readFrom(in); + } + + public Response(boolean stopped) { + super(Collections.emptyList(), Collections.emptyList()); + this.stopped = stopped; + } + + public boolean isStopped() { + return stopped; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + stopped = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(stopped); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("stopped", stopped); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + Response response = (Response) o; + return stopped == response.stopped; + } + + @Override + public int hashCode() { + return Objects.hash(stopped); + } + } +}
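Stopping honors wait_for_completion together with the BaseTasksRequest timeout; the cleanup code in the test base class exercises exactly this combination. A hedged sketch with a hypothetical transform id:

Request stop = new Request("POST", DATAFRAME_ENDPOINT + "reviews_pivot/_stop");
stop.addParameter("wait_for_completion", "true");
stop.addParameter("timeout", "10s");
client().performRequest(stop);

diff --git 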
a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java new file mode 100644 index 0000000000000..52694d00ce597 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.tasks.TransportTasksAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.discovery.MasterNotDiscoveredException; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexing.IndexerState; +import org.elasticsearch.xpack.dataframe.action.DeleteDataFrameTransformAction.Request; +import org.elasticsearch.xpack.dataframe.action.DeleteDataFrameTransformAction.Response; +import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; + +import java.util.List; + +public class TransportDeleteDataFrameTransformAction extends TransportTasksAction<DataFrameTransformTask, Request, Response, Response> { + + private final DataFrameTransformsConfigManager transformsConfigManager; + + @Inject + public TransportDeleteDataFrameTransformAction(TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, PersistentTasksService persistentTasksService, + ClusterService clusterService, DataFrameTransformsConfigManager transformsConfigManager) { + super(DeleteDataFrameTransformAction.NAME, clusterService, transportService, actionFilters, Request::new, Response::new, + Response::new, ThreadPool.Names.SAME); + this.transformsConfigManager = transformsConfigManager; + } + + @Override + protected Response newResponse(Request request, List<Response> tasks, List<TaskOperationFailure> taskOperationFailures, + List<FailedNodeException> failedNodeExceptions) { + assert tasks.size() + taskOperationFailures.size() == 1; + boolean cancelled = tasks.size() > 0 && tasks.stream().allMatch(Response::isDeleted); + + return new Response(cancelled, taskOperationFailures, failedNodeExceptions); + } + + @Override + protected void taskOperation(Request request, DataFrameTransformTask task, ActionListener<Response> listener) { + assert task.getTransformId().equals(request.getId()); + IndexerState state = task.getState().getIndexerState(); + if (state.equals(IndexerState.STOPPED)) { + 
task.onCancelled(); + transformsConfigManager.deleteTransformConfiguration(request.getId(), ActionListener.wrap(r -> { + listener.onResponse(new Response(true)); + }, listener::onFailure)); + } else { + listener.onFailure(new IllegalStateException("Could not delete transform [" + request.getId() + "] because " + + "indexer state is [" + state + "]. Transform must be [" + IndexerState.STOPPED + "] before deletion.")); + } + } + + @Override + protected void doExecute(Task task, Request request, ActionListener<Response> listener) { + final ClusterState state = clusterService.state(); + final DiscoveryNodes nodes = state.nodes(); + if (nodes.isLocalNodeElectedMaster()) { + PersistentTasksCustomMetaData pTasksMeta = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + if (pTasksMeta != null && pTasksMeta.getTask(request.getId()) != null) { + super.doExecute(task, request, listener); + } else { + // we couldn't find the transform in the persistent task CS, but maybe the transform exists in the configuration index, + // if so delete the orphaned document and do not throw (for the normal case we want to stop the task first, + // then delete the configuration document if and only if the data frame transform is in stopped state) + transformsConfigManager.deleteTransformConfiguration(request.getId(), ActionListener.wrap(r -> { + listener.onResponse(new Response(true)); + }, listener::onFailure)); + } + } else { + // Delegates DeleteTransform to elected master node, so it becomes the coordinating node. + // Non-master nodes may have a stale cluster state that shows transforms which are cancelled + // on the master, which makes testing difficult. + if (nodes.getMasterNode() == null) { + listener.onFailure(new MasterNotDiscoveredException("no known master nodes")); + } else { + transportService.sendRequest(nodes.getMasterNode(), actionName, request, + new ActionListenerResponseHandler<>(listener, Response::new)); + } + } + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsAction.java new file mode 100644 index 0000000000000..3249076cfa029 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsAction.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
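Every transport action in this change repeats the same guard in doExecute: run on the elected master, otherwise forward there, and fail fast when no master is known. Extracted as a sketch below; the helper, its name and its wiring are illustrative, not part of this change, but the calls are the same TransportService and ActionListener APIs used above.

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.ActionListenerResponseHandler;
    import org.elasticsearch.cluster.node.DiscoveryNodes;
    import org.elasticsearch.common.io.stream.Writeable;
    import org.elasticsearch.discovery.MasterNotDiscoveredException;
    import org.elasticsearch.transport.TransportRequest;
    import org.elasticsearch.transport.TransportResponse;
    import org.elasticsearch.transport.TransportService;

    // Sketch of the shared master-delegation guard (hypothetical helper).
    static <R extends TransportResponse> void executeOrForwardToMaster(DiscoveryNodes nodes,
            TransportService transportService, String actionName, TransportRequest request,
            Writeable.Reader<R> responseReader, Runnable runLocally, ActionListener<R> listener) {
        if (nodes.isLocalNodeElectedMaster()) {
            runLocally.run();                    // the action does its real work on the master
        } else if (nodes.getMasterNode() == null) {
            listener.onFailure(new MasterNotDiscoveredException("no known master nodes"));
        } else {
            // forward to the elected master; its response is streamed back to the caller
            transportService.sendRequest(nodes.getMasterNode(), actionName, request,
                    new ActionListenerResponseHandler<>(listener, responseReader));
        }
    }

Routing everything through the master keeps the decision from being made against a stale view of the persistent tasks in the cluster state, which is exactly the problem the comments above call out for testing.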
+ */ + +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.tasks.TransportTasksAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.discovery.MasterNotDiscoveredException; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.dataframe.action.GetDataFrameTransformsAction.Request; +import org.elasticsearch.xpack.dataframe.action.GetDataFrameTransformsAction.Response; +import org.elasticsearch.xpack.dataframe.persistence.DataFramePersistentTaskUtils; +import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; + +public class TransportGetDataFrameTransformsAction extends + TransportTasksAction { + + private final DataFrameTransformsConfigManager transformsConfigManager; + + @Inject + public TransportGetDataFrameTransformsAction(TransportService transportService, ActionFilters actionFilters, + ClusterService clusterService, DataFrameTransformsConfigManager transformsConfigManager) { + super(GetDataFrameTransformsAction.NAME, clusterService, transportService, actionFilters, GetDataFrameTransformsAction.Request::new, + GetDataFrameTransformsAction.Response::new, GetDataFrameTransformsAction.Response::new, ThreadPool.Names.SAME); + this.transformsConfigManager = transformsConfigManager; + } + + @Override + protected Response newResponse(Request request, List tasks, List taskOperationFailures, + List failedNodeExceptions) { + List configs = tasks.stream().map(GetDataFrameTransformsAction.Response::getTransformConfigurations) + .flatMap(Collection::stream).collect(Collectors.toList()); + return new Response(configs, taskOperationFailures, failedNodeExceptions); + } + + @Override + protected void taskOperation(Request request, DataFrameTransformTask task, ActionListener listener) { + assert task.getTransformId().equals(request.getId()) || request.getId().equals(MetaData.ALL); + // Little extra insurance, make sure we only return transforms that aren't cancelled + if (task.isCancelled() == false) { + transformsConfigManager.getTransformConfiguration(task.getTransformId(), ActionListener.wrap(config -> { + listener.onResponse(new Response(Collections.singletonList(config))); + }, e -> { + listener.onFailure(new RuntimeException("failed to retrieve...", e)); + })); + } else { + listener.onResponse(new Response(Collections.emptyList())); + } + } + + @Override + protected void doExecute(Task task, Request request, ActionListener listener) { + final ClusterState state = clusterService.state(); + final DiscoveryNodes nodes = state.nodes(); + + if (nodes.isLocalNodeElectedMaster()) { + if 
(DataFramePersistentTaskUtils.stateHasDataFrameTransforms(request.getId(), state)) { + super.doExecute(task, request, listener); + } else { + // If we couldn't find the transform in the persistent task CS, it means it was deleted prior to this GET + // and we can just send an empty response, no need to go looking for the allocated task + listener.onResponse(new Response(Collections.emptyList())); + } + + } else { + // Delegates GetTransforms to elected master node, so it becomes the coordinating node. + // Non-master nodes may have a stale cluster state that shows transforms which are cancelled + // on the master, which makes testing difficult. + if (nodes.getMasterNode() == null) { + listener.onFailure(new MasterNotDiscoveredException("no known master nodes")); + } else { + transportService.sendRequest(nodes.getMasterNode(), actionName, request, + new ActionListenerResponseHandler<>(listener, Response::new)); + } + } + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java new file mode 100644 index 0000000000000..e2e86125a6094 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.tasks.TransportTasksAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.discovery.MasterNotDiscoveredException; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.dataframe.action.GetDataFrameTransformsStatsAction.Request; +import org.elasticsearch.xpack.dataframe.action.GetDataFrameTransformsStatsAction.Response; +import org.elasticsearch.xpack.dataframe.persistence.DataFramePersistentTaskUtils; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; + +public class TransportGetDataFrameTransformsStatsAction extends + TransportTasksAction { + + @Inject + public TransportGetDataFrameTransformsStatsAction(TransportService transportService, ActionFilters actionFilters, + ClusterService clusterService) { + super(GetDataFrameTransformsStatsAction.NAME, clusterService, transportService, actionFilters, Request::new, Response::new, + Response::new, ThreadPool.Names.SAME); + } + + @Override + protected Response newResponse(Request request, List tasks, List taskOperationFailures, + 
List failedNodeExceptions) { + List responses = tasks.stream() + .map(GetDataFrameTransformsStatsAction.Response::getTransformsStateAndStats).flatMap(Collection::stream) + .collect(Collectors.toList()); + return new Response(responses, taskOperationFailures, failedNodeExceptions); + } + + @Override + protected void taskOperation(Request request, DataFrameTransformTask task, ActionListener listener) { + List transformsStateAndStats = Collections.emptyList(); + + assert task.getTransformId().equals(request.getId()) || request.getId().equals(MetaData.ALL); + + // Little extra insurance, make sure we only return transforms that aren't cancelled + if (task.isCancelled() == false) { + DataFrameTransformStateAndStats transformStateAndStats = new DataFrameTransformStateAndStats(task.getTransformId(), + task.getState(), task.getStats()); + transformsStateAndStats = Collections.singletonList(transformStateAndStats); + } + + listener.onResponse(new Response(transformsStateAndStats)); + } + + @Override + protected void doExecute(Task task, Request request, ActionListener listener) { + final ClusterState state = clusterService.state(); + final DiscoveryNodes nodes = state.nodes(); + + if (nodes.isLocalNodeElectedMaster()) { + if (DataFramePersistentTaskUtils.stateHasDataFrameTransforms(request.getId(), state)) { + super.doExecute(task, request, listener); + } else { + // If we couldn't find the transform in the persistent task CS, it means it was deleted prior to this GET + // and we can just send an empty response, no need to go looking for the allocated task + listener.onResponse(new Response(Collections.emptyList())); + } + + } else { + // Delegates GetTransforms to elected master node, so it becomes the coordinating node. + // Non-master nodes may have a stale cluster state that shows transforms which are cancelled + // on the master, which makes testing difficult. + if (nodes.getMasterNode() == null) { + listener.onFailure(new MasterNotDiscoveredException("no known master nodes")); + } else { + transportService.sendRequest(nodes.getMasterNode(), actionName, request, + new ActionListenerResponseHandler<>(listener, Response::new)); + } + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java new file mode 100644 index 0000000000000..c611fb9d3fb5a --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java @@ -0,0 +1,148 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
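From inside the cluster, the new actions are invoked through the regular client entry points. A hypothetical caller of the stats action above; the helper method, the logger and the transform id are illustrative, only the action, request and response accessors come from this change:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.client.Client;

    static void logTransformStats(Client client, String transformId) {
        Logger logger = LogManager.getLogger("transform-stats-example");   // illustrative logger
        GetDataFrameTransformsStatsAction.Request request =
                new GetDataFrameTransformsStatsAction.Request(transformId);
        client.execute(GetDataFrameTransformsStatsAction.INSTANCE, request, ActionListener.wrap(
                response -> response.getTransformsStateAndStats()
                        .forEach(stateAndStats -> logger.info("state and stats: {}", stateAndStats)),
                e -> logger.warn("failed to fetch stats for [" + transformId + "]", e)));
    }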
+ */ + +package org.elasticsearch.xpack.dataframe.action; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; +import org.elasticsearch.xpack.dataframe.action.PutDataFrameTransformAction.Request; +import org.elasticsearch.xpack.dataframe.action.PutDataFrameTransformAction.Response; +import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; +import org.elasticsearch.xpack.dataframe.persistence.DataframeIndex; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransform; +import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; + +public class TransportPutDataFrameTransformAction + extends TransportMasterNodeAction { + + private static final Logger logger = LogManager.getLogger(TransportPutDataFrameTransformAction.class); + + private final XPackLicenseState licenseState; + private final PersistentTasksService persistentTasksService; + private final Client client; + private final DataFrameTransformsConfigManager dataFrameTransformsConfigManager; + + @Inject + public TransportPutDataFrameTransformAction(TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, XPackLicenseState licenseState, + PersistentTasksService persistentTasksService, DataFrameTransformsConfigManager dataFrameTransformsConfigManager, + Client client) { + super(PutDataFrameTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + PutDataFrameTransformAction.Request::new); + this.licenseState = licenseState; + this.persistentTasksService = persistentTasksService; + this.client = client; + this.dataFrameTransformsConfigManager = dataFrameTransformsConfigManager; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected PutDataFrameTransformAction.Response newResponse() { + return new PutDataFrameTransformAction.Response(); + } + + @Override + protected void masterOperation(Request request, ClusterState clusterState, ActionListener listener) throws Exception { + + if (!licenseState.isDataFrameAllowed()) { + listener.onFailure(LicenseUtils.newComplianceException(XPackField.DATA_FRAME)); + return; + } + + 
XPackPlugin.checkReadyForXPackCustomMetadata(clusterState); + + String transformId = request.getConfig().getId(); + // quick check whether a transform has already been created under that name + if (PersistentTasksCustomMetaData.getTaskWithId(clusterState, transformId) != null) { + listener.onFailure(new ResourceAlreadyExistsException( + DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_TRANSFORM_EXISTS, transformId))); + return; + } + + // create the transform, for now we only have pivot and no support for custom queries + Pivot pivot = new Pivot(request.getConfig().getSource(), new MatchAllQueryBuilder(), request.getConfig().getPivotConfig()); + + // the non-state creating steps are done first, so we minimize the chance to end up with orphaned state transform validation + pivot.validate(client, ActionListener.wrap(validationResult -> { + // deduce target mappings + pivot.deduceMappings(client, ActionListener.wrap(mappings -> { + // create the destination index + DataframeIndex.createDestinationIndex(client, request.getConfig(), mappings, ActionListener.wrap(createIndexResult -> { + DataFrameTransform transform = createDataFrameTransform(transformId, threadPool); + // create the transform configuration and store it in the internal index + dataFrameTransformsConfigManager.putTransformConfiguration(request.getConfig(), ActionListener.wrap(r -> { + // finally start the persistent task + persistentTasksService.sendStartRequest(transform.getId(), DataFrameTransform.NAME, transform, + ActionListener.wrap(persistentTask -> { + listener.onResponse(new PutDataFrameTransformAction.Response(true)); + }, startPersistentTaskException -> { + // delete the otherwise orphaned transform configuration, for now we do not delete the destination index + dataFrameTransformsConfigManager.deleteTransformConfiguration(transformId, ActionListener.wrap(r2 -> { + logger.debug("Deleted data frame transform [{}] configuration from data frame configuration index", + transformId); + listener.onFailure( + new RuntimeException( + DataFrameMessages.getMessage( + DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_START_PERSISTENT_TASK, r2), + startPersistentTaskException)); + }, deleteTransformFromIndexException -> { + logger.error("Failed to cleanup orphaned data frame transform [{}] configuration", transformId); + listener.onFailure( + new RuntimeException( + DataFrameMessages.getMessage( + DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_START_PERSISTENT_TASK, false), + startPersistentTaskException)); + })); + })); + }, listener::onFailure)); + }, createDestinationIndexException -> { + listener.onFailure(new RuntimeException(DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_CREATE_TARGET_INDEX, + createDestinationIndexException)); + })); + }, deduceTargetMappingsException -> { + listener.onFailure(new RuntimeException(DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_DEDUCE_TARGET_MAPPINGS, + deduceTargetMappingsException)); + })); + }, validationException -> { + listener.onFailure(new RuntimeException(DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_VALIDATE_DATA_FRAME_CONFIGURATION, + validationException)); + })); + } + + private static DataFrameTransform createDataFrameTransform(String transformId, ThreadPool threadPool) { + return new DataFrameTransform(transformId); + } + + @Override + protected ClusterBlockException checkBlock(PutDataFrameTransformAction.Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git 
a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java new file mode 100644 index 0000000000000..199c8bf7ffdaa --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.tasks.TransportTasksAction; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; + +import java.util.List; +import java.util.function.Consumer; + +public class TransportStartDataFrameTransformAction extends + TransportTasksAction { + + private final XPackLicenseState licenseState; + + @Inject + public TransportStartDataFrameTransformAction(TransportService transportService, ActionFilters actionFilters, + ClusterService clusterService, XPackLicenseState licenseState) { + super(StartDataFrameTransformAction.NAME, clusterService, transportService, actionFilters, + StartDataFrameTransformAction.Request::new, StartDataFrameTransformAction.Response::new, + StartDataFrameTransformAction.Response::new, ThreadPool.Names.SAME); + this.licenseState = licenseState; + } + + @Override + protected void processTasks(StartDataFrameTransformAction.Request request, Consumer operation) { + DataFrameTransformTask matchingTask = null; + + // todo: re-factor, see rollup TransportTaskHelper + for (Task task : taskManager.getTasks().values()) { + if (task instanceof DataFrameTransformTask + && ((DataFrameTransformTask) task).getTransformId().equals(request.getId())) { + if (matchingTask != null) { + throw new IllegalArgumentException("Found more than one matching task for data frame transform [" + request.getId() + + "] when " + "there should only be one."); + } + matchingTask = (DataFrameTransformTask) task; + } + } + + if (matchingTask != null) { + operation.accept(matchingTask); + } + } + + @Override + protected void doExecute(Task task, StartDataFrameTransformAction.Request request, + ActionListener listener) { + + if (!licenseState.isDataFrameAllowed()) { + listener.onFailure(LicenseUtils.newComplianceException(XPackField.DATA_FRAME)); + return; + } + + super.doExecute(task, request, listener); + } + + @Override + protected void taskOperation(StartDataFrameTransformAction.Request request, DataFrameTransformTask transformTask, + ActionListener listener) { + if (transformTask.getTransformId().equals(request.getId())) { + 
transformTask.start(listener); + } else { + listener.onFailure(new RuntimeException("ID of data frame transform task [" + transformTask.getTransformId() + + "] does not match request's ID [" + request.getId() + "]")); + } + } + + @Override + protected StartDataFrameTransformAction.Response newResponse(StartDataFrameTransformAction.Request request, + List tasks, List taskOperationFailures, + List failedNodeExceptions) { + + if (taskOperationFailures.isEmpty() == false) { + throw org.elasticsearch.ExceptionsHelper.convertToElastic(taskOperationFailures.get(0).getCause()); + } else if (failedNodeExceptions.isEmpty() == false) { + throw org.elasticsearch.ExceptionsHelper.convertToElastic(failedNodeExceptions.get(0)); + } + + // Either the transform doesn't exist (the user didn't create it yet) or was deleted + // after the StartAPI executed. + // In either case, let the user know + if (tasks.size() == 0) { + throw new ResourceNotFoundException("Task for data frame transform [" + request.getId() + "] not found"); + } + + assert tasks.size() == 1; + + boolean allStarted = tasks.stream().allMatch(StartDataFrameTransformAction.Response::isStarted); + return new StartDataFrameTransformAction.Response(allStarted); + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java new file mode 100644 index 0000000000000..e35a3aec1521d --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
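The compensation step buried in TransportPutDataFrameTransformAction#masterOperation earlier in this change is worth seeing in isolation: if starting the persistent task fails after the configuration document was stored, the document is deleted again before the original failure is reported. A condensed sketch, using the identifiers from that method but with plain listener calls in place of the message building:

    persistentTasksService.sendStartRequest(transform.getId(), DataFrameTransform.NAME, transform,
            ActionListener.wrap(
                    persistentTask -> listener.onResponse(new PutDataFrameTransformAction.Response(true)),
                    startFailure -> dataFrameTransformsConfigManager.deleteTransformConfiguration(transformId,
                            ActionListener.wrap(
                                    deleted -> listener.onFailure(startFailure),          // cleaned up; report the original error
                                    cleanupFailure -> listener.onFailure(startFailure))) // cleanup failed; still report the original error
            ));

The destination index is deliberately left in place on failure; only the configuration document is rolled back.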
+ */ +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.tasks.TransportTasksAction; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; + +import java.util.List; + +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; + +public class TransportStopDataFrameTransformAction extends + TransportTasksAction { + + private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100); + private final ThreadPool threadPool; + + @Inject + public TransportStopDataFrameTransformAction(TransportService transportService, ActionFilters actionFilters, + ClusterService clusterService, ThreadPool threadPool) { + super(StopDataFrameTransformAction.NAME, clusterService, transportService, actionFilters, StopDataFrameTransformAction.Request::new, + StopDataFrameTransformAction.Response::new, StopDataFrameTransformAction.Response::new, ThreadPool.Names.SAME); + this.threadPool = threadPool; + } + + @Override + protected void doExecute(Task task, StopDataFrameTransformAction.Request request, + ActionListener listener) { + super.doExecute(task, request, listener); + } + + @Override + protected void taskOperation(StopDataFrameTransformAction.Request request, DataFrameTransformTask transformTask, + ActionListener listener) { + if (transformTask.getTransformId().equals(request.getId())) { + if (request.waitForCompletion() == false) { + transformTask.stop(listener); + } else { + ActionListener blockingListener = ActionListener.wrap(response -> { + if (response.isStopped()) { + // The Task acknowledged that it is stopped/stopping... wait until the status actually + // changes over before returning. 
Switch over to Generic threadpool so + // we don't block the network thread + threadPool.generic().execute(() -> { + try { + long untilInNanos = System.nanoTime() + request.getTimeout().getNanos(); + + while (System.nanoTime() - untilInNanos < 0) { + if (transformTask.isStopped()) { + listener.onResponse(response); + return; + } + Thread.sleep(WAIT_FOR_COMPLETION_POLL.millis()); + } + // ran out of time + listener.onFailure(new ElasticsearchTimeoutException( + DataFrameMessages.getMessage(DataFrameMessages.REST_STOP_TRANSFORM_WAIT_FOR_COMPLETION_TIMEOUT, + request.getTimeout().getStringRep(), request.getId()))); + } catch (InterruptedException e) { + listener.onFailure(new ElasticsearchException(DataFrameMessages.getMessage( + DataFrameMessages.REST_STOP_TRANSFORM_WAIT_FOR_COMPLETION_INTERRUPT, request.getId()), e)); + } + }); + } else { + // Did not acknowledge stop, just return the response + listener.onResponse(response); + } + }, listener::onFailure); + + transformTask.stop(blockingListener); + } + } else { + listener.onFailure(new RuntimeException("ID of data frame indexer task [" + transformTask.getTransformId() + + "] does not match request's ID [" + request.getId() + "]")); + } + } + + @Override + protected StopDataFrameTransformAction.Response newResponse(StopDataFrameTransformAction.Request request, + List tasks, List taskOperationFailures, + List failedNodeExceptions) { + + if (taskOperationFailures.isEmpty() == false) { + throw ExceptionsHelper.convertToElastic(taskOperationFailures.get(0).getCause()); + } else if (failedNodeExceptions.isEmpty() == false) { + throw ExceptionsHelper.convertToElastic(failedNodeExceptions.get(0)); + } + + // Either the transform doesn't exist (the user didn't create it yet) or was deleted + // after the Stop API executed. + // In either case, let the user know + if (tasks.size() == 0) { + throw new ResourceNotFoundException("Task for Data Frame transform [" + request.getId() + "] not found"); + } + + assert tasks.size() == 1; + + boolean allStopped = tasks.stream().allMatch(StopDataFrameTransformAction.Response::isStopped); + return new StopDataFrameTransformAction.Response(allStopped); + } +} \ No newline at end of file diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java new file mode 100644 index 0000000000000..d9e0471e0a3fc --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
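The deadline test in the wait-for-completion loop above follows the System.nanoTime() contract: nanoTime values are only meaningful as differences, so the loop compares the elapsed difference against zero instead of comparing absolute values. The same idiom as a self-contained helper (a sketch, not part of this change):

    static boolean pollUntil(java.util.function.BooleanSupplier condition, long timeoutNanos, long pollMillis)
            throws InterruptedException {
        long deadline = System.nanoTime() + timeoutNanos;
        while (System.nanoTime() - deadline < 0) {   // overflow-safe: compare the difference, not the values
            if (condition.getAsBoolean()) {
                return true;                         // condition met before the deadline
            }
            Thread.sleep(pollMillis);                // plays the role of WAIT_FOR_COMPLETION_POLL above
        }
        return false;                                // deadline passed without the condition turning true
    }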
+ */ + +package org.elasticsearch.xpack.dataframe.persistence; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; + +import java.io.IOException; +import java.util.Collections; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; + +public final class DataFrameInternalIndex { + + // constants for the index + public static final String INDEX_TEMPLATE_VERSION = "1"; + public static final String INDEX_TEMPLATE_PATTERN = ".data-frame-internal-"; + public static final String INDEX_TEMPLATE_NAME = INDEX_TEMPLATE_PATTERN + INDEX_TEMPLATE_VERSION; + public static final String INDEX_NAME = INDEX_TEMPLATE_NAME; + + // constants for mappings + public static final String ENABLED = "enabled"; + public static final String DYNAMIC = "dynamic"; + public static final String PROPERTIES = "properties"; + public static final String TYPE = "type"; + + // data types + public static final String DOUBLE = "double"; + public static final String KEYWORD = "keyword"; + + // internal document types, e.g. "transform_config" + public static final String DOC_TYPE = "doc_type"; + + public static IndexTemplateMetaData getIndexTemplateMetaData() throws IOException { + IndexTemplateMetaData dataFrameTemplate = IndexTemplateMetaData.builder(INDEX_TEMPLATE_NAME) + .patterns(Collections.singletonList(INDEX_TEMPLATE_NAME)) + .version(Version.CURRENT.id) + .settings(Settings.builder() + // the configurations are expected to be small + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "0-1")) + // todo: remove type + .putMapping(MapperService.SINGLE_MAPPING_NAME, Strings.toString(mappings())) + .build(); + return dataFrameTemplate; + } + + private static XContentBuilder mappings() throws IOException { + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + + builder.startObject(MapperService.SINGLE_MAPPING_NAME); + addMetaInformation(builder); + + // no need to analyze anything, we use the config index as key value store, revisit if we decide to search on it + builder.field(ENABLED, false); + // do not allow anything outside of the defined schema + builder.field(DYNAMIC, "strict"); + // the schema definitions + builder.startObject(PROPERTIES); + // overall doc type + builder.startObject(DOC_TYPE).field(TYPE, KEYWORD).endObject(); + // add the schema for transform configurations + addDataFrameTransformsConfigMappings(builder); + + // end type + builder.endObject(); + // end properties + builder.endObject(); + // end mapping + builder.endObject(); + return builder; + } + + private static XContentBuilder addDataFrameTransformsConfigMappings(XContentBuilder builder) throws IOException { + return builder + .startObject(DataFrameField.ID.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject(); + } + + /** + * Inserts "_meta" containing useful information like the version into the mapping + * template. 
+ * + * @param builder The builder for the mappings + * @throws IOException On write error + */ + private static XContentBuilder addMetaInformation(XContentBuilder builder) throws IOException { + return builder.startObject("_meta") + .field("version", Version.CURRENT) + .endObject(); + } + + private DataFrameInternalIndex() { + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFramePersistentTaskUtils.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFramePersistentTaskUtils.java new file mode 100644 index 0000000000000..76e635df0d8fd --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFramePersistentTaskUtils.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.persistence; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; + +public final class DataFramePersistentTaskUtils { + + private DataFramePersistentTaskUtils() { + } + + /** + * Check to see if the PersistentTask's cluster state contains the data frame transform(s) we + * are interested in + */ + public static boolean stateHasDataFrameTransforms(String id, ClusterState state) { + boolean hasTransforms = false; + PersistentTasksCustomMetaData pTasksMeta = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + + if (pTasksMeta != null) { + // If the request was for _all transforms, we need to look through the list of + // persistent tasks and see if at least one is a data frame task + if (id.equals(MetaData.ALL)) { + hasTransforms = pTasksMeta.tasks().stream() + .anyMatch(persistentTask -> persistentTask.getTaskName().equals(DataFrameField.TASK_NAME)); + + } else if (pTasksMeta.getTask(id) != null) { + // If we're looking for a single transform, we can just check directly + hasTransforms = true; + } + } + return hasTransforms; + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java new file mode 100644 index 0000000000000..2293d2b6319ab --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java @@ -0,0 +1,151 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
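Rendered to JSON, the template mapping assembled by mappings() above comes out roughly as follows. This is illustrative only: the version value is filled in at runtime, and "id" assumes that is the preferred name of DataFrameField.ID.

    {
      "_doc": {
        "_meta":   { "version": "<current version>" },
        "enabled": false,
        "dynamic": "strict",
        "properties": {
          "doc_type": { "type": "keyword" },
          "id":       { "type": "keyword" }
        }
      }
    }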
+ */ + +package org.elasticsearch.xpack.dataframe.persistence; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.delete.DeleteAction; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.get.GetAction; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.index.IndexAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformConfig; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.core.ClientHelper.DATA_FRAME_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; + +public class DataFrameTransformsConfigManager { + + private static final Logger logger = LogManager.getLogger(DataFrameTransformsConfigManager.class); + + public static final Map<String, String> TO_XCONTENT_PARAMS; + static { + Map<String, String> modifiable = new HashMap<>(); + modifiable.put("for_internal_storage", "true"); + TO_XCONTENT_PARAMS = Collections.unmodifiableMap(modifiable); + } + + private final Client client; + private final NamedXContentRegistry xContentRegistry; + + public DataFrameTransformsConfigManager(Client client, NamedXContentRegistry xContentRegistry) { + this.client = client; + this.xContentRegistry = xContentRegistry; + } + + public void putTransformConfiguration(DataFrameTransformConfig transformConfig, ActionListener<Boolean> listener) { + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + XContentBuilder source = transformConfig.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); + + IndexRequest indexRequest = new IndexRequest(DataFrameInternalIndex.INDEX_NAME) + .opType(DocWriteRequest.OpType.CREATE) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .id(DataFrameTransformConfig.documentId(transformConfig.getId())) + .source(source); + + executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, IndexAction.INSTANCE, indexRequest, ActionListener.wrap(r -> { + listener.onResponse(true); + }, e -> { + if (e instanceof VersionConflictEngineException) { + // the transform already exists + listener.onFailure(new ResourceAlreadyExistsException( + DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_TRANSFORM_EXISTS, + transformConfig.getId()))); + } else { +
listener.onFailure( + new RuntimeException(DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_PERSIST_TRANSFORM_CONFIGURATION, e)); + } + })); + } catch (IOException e) { + // not expected to happen but for the sake of completeness + listener.onFailure(new ElasticsearchParseException( + DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_FAILED_TO_SERIALIZE_TRANSFORM, transformConfig.getId()), + e)); + } + } + + public void getTransformConfiguration(String transformId, ActionListener resultListener) { + GetRequest getRequest = new GetRequest(DataFrameInternalIndex.INDEX_NAME, DataFrameTransformConfig.documentId(transformId)); + executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, GetAction.INSTANCE, getRequest, ActionListener.wrap(getResponse -> { + + if (getResponse.isExists() == false) { + resultListener.onFailure(new ResourceNotFoundException( + DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, transformId))); + return; + } + BytesReference source = getResponse.getSourceAsBytesRef(); + parseTransformLenientlyFromSource(source, transformId, resultListener); + }, e -> { + if (e.getClass() == IndexNotFoundException.class) { + resultListener.onFailure(new ResourceNotFoundException( + DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, transformId))); + } else { + resultListener.onFailure(e); + } + })); + } + + public void deleteTransformConfiguration(String transformId, ActionListener listener) { + DeleteRequest request = new DeleteRequest(DataFrameInternalIndex.INDEX_NAME, DataFrameTransformConfig.documentId(transformId)); + request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + + executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, DeleteAction.INSTANCE, request, ActionListener.wrap(deleteResponse -> { + + if (deleteResponse.getResult() == DocWriteResponse.Result.NOT_FOUND) { + listener.onFailure(new ResourceNotFoundException( + DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, transformId))); + return; + } + listener.onResponse(true); + }, e -> { + if (e.getClass() == IndexNotFoundException.class) { + listener.onFailure(new ResourceNotFoundException( + DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, transformId))); + } else { + listener.onFailure(e); + } + })); + } + + private void parseTransformLenientlyFromSource(BytesReference source, String transformId, + ActionListener transformListener) { + try (InputStream stream = source.streamInput(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream)) { + transformListener.onResponse(DataFrameTransformConfig.fromXContent(parser, transformId, true)); + } catch (Exception e) { + logger.error(DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_PARSE_TRANSFORM_CONFIGURATION, transformId), e); + transformListener.onFailure(e); + } + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java new file mode 100644 index 0000000000000..6605269475c79 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.persistence; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.create.CreateIndexAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformConfig; + +import java.io.IOException; +import java.util.Map; +import java.util.Map.Entry; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; + +public final class DataframeIndex { + private static final Logger logger = LogManager.getLogger(DataframeIndex.class); + + public static final String DOC_TYPE = "_doc"; + private static final String PROPERTIES = "properties"; + private static final String TYPE = "type"; + private static final String META = "_meta"; + + private DataframeIndex() { + } + + public static void createDestinationIndex(Client client, DataFrameTransformConfig transformConfig, Map<String, String> mappings, + final ActionListener<Boolean> listener) { + CreateIndexRequest request = new CreateIndexRequest(transformConfig.getDestination()); + + // TODO: revisit number of shards, number of replicas + request.settings(Settings.builder() + .put("index.number_of_shards", 1).put("index.number_of_replicas", 0)); + + request.mapping(DOC_TYPE, createMappingXContent(mappings, transformConfig.getId())); + + client.execute(CreateIndexAction.INSTANCE, request, ActionListener.wrap(createIndexResponse -> { + listener.onResponse(true); + }, e -> { + String message = DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_CREATE_DESTINATION_INDEX, + transformConfig.getDestination(), transformConfig.getId()); + logger.error(message); + listener.onFailure(new RuntimeException(message, e)); + })); + } + + private static XContentBuilder createMappingXContent(Map<String, String> mappings, String id) { + try { + XContentBuilder builder = jsonBuilder().startObject(); + builder.startObject(DOC_TYPE); + addMetaData(builder, id); + builder.startObject(PROPERTIES); + for (Entry<String, String> field : mappings.entrySet()) { + builder.startObject(field.getKey()).field(TYPE, field.getValue()).endObject(); + } + builder.endObject(); // properties + builder.endObject(); // doc_type + return builder.endObject(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private static XContentBuilder addMetaData(XContentBuilder builder, String id) throws IOException { + builder.startObject(META); + builder.field(DataFrameField.CREATED_BY, DataFrameField.DATA_FRAME_SIGNATURE); + builder.startObject(DataFrameField.META_FIELDNAME); + builder.field(DataFrameField.CREATION_DATE_MILLIS, System.currentTimeMillis()); + builder.startObject(DataFrameField.VERSION); + builder.field(DataFrameField.CREATED, Version.CURRENT); + builder.endObject(); + builder.field(DataFrameField.TRANSFORM, id); + builder.endObject(); // META_FIELDNAME + builder.endObject(); // META + return builder; + } +} diff --git
a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java new file mode 100644 index 0000000000000..bd3917af9a7ce --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.dataframe.rest.action; + + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.dataframe.action.DeleteDataFrameTransformAction; + +import java.io.IOException; + +public class RestDeleteDataFrameTransformAction extends BaseRestHandler { + + public RestDeleteDataFrameTransformAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.DELETE, DataFrameField.REST_BASE_PATH_TRANSFORMS_BY_ID, this); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String id = restRequest.param(DataFrameField.ID.getPreferredName()); + DeleteDataFrameTransformAction.Request request = new DeleteDataFrameTransformAction.Request(id); + + return channel -> client.execute(DeleteDataFrameTransformAction.INSTANCE, request, + new RestToXContentListener(channel) { + @Override + protected RestStatus getStatus(DeleteDataFrameTransformAction.Response response) { + if (response.getNodeFailures().size() > 0 || response.getTaskFailures().size() > 0) { + return RestStatus.INTERNAL_SERVER_ERROR; + } + return RestStatus.OK; + } + }); + } + + @Override + public String getName() { + return "data_frame_delete_transform_action"; + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsAction.java new file mode 100644 index 0000000000000..1d35f7212108f --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsAction.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
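A hypothetical round trip through the DataFrameTransformsConfigManager above shows how its listener-based methods chain. The helper, the client, the xContentRegistry, the config instance and the logger are placeholders, not part of this change:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.xcontent.NamedXContentRegistry;

    static void roundTrip(Client client, NamedXContentRegistry xContentRegistry, DataFrameTransformConfig config) {
        Logger logger = LogManager.getLogger("config-manager-example");
        DataFrameTransformsConfigManager configManager = new DataFrameTransformsConfigManager(client, xContentRegistry);
        configManager.putTransformConfiguration(config, ActionListener.wrap(
                stored -> configManager.getTransformConfiguration(config.getId(), ActionListener.wrap(
                        fetched -> configManager.deleteTransformConfiguration(fetched.getId(), ActionListener.wrap(
                                deleted -> logger.info("stored, fetched and deleted transform [{}]", fetched.getId()),
                                e -> logger.warn("delete failed", e))),
                        e -> logger.warn("get failed", e))),
                e -> logger.warn("put failed", e)));
    }

A failed get or delete surfaces the ResourceNotFoundException that the manager maps from a missing document or a missing index, as implemented above.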
+ */ + +package org.elasticsearch.xpack.dataframe.rest.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.dataframe.action.GetDataFrameTransformsAction; + +public class RestGetDataFrameTransformsAction extends BaseRestHandler { + + public RestGetDataFrameTransformsAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.GET, DataFrameField.REST_BASE_PATH_TRANSFORMS_BY_ID, this); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + String id = restRequest.param(DataFrameField.ID.getPreferredName()); + GetDataFrameTransformsAction.Request request = new GetDataFrameTransformsAction.Request(id); + return channel -> client.execute(GetDataFrameTransformsAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } + + @Override + public String getName() { + return "data_frame_get_transforms_action"; + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsStatsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsStatsAction.java new file mode 100644 index 0000000000000..6ae2c16166704 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsStatsAction.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.dataframe.rest.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.dataframe.action.GetDataFrameTransformsStatsAction; + +public class RestGetDataFrameTransformsStatsAction extends BaseRestHandler { + + public RestGetDataFrameTransformsStatsAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.GET, DataFrameField.REST_BASE_PATH_TRANSFORMS_BY_ID + "_stats", this); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + String id = restRequest.param(DataFrameField.ID.getPreferredName()); + GetDataFrameTransformsStatsAction.Request request = new GetDataFrameTransformsStatsAction.Request(id); + return channel -> client.execute(GetDataFrameTransformsStatsAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } + + @Override + public String getName() { + return "data_frame_get_transforms_stats_action"; + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestPutDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestPutDataFrameTransformAction.java new file mode 100644 index 0000000000000..a4a3222cfd6b5 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestPutDataFrameTransformAction.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.dataframe.rest.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.dataframe.action.PutDataFrameTransformAction; + +import java.io.IOException; + +public class RestPutDataFrameTransformAction extends BaseRestHandler { + + public RestPutDataFrameTransformAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.PUT, DataFrameField.REST_BASE_PATH_TRANSFORMS_BY_ID, this); + } + + @Override + public String getName() { + return "data_frame_put_transform_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String id = restRequest.param(DataFrameField.ID.getPreferredName()); + XContentParser parser = restRequest.contentParser(); + + PutDataFrameTransformAction.Request request = PutDataFrameTransformAction.Request.fromXContent(parser, id); + + return channel -> client.execute(PutDataFrameTransformAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStartDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStartDataFrameTransformAction.java new file mode 100644 index 0000000000000..c889e75bf8363 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStartDataFrameTransformAction.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.dataframe.rest.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.dataframe.action.StartDataFrameTransformAction; + +import java.io.IOException; + +public class RestStartDataFrameTransformAction extends BaseRestHandler { + + public RestStartDataFrameTransformAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.POST, DataFrameField.REST_BASE_PATH_TRANSFORMS_BY_ID + "_start", this); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String id = restRequest.param(DataFrameField.ID.getPreferredName()); + StartDataFrameTransformAction.Request request = new StartDataFrameTransformAction.Request(id); + + return channel -> client.execute(StartDataFrameTransformAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } + + @Override + public String getName() { + return "data_frame_start_transform_action"; + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java new file mode 100644 index 0000000000000..510b40c31806d --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+}
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java
new file mode 100644
index 0000000000000..510b40c31806d
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.dataframe.rest.action;
+
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.action.RestToXContentListener;
+import org.elasticsearch.xpack.core.dataframe.DataFrameField;
+import org.elasticsearch.xpack.dataframe.action.StopDataFrameTransformAction;
+
+import java.io.IOException;
+
+public class RestStopDataFrameTransformAction extends BaseRestHandler {
+
+    public RestStopDataFrameTransformAction(Settings settings, RestController controller) {
+        super(settings);
+        controller.registerHandler(RestRequest.Method.POST, DataFrameField.REST_BASE_PATH_TRANSFORMS_BY_ID + "_stop", this);
+    }
+
+    @Override
+    protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
+        String id = restRequest.param(DataFrameField.ID.getPreferredName());
+        TimeValue timeout = restRequest.paramAsTime(DataFrameField.TIMEOUT.getPreferredName(),
+                StopDataFrameTransformAction.DEFAULT_TIMEOUT);
+        boolean waitForCompletion = restRequest.paramAsBoolean(DataFrameField.WAIT_FOR_COMPLETION.getPreferredName(), false);
+
+        StopDataFrameTransformAction.Request request = new StopDataFrameTransformAction.Request(id, waitForCompletion, timeout);
+
+        return channel -> client.execute(StopDataFrameTransformAction.INSTANCE, request, new RestToXContentListener<>(channel));
+    }
+
+    @Override
+    public String getName() {
+        return "data_frame_stop_transform_action";
+    }
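+
+    // Illustrative example only (not part of this change): assuming the base path resolves to
+    // "_data_frame/transforms/{id}" and the preferred parameter names are "wait_for_completion"
+    // and "timeout", a stop request could look like
+    //   POST _data_frame/transforms/my_transform/_stop?wait_for_completion=true&timeout=30s
+    // Both parameters are optional: wait_for_completion defaults to false and timeout to
+    // StopDataFrameTransformAction.DEFAULT_TIMEOUT.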
+}
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java
new file mode 100644
index 0000000000000..771e513f05047
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation;
+import org.elasticsearch.xpack.core.dataframe.transform.DataFrameIndexerTransformStats;
+import org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer;
+import org.elasticsearch.xpack.core.indexing.IndexerState;
+import org.elasticsearch.xpack.core.indexing.IterationResult;
+import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.Map;
+import java.util.concurrent.Executor;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+
+public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer<Map<String, Object>, DataFrameIndexerTransformStats> {
+
+    private static final String COMPOSITE_AGGREGATION_NAME = "_data_frame";
+    private static final Logger logger = LogManager.getLogger(DataFrameIndexer.class);
+
+    private Pivot pivot;
+
+    public DataFrameIndexer(Executor executor, AtomicReference<IndexerState> initialState, Map<String, Object> initialPosition) {
+        super(executor, initialState, initialPosition, new DataFrameIndexerTransformStats());
+    }
+
+    protected abstract DataFrameTransformConfig getConfig();
+
+    @Override
+    protected void onStartJob(long now) {
+        QueryBuilder queryBuilder = getConfig().getQueryConfig().getQuery();
+
+        pivot = new Pivot(getConfig().getSource(), queryBuilder, getConfig().getPivotConfig());
+    }
+
+    @Override
+    protected IterationResult<Map<String, Object>> doProcess(SearchResponse searchResponse) {
+        final CompositeAggregation agg = searchResponse.getAggregations().get(COMPOSITE_AGGREGATION_NAME);
+        return new IterationResult<>(processBucketsToIndexRequests(agg).collect(Collectors.toList()), agg.afterKey(),
+                agg.getBuckets().isEmpty());
+    }
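+
+    // Illustrative walk-through (assumed example, not part of this change): a composite
+    // aggregation bucket in the search response, e.g.
+    //   { "key": { "user": "alice" }, "doc_count": 3, "avg_price": { "value": 9.99 } }
+    // is flattened by the pivot into the map { user=alice, avg_price=9.99 } and turned into
+    // an IndexRequest against the destination index by processBucketsToIndexRequests() below,
+    // while agg.afterKey() becomes the position used to page the next search request.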
+
+    /*
+     * Parses the result and creates a stream of indexable documents
+     *
+     * Implementation decisions:
+     *
+     * Extraction uses generic maps as intermediate exchange format in order to hook in ingest pipelines/processors
+     * in later versions, see {@link IngestDocument}.
+     */
+    private Stream<IndexRequest> processBucketsToIndexRequests(CompositeAggregation agg) {
+        final DataFrameTransformConfig transformConfig = getConfig();
+        String indexName = transformConfig.getDestination();
+
+        return pivot.extractResults(agg, getStats()).map(document -> {
+            XContentBuilder builder;
+            try {
+                builder = jsonBuilder();
+                builder.map(document);
+            } catch (IOException e) {
+                throw new UncheckedIOException(e);
+            }
+
+            IndexRequest request = new IndexRequest(indexName).source(builder);
+            return request;
+        });
+    }
+
+    @Override
+    protected SearchRequest buildSearchRequest() {
+        return pivot.buildSearchRequest(getPosition());
+    }
+}
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransform.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransform.java
new file mode 100644
index 0000000000000..d2a9e324584eb
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransform.java
@@ -0,0 +1,99 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.AbstractDiffable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.xpack.core.XPackPlugin;
+import org.elasticsearch.xpack.core.dataframe.DataFrameField;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Objects;
+
+public class DataFrameTransform extends AbstractDiffable<DataFrameTransform> implements XPackPlugin.XPackPersistentTaskParams {
+
+    public static final String NAME = DataFrameField.TASK_NAME;
+
+    private final String transformId;
+
+    public static final ConstructingObjectParser<DataFrameTransform, Void> PARSER = new ConstructingObjectParser<>(NAME,
+            a -> new DataFrameTransform((String) a[0]));
+
+    static {
+        PARSER.declareString(ConstructingObjectParser.constructorArg(), DataFrameField.ID);
+    }
+
+    public DataFrameTransform(String transformId) {
+        this.transformId = transformId;
+    }
+
+    public DataFrameTransform(StreamInput in) throws IOException {
+        this.transformId = in.readString();
+    }
+
+    @Override
+    public String getWriteableName() {
+        return NAME;
+    }
+
+    @Override
+    public Version getMinimalSupportedVersion() {
+        return Version.V_7_1_0;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeString(transformId);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(DataFrameField.ID.getPreferredName(), transformId);
+        builder.endObject();
+        return builder;
+    }
+
+    public String getId() {
+        return transformId;
+    }
+
+    public static DataFrameTransform fromXContent(XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (this == other) {
+            return true;
+        }
+
+        if (other == null || getClass() != other.getClass()) {
+            return false;
+        }
+
DataFrameTransform that = (DataFrameTransform) other; + + return Objects.equals(this.transformId, that.transformId); + } + + @Override + public int hashCode() { + return Objects.hash(transformId); + } + + public Map getHeaders() { + return Collections.emptyMap(); + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformConfig.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformConfig.java new file mode 100644 index 0000000000000..b5bd22f3a5e62 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformConfig.java @@ -0,0 +1,210 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.transforms; + +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.dataframe.transforms.pivot.PivotConfig; + +import java.io.IOException; +import java.util.Collections; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * This class holds the configuration details of a data frame transform + */ +public class DataFrameTransformConfig extends AbstractDiffable implements Writeable, ToXContentObject { + + private static final String NAME = "data_frame_transforms"; + private static final ParseField SOURCE = new ParseField("source"); + private static final ParseField DESTINATION = new ParseField("dest"); + private static final ParseField QUERY = new ParseField("query"); + + // types of transforms + private static final ParseField PIVOT_TRANSFORM = new ParseField("pivot"); + + private static final ConstructingObjectParser STRICT_PARSER = createParser(false); + private static final ConstructingObjectParser LENIENT_PARSER = createParser(true); + + private final String id; + private final String source; + private final String dest; + + private final QueryConfig queryConfig; + private final PivotConfig pivotConfig; + + private static ConstructingObjectParser createParser(boolean lenient) { + ConstructingObjectParser parser = new ConstructingObjectParser<>(NAME, lenient, + (args, optionalId) -> { + String id = args[0] != null ? 
(String) args[0] : optionalId; + String source = (String) args[1]; + String dest = (String) args[2]; + + // default handling: if the user does not specify a query, we default to match_all + QueryConfig queryConfig = null; + if (args[3] == null) { + queryConfig = new QueryConfig(Collections.singletonMap(MatchAllQueryBuilder.NAME, Collections.emptyMap()), + new MatchAllQueryBuilder()); + } else { + queryConfig = (QueryConfig) args[3]; + } + + PivotConfig pivotConfig = (PivotConfig) args[4]; + return new DataFrameTransformConfig(id, source, dest, queryConfig, pivotConfig); + }); + + parser.declareString(optionalConstructorArg(), DataFrameField.ID); + parser.declareString(constructorArg(), SOURCE); + parser.declareString(constructorArg(), DESTINATION); + parser.declareObject(optionalConstructorArg(), (p, c) -> QueryConfig.fromXContent(p, lenient), QUERY); + parser.declareObject(optionalConstructorArg(), (p, c) -> PivotConfig.fromXContent(p, lenient), PIVOT_TRANSFORM); + + return parser; + } + + public static String documentId(String transformId) { + return "data_frame-" + transformId; + } + + public DataFrameTransformConfig(final String id, + final String source, + final String dest, + final QueryConfig queryConfig, + final PivotConfig pivotConfig) { + this.id = ExceptionsHelper.requireNonNull(id, DataFrameField.ID.getPreferredName()); + this.source = ExceptionsHelper.requireNonNull(source, SOURCE.getPreferredName()); + this.dest = ExceptionsHelper.requireNonNull(dest, DESTINATION.getPreferredName()); + this.queryConfig = ExceptionsHelper.requireNonNull(queryConfig, QUERY.getPreferredName()); + this.pivotConfig = pivotConfig; + + // at least one function must be defined + if (this.pivotConfig == null) { + throw new IllegalArgumentException(DataFrameMessages.DATA_FRAME_TRANSFORM_CONFIGURATION_NO_TRANSFORM); + } + } + + public DataFrameTransformConfig(final StreamInput in) throws IOException { + id = in.readString(); + source = in.readString(); + dest = in.readString(); + queryConfig = in.readOptionalWriteable(QueryConfig::new); + pivotConfig = in.readOptionalWriteable(PivotConfig::new); + } + + public String getId() { + return id; + } + + public String getCron() { + return "*"; + } + + public String getSource() { + return source; + } + + public String getDestination() { + return dest; + } + + public PivotConfig getPivotConfig() { + return pivotConfig; + } + + public QueryConfig getQueryConfig() { + return queryConfig; + } + + public boolean isValid() { + // collect validation results from all child objects + if (queryConfig != null && queryConfig.isValid() == false) { + return false; + } + + if (pivotConfig != null && pivotConfig.isValid() == false) { + return false; + } + + return true; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeString(id); + out.writeString(source); + out.writeString(dest); + out.writeOptionalWriteable(queryConfig); + out.writeOptionalWriteable(pivotConfig); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + builder.field(DataFrameField.ID.getPreferredName(), id); + builder.field(SOURCE.getPreferredName(), source); + builder.field(DESTINATION.getPreferredName(), dest); + if (queryConfig != null) { + builder.field(QUERY.getPreferredName(), queryConfig); + } + if (pivotConfig != null) { + builder.field(PIVOT_TRANSFORM.getPreferredName(), pivotConfig); + } + builder.endObject(); + return builder; + } + + @Override + public 
boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + final DataFrameTransformConfig that = (DataFrameTransformConfig) other; + + return Objects.equals(this.id, that.id) + && Objects.equals(this.source, that.source) + && Objects.equals(this.dest, that.dest) + && Objects.equals(this.queryConfig, that.queryConfig) + && Objects.equals(this.pivotConfig, that.pivotConfig); + } + + @Override + public int hashCode() { + return Objects.hash(id, source, dest, queryConfig, pivotConfig); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + public static DataFrameTransformConfig fromXContent(final XContentParser parser, @Nullable final String optionalTransformId, + boolean lenient) throws IOException { + + return lenient ? LENIENT_PARSER.apply(parser, optionalTransformId) : STRICT_PARSER.apply(parser, optionalTransformId); + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java new file mode 100644 index 0000000000000..8b82f2684924d --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.transforms; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.persistent.AllocatedPersistentTask; +import org.elasticsearch.persistent.PersistentTaskState; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.dataframe.transform.DataFrameTransformState; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; +import org.elasticsearch.xpack.dataframe.DataFrame; +import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; + +import java.util.Map; + +public class DataFrameTransformPersistentTasksExecutor extends PersistentTasksExecutor { + + private static final Logger logger = LogManager.getLogger(DataFrameTransformPersistentTasksExecutor.class); + + private final Client client; + private final DataFrameTransformsConfigManager transformsConfigManager; + private final SchedulerEngine schedulerEngine; + private final ThreadPool threadPool; + + public DataFrameTransformPersistentTasksExecutor(Client client, DataFrameTransformsConfigManager transformsConfigManager, + SchedulerEngine schedulerEngine, ThreadPool threadPool) { + super(DataFrameField.TASK_NAME, DataFrame.TASK_THREAD_POOL_NAME); + this.client = client; + this.transformsConfigManager = transformsConfigManager; + this.schedulerEngine = schedulerEngine; + this.threadPool = threadPool; + } + + @Override + protected void nodeOperation(AllocatedPersistentTask 
task, @Nullable DataFrameTransform params, PersistentTaskState state) {
+        DataFrameTransformTask buildTask = (DataFrameTransformTask) task;
+        SchedulerEngine.Job schedulerJob = new SchedulerEngine.Job(
+                DataFrameTransformTask.SCHEDULE_NAME + "_" + params.getId(), next());
+
+        // Note that while the task is added to the scheduler here, the internal state will prevent
+        // it from doing any work until the task is "started" via the StartTransform api
+        schedulerEngine.register(buildTask);
+        schedulerEngine.add(schedulerJob);
+
+        logger.info("Data frame transform [" + params.getId() + "] created.");
+    }
+
+    static SchedulerEngine.Schedule next() {
+        return (startTime, now) -> {
+            return now + 1000; // TODO: hardcoded one second trigger interval as a placeholder until real schedule handling is implemented
+        };
+    }
+
+    @Override
+    protected AllocatedPersistentTask createTask(long id, String type, String action, TaskId parentTaskId,
+            PersistentTasksCustomMetaData.PersistentTask<DataFrameTransform> persistentTask, Map<String, String> headers) {
+        return new DataFrameTransformTask(id, type, action, parentTaskId, persistentTask.getParams(),
+                (DataFrameTransformState) persistentTask.getState(), client, transformsConfigManager, schedulerEngine, threadPool, headers);
+    }
+}
\ No newline at end of file
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java
new file mode 100644
index 0000000000000..71570769c052d
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java
@@ -0,0 +1,328 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.dataframe.transforms; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.bulk.BulkAction; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.persistent.AllocatedPersistentTask; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; +import org.elasticsearch.xpack.core.dataframe.transform.DataFrameIndexerTransformStats; +import org.elasticsearch.xpack.core.dataframe.transform.DataFrameTransformState; +import org.elasticsearch.xpack.core.indexing.IndexerState; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine.Event; +import org.elasticsearch.xpack.dataframe.action.StartDataFrameTransformAction; +import org.elasticsearch.xpack.dataframe.action.StartDataFrameTransformAction.Response; +import org.elasticsearch.xpack.dataframe.action.StopDataFrameTransformAction; +import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; + +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +public class DataFrameTransformTask extends AllocatedPersistentTask implements SchedulerEngine.Listener { + + private static final Logger logger = LogManager.getLogger(DataFrameTransformTask.class); + public static final String SCHEDULE_NAME = DataFrameField.TASK_NAME + "/schedule"; + + private final DataFrameTransform transform; + private final SchedulerEngine schedulerEngine; + private final ThreadPool threadPool; + private final DataFrameIndexer indexer; + + // the generation of this data frame, for v1 there will be only + // 0: data frame not created or still indexing + // 1: data frame complete, all data has been indexed + private final AtomicReference generation; + + public DataFrameTransformTask(long id, String type, String action, TaskId parentTask, DataFrameTransform transform, + DataFrameTransformState state, Client client, DataFrameTransformsConfigManager transformsConfigManager, + SchedulerEngine schedulerEngine, ThreadPool threadPool, Map headers) { + super(id, type, action, DataFrameField.PERSISTENT_TASK_DESCRIPTION_PREFIX + transform.getId(), parentTask, headers); + this.transform = transform; + this.schedulerEngine = schedulerEngine; + this.threadPool = threadPool; + IndexerState initialState = IndexerState.STOPPED; + long initialGeneration = 0; + Map initialPosition = null; + logger.info("[{}] init, got state: [{}]", transform.getId(), state != null); + if (state != null) { + final IndexerState existingState = state.getIndexerState(); + logger.info("[{}] Loading existing state: [{}], position [{}]", transform.getId(), existingState, state.getPosition()); + if (existingState.equals(IndexerState.INDEXING)) { + // reset to started as no indexer is 
running + initialState = IndexerState.STARTED; + } else if (existingState.equals(IndexerState.ABORTING) || existingState.equals(IndexerState.STOPPING)) { + // reset to stopped as something bad happened + initialState = IndexerState.STOPPED; + } else { + initialState = existingState; + } + initialPosition = state.getPosition(); + initialGeneration = state.getGeneration(); + } + + this.indexer = new ClientDataFrameIndexer(transform.getId(), transformsConfigManager, new AtomicReference<>(initialState), + initialPosition, client); + this.generation = new AtomicReference(initialGeneration); + } + + public String getTransformId() { + return transform.getId(); + } + + /** + * Enable Task API to return detailed status information + */ + @Override + public Status getStatus() { + return getState(); + } + + public DataFrameTransformState getState() { + return new DataFrameTransformState(indexer.getState(), indexer.getPosition(), generation.get()); + } + + public DataFrameIndexerTransformStats getStats() { + return indexer.getStats(); + } + + public long getGeneration() { + return generation.get(); + } + + public boolean isStopped() { + return indexer.getState().equals(IndexerState.STOPPED); + } + + public synchronized void start(ActionListener listener) { + final IndexerState prevState = indexer.getState(); + if (prevState != IndexerState.STOPPED) { + // fails if the task is not STOPPED + listener.onFailure(new ElasticsearchException("Cannot start task for data frame transform [{}], because state was [{}]", + transform.getId(), prevState)); + return; + } + + final IndexerState newState = indexer.start(); + if (newState != IndexerState.STARTED) { + listener.onFailure(new ElasticsearchException("Cannot start task for data frame transform [{}], because state was [{}]", + transform.getId(), newState)); + return; + } + + final DataFrameTransformState state = new DataFrameTransformState(IndexerState.STOPPED, indexer.getPosition(), generation.get()); + + logger.debug("Updating state for data frame transform [{}] to [{}][{}]", transform.getId(), state.getIndexerState(), + state.getPosition()); + updatePersistentTaskState(state, + ActionListener.wrap( + (task) -> { + logger.debug("Successfully updated state for data frame transform [" + transform.getId() + "] to [" + + state.getIndexerState() + "][" + state.getPosition() + "]"); + listener.onResponse(new StartDataFrameTransformAction.Response(true)); + }, (exc) -> { + // We were unable to update the persistent status, so we need to shutdown the indexer too. + indexer.stop(); + listener.onFailure(new ElasticsearchException("Error while updating state for data frame transform [" + + transform.getId() + "] to [" + state.getIndexerState() + "].", exc)); + }) + ); + } + + public synchronized void stop(ActionListener listener) { + final IndexerState newState = indexer.stop(); + switch (newState) { + case STOPPED: + listener.onResponse(new StopDataFrameTransformAction.Response(true)); + break; + + case STOPPING: + // update the persistent state to STOPPED. There are two scenarios and both are safe: + // 1. we persist STOPPED now, indexer continues a bit then sees the flag and checkpoints another STOPPED with the more recent + // position. + // 2. we persist STOPPED now, indexer continues a bit but then dies. When/if we resume we'll pick up at last checkpoint, + // overwrite some docs and eventually checkpoint. 
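+            // In both scenarios the position persisted here is at or behind the indexer's true
+            // progress, so nothing is ever skipped on resume.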
+ DataFrameTransformState state = new DataFrameTransformState(IndexerState.STOPPED, indexer.getPosition(), generation.get()); + updatePersistentTaskState(state, ActionListener.wrap((task) -> { + logger.debug("Successfully updated state for data frame transform [{}] to [{}]", transform.getId(), + state.getIndexerState()); + listener.onResponse(new StopDataFrameTransformAction.Response(true)); + }, (exc) -> { + listener.onFailure(new ElasticsearchException("Error while updating state for data frame transform [{}] to [{}]", exc, + transform.getId(), state.getIndexerState())); + })); + break; + + default: + listener.onFailure(new ElasticsearchException("Cannot stop task for data frame transform [{}], because state was [{}]", + transform.getId(), newState)); + break; + } + } + + @Override + public synchronized void triggered(Event event) { + if (generation.get() == 0 && event.getJobName().equals(SCHEDULE_NAME + "_" + transform.getId())) { + logger.debug("Data frame indexer [" + event.getJobName() + "] schedule has triggered, state: [" + indexer.getState() + "]"); + indexer.maybeTriggerAsyncJob(System.currentTimeMillis()); + } + } + + /** + * Attempt to gracefully cleanup the data frame transform so it can be terminated. + * This tries to remove the job from the scheduler, and potentially any other + * cleanup operations in the future + */ + synchronized void shutdown() { + try { + logger.info("Data frame indexer [" + transform.getId() + "] received abort request, stopping indexer."); + schedulerEngine.remove(SCHEDULE_NAME + "_" + transform.getId()); + schedulerEngine.unregister(this); + } catch (Exception e) { + markAsFailed(e); + return; + } + markAsCompleted(); + } + + /** + * This is called when the persistent task signals that the allocated task should be terminated. + * Termination in the task framework is essentially voluntary, as the allocated task can only be + * shut down from the inside. 
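+     * Hence this method only asks the indexer to abort: if no background job is running the
+     * task shuts itself down right away, otherwise shutdown happens later from the indexer's
+     * onAbort callback.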
+ */ + @Override + public synchronized void onCancelled() { + logger.info( + "Received cancellation request for data frame transform [" + transform.getId() + "], state: [" + indexer.getState() + "]"); + if (indexer.abort()) { + // there is no background transform running, we can shutdown safely + shutdown(); + } + } + + protected class ClientDataFrameIndexer extends DataFrameIndexer { + private static final int LOAD_TRANSFORM_TIMEOUT_IN_SECONDS = 30; + private final Client client; + private final DataFrameTransformsConfigManager transformsConfigManager; + private final String transformId; + + private DataFrameTransformConfig transformConfig = null; + + public ClientDataFrameIndexer(String transformId, DataFrameTransformsConfigManager transformsConfigManager, + AtomicReference initialState, Map initialPosition, Client client) { + super(threadPool.executor(ThreadPool.Names.GENERIC), initialState, initialPosition); + this.transformId = transformId; + this.transformsConfigManager = transformsConfigManager; + this.client = client; + } + + @Override + protected DataFrameTransformConfig getConfig() { + return transformConfig; + } + + @Override + protected String getJobId() { + return transformId; + } + + @Override + public synchronized boolean maybeTriggerAsyncJob(long now) { + if (transformConfig == null) { + CountDownLatch latch = new CountDownLatch(1); + + transformsConfigManager.getTransformConfiguration(transformId, new LatchedActionListener<>(ActionListener.wrap(config -> { + transformConfig = config; + }, e -> { + throw new RuntimeException( + DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_LOAD_TRANSFORM_CONFIGURATION, transformId), e); + }), latch)); + + try { + latch.await(LOAD_TRANSFORM_TIMEOUT_IN_SECONDS, TimeUnit.SECONDS); + } catch (InterruptedException e) { + throw new RuntimeException( + DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_LOAD_TRANSFORM_CONFIGURATION, transformId), e); + } + } + + // todo: set job into failed state + if (transformConfig.isValid() == false) { + throw new RuntimeException( + DataFrameMessages.getMessage(DataFrameMessages.DATA_FRAME_TRANSFORM_CONFIGURATION_INVALID, transformId)); + } + + return super.maybeTriggerAsyncJob(now); + } + + @Override + protected void doNextSearch(SearchRequest request, ActionListener nextPhase) { + ClientHelper.executeWithHeadersAsync(transform.getHeaders(), ClientHelper.DATA_FRAME_ORIGIN, client, SearchAction.INSTANCE, + request, nextPhase); + } + + @Override + protected void doNextBulk(BulkRequest request, ActionListener nextPhase) { + ClientHelper.executeWithHeadersAsync(transform.getHeaders(), ClientHelper.DATA_FRAME_ORIGIN, client, BulkAction.INSTANCE, + request, nextPhase); + } + + @Override + protected void doSaveState(IndexerState indexerState, Map position, Runnable next) { + if (indexerState.equals(IndexerState.ABORTING)) { + // If we're aborting, just invoke `next` (which is likely an onFailure handler) + next.run(); + return; + } + + if(indexerState.equals(IndexerState.STARTED)) { + // if the indexer resets the state to started, it means it is done, so increment the generation + generation.compareAndSet(0L, 1L); + } + + final DataFrameTransformState state = new DataFrameTransformState(indexerState, getPosition(), generation.get()); + logger.info("Updating persistent state of transform [" + transform.getId() + "] to [" + state.toString() + "]"); + + updatePersistentTaskState(state, ActionListener.wrap(task -> next.run(), exc -> { + logger.error("Updating persistent state of transform [" + 
transform.getId() + "] failed", exc); + next.run(); + })); + } + + @Override + protected void onFailure(Exception exc) { + logger.warn("Data frame transform [" + transform.getId() + "] failed with an exception: ", exc); + } + + @Override + protected void onFinish() { + logger.info("Finished indexing for data frame transform [" + transform.getId() + "]"); + } + + @Override + protected void onAbort() { + logger.info("Data frame transform [" + transform.getId() + "] received abort request, stopping indexer"); + shutdown(); + } + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/QueryConfig.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/QueryConfig.java new file mode 100644 index 0000000000000..6da61e711d842 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/QueryConfig.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.transforms; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +public class QueryConfig extends AbstractDiffable implements Writeable, ToXContentObject { + private static final Logger logger = LogManager.getLogger(QueryConfig.class); + + // we store the query in 2 formats: the raw format and the parsed format, because: + // - the parsed format adds defaults, which were not part of the original and looks odd on XContent retrieval + // - if parsing fails (e.g. 
query uses removed functionality), the source can be retrieved + private final Map source; + private final QueryBuilder query; + + public QueryConfig(final Map source, final QueryBuilder query) { + this.source = Objects.requireNonNull(source); + this.query = query; + } + + public QueryConfig(final StreamInput in) throws IOException { + this.source = in.readMap(); + this.query = in.readOptionalNamedWriteable(QueryBuilder.class); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.map(source); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(source); + out.writeOptionalNamedWriteable(query); + } + + public QueryBuilder getQuery() { + return query; + } + + public static QueryConfig fromXContent(final XContentParser parser, boolean lenient) throws IOException { + // we need 2 passes, but the parser can not be cloned, so we parse 1st into a map and then re-parse that for syntax checking + + // remember the registry, needed for the 2nd pass + NamedXContentRegistry registry = parser.getXContentRegistry(); + + Map source = parser.mapOrdered(); + QueryBuilder query = null; + + try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(source); + XContentParser sourceParser = XContentType.JSON.xContent().createParser(registry, LoggingDeprecationHandler.INSTANCE, + BytesReference.bytes(xContentBuilder).streamInput())) { + query = AbstractQueryBuilder.parseInnerQueryBuilder(sourceParser); + } catch (Exception e) { + if (lenient) { + logger.warn(DataFrameMessages.LOG_DATA_FRAME_TRANSFORM_CONFIGURATION_BAD_QUERY, e); + } else { + throw e; + } + } + + return new QueryConfig(source, query); + } + + @Override + public int hashCode() { + return Objects.hash(source, query); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + final QueryConfig that = (QueryConfig) other; + + return Objects.equals(this.source, that.source) && Objects.equals(this.query, that.query); + } + + public boolean isValid() { + return this.query != null; + } +} \ No newline at end of file diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationConfig.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationConfig.java new file mode 100644 index 0000000000000..54b6109520a5b --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationConfig.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.dataframe.transforms.pivot; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; + +import java.io.IOException; +import java.util.Collection; +import java.util.Map; +import java.util.Objects; + +/* + * Wrapper for the aggregations config part of a composite aggregation. + * + * For now just wraps aggregations from composite aggs. + * + */ +public class AggregationConfig implements Writeable, ToXContentObject { + private static final Logger logger = LogManager.getLogger(AggregationConfig.class); + + // we store the query in 2 formats: the raw format and the parsed format + private final Map source; + private final AggregatorFactories.Builder aggregations; + + public AggregationConfig(final Map source, AggregatorFactories.Builder aggregations) { + this.source = source; + this.aggregations = aggregations; + } + + public AggregationConfig(final StreamInput in) throws IOException { + source = in.readMap(); + aggregations = in.readOptionalWriteable(AggregatorFactories.Builder::new); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.map(source); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(source); + out.writeOptionalWriteable(aggregations); + } + + public Collection getAggregatorFactories() { + return aggregations.getAggregatorFactories(); + } + + public static AggregationConfig fromXContent(final XContentParser parser, boolean lenient) throws IOException { + NamedXContentRegistry registry = parser.getXContentRegistry(); + Map source = parser.mapOrdered(); + AggregatorFactories.Builder aggregations = null; + + if (source.isEmpty()) { + if (lenient) { + logger.warn(DataFrameMessages.DATA_FRAME_TRANSFORM_CONFIGURATION_PIVOT_NO_AGGREGATION); + } else { + throw new IllegalArgumentException(DataFrameMessages.DATA_FRAME_TRANSFORM_CONFIGURATION_PIVOT_NO_AGGREGATION); + } + } else { + try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(source); + XContentParser sourceParser = XContentType.JSON.xContent().createParser(registry, LoggingDeprecationHandler.INSTANCE, + BytesReference.bytes(xContentBuilder).streamInput())) { + sourceParser.nextToken(); + aggregations = AggregatorFactories.parseAggregators(sourceParser); + } catch (Exception e) { + if (lenient) { + logger.warn(DataFrameMessages.LOG_DATA_FRAME_TRANSFORM_CONFIGURATION_BAD_AGGREGATION, e); + } else { + throw e; + } + } + } + return new AggregationConfig(source, aggregations); + } + + @Override + public int hashCode() { + return Objects.hash(source, aggregations); + } + + @Override + public 
boolean equals(Object other) {
+        if (this == other) {
+            return true;
+        }
+
+        if (other == null || getClass() != other.getClass()) {
+            return false;
+        }
+
+        final AggregationConfig that = (AggregationConfig) other;
+
+        return Objects.equals(this.source, that.source) && Objects.equals(this.aggregations, that.aggregations);
+    }
+
+    public boolean isValid() {
+        return this.aggregations != null;
+    }
+}
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java
new file mode 100644
index 0000000000000..f301e64053664
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms.pivot;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation;
+import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation;
+import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation.SingleValue;
+import org.elasticsearch.xpack.core.dataframe.transform.DataFrameIndexerTransformStats;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.stream.Stream;
+
+final class AggregationResultUtils {
+    private static final Logger logger = LogManager.getLogger(AggregationResultUtils.class);
+
+    /**
+     * Extracts aggregation results from a composite aggregation and puts it into a map.
+     *
+     * @param agg The aggregation result
+     * @param groups The original groupings used for querying
+     * @param aggregationBuilders the aggregation used for querying
+     * @param dataFrameIndexerTransformStats stats collector
+     * @return a map containing the results of the aggregation in a consumable way
+     */
+    public static Stream<Map<String, Object>> extractCompositeAggregationResults(CompositeAggregation agg,
+            GroupConfig groups, Collection<AggregationBuilder> aggregationBuilders,
+            DataFrameIndexerTransformStats dataFrameIndexerTransformStats) {
+        return agg.getBuckets().stream().map(bucket -> {
+            dataFrameIndexerTransformStats.incrementNumDocuments(bucket.getDocCount());
+
+            Map<String, Object> document = new HashMap<>();
+            groups.getGroups().keySet().forEach(destinationFieldName -> {
+                document.put(destinationFieldName, bucket.getKey().get(destinationFieldName));
+            });
+
+            for (AggregationBuilder aggregationBuilder : aggregationBuilders) {
+                String aggName = aggregationBuilder.getName();
+
+                // TODO: support other aggregation types
+                Aggregation aggResult = bucket.getAggregations().get(aggName);
+
+                if (aggResult instanceof NumericMetricsAggregation.SingleValue) {
+                    NumericMetricsAggregation.SingleValue aggResultSingleValue = (SingleValue) aggResult;
+                    document.put(aggName, aggResultSingleValue.value());
+                } else {
+                    // Execution should never reach this point!
+ // Creating transforms with unsupported aggregations shall not be possible + logger.error("Dataframe Internal Error: unsupported aggregation ["+ aggResult.getName() +"], ignoring"); + assert false; + } + } + return document; + }); + } + +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java new file mode 100644 index 0000000000000..555deae36745f --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.transforms.pivot; + +import java.util.Locale; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +public final class Aggregations { + private Aggregations() {} + + /** + * Supported aggregation by dataframe and corresponding meta information. + * + * aggregationType - the name of the aggregation as returned by + * {@link org.elasticsearch.search.aggregations.BaseAggregationBuilder#getType()}} + * + * targetMapping - the field type for the output, if null, the source type should be used + * + */ + enum AggregationType { + AVG("avg", "double"), + CARDINALITY("cardinality", "long"), + VALUE_COUNT("value_count", "long"), + MAX("max", null), + MIN("min", null), + SUM("sum", null); + + private final String aggregationType; + private final String targetMapping; + + AggregationType(String name, String targetMapping) { + this.aggregationType = name; + this.targetMapping = targetMapping; + } + + public String getName() { + return aggregationType; + } + + public String getTargetMapping() { + return targetMapping; + } + } + + private static Set aggregationSupported = Stream.of(AggregationType.values()).map(AggregationType::name) + .collect(Collectors.toSet()); + + public static boolean isSupportedByDataframe(String aggregationType) { + return aggregationSupported.contains(aggregationType.toUpperCase(Locale.ROOT)); + } + + public static String resolveTargetMapping(String aggregationType, String sourceType) { + AggregationType agg = AggregationType.valueOf(aggregationType.toUpperCase(Locale.ROOT)); + return agg.getTargetMapping() == null ? sourceType : agg.getTargetMapping(); + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/DateHistogramGroupSource.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/DateHistogramGroupSource.java new file mode 100644 index 0000000000000..59efac481d4d1 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/DateHistogramGroupSource.java @@ -0,0 +1,182 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.dataframe.transforms.pivot; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; + +import java.io.IOException; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.util.Objects; + + +public class DateHistogramGroupSource extends SingleGroupSource { + + private static final String NAME = "data_frame_date_histogram_group"; + private static final ParseField TIME_ZONE = new ParseField("time_zone"); + private static final ParseField FORMAT = new ParseField("format"); + + private static final ConstructingObjectParser STRICT_PARSER = createParser(false); + private static final ConstructingObjectParser LENIENT_PARSER = createParser(true); + private long interval = 0; + private DateHistogramInterval dateHistogramInterval; + private String format; + private ZoneId timeZone; + + public DateHistogramGroupSource(String field) { + super(field); + } + + public DateHistogramGroupSource(StreamInput in) throws IOException { + super(in); + this.interval = in.readLong(); + this.dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new); + this.timeZone = in.readOptionalZoneId(); + this.format = in.readOptionalString(); + } + + private static ConstructingObjectParser createParser(boolean lenient) { + ConstructingObjectParser parser = new ConstructingObjectParser<>(NAME, lenient, (args) -> { + String field = (String) args[0]; + return new DateHistogramGroupSource(field); + }); + + SingleGroupSource.declareValuesSourceFields(parser, null); + + parser.declareField((histogram, interval) -> { + if (interval instanceof Long) { + histogram.setInterval((long) interval); + } else { + histogram.setDateHistogramInterval((DateHistogramInterval) interval); + } + }, p -> { + if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) { + return p.longValue(); + } else { + return new DateHistogramInterval(p.text()); + } + }, HistogramGroupSource.INTERVAL, ObjectParser.ValueType.LONG); + + parser.declareField(DateHistogramGroupSource::setTimeZone, p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return ZoneId.of(p.text()); + } else { + return ZoneOffset.ofHours(p.intValue()); + } + }, TIME_ZONE, ObjectParser.ValueType.LONG); + + parser.declareString(DateHistogramGroupSource::setFormat, FORMAT); + return parser; + } + + public static DateHistogramGroupSource fromXContent(final XContentParser parser, boolean lenient) throws IOException { + return lenient ? 
LENIENT_PARSER.apply(parser, null) : STRICT_PARSER.apply(parser, null); + } + + @Override + public Type getType() { + return Type.DATE_HISTOGRAM; + } + + public long getInterval() { + return interval; + } + + public void setInterval(long interval) { + if (interval < 1) { + throw new IllegalArgumentException("[interval] must be greater than or equal to 1."); + } + this.interval = interval; + } + + public DateHistogramInterval getDateHistogramInterval() { + return dateHistogramInterval; + } + + public void setDateHistogramInterval(DateHistogramInterval dateHistogramInterval) { + if (dateHistogramInterval == null) { + throw new IllegalArgumentException("[dateHistogramInterval] must not be null"); + } + this.dateHistogramInterval = dateHistogramInterval; + } + + public String getFormat() { + return format; + } + + public void setFormat(String format) { + this.format = format; + } + + public ZoneId getTimeZone() { + return timeZone; + } + + public void setTimeZone(ZoneId timeZone) { + this.timeZone = timeZone; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalString(field); + out.writeLong(interval); + out.writeOptionalWriteable(dateHistogramInterval); + out.writeOptionalZoneId(timeZone); + out.writeOptionalString(format); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (field != null) { + builder.field(FIELD.getPreferredName(), field); + } + if (dateHistogramInterval == null) { + builder.field(HistogramGroupSource.INTERVAL.getPreferredName(), interval); + } else { + builder.field(HistogramGroupSource.INTERVAL.getPreferredName(), dateHistogramInterval.toString()); + } + if (timeZone != null) { + builder.field(TIME_ZONE.getPreferredName(), timeZone.toString()); + } + if (format != null) { + builder.field(FORMAT.getPreferredName(), format); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + final DateHistogramGroupSource that = (DateHistogramGroupSource) other; + + return Objects.equals(this.field, that.field) && + Objects.equals(interval, that.interval) && + Objects.equals(dateHistogramInterval, that.dateHistogramInterval) && + Objects.equals(timeZone, that.timeZone) && + Objects.equals(format, that.format); + } + + @Override + public int hashCode() { + return Objects.hash(field, interval, dateHistogramInterval, timeZone, format); + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/GroupConfig.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/GroupConfig.java new file mode 100644 index 0000000000000..8ace9d64d9737 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/GroupConfig.java @@ -0,0 +1,185 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.dataframe.transforms.pivot; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.dataframe.transforms.pivot.SingleGroupSource.Type; + +import java.io.IOException; +import java.util.LinkedHashMap; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; + +/* + * Wraps a single group for groupby + */ +public class GroupConfig implements Writeable, ToXContentObject { + + private static final Logger logger = LogManager.getLogger(GroupConfig.class); + + private final Map source; + private final Map> groups; + + public GroupConfig(final Map source, final Map> groups) { + this.source = ExceptionsHelper.requireNonNull(source, DataFrameField.GROUP_BY.getPreferredName()); + this.groups = groups; + } + + public GroupConfig(StreamInput in) throws IOException { + source = in.readMap(); + groups = in.readMap(StreamInput::readString, (stream) -> { + Type groupType = Type.fromId(stream.readByte()); + switch (groupType) { + case TERMS: + return new TermsGroupSource(stream); + case HISTOGRAM: + return new HistogramGroupSource(stream); + case DATE_HISTOGRAM: + return new DateHistogramGroupSource(stream); + default: + throw new IOException("Unknown group type"); + } + }); + } + + public Map > getGroups() { + return groups; + } + + public boolean isValid() { + return this.groups != null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(source); + out.writeMap(groups, StreamOutput::writeString, (stream, value) -> { + stream.writeByte(value.getType().getId()); + value.writeTo(stream); + }); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.map(source); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + final GroupConfig that = (GroupConfig) other; + + return Objects.equals(this.source, that.source) && Objects.equals(this.groups, that.groups); + } + + @Override + public int hashCode() { + return Objects.hash(source, groups); + } + + public static GroupConfig fromXContent(final XContentParser parser, boolean lenient) throws IOException { + NamedXContentRegistry registry = parser.getXContentRegistry(); + Map source = parser.mapOrdered(); + Map> groups = null; + + if (source.isEmpty()) { + if (lenient) { + 
+
+    public static GroupConfig fromXContent(final XContentParser parser, boolean lenient) throws IOException {
+        NamedXContentRegistry registry = parser.getXContentRegistry();
+        Map<String, Object> source = parser.mapOrdered();
+        Map<String, SingleGroupSource<?>> groups = null;
+
+        if (source.isEmpty()) {
+            if (lenient) {
+                logger.warn(DataFrameMessages.DATA_FRAME_TRANSFORM_CONFIGURATION_PIVOT_NO_GROUP_BY);
+            } else {
+                throw new IllegalArgumentException(DataFrameMessages.DATA_FRAME_TRANSFORM_CONFIGURATION_PIVOT_NO_GROUP_BY);
+            }
+        } else {
+            try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(source);
+                    XContentParser sourceParser = XContentType.JSON.xContent().createParser(registry, LoggingDeprecationHandler.INSTANCE,
+                            BytesReference.bytes(xContentBuilder).streamInput())) {
+                groups = parseGroupConfig(sourceParser, lenient);
+            } catch (Exception e) {
+                if (lenient) {
+                    logger.warn(DataFrameMessages.LOG_DATA_FRAME_TRANSFORM_CONFIGURATION_BAD_GROUP_BY, e);
+                } else {
+                    throw e;
+                }
+            }
+        }
+        return new GroupConfig(source, groups);
+    }
+
+    private static Map<String, SingleGroupSource<?>> parseGroupConfig(final XContentParser parser,
+            boolean lenient) throws IOException {
+        LinkedHashMap<String, SingleGroupSource<?>> groups = new LinkedHashMap<>();
+
+        // be parsing friendly, whether the token needs to be advanced or not (similar to what ObjectParser does)
+        XContentParser.Token token;
+        if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+            token = parser.currentToken();
+        } else {
+            token = parser.nextToken();
+            if (token != XContentParser.Token.START_OBJECT) {
+                throw new ParsingException(parser.getTokenLocation(), "Failed to parse object: Expected START_OBJECT but was: " + token);
+            }
+        }
+
+        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+
+            ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);
+            String destinationFieldName = parser.currentName();
+            token = parser.nextToken();
+            ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation);
+            token = parser.nextToken();
+            ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);
+            Type groupType = SingleGroupSource.Type.valueOf(parser.currentName().toUpperCase(Locale.ROOT));
+
+            token = parser.nextToken();
+            ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation);
+            SingleGroupSource<?> groupSource;
+            switch (groupType) {
+            case TERMS:
+                groupSource = TermsGroupSource.fromXContent(parser, lenient);
+                break;
+            case HISTOGRAM:
+                groupSource = HistogramGroupSource.fromXContent(parser, lenient);
+                break;
+            case DATE_HISTOGRAM:
+                groupSource = DateHistogramGroupSource.fromXContent(parser, lenient);
+                break;
+            default:
+                throw new ParsingException(parser.getTokenLocation(), "invalid grouping type: " + groupType);
+            }
+
+            parser.nextToken();
+
+            groups.put(destinationFieldName, groupSource);
+        }
+        return groups;
+    }
+}
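End to end, the class above turns a group_by object into typed group sources keyed by destination field name, preserving declaration order. A minimal parsing sketch (field names and the interval value are illustrative, not from this patch):

    String json = "{"
            + "\"by_user\": {\"terms\": {\"field\": \"user\"}},"
            + "\"by_day\": {\"date_histogram\": {\"field\": \"timestamp\", \"interval\": \"1d\"}}"
            + "}";
    try (XContentParser parser = XContentType.JSON.xContent().createParser(
            NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, json)) {
        GroupConfig config = GroupConfig.fromXContent(parser, false);
        // by_user -> TermsGroupSource, by_day -> DateHistogramGroupSource, in declaration order
    }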
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/HistogramGroupSource.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/HistogramGroupSource.java
new file mode 100644
index 0000000000000..3c75dcdedc1b2
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/HistogramGroupSource.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.dataframe.transforms.pivot;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Objects;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
+
+public class HistogramGroupSource extends SingleGroupSource<HistogramGroupSource> {
+
+    static final ParseField INTERVAL = new ParseField("interval");
+    private static final String NAME = "data_frame_histogram_group";
+    private static final ConstructingObjectParser<HistogramGroupSource, Void> STRICT_PARSER = createParser(false);
+    private static final ConstructingObjectParser<HistogramGroupSource, Void> LENIENT_PARSER = createParser(true);
+    private final double interval;
+
+    public HistogramGroupSource(String field, double interval) {
+        super(field);
+        if (interval <= 0) {
+            throw new IllegalArgumentException("[interval] must be greater than 0.");
+        }
+        this.interval = interval;
+    }
+
+    public HistogramGroupSource(StreamInput in) throws IOException {
+        super(in);
+        interval = in.readDouble();
+    }
+
+    private static ConstructingObjectParser<HistogramGroupSource, Void> createParser(boolean lenient) {
+        ConstructingObjectParser<HistogramGroupSource, Void> parser = new ConstructingObjectParser<>(NAME, lenient, (args) -> {
+            String field = (String) args[0];
+            double interval = (double) args[1];
+            return new HistogramGroupSource(field, interval);
+        });
+        SingleGroupSource.declareValuesSourceFields(parser, null);
+        parser.declareDouble(optionalConstructorArg(), INTERVAL);
+        return parser;
+    }
+
+    @Override
+    public Type getType() {
+        return Type.HISTOGRAM;
+    }
+
+    public static HistogramGroupSource fromXContent(final XContentParser parser, boolean lenient) throws IOException {
+        return lenient ? LENIENT_PARSER.apply(parser, null) : STRICT_PARSER.apply(parser, null);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeOptionalString(field);
+        out.writeDouble(interval);
+    }
+
+    public double getInterval() {
+        return interval;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        if (field != null) {
+            builder.field(FIELD.getPreferredName(), field);
+        }
+        builder.field(INTERVAL.getPreferredName(), interval);
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (this == other) {
+            return true;
+        }
+
+        if (other == null || getClass() != other.getClass()) {
+            return false;
+        }
+
+        final HistogramGroupSource that = (HistogramGroupSource) other;
+
+        return Objects.equals(this.field, that.field) &&
+            Objects.equals(this.interval, that.interval);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(field, interval);
+    }
+}
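For context, interval here is the fixed bucket width of the histogram: a value lands in the bucket whose key is the value rounded down to a multiple of the interval, which is why the constructor rejects anything not strictly positive. A dependency-free sketch of that arithmetic (offset is assumed to be 0):

    // key = floor(value / interval) * interval
    // e.g. bucketKey(7.3, 5.0) == 5.0 and bucketKey(-1.0, 5.0) == -5.0
    static double bucketKey(double value, double interval) {
        return Math.floor(value / interval) * interval;
    }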
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java
new file mode 100644
index 0000000000000..ca4a7ec8eb4fb
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java
@@ -0,0 +1,130 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms.pivot;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.SearchAction;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation;
+import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.xpack.core.dataframe.DataFrameMessages;
+import org.elasticsearch.xpack.core.dataframe.transform.DataFrameIndexerTransformStats;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+import java.util.stream.Stream;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+
+public class Pivot {
+    private static final String COMPOSITE_AGGREGATION_NAME = "_data_frame";
+
+    private final PivotConfig config;
+    private final String source;
+
+    // cached objects, re-used across search requests
+    private final CompositeAggregationBuilder cachedCompositeAggregation;
+    private final SearchRequest cachedSearchRequest;
+
+    public Pivot(String source, QueryBuilder query, PivotConfig config) {
+        this.source = source;
+        this.config = config;
+        this.cachedCompositeAggregation = createCompositeAggregation(config);
+        this.cachedSearchRequest = createSearchRequest(source, query, cachedCompositeAggregation);
+    }
+
+    public void validate(Client client, final ActionListener<Boolean> listener) {
+        // step 1: check if used aggregations are supported
+        for (AggregationBuilder agg : config.getAggregationConfig().getAggregatorFactories()) {
+            if (Aggregations.isSupportedByDataframe(agg.getType()) == false) {
+                listener.onFailure(new RuntimeException("Unsupported aggregation type [" + agg.getType() + "]"));
+                return;
+            }
+        }
+
+        // step 2: run a query to validate that the config is valid
+        runTestQuery(client, listener);
+    }
+
+    public void deduceMappings(Client client, final ActionListener<Map<String, String>> listener) {
+        SchemaUtil.deduceMappings(client, config, source, listener);
+    }
+
+    public SearchRequest buildSearchRequest(Map<String, Object> position) {
+        if (position != null) {
+            cachedCompositeAggregation.aggregateAfter(position);
+        }
+
+        return cachedSearchRequest;
+    }
+
+    public Stream<Map<String, Object>> extractResults(CompositeAggregation agg,
+            DataFrameIndexerTransformStats dataFrameIndexerTransformStats) {
+
+        GroupConfig groups = config.getGroupConfig();
+        Collection<AggregationBuilder> aggregationBuilders = config.getAggregationConfig().getAggregatorFactories();
+
+        return AggregationResultUtils.extractCompositeAggregationResults(agg, groups, aggregationBuilders, dataFrameIndexerTransformStats);
+    }
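buildSearchRequest above mutates the one cached aggregation rather than rebuilding it, because composite aggregations page by after key: each response's afterKey() seeds the next request until a page comes back empty. A sketch of the resulting loop (the synchronous search(request) helper and the pivot instance are hypothetical; "_data_frame" is the COMPOSITE_AGGREGATION_NAME constant above):

    Map<String, Object> position = null;                    // null -> first page
    while (true) {
        SearchResponse response = search(pivot.buildSearchRequest(position));
        CompositeAggregation agg = response.getAggregations().get("_data_frame");
        if (agg.getBuckets().isEmpty()) {
            break;                                          // no more pages
        }
        // ... feed agg into extractResults(...) ...
        position = agg.afterKey();                          // seed the next page
    }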
+
+    private void runTestQuery(Client client, final ActionListener<Boolean> listener) {
+        // no after key
+        cachedCompositeAggregation.aggregateAfter(null);
+        client.execute(SearchAction.INSTANCE, cachedSearchRequest, ActionListener.wrap(response -> {
+            if (response == null) {
+                listener.onFailure(new RuntimeException("Unexpected null response from test query"));
+                return;
+            }
+            if (response.status() != RestStatus.OK) {
+                listener.onFailure(new RuntimeException("Unexpected status from response of test query: " + response.status()));
+                return;
+            }
+            listener.onResponse(true);
+        }, e -> {
+            listener.onFailure(new RuntimeException("Failed to test query", e));
+        }));
+    }
+
+    private static SearchRequest createSearchRequest(String index, QueryBuilder query, CompositeAggregationBuilder compositeAggregation) {
+        SearchRequest searchRequest = new SearchRequest(index);
+        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
+        sourceBuilder.aggregation(compositeAggregation);
+        sourceBuilder.size(0);
+        sourceBuilder.query(query);
+        searchRequest.source(sourceBuilder);
+        return searchRequest;
+    }
+
+    private static CompositeAggregationBuilder createCompositeAggregation(PivotConfig config) {
+        CompositeAggregationBuilder compositeAggregation;
+
+        try (XContentBuilder builder = jsonBuilder()) {
+            // write the configuration for the composite aggregation into the builder
+            config.toCompositeAggXContent(builder, ToXContentObject.EMPTY_PARAMS);
+            XContentParser parser = builder.generator().contentType().xContent().createParser(NamedXContentRegistry.EMPTY,
+                    LoggingDeprecationHandler.INSTANCE, BytesReference.bytes(builder).streamInput());
+            compositeAggregation = CompositeAggregationBuilder.parse(COMPOSITE_AGGREGATION_NAME, parser);
+            compositeAggregation.size(1000);
+            config.getAggregationConfig().getAggregatorFactories().forEach(agg -> compositeAggregation.subAggregation(agg));
+        } catch (IOException e) {
+            throw new RuntimeException(DataFrameMessages.DATA_FRAME_TRANSFORM_PIVOT_FAILED_TO_CREATE_COMPOSITE_AGGREGATION, e);
+        }
+        return compositeAggregation;
+    }
+}
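The PivotConfig parser in the next file accepts aggs and aggregations as synonyms but requires exactly one of them. The three cases, sketched as annotated JSON (aggregation names and fields illustrative):

    // accepted, the two keys are interchangeable:
    //   {"group_by": {...}, "aggregations": {"avg_price": {"avg": {"field": "price"}}}}
    //   {"group_by": {...}, "aggs":         {"avg_price": {"avg": {"field": "price"}}}}
    // rejected with "Found two aggregation definitions: [aggs] and [aggregations]":
    //   {"group_by": {...}, "aggs": {...}, "aggregations": {...}}
    // rejected with "Required [aggregations]":
    //   {"group_by": {...}}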
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotConfig.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotConfig.java
new file mode 100644
index 0000000000000..086268b169fbf
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotConfig.java
@@ -0,0 +1,147 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms.pivot;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;
+import org.elasticsearch.xpack.core.dataframe.DataFrameField;
+import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
+
+import java.io.IOException;
+import java.util.Map.Entry;
+import java.util.Objects;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
+
+public class PivotConfig implements Writeable, ToXContentObject {
+
+    private static final String NAME = "data_frame_transform_pivot";
+    private final GroupConfig groups;
+    private final AggregationConfig aggregationConfig;
+
+    private static final ConstructingObjectParser<PivotConfig, Void> STRICT_PARSER = createParser(false);
+    private static final ConstructingObjectParser<PivotConfig, Void> LENIENT_PARSER = createParser(true);
+
+    private static ConstructingObjectParser<PivotConfig, Void> createParser(boolean lenient) {
+        ConstructingObjectParser<PivotConfig, Void> parser = new ConstructingObjectParser<>(NAME, lenient,
+                args -> {
+                    GroupConfig groups = (GroupConfig) args[0];
+
+                    // allow "aggs" and "aggregations" but require one to be specified
+                    // if somebody specifies both: throw
+                    AggregationConfig aggregationConfig = null;
+                    if (args[1] != null) {
+                        aggregationConfig = (AggregationConfig) args[1];
+                    }
+
+                    if (args[2] != null) {
+                        if (aggregationConfig != null) {
+                            throw new IllegalArgumentException("Found two aggregation definitions: [aggs] and [aggregations]");
+                        }
+                        aggregationConfig = (AggregationConfig) args[2];
+                    }
+                    if (aggregationConfig == null) {
+                        throw new IllegalArgumentException("Required [aggregations]");
+                    }
+
+                    return new PivotConfig(groups, aggregationConfig);
+                });
+
+        parser.declareObject(constructorArg(), (p, c) -> GroupConfig.fromXContent(p, lenient), DataFrameField.GROUP_BY);
+
+        parser.declareObject(optionalConstructorArg(), (p, c) -> AggregationConfig.fromXContent(p, lenient), DataFrameField.AGGREGATIONS);
+        parser.declareObject(optionalConstructorArg(), (p, c) -> AggregationConfig.fromXContent(p, lenient), DataFrameField.AGGS);
+
+        return parser;
+    }
+
+    public PivotConfig(final GroupConfig groups, final AggregationConfig aggregationConfig) {
+        this.groups = ExceptionsHelper.requireNonNull(groups, DataFrameField.GROUP_BY.getPreferredName());
+        this.aggregationConfig = ExceptionsHelper.requireNonNull(aggregationConfig, DataFrameField.AGGREGATIONS.getPreferredName());
+    }
+
+    public PivotConfig(StreamInput in) throws IOException {
+        this.groups = new GroupConfig(in);
+        this.aggregationConfig = new AggregationConfig(in);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(DataFrameField.GROUP_BY.getPreferredName(), groups);
+        builder.field(DataFrameField.AGGREGATIONS.getPreferredName(), aggregationConfig);
+        builder.endObject();
+        return builder;
+    }
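toCompositeAggXContent below does not emit the pivot config itself but the sources array of a composite aggregation, one single-keyed object per group. Rendered through a builder, the output is roughly as follows (the pivotConfig instance and all field values are illustrative):

    XContentBuilder builder = XContentFactory.jsonBuilder();
    pivotConfig.toCompositeAggXContent(builder, ToXContent.EMPTY_PARAMS);
    // e.g. {"sources":[{"by_user":{"terms":{"field":"user"}}},
    //                  {"by_day":{"date_histogram":{"field":"timestamp","interval":"1d"}}}]}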
+
+    public void toCompositeAggXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(CompositeAggregationBuilder.SOURCES_FIELD_NAME.getPreferredName());
+        builder.startArray();
+
+        for (Entry<String, SingleGroupSource<?>> groupBy : groups.getGroups().entrySet()) {
+            builder.startObject();
+            builder.startObject(groupBy.getKey());
+            builder.field(groupBy.getValue().getType().value(), groupBy.getValue());
+            builder.endObject();
+            builder.endObject();
+        }
+
+        builder.endArray();
+        builder.endObject(); // sources
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        groups.writeTo(out);
+        aggregationConfig.writeTo(out);
+    }
+
+    public AggregationConfig getAggregationConfig() {
+        return aggregationConfig;
+    }
+
+    public GroupConfig getGroupConfig() {
+        return groups;
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (this == other) {
+            return true;
+        }
+
+        if (other == null || getClass() != other.getClass()) {
+            return false;
+        }
+
+        final PivotConfig that = (PivotConfig) other;
+
+        return Objects.equals(this.groups, that.groups) && Objects.equals(this.aggregationConfig, that.aggregationConfig);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(groups, aggregationConfig);
+    }
+
+    public boolean isValid() {
+        return groups.isValid() && aggregationConfig.isValid();
+    }
+
+    public static PivotConfig fromXContent(final XContentParser parser, boolean lenient) throws IOException {
+        return lenient ? LENIENT_PARSER.apply(parser, null) : STRICT_PARSER.apply(parser, null);
+    }
+}
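SchemaUtil in the next file deduces the destination index mapping and falls back to conservative defaults when it cannot: double for aggregated values, keyword for group keys. That policy, isolated as a dependency-free sketch:

    // Sketch of the fallback policy implemented by resolveMappings below.
    static String targetType(String deducedType, boolean isGroupKey) {
        if (deducedType != null) {
            return deducedType;                    // use the type deduced from the source index
        }
        return isGroupKey ? "keyword" : "double";  // conservative fallback
    }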
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java
new file mode 100644
index 0000000000000..619e4514d7674
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java
@@ -0,0 +1,143 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms.pivot;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsAction;
+import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest;
+import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
+import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class SchemaUtil {
+    private static final Logger logger = LogManager.getLogger(SchemaUtil.class);
+
+    private SchemaUtil() {
+    }
+
+    public static void deduceMappings(final Client client, final PivotConfig config, final String source,
+            final ActionListener<Map<String, String>> listener) {
+        // collects the field names used as sources for aggregations
+        Map<String, String> aggregationSourceFieldNames = new HashMap<>();
+        // collects the aggregation types by source name
+        Map<String, String> aggregationTypes = new HashMap<>();
+        // collects the source field names by target field name used for grouping
+        Map<String, String> fieldNamesForGrouping = new HashMap<>();
+
+        config.getGroupConfig().getGroups().forEach((destinationFieldName, group) -> {
+            fieldNamesForGrouping.put(destinationFieldName, group.getField());
+        });
+
+        for (AggregationBuilder agg : config.getAggregationConfig().getAggregatorFactories()) {
+            if (agg instanceof ValuesSourceAggregationBuilder) {
+                ValuesSourceAggregationBuilder<?, ?> valueSourceAggregation = (ValuesSourceAggregationBuilder<?, ?>) agg;
+                aggregationSourceFieldNames.put(valueSourceAggregation.getName(), valueSourceAggregation.field());
+                aggregationTypes.put(valueSourceAggregation.getName(), valueSourceAggregation.getType());
+            } else {
+                // execution should not reach this point
+                listener.onFailure(new RuntimeException("Unsupported aggregation type [" + agg.getType() + "]"));
+                return;
+            }
+        }
+
+        Map<String, String> allFieldNames = new HashMap<>();
+        allFieldNames.putAll(aggregationSourceFieldNames);
+        allFieldNames.putAll(fieldNamesForGrouping);
+
+        getSourceFieldMappings(client, source, allFieldNames.values().toArray(new String[0]),
+                ActionListener.wrap(sourceMappings -> {
+                    Map<String, String> targetMapping = resolveMappings(aggregationSourceFieldNames, aggregationTypes,
+                            fieldNamesForGrouping, sourceMappings);
+
+                    listener.onResponse(targetMapping);
+                }, e -> {
+                    listener.onFailure(e);
+                }));
+    }
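resolveMappings below leans on Aggregations.resolveTargetMapping (not part of this diff) to translate an aggregation type plus a source mapping into a destination type. As a rough illustration of the kind of table that method encodes (entries assumed, not exhaustive, and not the real implementation):

    // Illustrative only; the real table lives in Aggregations.resolveTargetMapping.
    static String resolveTargetMapping(String aggregationType, String sourceMapping) {
        switch (aggregationType) {
        case "avg":
            return "double";        // averages are fractional regardless of the source type
        case "max":
        case "min":
            return sourceMapping;   // extremes keep the source field's type
        default:
            return null;            // unknown -> caller falls back to "double"
        }
    }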
+
+    private static Map<String, String> resolveMappings(Map<String, String> aggregationSourceFieldNames,
+            Map<String, String> aggregationTypes, Map<String, String> fieldNamesForGrouping,
+            Map<String, String> sourceMappings) {
+        Map<String, String> targetMapping = new HashMap<>();
+
+        aggregationTypes.forEach((targetFieldName, aggregationName) -> {
+            String sourceFieldName = aggregationSourceFieldNames.get(targetFieldName);
+            String destinationMapping = Aggregations.resolveTargetMapping(aggregationName, sourceMappings.get(sourceFieldName));
+
+            logger.debug(
+                    "Deduced mapping for: [" + targetFieldName + "], agg type [" + aggregationName + "] to [" + destinationMapping + "]");
+            if (destinationMapping != null) {
+                targetMapping.put(targetFieldName, destinationMapping);
+            } else {
+                logger.warn("Failed to deduce mapping for [" + targetFieldName + "], fall back to double.");
+                targetMapping.put(targetFieldName, "double");
+            }
+        });
+
+        fieldNamesForGrouping.forEach((targetFieldName, sourceFieldName) -> {
+            String destinationMapping = sourceMappings.get(sourceFieldName);
+            logger.debug(
+                    "Deduced mapping for: [" + targetFieldName + "] to [" + destinationMapping + "]");
+            if (destinationMapping != null) {
+                targetMapping.put(targetFieldName, destinationMapping);
+            } else {
+                logger.warn("Failed to deduce mapping for [" + targetFieldName + "], fall back to keyword.");
+                targetMapping.put(targetFieldName, "keyword");
+            }
+        });
+        return targetMapping;
+    }
+
+    /*
+     * Very "magic" helper method to extract the source mappings
+     */
+    private static void getSourceFieldMappings(Client client, String index, String[] fields,
+            ActionListener<Map<String, String>> listener) {
+        GetFieldMappingsRequest fieldMappingRequest = new GetFieldMappingsRequest();
+        fieldMappingRequest.indices(index);
+        fieldMappingRequest.fields(fields);
+
+        client.execute(GetFieldMappingsAction.INSTANCE, fieldMappingRequest, ActionListener.wrap(response -> {
+            listener.onResponse(extractSourceFieldMappings(response.mappings()));
+        }, e -> {
+            listener.onFailure(e);
+        }));
+    }
+
+    private static Map<String, String> extractSourceFieldMappings(
+            Map<String, Map<String, Map<String, FieldMappingMetaData>>> mappings) {
+        Map<String, String> extractedTypes = new HashMap<>();
+
+        mappings.forEach((indexName, docTypeToMapping) -> {
+            // doc type, e.g. "_doc" -> (field name -> mapping metadata)
+            docTypeToMapping.forEach((docType, fieldNameToMapping) -> {
+                // field name, e.g. "my_field" -> mapping metadata
+                fieldNameToMapping.forEach((fieldName, fieldMapping) -> {
+                    // mapping source, e.g. "my_field" -> { "type": ... }
+                    fieldMapping.sourceAsMap().forEach((name, typeMap) -> {
+                        // expected object: { "type": type }
+                        if (typeMap instanceof Map) {
+                            final Map<?, ?> map = (Map<?, ?>) typeMap;
+                            if (map.containsKey("type")) {
+                                String type = map.get("type").toString();
+                                logger.debug("Extracted type for [" + fieldName + "] : [" + type + "]");
+                                // TODO: overwrites types, requires resolve if
+                                // types are mixed
+                                extractedTypes.put(fieldName, type);
+                            }
+                        }
+                    });
+                });
+            });
+        });
+        return extractedTypes;
+    }
+}
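The nested forEach in extractSourceFieldMappings above walks the get-field-mappings response shape index -> doc type -> field name -> metadata, where sourceAsMap() yields something like {"my_field": {"type": "keyword"}}. The innermost extraction, isolated as a sketch:

    // Sketch: pull the "type" out of one field's mapping source, or null if absent.
    static String extractType(String fieldName, Map<String, Object> sourceAsMap) {
        Object typeMap = sourceAsMap.get(fieldName);
        if (typeMap instanceof Map) {
            Object type = ((Map<?, ?>) typeMap).get("type");
            return type != null ? type.toString() : null;
        }
        return null;
    }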
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SingleGroupSource.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SingleGroupSource.java
new file mode 100644
index 0000000000000..9b309e59af4c3
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SingleGroupSource.java
@@ -0,0 +1,121 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms.pivot;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.AbstractObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.support.ValueType;
+
+import java.io.IOException;
+import java.util.Locale;
+import java.util.Objects;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
+
+/*
+ * Base class for a single source for group_by
+ */
+public abstract class SingleGroupSource<AB extends SingleGroupSource<AB>> implements Writeable, ToXContentObject {
+
+    public enum Type {
+        TERMS(0),
+        HISTOGRAM(1),
+        DATE_HISTOGRAM(2);
+
+        private final byte id;
+
+        Type(int id) {
+            this.id = (byte) id;
+        }
+
+        public byte getId() {
+            return id;
+        }
+
+        public static Type fromId(byte id) {
+            switch (id) {
+            case 0:
+                return TERMS;
+            case 1:
+                return HISTOGRAM;
+            case 2:
+                return DATE_HISTOGRAM;
+            default:
+                throw new IllegalArgumentException("unknown type");
+            }
+        }
+
+        public String value() {
+            return name().toLowerCase(Locale.ROOT);
+        }
+    }
+
+    protected static final ParseField FIELD = new ParseField("field");
+
+    // TODO: add script
+    protected final String field;
+
+    static <VB extends SingleGroupSource<?>, T> void declareValuesSourceFields(AbstractObjectParser<VB, T> parser,
+            ValueType targetValueType) {
+        // either script or field
+        parser.declareString(optionalConstructorArg(), FIELD);
+    }
+
+    public SingleGroupSource(final String field) {
+        this.field = field;
+    }
+
+    public SingleGroupSource(StreamInput in) throws IOException {
+        field = in.readOptionalString();
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        if (field != null) {
+            builder.field(FIELD.getPreferredName(), field);
+        }
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeOptionalString(field);
+    }
+
+    public abstract Type getType();
+
+    public String getField() {
+        return field;
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (this == other) {
+            return true;
+        }
+
+        if (other == null || getClass() != other.getClass()) {
+            return false;
+        }
+
+        final SingleGroupSource<?> that = (SingleGroupSource<?>) other;
+
+        return Objects.equals(this.field, that.field);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(field);
+    }
+}
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/TermsGroupSource.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/TermsGroupSource.java
new file mode 100644
index 0000000000000..b3073f0e1de21
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/TermsGroupSource.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.dataframe.transforms.pivot; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +/* + * A terms aggregation source for group_by + */ +public class TermsGroupSource extends SingleGroupSource { + private static final String NAME = "data_frame_terms_group"; + + private static final ConstructingObjectParser STRICT_PARSER = createParser(false); + private static final ConstructingObjectParser LENIENT_PARSER = createParser(true); + + private static ConstructingObjectParser createParser(boolean lenient) { + ConstructingObjectParser parser = new ConstructingObjectParser<>(NAME, lenient, (args) -> { + String field = (String) args[0]; + return new TermsGroupSource(field); + }); + + SingleGroupSource.declareValuesSourceFields(parser, null); + return parser; + } + + public TermsGroupSource(final String field) { + super(field); + } + + public TermsGroupSource(StreamInput in) throws IOException { + super(in); + } + + @Override + public Type getType() { + return Type.TERMS; + } + + public static TermsGroupSource fromXContent(final XContentParser parser, boolean lenient) throws IOException { + return lenient ? LENIENT_PARSER.apply(parser, null) : STRICT_PARSER.apply(parser, null); + } +} diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSetTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSetTests.java new file mode 100644 index 0000000000000..4dd667f04c84f --- /dev/null +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSetTests.java @@ -0,0 +1,152 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.dataframe; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureSet.Usage; +import org.elasticsearch.xpack.core.dataframe.transform.DataFrameIndexerTransformStats; +import org.elasticsearch.xpack.dataframe.action.DataFrameTransformStateAndStats; +import org.elasticsearch.xpack.dataframe.action.DataFrameTransformStateAndStatsTests; +import org.elasticsearch.xpack.dataframe.action.GetDataFrameTransformsStatsAction; +import org.elasticsearch.xpack.dataframe.action.GetDataFrameTransformsStatsAction.Response; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; + +import static java.lang.Math.toIntExact; +import static org.hamcrest.core.Is.is; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.same; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class DataFrameFeatureSetTests extends ESTestCase { + private XPackLicenseState licenseState; + + @Before + public void init() { + licenseState = mock(XPackLicenseState.class); + } + + public void testAvailable() { + DataFrameFeatureSet featureSet = new DataFrameFeatureSet(Settings.EMPTY, mock(Client.class), licenseState); + boolean available = randomBoolean(); + when(licenseState.isDataFrameAllowed()).thenReturn(available); + assertThat(featureSet.available(), is(available)); + } + + public void testEnabledSetting() { + boolean enabled = randomBoolean(); + Settings.Builder settings = Settings.builder(); + settings.put("xpack.data_frame.enabled", enabled); + DataFrameFeatureSet featureSet = new DataFrameFeatureSet(settings.build(), mock(Client.class), licenseState); + assertThat(featureSet.enabled(), is(enabled)); + } + + public void testEnabledDefault() { + DataFrameFeatureSet featureSet = new DataFrameFeatureSet(Settings.EMPTY, mock(Client.class), licenseState); + assertTrue(featureSet.enabled()); + } + + public void testUsage() throws InterruptedException, ExecutionException, IOException { + Client client = mock(Client.class); + when(licenseState.isDataFrameAllowed()).thenReturn(true); + + DataFrameFeatureSet featureSet = new DataFrameFeatureSet(Settings.EMPTY, client, licenseState); + + List transformsStateAndStats = new ArrayList<>(); + for (int i = 0; i < randomIntBetween(0, 10); ++i) { + transformsStateAndStats.add(DataFrameTransformStateAndStatsTests.randomDataFrameTransformStateAndStats()); + } + + GetDataFrameTransformsStatsAction.Response mockResponse = new GetDataFrameTransformsStatsAction.Response(transformsStateAndStats); + + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + 
listener.onResponse(mockResponse); + return Void.TYPE; + }).when(client).execute(same(GetDataFrameTransformsStatsAction.INSTANCE), any(), any()); + + PlainActionFuture future = new PlainActionFuture<>(); + featureSet.usage(future); + XPackFeatureSet.Usage usage = future.get(); + + assertTrue(usage.enabled()); + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + usage.toXContent(builder, ToXContent.EMPTY_PARAMS); + + XContentParser parser = createParser(builder); + Map usageAsMap = parser.map(); + assertTrue((boolean) XContentMapValues.extractValue("available", usageAsMap)); + + if (transformsStateAndStats.isEmpty()) { + // no transforms, no stats + assertEquals(null, XContentMapValues.extractValue("transforms", usageAsMap)); + assertEquals(null, XContentMapValues.extractValue("stats", usageAsMap)); + } else { + assertEquals(transformsStateAndStats.size(), XContentMapValues.extractValue("transforms._all", usageAsMap)); + + Map stateCounts = new HashMap<>(); + transformsStateAndStats.stream().map(x -> x.getTransformState().getIndexerState().value()) + .forEach(x -> stateCounts.merge(x, 1, Integer::sum)); + stateCounts.forEach((k, v) -> assertEquals(v, XContentMapValues.extractValue("transforms." + k, usageAsMap))); + + DataFrameIndexerTransformStats combinedStats = transformsStateAndStats.stream().map(x -> x.getTransformStats()) + .reduce((l, r) -> l.merge(r)).get(); + + assertEquals(toIntExact(combinedStats.getIndexFailures()), + XContentMapValues.extractValue("stats.index_failures", usageAsMap)); + assertEquals(toIntExact(combinedStats.getIndexTotal()), XContentMapValues.extractValue("stats.index_total", usageAsMap)); + assertEquals(toIntExact(combinedStats.getSearchTime()), + XContentMapValues.extractValue("stats.search_time_in_ms", usageAsMap)); + assertEquals(toIntExact(combinedStats.getNumDocuments()), + XContentMapValues.extractValue("stats.documents_processed", usageAsMap)); + } + } + } + + public void testUsageDisabled() throws IOException, InterruptedException, ExecutionException { + when(licenseState.isDataFrameAllowed()).thenReturn(true); + Settings.Builder settings = Settings.builder(); + settings.put("xpack.data_frame.enabled", false); + DataFrameFeatureSet featureSet = new DataFrameFeatureSet(settings.build(), mock(Client.class), licenseState); + PlainActionFuture future = new PlainActionFuture<>(); + featureSet.usage(future); + XPackFeatureSet.Usage usage = future.get(); + + assertFalse(usage.enabled()); + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + usage.toXContent(builder, ToXContent.EMPTY_PARAMS); + + XContentParser parser = createParser(builder); + Map usageAsMap = parser.map(); + assertTrue((boolean) XContentMapValues.extractValue("available", usageAsMap)); + assertFalse((boolean) XContentMapValues.extractValue("enabled", usageAsMap)); + // not enabled -> no transforms, no stats + assertEquals(null, XContentMapValues.extractValue("transforms", usageAsMap)); + assertEquals(null, XContentMapValues.extractValue("stats", usageAsMap)); + } + } +} diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/LocalStateDataFrame.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/LocalStateDataFrame.java new file mode 100644 index 0000000000000..f4b3221ec9d62 --- /dev/null +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/LocalStateDataFrame.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.dataframe; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import java.nio.file.Path; + +public class LocalStateDataFrame extends LocalStateCompositeXPackPlugin { + + public LocalStateDataFrame(final Settings settings, final Path configPath) throws Exception { + super(settings, configPath); + @SuppressWarnings("resource") + LocalStateDataFrame thisVar = this; + + plugins.add(new DataFrame(settings) { + @Override + protected XPackLicenseState getLicenseState() { + return thisVar.getLicenseState(); + } + }); + } +} diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/DataFrameTransformStateAndStatsTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/DataFrameTransformStateAndStatsTests.java new file mode 100644 index 0000000000000..edc46897f38c0 --- /dev/null +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/DataFrameTransformStateAndStatsTests.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.dataframe.transform.DataFrameIndexerTransformStatsTests; +import org.elasticsearch.xpack.core.dataframe.transform.DataFrameTransformStateTests; +import org.elasticsearch.xpack.dataframe.transforms.AbstractSerializingDataFrameTestCase; + +import java.io.IOException; + +public class DataFrameTransformStateAndStatsTests extends AbstractSerializingDataFrameTestCase { + + public static DataFrameTransformStateAndStats randomDataFrameTransformStateAndStats() { + return new DataFrameTransformStateAndStats(randomAlphaOfLengthBetween(1, 10), + DataFrameTransformStateTests.randomDataFrameTransformState(), + DataFrameIndexerTransformStatsTests.randomStats()); + } + + @Override + protected DataFrameTransformStateAndStats doParseInstance(XContentParser parser) throws IOException { + return DataFrameTransformStateAndStats.PARSER.apply(parser, null); + } + + @Override + protected DataFrameTransformStateAndStats createTestInstance() { + return randomDataFrameTransformStateAndStats(); + } + + @Override + protected Reader instanceReader() { + return DataFrameTransformStateAndStats::new; + } + +} diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/DeleteDataFrameTransformActionRequestTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/DeleteDataFrameTransformActionRequestTests.java new file mode 100644 index 0000000000000..d9825cfd5fb3b --- /dev/null +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/DeleteDataFrameTransformActionRequestTests.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.dataframe.action.DeleteDataFrameTransformAction.Request; + +public class DeleteDataFrameTransformActionRequestTests extends AbstractWireSerializingTestCase { + @Override + protected Request createTestInstance() { + return new Request(randomAlphaOfLengthBetween(1, 20)); + } + + @Override + protected Writeable.Reader instanceReader() { + return Request::new; + } +} diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/GetDataFrameTransformsActionRequestTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/GetDataFrameTransformsActionRequestTests.java new file mode 100644 index 0000000000000..87ec615e6c169 --- /dev/null +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/GetDataFrameTransformsActionRequestTests.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.dataframe.action.GetDataFrameTransformsAction.Request; + +public class GetDataFrameTransformsActionRequestTests extends AbstractWireSerializingTestCase { + + @Override + protected Request createTestInstance() { + if (randomBoolean()) { + return new Request(MetaData.ALL); + } + return new Request(randomAlphaOfLengthBetween(1, 20)); + } + + @Override + protected Writeable.Reader instanceReader() { + return Request::new; + } +} diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/GetDataFrameTransformsActionResponseTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/GetDataFrameTransformsActionResponseTests.java new file mode 100644 index 0000000000000..0872eb3d7bd3e --- /dev/null +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/GetDataFrameTransformsActionResponseTests.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.common.logging.LoggerMessageFormat; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.watch.Payload.XContent; +import org.elasticsearch.xpack.dataframe.action.GetDataFrameTransformsAction.Response; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformConfigTests; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public class GetDataFrameTransformsActionResponseTests extends ESTestCase { + + public void testInvalidTransforms() throws IOException { + List transforms = new ArrayList<>(); + + transforms.add(DataFrameTransformConfigTests.randomDataFrameTransformConfig()); + transforms.add(DataFrameTransformConfigTests.randomInvalidDataFrameTransformConfig()); + transforms.add(DataFrameTransformConfigTests.randomDataFrameTransformConfig()); + transforms.add(DataFrameTransformConfigTests.randomInvalidDataFrameTransformConfig()); + + Response r = new Response(transforms); + XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + r.toXContent(builder, XContent.EMPTY_PARAMS); + Map responseAsMap = createParser(builder).map(); + assertEquals(2, XContentMapValues.extractValue("invalid_transforms.count", responseAsMap)); + List expectedInvalidTransforms = new ArrayList<>(); + expectedInvalidTransforms.add(transforms.get(1).getId()); + expectedInvalidTransforms.add(transforms.get(3).getId()); + assertEquals(expectedInvalidTransforms, XContentMapValues.extractValue("invalid_transforms.transforms", responseAsMap)); + assertWarnings(LoggerMessageFormat.format(Response.INVALID_TRANSFORMS_DEPRECATION_WARNING, 2)); + } +} diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/GetDataFrameTransformsStatsActionRequestTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/GetDataFrameTransformsStatsActionRequestTests.java new file mode 100644 index 0000000000000..0751a8fd6cda5 --- /dev/null +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/GetDataFrameTransformsStatsActionRequestTests.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.dataframe.action.GetDataFrameTransformsStatsAction.Request; + +public class GetDataFrameTransformsStatsActionRequestTests extends AbstractWireSerializingTestCase { + @Override + protected Request createTestInstance() { + if (randomBoolean()) { + return new Request(MetaData.ALL); + } + return new Request(randomAlphaOfLengthBetween(1, 20)); + } + + @Override + protected Writeable.Reader instanceReader() { + return Request::new; + } +} diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/PutDataFrameTransformActionRequestTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/PutDataFrameTransformActionRequestTests.java new file mode 100644 index 0000000000000..e2dc9edfe54db --- /dev/null +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/PutDataFrameTransformActionRequestTests.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.dataframe.action.PutDataFrameTransformAction.Request; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformConfigTests; +import org.junit.Before; + +import java.io.IOException; + +import static java.util.Collections.emptyList; + +public class PutDataFrameTransformActionRequestTests extends AbstractStreamableXContentTestCase { + + private String transformId; + + private NamedWriteableRegistry namedWriteableRegistry; + private NamedXContentRegistry namedXContentRegistry; + + @Before + public void registerAggregationNamedObjects() throws Exception { + // register aggregations as NamedWriteable + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList()); + namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables()); + namedXContentRegistry = new NamedXContentRegistry(searchModule.getNamedXContents()); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return namedWriteableRegistry; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return namedXContentRegistry; + } + + @Before + public void setupTransformId() { + transformId = randomAlphaOfLengthBetween(1, 10); + } + + @Override + protected Request doParseInstance(XContentParser parser) throws IOException { + return Request.fromXContent(parser, transformId); + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request createTestInstance() { + 
+        DataFrameTransformConfig config = DataFrameTransformConfigTests.randomDataFrameTransformConfig();
+        return new Request(config);
+    }
+
+}
diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/StartDataFrameTransformActionTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/StartDataFrameTransformActionTests.java
new file mode 100644
index 0000000000000..9ac38fe2d6f81
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/StartDataFrameTransformActionTests.java
@@ -0,0 +1,22 @@
+package org.elasticsearch.xpack.dataframe.action;
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.xpack.dataframe.action.StartDataFrameTransformAction.Request;
+
+public class StartDataFrameTransformActionTests extends AbstractWireSerializingTestCase<Request> {
+    @Override
+    protected Request createTestInstance() {
+        return new Request(randomAlphaOfLengthBetween(1, 20));
+    }
+
+    @Override
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
+    }
+}
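These small request tests all rely on the same contract: AbstractWireSerializingTestCase serializes createTestInstance() and reads it back through instanceReader(), expecting an equal object. The core of that round trip, sketched and simplified (the real base class also exercises equals/hashCode consistency on mutated copies):

    // Simplified round trip performed by the base class for each random instance.
    Request original = createTestInstance();
    BytesStreamOutput output = new BytesStreamOutput();
    original.writeTo(output);
    Request copy = instanceReader().read(output.bytes().streamInput());
    assertEquals(original, copy);
    assertEquals(original.hashCode(), copy.hashCode());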
diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/StopDataFrameTransformActionRequestTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/StopDataFrameTransformActionRequestTests.java
new file mode 100644
index 0000000000000..09e835ef8b7bc
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/StopDataFrameTransformActionRequestTests.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.action;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.xpack.dataframe.action.StopDataFrameTransformAction.Request;
+
+public class StopDataFrameTransformActionRequestTests extends AbstractWireSerializingTestCase<Request> {
+
+    @Override
+    protected Request createTestInstance() {
+        TimeValue timeout = randomBoolean() ? TimeValue.timeValueMinutes(randomIntBetween(1, 10)) : null;
+        return new Request(randomAlphaOfLengthBetween(1, 10), randomBoolean(), timeout);
+    }
+
+    @Override
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
+    }
+
+    public void testSameButDifferentTimeout() {
+        String id = randomAlphaOfLengthBetween(1, 10);
+        boolean waitForCompletion = randomBoolean();
+
+        Request r1 = new Request(id, waitForCompletion, TimeValue.timeValueSeconds(10));
+        Request r2 = new Request(id, waitForCompletion, TimeValue.timeValueSeconds(20));
+
+        assertNotEquals(r1, r2);
+        assertNotEquals(r1.hashCode(), r2.hashCode());
+    }
+}
diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameSingleNodeTestCase.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameSingleNodeTestCase.java
new file mode 100644
index 0000000000000..d1691fd094d23
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameSingleNodeTestCase.java
@@ -0,0 +1,75 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.persistence;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.LatchedActionListener;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.CheckedConsumer;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.xpack.core.template.TemplateUtils;
+import org.elasticsearch.xpack.dataframe.LocalStateDataFrame;
+import org.junit.Before;
+
+import java.util.Collection;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
+
+public abstract class DataFrameSingleNodeTestCase extends ESSingleNodeTestCase {
+
+    @Before
+    public void waitForTemplates() throws Exception {
+        assertBusy(() -> {
+            ClusterState state = client().admin().cluster().prepareState().get().getState();
+            assertTrue("Timed out waiting for the data frame templates to be installed",
+                    TemplateUtils.checkTemplateExistsAndVersionIsGTECurrentVersion(DataFrameInternalIndex.INDEX_TEMPLATE_NAME, state));
+        });
+    }
+
+    @Override
+    protected Settings nodeSettings() {
+        Settings.Builder newSettings = Settings.builder();
+        newSettings.put(super.nodeSettings());
+
+        return newSettings.build();
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> getPlugins() {
+        return pluginList(LocalStateDataFrame.class);
+    }
+
+    protected <T> void assertAsync(Consumer<ActionListener<T>> function, T expected, CheckedConsumer<T, Exception> onAnswer,
+            Consumer<Exception> onException) throws InterruptedException {
+
+        CountDownLatch latch = new CountDownLatch(1);
+
+        LatchedActionListener<T> listener = new LatchedActionListener<>(ActionListener.wrap(r -> {
+            if (expected == null) {
+                fail("expected an exception but got a response");
+            } else {
+                assertEquals(r, expected);
+            }
+            if (onAnswer != null) {
+                onAnswer.accept(r);
+            }
+        }, e -> {
+            if (onException == null) {
+                fail("got unexpected exception: " + e.getMessage());
+            } else {
+                onException.accept(e);
+            }
+        }), latch);
+
+        function.accept(listener);
+        latch.await(10, TimeUnit.SECONDS);
+    }
+
+}
diff --git
a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java new file mode 100644 index 0000000000000..2efee3faa2d6f --- /dev/null +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.persistence; + +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformConfigTests; +import org.junit.Before; + +public class DataFrameTransformsConfigManagerTests extends DataFrameSingleNodeTestCase { + + private DataFrameTransformsConfigManager transformsConfigManager; + + @Before + public void createComponents() { + transformsConfigManager = new DataFrameTransformsConfigManager(client(), xContentRegistry()); + } + + public void testGetMissingTransform() throws InterruptedException { + // the index does not exist yet + assertAsync(listener -> transformsConfigManager.getTransformConfiguration("not_there", listener), (DataFrameTransformConfig) null, + null, e -> { + assertEquals(ResourceNotFoundException.class, e.getClass()); + assertEquals(DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, "not_there"), + e.getMessage()); + }); + + // create one transform and test with an existing index + assertAsync( + listener -> transformsConfigManager + .putTransformConfiguration(DataFrameTransformConfigTests.randomDataFrameTransformConfig(), listener), + true, null, null); + + // same test, but different code path + assertAsync(listener -> transformsConfigManager.getTransformConfiguration("not_there", listener), (DataFrameTransformConfig) null, + null, e -> { + assertEquals(ResourceNotFoundException.class, e.getClass()); + assertEquals(DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, "not_there"), + e.getMessage()); + }); + } + + public void testDeleteMissingTransform() throws InterruptedException { + // the index does not exist yet + assertAsync(listener -> transformsConfigManager.deleteTransformConfiguration("not_there", listener), (Boolean) null, null, e -> { + assertEquals(ResourceNotFoundException.class, e.getClass()); + assertEquals(DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, "not_there"), e.getMessage()); + }); + + // create one transform and test with an existing index + assertAsync( + listener -> transformsConfigManager + .putTransformConfiguration(DataFrameTransformConfigTests.randomDataFrameTransformConfig(), listener), + true, null, null); + + // same test, but different code path + assertAsync(listener -> transformsConfigManager.deleteTransformConfiguration("not_there", listener), (Boolean) null, null, e -> { + assertEquals(ResourceNotFoundException.class, e.getClass()); + 
+            assertEquals(DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, "not_there"), e.getMessage());
+        });
+    }
+
+    public void testCreateReadDelete() throws InterruptedException {
+        DataFrameTransformConfig transformConfig = DataFrameTransformConfigTests.randomDataFrameTransformConfig();
+
+        // create transform
+        assertAsync(listener -> transformsConfigManager.putTransformConfiguration(transformConfig, listener), true, null, null);
+
+        // read transform
+        assertAsync(listener -> transformsConfigManager.getTransformConfiguration(transformConfig.getId(), listener), transformConfig, null,
+            null);
+
+        // try to create again
+        assertAsync(listener -> transformsConfigManager.putTransformConfiguration(transformConfig, listener), (Boolean) null, null, e -> {
+            assertEquals(ResourceAlreadyExistsException.class, e.getClass());
+            assertEquals(DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_TRANSFORM_EXISTS, transformConfig.getId()),
+                e.getMessage());
+        });
+
+        // delete transform
+        assertAsync(listener -> transformsConfigManager.deleteTransformConfiguration(transformConfig.getId(), listener), true, null, null);
+
+        // delete again
+        assertAsync(listener -> transformsConfigManager.deleteTransformConfiguration(transformConfig.getId(), listener), (Boolean) null,
+            null, e -> {
+                assertEquals(ResourceNotFoundException.class, e.getClass());
+                assertEquals(DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, transformConfig.getId()),
+                    e.getMessage());
+            });
+
+        // try to get deleted transform
+        assertAsync(listener -> transformsConfigManager.getTransformConfiguration(transformConfig.getId(), listener),
+            (DataFrameTransformConfig) null, null, e -> {
+                assertEquals(ResourceNotFoundException.class, e.getClass());
+                assertEquals(DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, transformConfig.getId()),
+                    e.getMessage());
+            });
+    }
+}
diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/AbstractSerializingDataFrameTestCase.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/AbstractSerializingDataFrameTestCase.java
new file mode 100644
index 0000000000000..0b7697c7e4cc7
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/AbstractSerializingDataFrameTestCase.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.SearchModule;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
+import org.elasticsearch.search.aggregations.BaseAggregationBuilder;
+import org.elasticsearch.test.AbstractSerializingTestCase;
+import org.junit.Before;
+
+import java.util.List;
+
+import static java.util.Collections.emptyList;
+
+public abstract class AbstractSerializingDataFrameTestCase<T extends ToXContent & Writeable>
+        extends AbstractSerializingTestCase<T> {
+
+    /**
+     * Test case that ensures aggregation named objects are registered
+     */
+    private NamedWriteableRegistry namedWriteableRegistry;
+    private NamedXContentRegistry namedXContentRegistry;
+
+    @Before
+    public void registerAggregationNamedObjects() throws Exception {
+        // register aggregations as NamedWriteable
+        SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList());
+
+        List<NamedWriteableRegistry.Entry> namedWriteables = searchModule.getNamedWriteables();
+        namedWriteables.add(new NamedWriteableRegistry.Entry(QueryBuilder.class, MockDeprecatedQueryBuilder.NAME,
+            MockDeprecatedQueryBuilder::new));
+        namedWriteables.add(new NamedWriteableRegistry.Entry(AggregationBuilder.class, MockDeprecatedAggregationBuilder.NAME,
+            MockDeprecatedAggregationBuilder::new));
+
+        List<NamedXContentRegistry.Entry> namedXContents = searchModule.getNamedXContents();
+        namedXContents.add(new NamedXContentRegistry.Entry(QueryBuilder.class,
+            new ParseField(MockDeprecatedQueryBuilder.NAME), (p, c) -> MockDeprecatedQueryBuilder.fromXContent(p)));
+        namedXContents.add(new NamedXContentRegistry.Entry(BaseAggregationBuilder.class,
+            new ParseField(MockDeprecatedAggregationBuilder.NAME), (p, c) -> MockDeprecatedAggregationBuilder.fromXContent(p)));
+
+        namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables);
+        namedXContentRegistry = new NamedXContentRegistry(namedXContents);
+    }
+
+    @Override
+    protected NamedWriteableRegistry getNamedWriteableRegistry() {
+        return namedWriteableRegistry;
+    }
+
+    @Override
+    protected NamedXContentRegistry xContentRegistry() {
+        return namedXContentRegistry;
+    }
+}
diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformConfigTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformConfigTests.java
new file mode 100644
index 0000000000000..daabe1cccaa39
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformConfigTests.java
@@ -0,0 +1,99 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.common.xcontent.DeprecationHandler;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.xpack.dataframe.transforms.pivot.PivotConfigTests;
+import org.junit.Before;
+
+import java.io.IOException;
+
+import static org.elasticsearch.test.TestMatchers.matchesPattern;
+
+public class DataFrameTransformConfigTests extends AbstractSerializingDataFrameTestCase<DataFrameTransformConfig> {
+
+    private String transformId;
+
+    public static DataFrameTransformConfig randomDataFrameTransformConfig() {
+        return new DataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10),
+            randomAlphaOfLengthBetween(1, 10), QueryConfigTests.randomQueryConfig(), PivotConfigTests.randomPivotConfig());
+    }
+
+    public static DataFrameTransformConfig randomInvalidDataFrameTransformConfig() {
+        if (randomBoolean()) {
+            return new DataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10),
+                randomAlphaOfLengthBetween(1, 10), QueryConfigTests.randomInvalidQueryConfig(), PivotConfigTests.randomPivotConfig());
+        } // else
+        return new DataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10),
+            randomAlphaOfLengthBetween(1, 10), QueryConfigTests.randomQueryConfig(), PivotConfigTests.randomInvalidPivotConfig());
+    }
+
+    @Before
+    public void setUpOptionalId() {
+        transformId = randomAlphaOfLengthBetween(1, 10);
+    }
+
+    @Override
+    protected DataFrameTransformConfig doParseInstance(XContentParser parser) throws IOException {
+        if (randomBoolean()) {
+            return DataFrameTransformConfig.fromXContent(parser, transformId, false);
+        } else {
+            return DataFrameTransformConfig.fromXContent(parser, null, false);
+        }
+    }
+
+    @Override
+    protected DataFrameTransformConfig createTestInstance() {
+        return randomDataFrameTransformConfig();
+    }
+
+    @Override
+    protected Reader<DataFrameTransformConfig> instanceReader() {
+        return DataFrameTransformConfig::new;
+    }
+
+    public void testDefaultMatchAll() throws IOException {
+        String pivotTransform = "{" +
+            " \"source\" : \"src\"," +
+            " \"dest\" : \"dest\"," +
+            " \"pivot\" : {" +
+            " \"group_by\": {" +
+            " \"id\": {" +
+            " \"terms\": {" +
+            " \"field\": \"id\"" +
+            "} } }," +
+            " \"aggs\": {" +
+            " \"avg\": {" +
+            " \"avg\": {" +
+            " \"field\": \"points\"" +
+            "} } } } }";
+
+        DataFrameTransformConfig dataFrameTransformConfig = createDataFrameTransformConfigFromString(pivotTransform, "test_match_all");
+        assertNotNull(dataFrameTransformConfig.getQueryConfig());
+        assertTrue(dataFrameTransformConfig.getQueryConfig().isValid());
+
+        try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) {
+            XContentBuilder content = dataFrameTransformConfig.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS);
+            String pivotTransformWithIdAndDefaults = Strings.toString(content);
+
+            assertThat(pivotTransformWithIdAndDefaults, matchesPattern(".*\"match_all\"\\s*:\\s*\\{\\}.*"));
+        }
+    }
+
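+    // helper to parse a DataFrameTransformConfig from a raw JSON string, optionally supplying a transform id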
+    private DataFrameTransformConfig createDataFrameTransformConfigFromString(String json, String id) throws IOException {
+        final XContentParser parser = XContentType.JSON.xContent().createParser(xContentRegistry(),
+            DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
+        return DataFrameTransformConfig.fromXContent(parser, id, false);
+    }
+}
diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/MockDeprecatedAggregationBuilder.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/MockDeprecatedAggregationBuilder.java
new file mode 100644
index 0000000000000..d9d546942401b
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/MockDeprecatedAggregationBuilder.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms;
+
+import org.apache.logging.log4j.LogManager;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
+import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.support.ValueType;
+import org.elasticsearch.search.aggregations.support.ValuesSource;
+import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
+import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.ValuesSourceType;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+public class MockDeprecatedAggregationBuilder extends ValuesSourceAggregationBuilder<ValuesSource, MockDeprecatedAggregationBuilder> {
+
+    public static final String NAME = "deprecated_agg";
+    public static final String DEPRECATION_MESSAGE = "expected deprecation message from MockDeprecatedAggregationBuilder";
+
+    private static final DeprecationLogger deprecationLogger = new DeprecationLogger(
+        LogManager.getLogger(MockDeprecatedAggregationBuilder.class));
+
+    protected MockDeprecatedAggregationBuilder(MockDeprecatedAggregationBuilder clone, Builder factoriesBuilder,
+            Map<String, Object> metaData) {
+        super(clone, factoriesBuilder, metaData);
+    }
+
+    @Override
+    protected AggregationBuilder shallowCopy(Builder factoriesBuilder, Map<String, Object> metaData) {
+        return new MockDeprecatedAggregationBuilder(this, factoriesBuilder, metaData);
+    }
+
+    public MockDeprecatedAggregationBuilder() {
+        super(NAME, ValuesSourceType.NUMERIC, ValueType.NUMERIC);
+    }
+
+    /**
+     * Read from a stream.
+     */
+    protected MockDeprecatedAggregationBuilder(StreamInput in) throws IOException {
+        super(in, null, null);
+    }
+
+    @Override
+    public String getType() {
+        return NAME;
+    }
+
+    @Override
+    protected void innerWriteTo(StreamOutput out) throws IOException {
+    }
+
+    @Override
+    protected ValuesSourceAggregatorFactory<ValuesSource, ?> innerBuild(SearchContext context, ValuesSourceConfig<ValuesSource> config,
+            AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException {
+        return null;
+    }
+
+    @Override
+    protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
+        return null;
+    }
+
+    @Override
+    protected int innerHashCode() {
+        return 0;
+    }
+
+    @Override
+    protected boolean innerEquals(Object obj) {
+        return false;
+    }
+
+    public static MockDeprecatedAggregationBuilder fromXContent(XContentParser p) {
+        deprecationLogger.deprecatedAndMaybeLog("deprecated_mock", DEPRECATION_MESSAGE);
+        return new MockDeprecatedAggregationBuilder();
+    }
+}
diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/MockDeprecatedQueryBuilder.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/MockDeprecatedQueryBuilder.java
new file mode 100644
index 0000000000000..223a7100d3109
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/MockDeprecatedQueryBuilder.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.AbstractQueryBuilder;
+import org.elasticsearch.index.query.QueryShardContext;
+
+import java.io.IOException;
+
+/*
+ * Utility test class to write a deprecation message on usage
+ */
+public class MockDeprecatedQueryBuilder extends AbstractQueryBuilder<MockDeprecatedQueryBuilder> {
+
+    public static final String NAME = "deprecated_match_all";
+    public static final String DEPRECATION_MESSAGE = "expected deprecation message from MockDeprecatedQueryBuilder";
+
+    private static final DeprecationLogger deprecationLogger = new DeprecationLogger(
+        LogManager.getLogger(MockDeprecatedQueryBuilder.class));
+
+    private static final ObjectParser<MockDeprecatedQueryBuilder, Void> PARSER = new ObjectParser<>(NAME, MockDeprecatedQueryBuilder::new);
+
+    static {
+        declareStandardFields(PARSER);
+    }
+
+    public MockDeprecatedQueryBuilder() {
+    }
+
+    public MockDeprecatedQueryBuilder(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    public static MockDeprecatedQueryBuilder fromXContent(XContentParser parser) {
+        try {
+            deprecationLogger.deprecatedAndMaybeLog("deprecated_mock", DEPRECATION_MESSAGE);
+
+            return PARSER.apply(parser, null);
+        } catch (IllegalArgumentException e) {
+            throw new ParsingException(parser.getTokenLocation(), e.getMessage(), e);
+        }
+    }
+
+    @Override
+    public String getWriteableName() {
+        return NAME;
+    }
+
+    @Override
+    protected void doWriteTo(StreamOutput out) throws IOException {
+    }
+
+    @Override
+    protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject(NAME);
+        printBoostAndQueryName(builder);
+        builder.endObject();
+    }
+
+    @Override
+    protected Query doToQuery(QueryShardContext context) throws IOException {
+        return Queries.newMatchAllQuery();
+    }
+
+    @Override
+    protected boolean doEquals(MockDeprecatedQueryBuilder other) {
+        return true;
+    }
+
+    @Override
+    protected int doHashCode() {
+        return 0;
+    }
+}
\ No newline at end of file
diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/QueryConfigTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/QueryConfigTests.java
new file mode 100644
index 0000000000000..8d64eae9b44cc
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/QueryConfigTests.java
@@ -0,0 +1,142 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms;
+
+import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.query.MatchAllQueryBuilder;
+import org.elasticsearch.index.query.MatchNoneQueryBuilder;
+import org.elasticsearch.index.query.MatchQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.LinkedHashMap;
+
+public class QueryConfigTests extends AbstractSerializingDataFrameTestCase<QueryConfig> {
+
+    private boolean lenient;
+
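+    // builds a random but valid QueryConfig: the raw source map is rendered from the same query builder it wraps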
+    public static QueryConfig randomQueryConfig() {
+
+        QueryBuilder queryBuilder = randomBoolean() ? new MatchAllQueryBuilder() : new MatchNoneQueryBuilder();
+        LinkedHashMap<String, Object> source = null;
+
+        try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) {
+            XContentBuilder content = queryBuilder.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS);
+            source = (LinkedHashMap<String, Object>) XContentHelper.convertToMap(BytesReference.bytes(content), true, XContentType.JSON)
+                .v2();
+        } catch (IOException e) {
+            // should not happen
+            fail("failed to create random query config");
+        }
+
+        return new QueryConfig(source, queryBuilder);
+    }
+
+    public static QueryConfig randomInvalidQueryConfig() {
+        // create something broken but with a source
+        LinkedHashMap<String, Object> source = new LinkedHashMap<>();
+        for (String key : randomUnique(() -> randomAlphaOfLengthBetween(1, 20), randomIntBetween(1, 10))) {
+            source.put(key, randomAlphaOfLengthBetween(1, 20));
+        }
+
+        return new QueryConfig(source, null);
+    }
+
+    @Before
+    public void setRandomFeatures() {
+        lenient = randomBoolean();
+    }
+
+    @Override
+    protected QueryConfig doParseInstance(XContentParser parser) throws IOException {
+        return QueryConfig.fromXContent(parser, lenient);
+    }
+
+    @Override
+    protected QueryConfig createTestInstance() {
+        return lenient ? randomBoolean() ? randomQueryConfig() : randomInvalidQueryConfig() : randomQueryConfig();
+    }
+
+    @Override
+    protected Reader<QueryConfig> instanceReader() {
+        return QueryConfig::new;
+    }
+
+    public void testValidQueryParsing() throws IOException {
+        QueryBuilder query = new MatchQueryBuilder("key", "value");
+        String source = query.toString();
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) {
+            QueryConfig queryConfig = QueryConfig.fromXContent(parser, true);
+            assertEquals(query, queryConfig.getQuery());
+            assertTrue(queryConfig.isValid());
+        }
+    }
+
+    public void testFailOnStrictPassOnLenient() throws IOException {
+        String source = "{\"query_element_does_not_exist\" : {}}";
+
+        // lenient, passes but reports invalid
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) {
+            QueryConfig query = QueryConfig.fromXContent(parser, true);
+            assertFalse(query.isValid());
+        }
+
+        // strict throws
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) {
+            expectThrows(ParsingException.class, () -> QueryConfig.fromXContent(parser, false));
+        }
+    }
+
+    public void testFailOnEmptyQuery() throws IOException {
+        String source = "";
+
+        // lenient, passes but reports invalid
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) {
+            QueryConfig query = QueryConfig.fromXContent(parser, true);
+            assertFalse(query.isValid());
+        }
+
+        // strict throws
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) {
+            expectThrows(IllegalArgumentException.class, () -> QueryConfig.fromXContent(parser, false));
+        }
+    }
+
+    public void testFailOnEmptyQueryClause() throws IOException {
+        String source = "{}";
+
+        // lenient, passes but reports invalid
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) {
+            QueryConfig query = QueryConfig.fromXContent(parser, true);
+            assertFalse(query.isValid());
+        }
+
+        // strict throws
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) {
+            expectThrows(IllegalArgumentException.class, () -> QueryConfig.fromXContent(parser, false));
+        }
+    }
+
+    public void testDeprecation() throws IOException {
+        String source = "{\"" + MockDeprecatedQueryBuilder.NAME + "\" : {}}";
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) {
+            QueryConfig query = QueryConfig.fromXContent(parser, false);
+            assertTrue(query.isValid());
+            assertWarnings(MockDeprecatedQueryBuilder.DEPRECATION_MESSAGE);
+        }
+    }
+}
diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationConfigTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationConfigTests.java
new file mode 100644
index 0000000000000..ccf9090182349
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationConfigTests.java
@@ -0,0 +1,150 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms.pivot;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.common.xcontent.NamedObjectNotFoundException;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.xpack.dataframe.transforms.AbstractSerializingDataFrameTestCase;
+import org.elasticsearch.xpack.dataframe.transforms.MockDeprecatedAggregationBuilder;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Set;
+
+public class AggregationConfigTests extends AbstractSerializingDataFrameTestCase<AggregationConfig> {
+
+    private boolean lenient;
+
+    public static AggregationConfig randomAggregationConfig() {
+
+        AggregatorFactories.Builder builder = new AggregatorFactories.Builder();
+        Map<String, Object> source = null;
+
+        // ensure that the unlikely does not happen: 2 aggs share the same name
+        Set<String> names = new HashSet<>();
+        for (int i = 0; i < randomIntBetween(1, 20); ++i) {
+            AggregationBuilder aggBuilder = getRandomSupportedAggregation();
+            if (names.add(aggBuilder.getName())) {
+                builder.addAggregator(aggBuilder);
+            }
+        }
+
+        try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) {
+
+            XContentBuilder content = builder.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS);
+            source = XContentHelper.convertToMap(BytesReference.bytes(content), true, XContentType.JSON).v2();
+        } catch (IOException e) {
+            fail("failed to create random aggregation config: " + e.getMessage());
+        }
+
+        return new AggregationConfig(source, builder);
+    }
+
+    public static AggregationConfig randomInvalidAggregationConfig() {
+        // create something broken but with a source
+        Map<String, Object> source = new LinkedHashMap<>();
+        for (String key : randomUnique(() -> randomAlphaOfLengthBetween(1, 20), randomIntBetween(1, 10))) {
+            source.put(key, randomAlphaOfLengthBetween(1, 20));
+        }
+
+        return new AggregationConfig(source, null);
+    }
+
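+    // parsing strictness is chosen at random per test run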
+    @Before
+    public void setRandomFeatures() {
+        lenient = randomBoolean();
+    }
+
+    @Override
+    protected AggregationConfig doParseInstance(XContentParser parser) throws IOException {
+        return AggregationConfig.fromXContent(parser, lenient);
+    }
+
+    @Override
+    protected AggregationConfig createTestInstance() {
+        return lenient ? randomBoolean() ? randomAggregationConfig() : randomInvalidAggregationConfig() : randomAggregationConfig();
+    }
+
+    @Override
+    protected Reader<AggregationConfig> instanceReader() {
+        return AggregationConfig::new;
+    }
+
+    public void testEmptyAggregation() throws IOException {
+        String source = "{}";
+
+        // lenient, passes but reports invalid
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) {
+            AggregationConfig aggregationConfig = AggregationConfig.fromXContent(parser, true);
+            assertFalse(aggregationConfig.isValid());
+        }
+
+        // strict throws
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) {
+            expectThrows(IllegalArgumentException.class, () -> AggregationConfig.fromXContent(parser, false));
+        }
+    }
+
+    public void testFailOnStrictPassOnLenient() throws IOException {
+        String source = "{\n" +
+            " \"avg_rating\": { \"some_removed_agg\": { \"field\": \"rating\" } }\n" +
+            " },\n" +
+            " {\n" +
+            " \"max_rating\": { \"max_rating\" : { \"field\" : \"rating\" } }\n" +
+            " }";
+
+        // lenient, passes but reports invalid
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) {
+            AggregationConfig aggregationConfig = AggregationConfig.fromXContent(parser, true);
+            assertFalse(aggregationConfig.isValid());
+        }
+
+        // strict throws
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) {
+            expectThrows(NamedObjectNotFoundException.class, () -> AggregationConfig.fromXContent(parser, false));
+        }
+    }
+
+    public void testDeprecation() throws IOException {
+        String source = "{\"dep_agg\": {\"" + MockDeprecatedAggregationBuilder.NAME + "\" : {}}}";
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) {
+            AggregationConfig agg = AggregationConfig.fromXContent(parser, false);
+            assertTrue(agg.isValid());
+            assertWarnings(MockDeprecatedAggregationBuilder.DEPRECATION_MESSAGE);
+        }
+    }
+
+    private static AggregationBuilder getRandomSupportedAggregation() {
+        final int numberOfSupportedAggs = 4;
+        switch (randomIntBetween(1, numberOfSupportedAggs)) {
+        case 1:
+            return AggregationBuilders.avg(randomAlphaOfLengthBetween(1, 10)).field(randomAlphaOfLengthBetween(1, 10));
+        case 2:
+            return AggregationBuilders.min(randomAlphaOfLengthBetween(1, 10)).field(randomAlphaOfLengthBetween(1, 10));
+        case 3:
+            return AggregationBuilders.max(randomAlphaOfLengthBetween(1, 10)).field(randomAlphaOfLengthBetween(1, 10));
+        case 4:
+            return AggregationBuilders.sum(randomAlphaOfLengthBetween(1, 10)).field(randomAlphaOfLengthBetween(1, 10));
+        }
+
+        return null;
+    }
+}
diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java
new file mode 100644
index 0000000000000..49829750e954a
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java
@@ -0,0 +1,323 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms.pivot;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ContextParser;
+import org.elasticsearch.common.xcontent.DeprecationHandler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation;
+import org.elasticsearch.search.aggregations.bucket.composite.ParsedComposite;
+import org.elasticsearch.search.aggregations.bucket.terms.DoubleTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.LongTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.ParsedDoubleTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.ParsedLongTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.ParsedStringTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
+import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.CardinalityAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.ExtendedStatsAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.ParsedAvg;
+import org.elasticsearch.search.aggregations.metrics.ParsedCardinality;
+import org.elasticsearch.search.aggregations.metrics.ParsedExtendedStats;
+import org.elasticsearch.search.aggregations.metrics.ParsedMax;
+import org.elasticsearch.search.aggregations.metrics.ParsedMin;
+import org.elasticsearch.search.aggregations.metrics.ParsedStats;
+import org.elasticsearch.search.aggregations.metrics.ParsedSum;
+import org.elasticsearch.search.aggregations.metrics.ParsedValueCount;
+import org.elasticsearch.search.aggregations.metrics.StatsAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder;
+import org.elasticsearch.search.aggregations.pipeline.ParsedStatsBucket;
+import org.elasticsearch.search.aggregations.pipeline.StatsBucketPipelineAggregationBuilder;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.dataframe.transform.DataFrameIndexerTransformStats;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import static java.util.Arrays.asList;
+
+public class AggregationResultUtilsTests extends ESTestCase {
+
+    private final NamedXContentRegistry namedXContentRegistry = new NamedXContentRegistry(namedXContents);
+
+    private final String KEY = Aggregation.CommonFields.KEY.getPreferredName();
+    private final String DOC_COUNT = Aggregation.CommonFields.DOC_COUNT.getPreferredName();
+
+    // aggregations potentially useful for writing tests, to be expanded as necessary
+    private static final List<NamedXContentRegistry.Entry> namedXContents;
+    static {
+        Map<String, ContextParser<Object, ? extends Aggregation>> map = new HashMap<>();
+        map.put(CardinalityAggregationBuilder.NAME, (p, c) -> ParsedCardinality.fromXContent(p, (String) c));
+        map.put(MinAggregationBuilder.NAME, (p, c) -> ParsedMin.fromXContent(p, (String) c));
+        map.put(MaxAggregationBuilder.NAME, (p, c) -> ParsedMax.fromXContent(p, (String) c));
+        map.put(SumAggregationBuilder.NAME, (p, c) -> ParsedSum.fromXContent(p, (String) c));
+        map.put(AvgAggregationBuilder.NAME, (p, c) -> ParsedAvg.fromXContent(p, (String) c));
+        map.put(ValueCountAggregationBuilder.NAME, (p, c) -> ParsedValueCount.fromXContent(p, (String) c));
+        map.put(StatsAggregationBuilder.NAME, (p, c) -> ParsedStats.fromXContent(p, (String) c));
+        map.put(StatsBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedStatsBucket.fromXContent(p, (String) c));
+        map.put(ExtendedStatsAggregationBuilder.NAME, (p, c) -> ParsedExtendedStats.fromXContent(p, (String) c));
+        map.put(StringTerms.NAME, (p, c) -> ParsedStringTerms.fromXContent(p, (String) c));
+        map.put(LongTerms.NAME, (p, c) -> ParsedLongTerms.fromXContent(p, (String) c));
+        map.put(DoubleTerms.NAME, (p, c) -> ParsedDoubleTerms.fromXContent(p, (String) c));
+
+        namedXContents = map.entrySet().stream()
+            .map(entry -> new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(entry.getKey()), entry.getValue()))
+            .collect(Collectors.toList());
+    }
+
+    @Override
+    protected NamedXContentRegistry xContentRegistry() {
+        return namedXContentRegistry;
+    }
+
+    public void testExtractCompositeAggregationResults() throws IOException {
+        String targetField = randomAlphaOfLengthBetween(5, 10);
+
+        GroupConfig groupBy = parseGroupConfig("{ \"" + targetField + "\" : {" +
+            "\"terms\" : {" +
+            " \"field\" : \"doesn't_matter_for_this_test\"" +
+            "} } }");
+
+        String aggName = randomAlphaOfLengthBetween(5, 10);
+        String aggTypedName = "avg#" + aggName;
+        Collection<AggregationBuilder> aggregationBuilders = Collections.singletonList(AggregationBuilders.avg(aggName));
+
+        Map<String, Object> input = asMap(
+            "buckets",
+            asList(
+                asMap(
+                    KEY, asMap(
+                        targetField, "ID1"),
+                    aggTypedName, asMap(
+                        "value", 42.33),
+                    DOC_COUNT, 8),
+                asMap(
+                    KEY, asMap(
+                        targetField, "ID2"),
+                    aggTypedName, asMap(
+                        "value", 28.99),
+                    DOC_COUNT, 3),
+                asMap(
+                    KEY, asMap(
+                        targetField, "ID3"),
+                    aggTypedName, asMap(
+                        "value", 12.55),
+                    DOC_COUNT, 9)
+            ));
+
+        List<Map<String, Object>> expected = asList(
+            asMap(
+                targetField, "ID1",
+                aggName, 42.33
+            ),
+            asMap(
+                targetField, "ID2",
+                aggName, 28.99
+            ),
+            asMap(
+                targetField, "ID3",
+                aggName, 12.55
+            )
+        );
+
+        executeTest(groupBy, aggregationBuilders, input, expected, 20);
+    }
+
+    public void testExtractCompositeAggregationResultsMultiSources() throws IOException {
+        String targetField = randomAlphaOfLengthBetween(5, 10);
+        String targetField2 = randomAlphaOfLengthBetween(5, 10) + "_2";
+
+        GroupConfig groupBy = parseGroupConfig("{" +
+            "\"" + targetField + "\" : {" +
+            " \"terms\" : {" +
+            " \"field\" : \"doesn't_matter_for_this_test\"" +
+            " } }," +
+            "\"" + targetField2 + "\" : {" +
+            " \"terms\" : {" +
+            " \"field\" : \"doesn't_matter_for_this_test\"" +
+            " } }" +
+            "}");
+
+        String aggName = randomAlphaOfLengthBetween(5, 10);
+        String aggTypedName = "avg#" + aggName;
+        Collection<AggregationBuilder> aggregationBuilders = Collections.singletonList(AggregationBuilders.avg(aggName));
+
+        Map<String, Object> input = asMap(
+            "buckets",
+            asList(
+                asMap(
+                    KEY, asMap(
+                        targetField, "ID1",
+                        targetField2, "ID1_2"
+                    ),
+                    aggTypedName, asMap(
+                        "value", 42.33),
+                    DOC_COUNT, 1),
+                asMap(
+                    KEY, asMap(
+                        targetField, "ID1",
+                        targetField2, "ID2_2"
+                    ),
+                    aggTypedName, asMap(
+                        "value", 8.4),
+                    DOC_COUNT, 2),
+                asMap(
+                    KEY, asMap(
+                        targetField, "ID2",
+                        targetField2, "ID1_2"
+                    ),
+                    aggTypedName, asMap(
+                        "value", 28.99),
+                    DOC_COUNT, 3),
+                asMap(
+                    KEY, asMap(
+                        targetField, "ID3",
+                        targetField2, "ID2_2"
+                    ),
+                    aggTypedName, asMap(
+                        "value", 12.55),
+                    DOC_COUNT, 4)
+            ));
+
+        List<Map<String, Object>> expected = asList(
+            asMap(
+                targetField, "ID1",
+                targetField2, "ID1_2",
+                aggName, 42.33
+            ),
+            asMap(
+                targetField, "ID1",
+                targetField2, "ID2_2",
+                aggName, 8.4
+            ),
+            asMap(
+                targetField, "ID2",
+                targetField2, "ID1_2",
+                aggName, 28.99
+            ),
+            asMap(
+                targetField, "ID3",
+                targetField2, "ID2_2",
+                aggName, 12.55
+            )
+        );
+        executeTest(groupBy, aggregationBuilders, input, expected, 10);
+    }
+
+    public void testExtractCompositeAggregationResultsMultiAggregations() throws IOException {
+        String targetField = randomAlphaOfLengthBetween(5, 10);
+
+        GroupConfig groupBy = parseGroupConfig("{\"" + targetField + "\" : {" +
+            "\"terms\" : {" +
+            " \"field\" : \"doesn't_matter_for_this_test\"" +
+            "} } }");
+
+        String aggName = randomAlphaOfLengthBetween(5, 10);
+        String aggTypedName = "avg#" + aggName;
+
+        String aggName2 = randomAlphaOfLengthBetween(5, 10) + "_2";
+        String aggTypedName2 = "max#" + aggName2;
+
+        Collection<AggregationBuilder> aggregationBuilders = asList(AggregationBuilders.avg(aggName), AggregationBuilders.max(aggName2));
+
+        Map<String, Object> input = asMap(
+            "buckets",
+            asList(
+                asMap(
+                    KEY, asMap(
+                        targetField, "ID1"),
+                    aggTypedName, asMap(
+                        "value", 42.33),
+                    aggTypedName2, asMap(
+                        "value", 9.9),
+                    DOC_COUNT, 111),
+                asMap(
+                    KEY, asMap(
+                        targetField, "ID2"),
+                    aggTypedName, asMap(
+                        "value", 28.99),
+                    aggTypedName2, asMap(
+                        "value", 222.33),
+                    DOC_COUNT, 88),
+                asMap(
+                    KEY, asMap(
+                        targetField, "ID3"),
+                    aggTypedName, asMap(
+                        "value", 12.55),
+                    aggTypedName2, asMap(
+                        "value", -2.44),
+                    DOC_COUNT, 1)
+            ));
+
+        List<Map<String, Object>> expected = asList(
+            asMap(
+                targetField, "ID1",
+                aggName, 42.33,
+                aggName2, 9.9
+            ),
+            asMap(
+                targetField, "ID2",
+                aggName, 28.99,
+                aggName2, 222.33
+            ),
+            asMap(
+                targetField, "ID3",
+                aggName, 12.55,
+                aggName2, -2.44
+            )
+        );
+        executeTest(groupBy, aggregationBuilders, input, expected, 200);
+    }
+
+    private void executeTest(GroupConfig groups, Collection<AggregationBuilder> aggregationBuilders, Map<String, Object> input,
+            List<Map<String, Object>> expected, long expectedDocCounts) throws IOException {
+        DataFrameIndexerTransformStats stats = new DataFrameIndexerTransformStats();
+        XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
+        builder.map(input);
+
+        try (XContentParser parser = createParser(builder)) {
+            CompositeAggregation agg = ParsedComposite.fromXContent(parser, "my_feature");
+            List<Map<String, Object>> result = AggregationResultUtils
+                .extractCompositeAggregationResults(agg, groups, aggregationBuilders, stats).collect(Collectors.toList());
+
+            assertEquals(expected, result);
+            assertEquals(expectedDocCounts, stats.getNumDocuments());
+        }
+    }
+
+    private GroupConfig parseGroupConfig(String json) throws IOException {
+        final XContentParser parser = XContentType.JSON.xContent().createParser(xContentRegistry(),
+            DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
+        return GroupConfig.fromXContent(parser, false);
+    }
+
+    // convenience helper: builds a map from alternating key/value varargs, hence the even-length assertion
+    static Map<String, Object> asMap(Object... fields) {
+        assert fields.length % 2 == 0;
+        final Map<String, Object> map = new HashMap<>();
+        for (int i = 0; i < fields.length; i += 2) {
+            String field = (String) fields[i];
+            map.put(field, fields[i + 1]);
+        }
+        return map;
+    }
+}
diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java
new file mode 100644
index 0000000000000..23720ab6af3b8
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms.pivot;
+
+import org.elasticsearch.test.ESTestCase;
+
+public class AggregationsTests extends ESTestCase {
+    public void testResolveTargetMapping() {
+
+        // avg
+        assertEquals("double", Aggregations.resolveTargetMapping("avg", "int"));
+        assertEquals("double", Aggregations.resolveTargetMapping("avg", "double"));
+
+        // max
+        assertEquals("int", Aggregations.resolveTargetMapping("max", "int"));
+        assertEquals("double", Aggregations.resolveTargetMapping("max", "double"));
+        assertEquals("half_float", Aggregations.resolveTargetMapping("max", "half_float"));
+    }
+}
diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java
new file mode 100644
index 0000000000000..8e7c6028af5ba
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms.pivot;
+
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
+import org.elasticsearch.test.AbstractSerializingTestCase;
+
+import java.io.IOException;
+
+public class DateHistogramGroupSourceTests extends AbstractSerializingTestCase<DateHistogramGroupSource> {
+
+    public static DateHistogramGroupSource randomDateHistogramGroupSource() {
+        String field = randomAlphaOfLengthBetween(1, 20);
+        DateHistogramGroupSource dateHistogramGroupSource = new DateHistogramGroupSource(field);
+        if (randomBoolean()) {
+            dateHistogramGroupSource.setInterval(randomLongBetween(1, 10_000));
+        } else {
+            dateHistogramGroupSource.setDateHistogramInterval(randomFrom(DateHistogramInterval.days(10),
+                DateHistogramInterval.minutes(1), DateHistogramInterval.weeks(1)));
+        }
+        if (randomBoolean()) {
+            dateHistogramGroupSource.setTimeZone(randomZone());
+        }
+        if (randomBoolean()) {
+            dateHistogramGroupSource.setFormat(randomAlphaOfLength(10));
+        }
+        return dateHistogramGroupSource;
+    }
+
+    @Override
+    protected DateHistogramGroupSource doParseInstance(XContentParser parser) throws IOException {
+        return DateHistogramGroupSource.fromXContent(parser, false);
+    }
+
+    @Override
+    protected DateHistogramGroupSource createTestInstance() {
+        return randomDateHistogramGroupSource();
+    }
+
+    @Override
+    protected Reader<DateHistogramGroupSource> instanceReader() {
+        return DateHistogramGroupSource::new;
+    }
+
+}
diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/GroupConfigTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/GroupConfigTests.java
new file mode 100644
index 0000000000000..72b0af31c6d81
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/GroupConfigTests.java
@@ -0,0 +1,101 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms.pivot;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.AbstractSerializingTestCase;
+import org.elasticsearch.xpack.dataframe.transforms.pivot.SingleGroupSource.Type;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Set;
+
+public class GroupConfigTests extends AbstractSerializingTestCase<GroupConfig> {
+
+    public static GroupConfig randomGroupConfig() {
+        Map<String, Object> source = new LinkedHashMap<>();
+        Map<String, SingleGroupSource<?>> groups = new LinkedHashMap<>();
+
+        // ensure that the unlikely does not happen: 2 group_by's share the same name
+        Set<String> names = new HashSet<>();
+        for (int i = 0; i < randomIntBetween(1, 20); ++i) {
+            String targetFieldName = randomAlphaOfLengthBetween(1, 20);
+            if (names.add(targetFieldName)) {
+                SingleGroupSource<?> groupBy;
+                Type type = randomFrom(SingleGroupSource.Type.values());
+                switch (type) {
+                case TERMS:
+                    groupBy = TermsGroupSourceTests.randomTermsGroupSource();
+                    break;
+                case HISTOGRAM:
+                    groupBy = HistogramGroupSourceTests.randomHistogramGroupSource();
+                    break;
+                case DATE_HISTOGRAM:
+                default:
+                    groupBy = DateHistogramGroupSourceTests.randomDateHistogramGroupSource();
+                }
+
+                source.put(targetFieldName, Collections.singletonMap(type.value(), getSource(groupBy)));
+                groups.put(targetFieldName, groupBy);
+            }
+        }
+
+        return new GroupConfig(source, groups);
+    }
+
+    @Override
+    protected GroupConfig doParseInstance(XContentParser parser) throws IOException {
+        return GroupConfig.fromXContent(parser, false);
+    }
+
+    @Override
+    protected GroupConfig createTestInstance() {
+        return randomGroupConfig();
+    }
+
+    @Override
+    protected Reader<GroupConfig> instanceReader() {
+        return GroupConfig::new;
+    }
+
+    public void testEmptyGroupBy() throws IOException {
+        String source = "{}";
+
+        // lenient, passes but reports invalid
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) {
+            GroupConfig groupConfig = GroupConfig.fromXContent(parser, true);
+            assertFalse(groupConfig.isValid());
+        }
+
+        // strict throws
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) {
+            expectThrows(IllegalArgumentException.class, () -> GroupConfig.fromXContent(parser, false));
+        }
+    }
+
+    private static Map<String, Object> getSource(SingleGroupSource<?> groupSource) {
+        try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) {
+            XContentBuilder content = groupSource.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS);
+            return XContentHelper.convertToMap(BytesReference.bytes(content), true, XContentType.JSON).v2();
+        } catch (IOException e) {
+            // should not happen
+            fail("failed to create random single group source");
+        }
+        return null;
+    }
+}
diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/HistogramGroupSourceTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/HistogramGroupSourceTests.java
new file mode 100644
index 0000000000000..3e2581fca5249
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/HistogramGroupSourceTests.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms.pivot;
+
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractSerializingTestCase;
+
+import java.io.IOException;
+
+public class HistogramGroupSourceTests extends AbstractSerializingTestCase<HistogramGroupSource> {
+
+    public static HistogramGroupSource randomHistogramGroupSource() {
+        String field = randomAlphaOfLengthBetween(1, 20);
+        double interval = randomDoubleBetween(Math.nextUp(0), Double.MAX_VALUE, false);
+        return new HistogramGroupSource(field, interval);
+    }
+
+    @Override
+    protected HistogramGroupSource doParseInstance(XContentParser parser) throws IOException {
+        return HistogramGroupSource.fromXContent(parser, false);
+    }
+
+    @Override
+    protected HistogramGroupSource createTestInstance() {
+        return randomHistogramGroupSource();
+    }
+
+    @Override
+    protected Reader<HistogramGroupSource> instanceReader() {
+        return HistogramGroupSource::new;
+    }
+
+}
diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotConfigTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotConfigTests.java
new file mode 100644
index 0000000000000..2397c088293f4
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotConfigTests.java
@@ -0,0 +1,144 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms.pivot;
+
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.common.xcontent.DeprecationHandler;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.xpack.dataframe.transforms.AbstractSerializingDataFrameTestCase;
+
+import java.io.IOException;
+
+public class PivotConfigTests extends AbstractSerializingDataFrameTestCase<PivotConfig> {
+
+    public static PivotConfig randomPivotConfig() {
+        return new PivotConfig(GroupConfigTests.randomGroupConfig(), AggregationConfigTests.randomAggregationConfig());
+    }
+
+    public static PivotConfig randomInvalidPivotConfig() {
+        return new PivotConfig(GroupConfigTests.randomGroupConfig(), AggregationConfigTests.randomInvalidAggregationConfig());
+    }
+
+    @Override
+    protected PivotConfig doParseInstance(XContentParser parser) throws IOException {
+        return PivotConfig.fromXContent(parser, false);
+    }
+
+    @Override
+    protected PivotConfig createTestInstance() {
+        return randomPivotConfig();
+    }
+
+    @Override
+    protected Reader<PivotConfig> instanceReader() {
+        return PivotConfig::new;
+    }
+
+    public void testAggsAbbreviations() throws IOException {
+        String pivotAggs = "{" +
+            " \"group_by\": {" +
+            " \"id\": {" +
+            " \"terms\": {" +
+            " \"field\": \"id\"" +
+            "} } }," +
+            " \"aggs\": {" +
+            " \"avg\": {" +
+            " \"avg\": {" +
+            " \"field\": \"points\"" +
+            "} } } }";
+
+        PivotConfig p1 = createPivotConfigFromString(pivotAggs, false);
+        String pivotAggregations = pivotAggs.replace("aggs", "aggregations");
+        assertNotEquals(pivotAggs, pivotAggregations);
+        PivotConfig p2 = createPivotConfigFromString(pivotAggregations, false);
+        assertEquals(p1, p2);
+    }
+
+    public void testMissingAggs() throws IOException {
+        String pivot = "{" +
+            " \"group_by\": {" +
+            " \"id\": {" +
+            " \"terms\": {" +
+            " \"field\": \"id\"" +
+            "} } } }";
+
+        expectThrows(IllegalArgumentException.class, () -> createPivotConfigFromString(pivot, false));
+    }
+
+    public void testEmptyAggs() throws IOException {
+        String pivot = "{" +
+            " \"group_by\": {" +
+            " \"id\": {" +
+            " \"terms\": {" +
+            " \"field\": \"id\"" +
+            "} } }," +
+            "\"aggs\": {}" +
+            " }";
+
+        expectThrows(IllegalArgumentException.class, () -> createPivotConfigFromString(pivot, false));
+
+        // lenient passes but reports invalid
+        PivotConfig pivotConfig = createPivotConfigFromString(pivot, true);
+        assertFalse(pivotConfig.isValid());
+    }
+
+    public void testEmptyGroupBy() throws IOException {
+        String pivot = "{" +
+            " \"group_by\": {}," +
+            " \"aggs\": {" +
+            " \"avg\": {" +
+            " \"avg\": {" +
+            " \"field\": \"points\"" +
+            "} } } }";
+
+        expectThrows(IllegalArgumentException.class, () -> createPivotConfigFromString(pivot, false));
+
+        // lenient passes but reports invalid
+        PivotConfig pivotConfig = createPivotConfigFromString(pivot, true);
+        assertFalse(pivotConfig.isValid());
+    }
+
+    public void testMissingGroupBy() throws IOException {
+        String pivot = "{" +
+            " \"aggs\": {" +
+            " \"avg\": {" +
+            " \"avg\": {" +
+            " \"field\": \"points\"" +
+            "} } } }";
+
+        expectThrows(IllegalArgumentException.class, () -> createPivotConfigFromString(pivot, false));
+    }
+
+    // specifying both "aggs" and "aggregations" in one pivot is ambiguous and must be rejected
+    public void testDoubleAggs() throws IOException {
+        String pivot = "{" +
+            " \"group_by\": {" +
+            " \"id\": {" +
+            " \"terms\": {" +
+            " \"field\": \"id\"" +
+            "} } }," +
+            " \"aggs\": {" +
+            " \"avg\": {" +
+            " \"avg\": {" +
+            " \"field\": \"points\"" +
+            "} } }," +
+            " \"aggregations\": {" +
+            " \"avg\": {" +
+            " \"avg\": {" +
+            " \"field\": \"points\"" +
+            "} } }" +
+            "}";
+
+        expectThrows(IllegalArgumentException.class, () -> createPivotConfigFromString(pivot, false));
+    }
+
+    private PivotConfig createPivotConfigFromString(String json, boolean lenient) throws IOException {
+        final XContentParser parser = XContentType.JSON.xContent().createParser(xContentRegistry(),
+            DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
+        return PivotConfig.fromXContent(parser, lenient);
+    }
+}
diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java
new file mode 100644
index 0000000000000..4845085eba337
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java
@@ -0,0 +1,212 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms.pivot;
+
+import org.apache.lucene.search.TotalHits;
+import org.elasticsearch.action.Action;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchResponseSections;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.DeprecationHandler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.index.query.MatchAllQueryBuilder;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.SearchModule;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.client.NoOpClient;
+import org.elasticsearch.xpack.dataframe.transforms.pivot.Aggregations.AggregationType;
+import org.junit.After;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static java.util.Collections.emptyList;
+import static org.hamcrest.Matchers.equalTo;
+
+public class PivotTests extends ESTestCase {
+
+    private NamedXContentRegistry namedXContentRegistry;
+    private Client client;
+
+    private final Set<String> supportedAggregations = Stream.of(AggregationType.values()).map(AggregationType::getName)
+        .collect(Collectors.toSet());
+    private final String[] unsupportedAggregations = { "stats" };
+
+    @Before
+    public void registerAggregationNamedObjects() throws Exception {
+        // register aggregations as NamedWriteable
+        SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList());
+        namedXContentRegistry = new NamedXContentRegistry(searchModule.getNamedXContents());
+    }
+
+    // a fresh mock client is created per test; any previous instance is closed first so its thread pool is not leaked
+    @Before
+    public void setupClient() {
+        if (client != null) {
+            client.close();
+        }
+        client = new MyMockClient(getTestName());
+    }
+
+    @After
+    public void tearDownClient() {
+        client.close();
+    }
+
+    @Override
+    protected NamedXContentRegistry xContentRegistry() {
+        return namedXContentRegistry;
+    }
+
+    public void testValidateExistingIndex() throws Exception {
+        Pivot pivot = new Pivot("existing_source_index", new MatchAllQueryBuilder(), getValidPivotConfig());
+
+        assertValidTransform(client, pivot);
+    }
+
+    public void testValidateNonExistingIndex() throws Exception {
+        Pivot pivot = new Pivot("non_existing_source_index", new MatchAllQueryBuilder(), getValidPivotConfig());
+
+        assertInvalidTransform(client, pivot);
+    }
+
+    public void testSearchFailure() throws Exception {
+        // test a failure during the search operation, transform creation fails if
+        // search has failures although they might just be temporary
+        Pivot pivot = new Pivot("existing_source_index_with_failing_shards", new MatchAllQueryBuilder(), getValidPivotConfig());
+
+        assertInvalidTransform(client, pivot);
+    }
+
+    public void testValidateAllSupportedAggregations() throws Exception {
+        for (String agg : supportedAggregations) {
+            AggregationConfig aggregationConfig = getAggregationConfig(agg);
+
+            Pivot pivot = new Pivot("existing_source", new MatchAllQueryBuilder(), getValidPivotConfig(aggregationConfig));
+
+            assertValidTransform(client, pivot);
+        }
+    }
+
+    public void testValidateAllUnsupportedAggregations() throws Exception {
+        for (String agg : unsupportedAggregations) {
+            AggregationConfig aggregationConfig = getAggregationConfig(agg);
+
+            Pivot pivot = new Pivot("existing_source", new MatchAllQueryBuilder(), getValidPivotConfig(aggregationConfig));
+
+            assertInvalidTransform(client, pivot);
+        }
+    }
+
+    // minimal mock client: fails the search for "non_existing" indices and injects shard failures for "with_failing_shards"
+    private class MyMockClient extends NoOpClient {
+        MyMockClient(String testName) {
+            super(testName);
+        }
+
+        @SuppressWarnings("unchecked")
+        @Override
+        protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(Action<Response> action, Request request,
+                ActionListener<Response> listener) {
+
+            if (request instanceof SearchRequest) {
+                SearchRequest searchRequest = (SearchRequest) request;
+                List<ShardSearchFailure> searchFailures = new ArrayList<>();
+
+                for (String index : searchRequest.indices()) {
+                    if (index.contains("non_existing")) {
+                        listener.onFailure(new IndexNotFoundException(index));
+                        return;
+                    }
+
+                    if (index.contains("with_failing_shards")) {
+                        searchFailures.add(new ShardSearchFailure(new RuntimeException("shard failed")));
+                    }
+                }
+
+                final SearchResponseSections sections = new SearchResponseSections(
+                    new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), 0), null, null, false, null, null,
+                    1);
0 : 5, 0, 0, + searchFailures.toArray(new ShardSearchFailure[searchFailures.size()]), null); + + listener.onResponse((Response) response); + return; + } + + super.doExecute(action, request, listener); + } + } + + private PivotConfig getValidPivotConfig() throws IOException { + return new PivotConfig(GroupConfigTests.randomGroupConfig(), getValidAggregationConfig()); + } + + private PivotConfig getValidPivotConfig(AggregationConfig aggregationConfig) throws IOException { + return new PivotConfig(GroupConfigTests.randomGroupConfig(), aggregationConfig); + } + + private AggregationConfig getValidAggregationConfig() throws IOException { + return getAggregationConfig(randomFrom(supportedAggregations)); + } + + private AggregationConfig getAggregationConfig(String agg) throws IOException { + return parseAggregations("{\n" + " \"pivot_" + agg + "\": {\n" + " \"" + agg + "\": {\n" + " \"field\": \"values\"\n" + + " }\n" + " }" + "}"); + } + + private AggregationConfig parseAggregations(String json) throws IOException { + final XContentParser parser = XContentType.JSON.xContent().createParser(xContentRegistry(), + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json); + // parseAggregators expects to be already inside the xcontent object + assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT)); + return AggregationConfig.fromXContent(parser, false); + } + + private static void assertValidTransform(Client client, Pivot pivot) throws Exception { + validate(client, pivot, true); + } + + private static void assertInvalidTransform(Client client, Pivot pivot) throws Exception { + validate(client, pivot, false); + } + + private static void validate(Client client, Pivot pivot, boolean expectValid) throws Exception { + CountDownLatch latch = new CountDownLatch(1); + final AtomicReference<Exception> exceptionHolder = new AtomicReference<>(); + pivot.validate(client, ActionListener.wrap(validity -> { + assertEquals(expectValid, validity); + latch.countDown(); + }, e -> { + exceptionHolder.set(e); + latch.countDown(); + })); + + assertTrue(latch.await(100, TimeUnit.MILLISECONDS)); + if (expectValid == true && exceptionHolder.get() != null) { + throw exceptionHolder.get(); + } else if (expectValid == false && exceptionHolder.get() == null) { + fail("Expected config to be invalid"); + } + } +} diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/TermsGroupSourceTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/TermsGroupSourceTests.java new file mode 100644 index 0000000000000..984cd40bd9640 --- /dev/null +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/TermsGroupSourceTests.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
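Stepping back from the diff for a moment: the validate helper in PivotTests above bridges Pivot#validate's callback style to a blocking test assertion with a CountDownLatch, plus an AtomicReference to carry any async failure back to the test thread. A minimal standalone sketch of that pattern follows; the AsyncValidator interface and assertValidates helper are hypothetical stand-ins for illustration, not anything from this PR.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

final class AsyncAssertSketch {
    /** Hypothetical callback API standing in for Pivot#validate(client, listener). */
    interface AsyncValidator {
        void validate(Consumer<Boolean> onResult, Consumer<Exception> onFailure);
    }

    static void assertValidates(AsyncValidator validator, boolean expectValid) throws Exception {
        CountDownLatch latch = new CountDownLatch(1);
        AtomicReference<Boolean> result = new AtomicReference<>();
        AtomicReference<Exception> failure = new AtomicReference<>();
        validator.validate(r -> {
            result.set(r);     // record on the callback thread
            latch.countDown(); // then release the test thread
        }, e -> {
            failure.set(e);
            latch.countDown();
        });
        // Bound the wait so a listener that is never invoked fails the test instead of hanging it.
        if (latch.await(100, TimeUnit.MILLISECONDS) == false) {
            throw new Exception("validation listener was never called");
        }
        if (expectValid) {
            if (failure.get() != null) {
                throw failure.get(); // surface the async failure on the test thread
            }
        } else if (failure.get() == null && Boolean.TRUE.equals(result.get())) {
            throw new Exception("expected the config to be invalid");
        }
    }
}

The bounded 100 ms wait mirrors the helper above: any test that blocks on a callback should carry such a timeout.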
+ */ + +package org.elasticsearch.xpack.dataframe.transforms.pivot; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; + +public class TermsGroupSourceTests extends AbstractSerializingTestCase<TermsGroupSource> { + + public static TermsGroupSource randomTermsGroupSource() { + String field = randomAlphaOfLengthBetween(1, 20); + + return new TermsGroupSource(field); + } + + @Override + protected TermsGroupSource doParseInstance(XContentParser parser) throws IOException { + return TermsGroupSource.fromXContent(parser, false); + } + + @Override + protected TermsGroupSource createTestInstance() { + return randomTermsGroupSource(); + } + + @Override + protected Reader<TermsGroupSource> instanceReader() { + return TermsGroupSource::new; + } + +} diff --git a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java index b3c93acb97b99..8b4c21ee086aa 100644 --- a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java +++ b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java @@ -278,6 +278,7 @@ public void testCcrAndIlmWithRollover() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37165") public void testUnfollowInjectedBeforeShrink() throws Exception { final String indexName = "shrink-test"; final String shrunkenIndexName = "shrink-" + indexName; diff --git a/x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java b/x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java index 9e31ddb131c6f..67b72a648db60 100644 --- a/x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java +++ b/x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java @@ -35,9 +35,9 @@ public void test() throws IOException { // role for (ExecutableSection section : testCandidate.getTestSection().getExecutableSections()) { if (section instanceof DoSection) { - if (((DoSection) section).getApiCallSection().getApi().startsWith("xpack.ml.") && - ((DoSection) section).getApiCallSection().getApi().startsWith("xpack.ml.get_") == false && - ((DoSection) section).getApiCallSection().getApi().equals("xpack.ml.find_file_structure") == false) { + if (((DoSection) section).getApiCallSection().getApi().startsWith("ml.") && + ((DoSection) section).getApiCallSection().getApi().startsWith("ml.get_") == false && + ((DoSection) section).getApiCallSection().getApi().equals("ml.find_file_structure") == false) { fail("should have failed because of missing role"); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java index 6c14423d9acdb..18520eecd10d0 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java @@ -15,11 +15,14 @@ import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ml.MachineLearningField;
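// Aside (not part of the diff): the MlWithSecurityUserRoleIT hunk above boils down to a
// predicate over the renamed API names; this extraction is hypothetical, for clarity only.
static boolean mustFailWithoutMlRole(String api) {
    // Every ml.* call must be rejected for this role, except the read-only ml.get_*
    // calls and ml.find_file_structure, which remain permitted.
    return api.startsWith("ml.")
            && api.startsWith("ml.get_") == false
            && api.equals("ml.find_file_structure") == false;
}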
+import java.util.Arrays; import java.util.Collection; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + /** * An extension to {@link ESSingleNodeTestCase} that adds node settings specifically needed for ML test cases. */ @@ -46,6 +49,33 @@ protected Collection<Class<? extends Plugin>> getPlugins() { return pluginList(LocalStateMachineLearning.class); } + /** + * This cleanup is to fix the problem described in + * https://github.com/elastic/elasticsearch/issues/38952 + */ + @Override + public void tearDown() throws Exception { + try { + logger.trace("[{}#{}]: ML-specific after test cleanup", getTestClass().getSimpleName(), getTestName()); + String[] nonAnnotationMlIndices; + boolean mlAnnotationsIndexExists; + do { + String[] mlIndices = client().admin().indices().prepareGetIndex().addIndices(".ml-*").get().indices(); + nonAnnotationMlIndices = Arrays.stream(mlIndices).filter(name -> name.startsWith(".ml-annotations") == false) + .toArray(String[]::new); + mlAnnotationsIndexExists = mlIndices.length > nonAnnotationMlIndices.length; + } while (nonAnnotationMlIndices.length > 0 && mlAnnotationsIndexExists == false); + if (nonAnnotationMlIndices.length > 0) { + // Delete the ML indices apart from the annotations index. The annotations index will be deleted by the + // base class cleanup. We want to delete all the others first so that the annotations index doesn't get + // automatically recreated. + assertAcked(client().admin().indices().prepareDelete(nonAnnotationMlIndices).get()); + } + } finally { + super.tearDown(); + } + } + protected void waitForMlTemplates() throws Exception { // block until the templates are installed assertBusy(() -> { diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java index cdabb36d42760..bb3ff17dbc1db 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java @@ -397,7 +397,6 @@ public void testSimpleDateHistoWithTimeZone() throws Exception { }); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/34762") public void testRandomizedDateHisto() throws Exception { String rollupIndex = randomAlphaOfLengthBetween(5, 10); @@ -413,7 +412,9 @@ public void testRandomizedDateHisto() throws Exception { final List<Map<String, Object>> dataset = new ArrayList<>(); int numDocs = randomIntBetween(1,100); for (int i = 0; i < numDocs; i++) { - long timestamp = new DateTime().minusHours(randomIntBetween(1,100)).getMillis(); + // Make sure the timestamp is sufficiently in the past that we don't get bitten + // by internal rounding, causing no docs to match + long timestamp = new DateTime().minusDays(2).minusHours(randomIntBetween(11,100)).getMillis(); dataset.add(asMap(timestampField, timestamp, valueField, randomLongBetween(1, 100))); } executeTestCase(dataset, job, System.currentTimeMillis(), (resp) -> { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java index 0397fac1027ea..308fbedb0f241 100644 ---
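Before moving on to the security changes: the MlSingleNodeTestCase#tearDown added above is a list-filter-delete loop whose retry guard exists because the .ml-annotations index can be recreated automatically while other .ml-* indices remain. A condensed sketch of the same idea, with a hypothetical IndexAdmin interface standing in for the admin client:

import java.util.Arrays;

final class MlCleanupSketch {
    /** Hypothetical stand-ins for the client().admin().indices() calls. */
    interface IndexAdmin {
        String[] list(String pattern);
        void delete(String... names);
    }

    static void deleteMlIndicesExceptAnnotations(IndexAdmin admin) {
        String[] toDelete;
        boolean annotationsExist;
        do {
            String[] all = admin.list(".ml-*");
            toDelete = Arrays.stream(all)
                    .filter(name -> name.startsWith(".ml-annotations") == false)
                    .toArray(String[]::new);
            annotationsExist = all.length > toDelete.length;
            // Re-list until the annotations index shows up (or nothing is left),
            // so deleting the others cannot race its automatic recreation.
        } while (toDelete.length > 0 && annotationsExist == false);
        if (toDelete.length > 0) {
            admin.delete(toDelete);
        }
    }
}

Deleting the non-annotation indices first is the point of the ordering: the annotations index is left for the base-class cleanup, and removing the triggers of its recreation first keeps it from reappearing mid-teardown.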
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java @@ -19,6 +19,7 @@ import java.util.function.Predicate; import static org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction.TASKS_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.DATA_FRAME_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.DEPRECATION_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.INDEX_LIFECYCLE_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; @@ -105,6 +106,7 @@ public static void switchUserBasedOnActionOriginAndExecute(ThreadContext threadC case WATCHER_ORIGIN: case ML_ORIGIN: case MONITORING_ORIGIN: + case DATA_FRAME_ORIGIN: case DEPRECATION_ORIGIN: case PERSISTENT_TASK_ORIGIN: case ROLLUP_ORIGIN: diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index 7c4cd564e9993..bde5949d378b3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -261,6 +261,8 @@ public void testActionsForSystemUserIsAuthorized() throws IOException { "indices:admin/seq_no/global_checkpoint_sync", "indices:admin/seq_no/retention_lease_sync", "indices:admin/seq_no/retention_lease_background_sync", + "indices:admin/seq_no/add_retention_lease", + "indices:admin/seq_no/renew_retention_lease", "indices:admin/settings/update" }; for (String action : actions) { authorize(authentication, action, request); diff --git a/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java b/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java index 336ddadea4c32..ba63034c170d0 100644 --- a/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java +++ b/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java @@ -105,13 +105,13 @@ private void waitForWatcher() throws Exception { if (isWatcherTest()) { assertBusy(() -> { ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); switch (state) { case "stopped": ClientYamlTestResponse startResponse = - getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.start", emptyMap(), emptyList(), emptyMap()); boolean isAcknowledged = (boolean) startResponse.evaluate("acknowledged"); assertThat(isAcknowledged, is(true)); throw new AssertionError("waiting until stopped state reached started state"); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.graph.explore.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/graph.explore.json similarity index 97% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.graph.explore.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/graph.explore.json index a092ffb1582eb..293694d0ae8a0 100644 --- 
a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.graph.explore.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/graph.explore.json @@ -1,5 +1,5 @@ { - "xpack.graph.explore": { + "graph.explore": { "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html", "methods": ["GET", "POST"], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.delete.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.delete.json similarity index 89% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.delete.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/license.delete.json index 72229bafdbe04..315b283699b62 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.delete.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.delete.json @@ -1,5 +1,5 @@ { - "xpack.license.delete": { + "license.delete": { "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", "methods": ["DELETE"], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get.json similarity index 94% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/license.get.json index aa425d3b12d8e..0de1fb48536e8 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get.json @@ -1,5 +1,5 @@ { - "xpack.license.get": { + "license.get": { "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", "methods": ["GET"], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get_basic_status.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_basic_status.json similarity index 88% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get_basic_status.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_basic_status.json index d5ae7be328718..e9823b449087e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get_basic_status.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_basic_status.json @@ -1,5 +1,5 @@ { - "xpack.license.get_basic_status": { + "license.get_basic_status": { "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", "methods": ["GET"], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get_trial_status.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_trial_status.json similarity index 88% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get_trial_status.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_trial_status.json index dd867ae6e79a5..54f6b0a8c7d43 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get_trial_status.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_trial_status.json @@ -1,5 +1,5 @@ { - "xpack.license.get_trial_status": { + "license.get_trial_status": { "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", "methods": ["GET"], "url": { diff --git 
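All of the rest-api-spec renames in this stretch apply one mechanical rule: strip the leading xpack. segment from the API name, both in the spec's filename and in its top-level JSON key, leaving the documentation, methods, and url blocks untouched. Expressed as a throwaway helper (hypothetical, purely illustrative):

final class ApiRenameSketch {
    /** e.g. stripXpackPrefix("xpack.license.get") returns "license.get"; unprefixed names pass through. */
    static String stripXpackPrefix(String apiName) {
        return apiName.startsWith("xpack.") ? apiName.substring("xpack.".length()) : apiName;
    }
}

Callers must follow suit, which is why the earlier hunks rewrite MlWithSecurityUserRoleIT's prefix checks and XPackRestIT's callApi("watcher.stats", ...) call, and why the YAML tests below swap xpack.rollup.*, xpack.license.*, and the other prefixed names for their unprefixed forms.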
a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post.json similarity index 95% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/license.post.json index 5c58f55004217..23d597a3c1964 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post.json @@ -1,5 +1,5 @@ { - "xpack.license.post": { + "license.post": { "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", "methods": ["PUT", "POST"], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_basic.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_basic.json similarity index 92% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_basic.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_basic.json index 4b4610973f9bc..2b9da7d47c685 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_basic.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_basic.json @@ -1,5 +1,5 @@ { - "xpack.license.post_start_basic": { + "license.post_start_basic": { "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", "methods": ["POST"], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_trial.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_trial.json similarity index 94% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_trial.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_trial.json index 8c8b19b0506ba..d0e3afcbb1e1f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_trial.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_trial.json @@ -1,5 +1,5 @@ { - "xpack.license.post_start_trial": { + "license.post_start_trial": { "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", "methods": ["POST"], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.deprecations.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/migration.deprecations.json similarity index 92% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.deprecations.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/migration.deprecations.json index 9ca2d5fd75ad2..989b206919ba9 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.deprecations.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/migration.deprecations.json @@ -1,5 +1,5 @@ { - "xpack.migration.deprecations": { + "migration.deprecations": { "documentation": "http://www.elastic.co/guide/en/migration/current/migration-api-deprecation.html", "methods": [ "GET" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.get_assistance.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/migration.get_assistance.json similarity index 97% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.get_assistance.json rename to 
x-pack/plugin/src/test/resources/rest-api-spec/api/migration.get_assistance.json index cfa7d949efed4..b4f89205ecb8b 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.get_assistance.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/migration.get_assistance.json @@ -1,5 +1,5 @@ { - "xpack.migration.get_assistance": { + "migration.get_assistance": { "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-assistance.html", "methods": [ "GET" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.upgrade.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/migration.upgrade.json similarity index 95% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.upgrade.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/migration.upgrade.json index d134b27d257a9..e5150e8d10196 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.upgrade.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/migration.upgrade.json @@ -1,5 +1,5 @@ { - "xpack.migration.upgrade": { + "migration.upgrade": { "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-upgrade.html", "methods": [ "POST" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.delete_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.delete_job.json similarity index 91% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.delete_job.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.delete_job.json index 8046667f889e1..e03cd2ae977b9 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.delete_job.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.delete_job.json @@ -1,5 +1,5 @@ { - "xpack.rollup.delete_job": { + "rollup.delete_job": { "documentation": "", "methods": [ "DELETE" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_jobs.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.get_jobs.json similarity index 93% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_jobs.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.get_jobs.json index fc2f49f8415e8..aa5d56e590910 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_jobs.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.get_jobs.json @@ -1,5 +1,5 @@ { - "xpack.rollup.get_jobs": { + "rollup.get_jobs": { "documentation": "", "methods": [ "GET" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_rollup_caps.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.get_rollup_caps.json similarity index 91% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_rollup_caps.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.get_rollup_caps.json index f21bdf26bbf09..0fd8aa3168222 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_rollup_caps.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.get_rollup_caps.json @@ -1,5 +1,5 @@ { - "xpack.rollup.get_rollup_caps": { + "rollup.get_rollup_caps": { "documentation": "", "methods": [ "GET" ], "url": { diff --git 
a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_rollup_index_caps.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.get_rollup_index_caps.json similarity index 89% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_rollup_index_caps.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.get_rollup_index_caps.json index f2db0e93dce77..c446f29e7591b 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_rollup_index_caps.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.get_rollup_index_caps.json @@ -1,5 +1,5 @@ { - "xpack.rollup.get_rollup_index_caps": { + "rollup.get_rollup_index_caps": { "documentation": "", "methods": [ "GET" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.put_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.put_job.json similarity index 93% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.put_job.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.put_job.json index 5b5d59b1dd3c3..ca33affd7d8a6 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.put_job.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.put_job.json @@ -1,5 +1,5 @@ { - "xpack.rollup.put_job": { + "rollup.put_job": { "documentation": "", "methods": [ "PUT" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.rollup_search.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.rollup_search.json similarity index 97% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.rollup_search.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.rollup_search.json index 0858e3260f822..826f97aa15a03 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.rollup_search.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.rollup_search.json @@ -1,5 +1,5 @@ { - "xpack.rollup.rollup_search": { + "rollup.rollup_search": { "documentation": "", "methods": [ "GET", "POST" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.start_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.start_job.json similarity index 91% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.start_job.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.start_job.json index 6fad8ef9c35c5..8ee505b195b22 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.start_job.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.start_job.json @@ -1,5 +1,5 @@ { - "xpack.rollup.start_job": { + "rollup.start_job": { "documentation": "", "methods": [ "POST" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.stop_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.stop_job.json similarity index 96% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.stop_job.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.stop_job.json index b42087208e202..152b72945800d 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.stop_job.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.stop_job.json @@ -1,5 +1,5 @@ { - "xpack.rollup.stop_job": { + "rollup.stop_job": { "documentation": "", "methods": [ "POST" ], 
"url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.clear_cursor.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/sql.clear_cursor.json similarity index 91% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.clear_cursor.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/sql.clear_cursor.json index 2d2ce3519b239..ec84f9543bfe0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.clear_cursor.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/sql.clear_cursor.json @@ -1,5 +1,5 @@ { - "xpack.sql.clear_cursor": { + "sql.clear_cursor": { "documentation": "Clear SQL cursor", "methods": [ "POST"], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.query.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/sql.query.json similarity index 95% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.query.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/sql.query.json index b95aa509772fd..c12a876e8cd32 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.query.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/sql.query.json @@ -1,5 +1,5 @@ { - "xpack.sql.query": { + "sql.query": { "documentation": "Execute SQL", "methods": [ "POST", "GET" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.translate.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/sql.translate.json similarity index 92% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.translate.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/sql.translate.json index 29a522ceb31c7..2200a61be66b2 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.translate.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/sql.translate.json @@ -1,5 +1,5 @@ { - "xpack.sql.translate": { + "sql.translate": { "documentation": "Translate SQL into Elasticsearch queries", "methods": [ "POST", "GET" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.ack_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.ack_watch.json similarity index 95% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.ack_watch.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.ack_watch.json index 5f1ed7f860f97..4920c986a042f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.ack_watch.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.ack_watch.json @@ -1,5 +1,5 @@ { - "xpack.watcher.ack_watch": { + "watcher.ack_watch": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-ack-watch.html", "methods": [ "PUT", "POST" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.activate_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.activate_watch.json similarity index 92% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.activate_watch.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.activate_watch.json index 12c38ce1bebf8..49fb169dede77 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.activate_watch.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.activate_watch.json @@ -1,5 +1,5 @@ { - "xpack.watcher.activate_watch": { + 
"watcher.activate_watch": { "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-activate-watch.html", "methods": [ "PUT", "POST" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.deactivate_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.deactivate_watch.json similarity index 92% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.deactivate_watch.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.deactivate_watch.json index d9cb9d653bc01..ddc68b439395e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.deactivate_watch.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.deactivate_watch.json @@ -1,5 +1,5 @@ { - "xpack.watcher.deactivate_watch": { + "watcher.deactivate_watch": { "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-deactivate-watch.html", "methods": [ "PUT", "POST" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.delete_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.delete_watch.json similarity index 92% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.delete_watch.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.delete_watch.json index a243315c91a62..cdf61ad52023f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.delete_watch.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.delete_watch.json @@ -1,5 +1,5 @@ { - "xpack.watcher.delete_watch": { + "watcher.delete_watch": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-delete-watch.html", "methods": [ "DELETE" ], diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.execute_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.execute_watch.json similarity index 95% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.execute_watch.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.execute_watch.json index 0456eef5f49ab..6db8f3ae115f5 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.execute_watch.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.execute_watch.json @@ -1,5 +1,5 @@ { - "xpack.watcher.execute_watch": { + "watcher.execute_watch": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-execute-watch.html", "methods": [ "PUT", "POST" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.get_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.get_watch.json similarity index 93% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.get_watch.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.get_watch.json index b0587301ec425..81f21b4b0c1e5 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.get_watch.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.get_watch.json @@ -1,5 +1,5 @@ { - "xpack.watcher.get_watch": { + "watcher.get_watch": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-watch.html", "methods": [ "GET" ], "url": { diff --git 
a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.put_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.put_watch.json similarity index 97% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.put_watch.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.put_watch.json index 438f2e4ee7637..24f020a7b90b4 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.put_watch.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.put_watch.json @@ -1,5 +1,5 @@ { - "xpack.watcher.put_watch": { + "watcher.put_watch": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-put-watch.html", "methods": [ "PUT", "POST" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.start.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.start.json similarity index 91% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.start.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.start.json index eceb2a8628517..649b21c7db3f1 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.start.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.start.json @@ -1,5 +1,5 @@ { - "xpack.watcher.start": { + "watcher.start": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-start.html", "methods": [ "POST" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stats.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.stats.json similarity index 97% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stats.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.stats.json index 13857f1791019..1fe6eaed3d9a8 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stats.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.stats.json @@ -1,5 +1,5 @@ { - "xpack.watcher.stats": { + "watcher.stats": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stats.html", "methods": [ "GET" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stop.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.stop.json similarity index 92% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stop.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.stop.json index 1a14947b4fb11..4deee79436e2d 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stop.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.stop.json @@ -1,5 +1,5 @@ { - "xpack.watcher.stop": { + "watcher.stop": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stop.html", "methods": [ "POST" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/deprecation/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/deprecation/10_basic.yml index 1cbb310bb4a08..99e6cdc72faf8 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/deprecation/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/deprecation/10_basic.yml @@ -7,7 +7,7 @@ setup: --- "Test Deprecations": - do: - xpack.migration.deprecations: + migration.deprecations: index: "*" - length: { 
cluster_settings: 0 } - length: { node_settings: 0 } @@ -54,7 +54,7 @@ setup: - do: warnings: - Deprecated field [use_dis_max] used, replaced by [Set [tie_breaker] to 1 instead] - xpack.migration.deprecations: + migration.deprecations: index: "*" - length: { ml_settings: 1 } - match: { ml_settings.0.level : warning } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/graph/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/graph/10_basic.yml index ccd861e6358e0..c7aa714032f92 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/graph/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/graph/10_basic.yml @@ -39,7 +39,7 @@ setup: wait_for_status: green - do: - xpack.graph.explore: + graph.explore: index: test_1 body: {"query": {"match": {"keys": 1}},"controls":{"use_significance":false},"vertices":[{"field": "keys","min_doc_count": 1}]} - length: {failures: 0} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml index 6f5b1bd740a92..0a3b2bc135b57 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml @@ -1,7 +1,7 @@ --- teardown: - do: - xpack.license.post: + license.post: acknowledge: true body: | {"licenses":[{"uid":"3aa62ffe-36e1-4fad-bfdc-9dff8301eb22","type":"trial","issue_date_in_millis":1523456691721,"expiry_date_in_millis":1838816691721,"max_nodes":5,"issued_to":"customer","issuer":"elasticsearch","signature":"AAAABAAAAA2kWNcuc+DT0lrlmYZKAAAAIAo5/x6hrsGh1GqqrJmy4qgmEC7gK0U4zQ6q5ZEMhm4jAAABAEn6fG9y2VxKBu2T3D5hffh56kzOQODCOdhr0y2d17ZSIJMZRqO7ZywPCWNS1aR33GhfIHkTER0ysML0xMH/gXavhyRvMBndJj0UBKzuwpTawSlnxYtcqN8mSBIvJC7Ki+uJ1SpAILC2ZP9fnkRlqwXqBlTwfYn7xnZgu9DKrOWru/ipTPObo7jcePl8VTK6nWFen7/hCFDQTUFZ0jQvd+nq7A1PAcHGNxGfdbMVmAXCXgGWkRfT3clo9/vadgo+isNyh1sPq9mN7gwsvBAKtA1FrpH2EXYYbfOsSpBvUmhYMgErLg1k3/CbS0pCWLKOaX1xTMayosdZOjagU3auZXY=","start_date_in_millis":-1}]} @@ -10,7 +10,7 @@ teardown: ## current license version - do: - xpack.license.post: + license.post: acknowledge: true body: | {"licenses":[{"uid":"894371dc-9t49-4997-93cb-8o2e3r7fa6a8","type":"trial","issue_date_in_millis":1411948800000,"expiry_date_in_millis":1916956799999,"max_nodes":1,"issued_to":"issuedTo","issuer":"issuer","signature":"AAAAAgAAAA0FWh0T9njItjQ2qammAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQBZhvozA0trrxhUZ1QbaTsKTna9C5KVQ6pv8yg1pnsBpZXCl8kX1SrgoFn1bXq61IvJwfw5qnmYNiH3hRhTO9EyaCBqaLk8NXZQ6TrRkQSpEnnBwAYUkZeKXsIuBoOk4B4mzwC/r8aMAkzrTiEBtBbog+57cSaU9y37Gkdd+1jXCQrxP+jOEUf7gnXWZvE6oeRroLvCt1fYn09k0CF8kKTbrPTSjC6igZR3uvTHyee74XQ9PRavvHax73T4UOEdQZX/P1ibSQIWKbBRD5YQ1POYVjTayoltTnWLMxfEcAkkATJZLhpBEHST7kZWjrTS6J1dCReJc7a8Vsj/78HXvOIy"}]} @@ -18,14 +18,14 @@ teardown: - match: { license_status: "valid" } - do: - xpack.license.get: {} + license.get: {} ## a license object has 11 attributes - length: { license: 11 } ## bwc 
for licenses format - do: - xpack.license.post: + license.post: acknowledge: true body: | {"licenses":[{"uid":"893361dc-9749-4997-93cb-802e3d7fa4a8","type":"gold","issue_date_in_millis":1411948800000,"expiry_date_in_millis":1914278399999,"max_nodes":1,"issued_to":"issued_to","issuer":"issuer","signature":"AAAAAwAAAA2T3vqdBBetKQaBgxipAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQB7pGFYgawfLm9zzT80LvcLHjy1t/v2uSzCQWKdXXhrwSy4WrAH2uK/+PEiQ7aEpW5erLsyJ5KLA6OEZJDaP7r+mjOPuLt0++l5j4DMn7ybMzOPHXWBc6LETE3+pp0GZPyOmwsDkZSRUegTtciR2R6z+mdnGrhOYM80y08KVWwhdU/DHw41MK7ePo6tq73Nz49y9lDgt9fxA0t4ggEBPbnTDDBVQ25AjauY8sa0M5eg9rDDRayw1KamYWrara8PIGX+2YjhtUeQhmlCPdlxc9wECJ7/knPss5bI3ZoXQR3fyXhjcXNnHEIsblqLrMCal3pLxs7lI+KPYMa2ZYL/am4P"}]} @@ -33,13 +33,13 @@ teardown: - match: { license_status: "valid" } - do: - xpack.license.get: {} + license.get: {} - length: { license: 11 } ## license version: 1.x - do: - xpack.license.post: + license.post: acknowledge: true body: | {"licenses":[{"uid":"893361dc-9749-4997-93cb-802e3d7fa4a8","type":"subscription","subscription_type":"gold","issue_date_in_millis":1411948800000,"feature":"shield","expiry_date_in_millis":1914278399999,"max_nodes":1,"issued_to":"issuedTo","issuer":"issuer","signature":"AAAAAQAAAA0LVAywwpSH94cyXr4zAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQA4qscc/URRZVdFoLwgy9dqybYEQLW8YLkiAyPV5XHHHdtk+dtZIepiNEDkUXhSX2waVJlsNRF8/4kqplDfwNoD2TUM8fTgiIfiSiZYGDTGST+yW/5eAveEU5J5v1liBN27bwkqL+V4YAa0Tcm7NKKwjScWKAHiTU3vF8chPkGfCHE0kQgVwPC9RE82pTw0s6/uR4PfLGNFfqPM0uiE5nucfVrtj89JQiO/KA/7ZyFbo7VTNXxZQt7T7rZWBCP9KIjptXzcWuk08Q5S+rSoJNYbFo3HGKtrCVsRz/55rceNtdwKKXu1IwnSeir4I1/KLduQTtFLy0+1th87VS8T88UT"}]} @@ -47,13 +47,13 @@ teardown: - match: { license_status: "valid" } - do: - xpack.license.get: {} + license.get: {} - length: { license: 11 } ## multiple licenses version: 1.x - do: - xpack.license.post: + license.post: acknowledge: true body: | 
{"licenses":[{"uid":"893361dc-9749-4997-93cb-802e3d7fa4a8","type":"internal","subscription_type":"none","issue_date_in_millis":1411948800000,"feature":"shield","expiry_date_in_millis":1440892799999,"max_nodes":1,"issued_to":"issuedTo","issuer":"issuer","signature":"AAAAAQAAAA04Q4ky3rFyyWLFkytEAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQBxMvUMn4h2E4R4TQMijahTxQj4LPQO4f1M79UxX/XkDlGcH+J5pRHx08OtTRPsFL1lED+h+PIXx307Vo+PNDsOxrWvoYZeYBkOLAO3ny9vhQga+52jYhMxIuFrT9xbcSCSNpMhGojgOIPU2WgiopVdVcimo1+Gk8VtklPB1wPwFzfOjOnPgp/Icx3WYpfkeAUUOyWUYiFIBAe4bnz84iF+xwLKbgYk6aHF25ECBtdb/Uruhcm9+jEFpoIEUtCouvvk9C+NJZ4OickV4xpRgaRG2x9PONH8ZN0QGhGYhJGbisoCxuDmlLsyVxqxfMu3n/r7/jdsEJScjAlSrsLDOu6H"},{"uid":"893361dc-9749-4997-93cb-802e3dofh7aa","type":"internal","subscription_type":"none","issue_date_in_millis":1443484800000,"feature":"watcher","expiry_date_in_millis":1914278399999,"max_nodes":1,"issued_to":"issuedTo","issuer":"issuer","signature":"AAAAAQAAAA0Sc90guRIaQEmgLvMnAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQCQ94dju0pnDZR3Uuypi0ic3aQJ+nvVqe+U8u79Dga5n1qIjcHDh7HvIBJEkF+tnVPlo/PXV/x7BZSwVY1PVErit+6rYix1yuHEgqwxmx/VdRICjCaZM6tk0Ob4dZCPv6Ebn2Mmk89KHC/PwiLPqF6QfwV/Pkpa8k2A3ORJmvYSDvXhe6tCs8dqc4ebrsFxqrZjwWh5CZSpzqqZBFXlngDv2N0hHhpGlueRszD0JJ5dfEL5ZA1DDOrgO9OJVejSHyRqe1L5QRUNdXPVfS+EAG0Dd1cNdJ/sMpYCPnVjbw6iq2/YgM3cuztsXVBY7ij4WnoP3ce7Zjs9TwHn+IqzftC6"}]} @@ -61,19 +61,19 @@ teardown: - match: { license_status: "valid" } - do: - xpack.license.get: {} + license.get: {} - length: { license: 11 } - match: { license.uid: "893361dc-9749-4997-93cb-802e3dofh7aa" } --- "Should throw 404 after license deletion": - do: - xpack.license.delete: {} + license.delete: {} - match: { acknowledged: true } - do: - xpack.license.get: {} + license.get: {} catch: missing --- @@ -81,7 +81,7 @@ teardown: # VERSION_NO_FEATURE_TYPE license version - do: - xpack.license.post: + license.post: acknowledge: true body: | {"license": 
{"uid":"893361dc-9749-4997-93cb-802e3d7fa4a8","type":"gold","issue_date_in_millis":1411948800000,"expiry_date_in_millis":1914278399999,"max_nodes":1,"issued_to":"issued_to","issuer":"issuer","signature":"AAAAAgAAAA3U8+YmnvwC+CWsV/mRAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQBe8GfzDm6T537Iuuvjetb3xK5dvg0K5NQapv+rczWcQFxgCuzbF8plkgetP1aAGZP4uRESDQPMlOCsx4d0UqqAm9f7GbBQ3l93P+PogInPFeEH9NvOmaAQovmxVM9SE6DsDqlX4cXSO+bgWpXPTd2LmpoQc1fXd6BZ8GeuyYpVHVKp9hVU0tAYjw6HzYOE7+zuO1oJYOxElqy66AnIfkvHrvni+flym3tE7tDTgsDRaz7W3iBhaqiSntEqabEkvHdPHQdSR99XGaEvnHO1paK01/35iZF6OXHsF7CCj+558GRXiVxzueOe7TsGSSt8g7YjZwV9bRCyU7oB4B/nidgI"}} @@ -89,7 +89,7 @@ teardown: - match: { license_status: "valid" } - do: - xpack.license.get: {} + license.get: {} - length: { license: 11 } --- @@ -97,7 +97,7 @@ teardown: - do: catch: bad_request - xpack.license.post: + license.post: acknowledge: true body: | {"license":{"uid":"893361dc-9749-4997-93cb-802e3d7fa4a8","type":"basic","issue_date_in_millis":1411948800000,"expiry_date_in_millis":1914278399999,"max_nodes":1,"issued_to":"issuedTo","issuer":"issuer","signature":"AAAAAgAAAA0lKPZ0a7aZquUltho/AAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQAALuQ44S3IG6SzolcXVJ6Z4CIXORDrYQ+wdLCeey0XdujTslAOj+k+vNgo6wauc7Uswi01esHu4lb5IgpvKy7RRCbh5bj/z2ubu2qMJqopp9BQyD7VQjVfqmG6seUMJwJ1a5Avvm9r41YPSPcrii3bKK2e1l6jK6N8ibCvnTyY/XkYGCJrBWTSJePDbg6ErbyodrZ37x1StLbPWcNAkmweyHjDJnvYnbeZZO7A3NmubXZjW7Ttf8/YwQyE00PqMcl7fVPY3hkKpAeHf8aaJbqkKYbqZuER3EWJX7ZvLVb1dNdNg8aXRn7YrkQcYwWgptYQpfV+D7yEJ4j5muAEoler"}} @@ -108,7 +108,7 @@ teardown: - do: catch: bad_request - xpack.license.post: + license.post: acknowledge: true - match: { error.root_cause.0.reason: 'The license must be provided in the request body' } @@ -116,24 +116,24 @@ teardown: "Current license is trial means not eligle to start trial": - do: - xpack.license.get_trial_status: {} + license.get_trial_status: {} - match: { eligible_to_start_trial: false } - do: - xpack.license.post_start_basic: + license.post_start_basic: acknowledge: true - match: { basic_was_started: true } - do: - xpack.license.get_trial_status: {} + license.get_trial_status: {} - match: { eligible_to_start_trial: false } - do: catch: forbidden - xpack.license.post_start_trial: + license.post_start_trial: acknowledge: true - match: { trial_was_started: false } @@ -142,31 +142,31 @@ teardown: "Trial license cannot be basic": - do: catch: bad_request - xpack.license.post_start_trial: + license.post_start_trial: type: "basic" acknowledge: true --- "Can start basic license if do not 
already have basic": - do: - xpack.license.get_basic_status: {} + license.get_basic_status: {} - match: { eligible_to_start_basic: true } - do: - xpack.license.post_start_basic: + license.post_start_basic: acknowledge: true - match: { basic_was_started: true } - match: { acknowledged: true } - do: - xpack.license.get_basic_status: {} + license.get_basic_status: {} - match: { eligible_to_start_basic: false } - do: catch: forbidden - xpack.license.post_start_basic: {} + license.post_start_basic: {} - match: { basic_was_started: false } - match: { acknowledged: true } @@ -174,7 +174,7 @@ teardown: --- "Must acknowledge to start basic": - do: - xpack.license.post_start_basic: {} + license.post_start_basic: {} - match: { basic_was_started: false } - match: { acknowledged: false } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml index 40fa404f36147..1710e51c32bdc 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml @@ -15,7 +15,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -41,7 +41,7 @@ setup: "Test basic delete_job": - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: @@ -80,12 +80,12 @@ setup: upgraded_doc_id: true - do: - xpack.rollup.delete_job: + rollup.delete_job: id: foo - is_true: acknowledged - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: jobs: [] @@ -94,7 +94,7 @@ setup: "Test delete job twice": - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: @@ -133,12 +133,12 @@ setup: upgraded_doc_id: true - do: - xpack.rollup.delete_job: + rollup.delete_job: id: foo - is_true: acknowledged - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: jobs: [] @@ -147,7 +147,7 @@ setup: "Test delete running job": - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: @@ -186,13 +186,13 @@ setup: upgraded_doc_id: true - do: - xpack.rollup.start_job: + rollup.start_job: id: foo - is_true: started - do: catch: request - xpack.rollup.delete_job: + rollup.delete_job: id: foo - is_false: acknowledged - match: { task_failures.0.reason.type: "illegal_state_exception" } @@ -205,5 +205,5 @@ setup: catch: /the task with id \[does_not_exist\] doesn't exist/ headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.delete_job: + rollup.delete_job: id: does_not_exist diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml index 6332302e67418..cd00a6f717b02 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml @@ -18,7 +18,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -42,7 +42,7 @@ setup: - is_true: acknowledged - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: @@ -83,7 +83,7 @@ setup: "Test get with no jobs": - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: "_all" - length: { jobs: 0 } @@ -92,7 +92,7 @@ setup: "Test get missing job": - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: @@ -108,7 +108,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -134,7 +134,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: bar body: > { @@ -158,7 +158,7 @@ setup: - is_true: acknowledged - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: "_all" - length: { jobs: 2 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml index 0b5a8a2e11180..3d38f4a371234 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml @@ -36,7 +36,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -62,7 +62,7 @@ setup: "Verify one job caps": - do: - xpack.rollup.get_rollup_caps: + rollup.get_rollup_caps: id: "foo" - match: @@ -87,7 +87,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -109,7 +109,7 @@ setup: ] } - do: - xpack.rollup.get_rollup_caps: + rollup.get_rollup_caps: id: "foo" - match: @@ -146,7 +146,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -171,7 +171,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo3 body: > { @@ -194,7 +194,7 @@ setup: } - do: - xpack.rollup.get_rollup_caps: + rollup.get_rollup_caps: id: "_all" - match: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml index b300af5e0a014..e4b98b9492087 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml @@ -36,7 +36,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -62,7 +62,7 @@ setup: "Verify one job caps by rollup index": - do: - xpack.rollup.get_rollup_index_caps: + rollup.get_rollup_index_caps: index: "foo_rollup" - match: @@ -87,7 +87,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -109,7 +109,7 @@ setup: ] } - do: - xpack.rollup.get_rollup_index_caps: + rollup.get_rollup_index_caps: index: "foo_rollup" - match: @@ -147,7 +147,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -169,7 +169,7 @@ setup: ] } - do: - xpack.rollup.get_rollup_index_caps: + rollup.get_rollup_index_caps: index: "foo_rollup" - match: @@ -194,7 +194,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -218,7 +218,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo3 body: > { @@ -241,7 +241,7 @@ setup: } - do: - xpack.rollup.get_rollup_index_caps: + rollup.get_rollup_index_caps: index: "_all" - match: @@ -297,7 +297,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -321,7 +321,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo3 body: > { @@ -344,7 +344,7 @@ setup: } - do: - xpack.rollup.get_rollup_index_caps: + rollup.get_rollup_index_caps: index: "foo_rollup2,foo_rollup" - match: @@ -396,7 +396,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -420,7 +420,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo3 body: > { @@ -443,7 +443,7 @@ setup: } - do: - xpack.rollup.get_rollup_index_caps: + rollup.get_rollup_index_caps: index: "*_rollup2" - match: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml index f8cfe85cc2e6a..7983778108bd0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml @@ -18,7 +18,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -42,7 +42,7 @@ setup: - is_true: acknowledged - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: @@ -86,7 +86,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -113,7 +113,7 @@ setup: catch: /Cannot create rollup job \[foo\] because job was previously created \(existing metadata\)/ headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -144,7 +144,7 @@ setup: catch: /foo/ headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -174,7 +174,7 @@ setup: catch: /unknown field \[headers\], parser not found/ headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -206,7 +206,7 @@ setup: catch: /Could not find a \[numeric\] or \[date\] field with name \[field_doesnt_exist\] in any of the indices matching the index pattern/ headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -235,7 +235,7 @@ setup: catch: /Unsupported metric \[does_not_exist\]/ headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml index 9af896f4c9fab..a7765dfc15fe3 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml @@ -17,7 +17,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -127,7 +127,7 @@ setup: "Basic Search": - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup" body: size: 0 @@ -152,7 +152,7 @@ setup: "Formatted Date Histo": - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup" body: size: 0 @@ -178,7 +178,7 @@ setup: "Empty aggregation": - do: - xpack.rollup.rollup_search: + rollup.rollup_search: rest_total_hits_as_int: true index: "foo_rollup" body: @@ -193,7 +193,7 @@ setup: "Empty aggregation with new response format": - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup" body: size: 0 @@ -210,7 +210,7 @@ setup: "Search with Metric": - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup" body: size: 0 @@ -243,7 +243,7 @@ setup: "Search with Query": - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup" body: size: 0 @@ -280,7 +280,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -388,7 +388,7 @@ setup: - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup" body: size: 0 @@ -423,7 +423,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -531,7 +531,7 @@ setup: - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup" body: size: 0 @@ -568,7 +568,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -675,7 +675,7 @@ setup: "_rollup.version": 1 - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup" body: size: 0 @@ -710,7 +710,7 @@ setup: "Wildcards matching single rollup index": - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup*" body: size: 0 @@ -750,7 +750,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: bar body: > { @@ -777,7 +777,7 @@ setup: - do: catch: /RollupSearch currently only supports searching one rollup index at a time\./ - xpack.rollup.rollup_search: + rollup.rollup_search: index: "*_rollup" body: size: 0 @@ -798,7 +798,7 @@ setup: name: rollup_alias - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "rollup_alias" body: size: 0 @@ -838,7 +838,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: bar body: > { @@ -870,7 +870,7 @@ setup: - do: catch: /RollupSearch currently only supports searching one rollup index at a time\./ - xpack.rollup.rollup_search: + rollup.rollup_search: index: "rollup_alias" body: size: 0 @@ -886,7 +886,7 @@ setup: "Search with typed_keys": - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup" typed_keys: true body: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml index 5a53847187484..07f4e2b62a6f9 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml @@ -89,7 +89,7 @@ teardown: # This index pattern will match both indices, but we only have permission to read one - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -115,7 +115,7 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user - xpack.rollup.start_job: + rollup.start_job: id: foo - is_true: started @@ -135,7 +135,7 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: jobs.0.stats.documents_processed: 1 @@ -250,7 +250,7 @@ teardown: # Index contains two docs, but we should only be able to see one of them - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -275,7 +275,7 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user - xpack.rollup.start_job: + rollup.start_job: id: foo - is_true: started @@ -295,7 +295,7 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: jobs.0.stats.documents_processed: 1 diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml index 88619e0dfc8ee..fbf9e8519059a 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml @@ -15,7 +15,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -44,7 +44,7 @@ setup: catch: /Task for Rollup Job \[does_not_exist\] not found/ headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.start_job: + rollup.start_job: id: does_not_exist @@ -54,7 +54,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.start_job: + rollup.start_job: id: foo - is_true: started @@ -62,6 +62,6 @@ setup: catch: /Cannot start task for Rollup Job \[foo\] because state was/ headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser - xpack.rollup.start_job: + rollup.start_job: id: foo diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml index bcb592be7a074..7e8b6b3f61af0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml @@ -15,7 +15,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -44,7 +44,7 @@ setup: catch: /Task for Rollup Job \[does_not_exist\] not found/ headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.stop_job: + rollup.stop_job: id: does_not_exist @@ -54,21 +54,21 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.start_job: + rollup.start_job: id: foo - is_true: started - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.stop_job: + rollup.stop_job: id: foo - is_true: stopped - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.stop_job: + rollup.stop_job: id: foo - is_true: stopped @@ -78,7 +78,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.stop_job: + rollup.stop_job: id: foo - is_true: stopped @@ -92,14 +92,14 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.start_job: + rollup.start_job: id: foo - is_true: started - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.stop_job: + rollup.stop_job: id: foo wait_for_completion: true - is_true: stopped @@ -113,14 +113,14 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.start_job: + rollup.start_job: id: foo - is_true: started - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser - xpack.rollup.stop_job: + rollup.stop_job: id: foo wait_for_completion: true timeout: "5s" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/sql.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/sql.yml index 551866b3b1ebd..9ac15b309b161 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/sql.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/sql.yml @@ -26,7 +26,7 @@ setup: --- "Execute some SQL": - do: - xpack.sql.query: + sql.query: format: json body: query: "SELECT * FROM test ORDER BY int asc" @@ -42,7 +42,7 @@ setup: --- "Paging through results": - do: - xpack.sql.query: + sql.query: format: json body: query: "SELECT * FROM test ORDER BY int asc" @@ -57,7 +57,7 @@ setup: - set: { cursor: cursor } - do: - xpack.sql.query: + sql.query: format: json body: cursor: "$cursor" @@ -68,7 +68,7 @@ setup: - set: { cursor: cursor } - do: - xpack.sql.query: + sql.query: format: json body: cursor: "$cursor" @@ -79,7 +79,7 @@ setup: --- "Getting textual representation": - do: - xpack.sql.query: + sql.query: format: txt body: query: "SELECT * FROM test ORDER BY int asc" @@ -95,7 +95,7 @@ setup: --- "Clean cursor": - do: - xpack.sql.query: + sql.query: format: json body: query: "SELECT * FROM test ORDER BY int asc" @@ -108,7 +108,7 @@ setup: - set: { cursor: cursor} - do: - xpack.sql.clear_cursor: + sql.clear_cursor: body: cursor: "$cursor" - match: { "succeeded": true } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml index 9fa8e6259f5ff..57f275af5e26b 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml @@ -16,7 +16,7 @@ int: 1 - do: - xpack.sql.translate: + sql.translate: body: query: "SELECT * FROM test ORDER BY int asc" - match: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/upgrade/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/upgrade/10_basic.yml index a2895bb1ed7d6..acb6004b4d5c3 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/upgrade/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/upgrade/10_basic.yml @@ -1,7 +1,7 @@ --- setup: - do: - xpack.license.post: + license.post: body: > { "license": { @@ -27,7 +27,7 @@ setup: --- "Upgrade info - all": - do: - xpack.migration.get_assistance: { index: _all } + migration.get_assistance: { index: _all } - length: { indices: 0 } @@ -35,13 +35,13 @@ setup: "Upgrade test - should fail as index is already up to date": - do: catch: /illegal_state_exception/ - xpack.migration.upgrade: { index: test1 } + migration.upgrade: { index: test1 } --- "Upgrade test - wait_for_completion:false": - do: - xpack.migration.upgrade: + migration.upgrade: index: test1 wait_for_completion: false diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/10_basic.yml index 9c861e3dcd831..ed35d17984679 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/10_basic.yml @@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -38,7 +38,7 @@ wait_for_status: yellow - do: - xpack.watcher.ack_watch: + watcher.ack_watch: 
watch_id: "my_watch" - match: { "status.actions.test_index.ack.state" : "awaits_successful_execution" } @@ -52,7 +52,7 @@ - match: { hits.hits.0._source.status.actions.test_index.ack.state: "awaits_successful_execution" } - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" - match: { found: true } @@ -63,6 +63,6 @@ cluster.health: wait_for_status: yellow - do: - xpack.watcher.ack_watch: + watcher.ack_watch: watch_id: "non-existent-watch" catch: missing diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/20_ack_individual_action.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/20_ack_individual_action.yml index 813e1f0c88899..34cea0135c9ec 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/20_ack_individual_action.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/20_ack_individual_action.yml @@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -38,14 +38,14 @@ wait_for_status: yellow - do: - xpack.watcher.ack_watch: + watcher.ack_watch: watch_id: "my_watch" action_id: "test_index" - match: { "status.actions.test_index.ack.state" : "awaits_successful_execution" } - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" - match: { found: true } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/30_reset_ack_after_unmet_condition.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/30_reset_ack_after_unmet_condition.yml index 2a9a4959de4c2..30787ed3c3023 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/30_reset_ack_after_unmet_condition.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/30_reset_ack_after_unmet_condition.yml @@ -7,7 +7,7 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" ignore: 404 @@ -15,7 +15,7 @@ teardown: "Ensure that ack status is reset after unsuccessful execution": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -45,7 +45,7 @@ teardown: } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" body: > { @@ -54,18 +54,18 @@ teardown: - match: { watch_record.status.actions.indexme.ack.state: "ackable" } - do: - xpack.watcher.ack_watch: + watcher.ack_watch: watch_id: "my_watch" - match: { "status.actions.indexme.ack.state" : "acked" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { "status.actions.indexme.ack.state" : "acked" } # having a false result will reset the ack state - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" body: > { @@ -80,12 +80,12 @@ teardown: - match: { watch_record.status.actions.indexme.ack.state: "awaits_successful_execution" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { "status.actions.indexme.ack.state" : "awaits_successful_execution" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" body: > { @@ -97,7 +97,7 @@ teardown: - match: { watch_record.status.actions.indexme.ack.state: "ackable" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { "status.actions.indexme.ack.state" : "ackable" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/40_reset_ack_after_unmet_action_condition.yml 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/40_reset_ack_after_unmet_action_condition.yml index 946f23a2f5a4e..6e7e2030287f6 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/40_reset_ack_after_unmet_action_condition.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/40_reset_ack_after_unmet_action_condition.yml @@ -7,7 +7,7 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" ignore: 404 @@ -15,7 +15,7 @@ teardown: "Ensure that ack status is reset after unmet action condition": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -45,7 +45,7 @@ teardown: } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" body: > { @@ -54,18 +54,18 @@ teardown: - match: { watch_record.status.actions.indexme.ack.state: "ackable" } - do: - xpack.watcher.ack_watch: + watcher.ack_watch: watch_id: "my_watch" - match: { "status.actions.indexme.ack.state" : "acked" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { "status.actions.indexme.ack.state" : "acked" } # having a false result will reset the ack state - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" body: > { @@ -80,12 +80,12 @@ teardown: - match: { watch_record.status.actions.indexme.ack.state: "awaits_successful_execution" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { "status.actions.indexme.ack.state" : "awaits_successful_execution" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" body: > { @@ -97,7 +97,7 @@ teardown: - match: { watch_record.status.actions.indexme.ack.state: "ackable" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { "status.actions.indexme.ack.state" : "ackable" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/activate_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/activate_watch/10_basic.yml index 99459119e3cdf..5f09e7ef1847a 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/activate_watch/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/activate_watch/10_basic.yml @@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -34,7 +34,7 @@ - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} @@ -42,7 +42,7 @@ - match: { status.state.active: true } - do: - xpack.watcher.deactivate_watch: + watcher.deactivate_watch: watch_id: "my_watch" - match: { status.state.active : false } @@ -56,14 +56,14 @@ - match: { hits.hits.0._source.status.state.active: false } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } - match: { status.state.active: false } - do: - xpack.watcher.activate_watch: + watcher.activate_watch: watch_id: "my_watch" - match: { status.state.active : true } @@ -77,7 +77,7 @@ - match: { hits.hits.0._source.status.state.active: true } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} @@ -85,7 +85,7 @@ - match: { status.state.active: true } - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" - match: { found: true } @@ -97,11 +97,11 @@ wait_for_status: yellow - do: - xpack.watcher.activate_watch: + 
watcher.activate_watch: watch_id: "non-existent-watch" catch: missing - do: - xpack.watcher.deactivate_watch: + watcher.deactivate_watch: watch_id: "non-existent-watch" catch: missing diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/delete_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/delete_watch/10_basic.yml index d22b66f85d188..1e9526ab209fa 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/delete_watch/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/delete_watch/10_basic.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" ignore: 404 --- "Test delete watch api": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -47,7 +47,7 @@ teardown: - match: { created: true } - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" - match: { found: true } @@ -61,6 +61,6 @@ teardown: --- "Non existent watch returns 404": - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "non-existent-watch" catch: missing diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/10_basic.yml index 1fd3c06b2eee7..4f4ab18796d7b 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/10_basic.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "test_watch" ignore: 404 --- "Test execute watch api with configured trigger data timestamps": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "test_watch" body: > { @@ -42,7 +42,7 @@ teardown: - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "test_watch" body: > { @@ -67,7 +67,7 @@ teardown: "Test execute watch API with user supplied watch": - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { @@ -104,7 +104,7 @@ teardown: "Execute unknown watch results in 404": - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "non-existent-watch" catch: missing @@ -112,7 +112,7 @@ teardown: "Test execute watch with alternative input": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "test_watch" body: > { @@ -138,7 +138,7 @@ teardown: - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "test_watch" body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/20_transform.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/20_transform.yml index 3766cb6c4a788..fa0793378756e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/20_transform.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/20_transform.yml @@ -17,7 +17,7 @@ setup: } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { @@ -87,7 +87,7 @@ setup: } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { @@ -146,7 +146,7 @@ setup: index: my_test_index - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/30_throttled.yml 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/30_throttled.yml index 0d2497fed79f1..0511e4447db52 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/30_throttled.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/30_throttled.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "test_watch" ignore: 404 --- "Test execute watch api works with throttling": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "test_watch" body: > { @@ -41,7 +41,7 @@ teardown: - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "test_watch" body: > { @@ -60,7 +60,7 @@ teardown: - match: { watch_record.status.state.active: true } - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "test_watch" body: > { @@ -87,7 +87,7 @@ teardown: - match: { _id: "test_watch" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "test_watch" body: > { @@ -103,7 +103,7 @@ teardown: - match: { watch_record.status.execution_state: "executed" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "test_watch" body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/40_ignore_condition.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/40_ignore_condition.yml index 5c835f7d6927a..bb6efc6e1041f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/40_ignore_condition.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/40_ignore_condition.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "test_watch" ignore: 404 --- "Test execute watch api can ignore conditions": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "test_watch" body: > { @@ -41,7 +41,7 @@ teardown: - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "test_watch" body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/50_action_mode.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/50_action_mode.yml index 3f6303b4d4718..38dcabf5601a8 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/50_action_mode.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/50_action_mode.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "test_watch" ignore: 404 --- "Test execute watch api supports action modes": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "test_watch" body: > { @@ -38,7 +38,7 @@ teardown: - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "test_watch" body: > { @@ -55,7 +55,7 @@ teardown: - match: { watch_record.result.actions.0.status: "simulated" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "test_watch" body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/60_http_input.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/60_http_input.yml index 8a9ba14cb849a..f21981de7cfcf 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/60_http_input.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/60_http_input.yml @@ 
-17,7 +17,7 @@ setup: - set: { nodes.$master.http.publish_address: http_host } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/70_invalid.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/70_invalid.yml index f13c5faf59959..63ad1bd7fe233 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/70_invalid.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/70_invalid.yml @@ -2,7 +2,7 @@ "Test execute watch api returns proper error message with watch directly in the body": - do: catch: /please wrap watch including field \[trigger\] inside a \"watch\" field/ - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "trigger": { @@ -11,7 +11,7 @@ } - do: catch: /please wrap watch including field \[input\] inside a \"watch\" field/ - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "input": { @@ -22,7 +22,7 @@ } - do: catch: /please wrap watch including field \[condition\] inside a \"watch\" field/ - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "condition": { @@ -31,7 +31,7 @@ } - do: catch: /please wrap watch including field \[actions\] inside a \"watch\" field/ - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "actions": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/10_basic.yml index 3ae5492328702..09b2230f04c60 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/10_basic.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" ignore: 404 --- "Test get watch api": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -54,7 +54,7 @@ teardown: - match: { hits.total: 1 } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/20_missing.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/20_missing.yml index fc795005ac8a8..eeed53a78c856 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/20_missing.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/20_missing.yml @@ -6,7 +6,7 @@ # ensure index exists by creating a different watch - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "other" body: > { @@ -38,7 +38,7 @@ - do: catch: missing - xpack.watcher.get_watch: + watcher.get_watch: id: "missing_watch" - match: { found : false} - match: { _id: "missing_watch" } @@ -56,7 +56,7 @@ - do: catch: missing - xpack.watcher.get_watch: + watcher.get_watch: id: "missing_watch" - match: { found : false} - match: { _id: "missing_watch" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/30_with_chain_input.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/30_with_chain_input.yml index 81a12fe6f7ddb..5e51b9c8d0414 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/30_with_chain_input.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/30_with_chain_input.yml 
@@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -45,7 +45,7 @@ } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/10_basic.yml index 78d1b6e65e666..a517293a233af 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/10_basic.yml @@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -44,7 +44,7 @@ - do: catch: bad_request - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" - match: { error.root_cause.0.type: "action_request_validation_exception" } - match: { error.root_cause.0.reason: "Validation Failed: 1: request body is missing;" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/20_put_watch_with_throttle_period.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/20_put_watch_with_throttle_period.yml index ab8d852dab3d4..baf27eb90efd6 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/20_put_watch_with_throttle_period.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/20_put_watch_with_throttle_period.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch1" ignore: 404 --- "Test put watch api with watch level throttle": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch1" body: > { @@ -47,7 +47,7 @@ teardown: - match: { _id: "my_watch1" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch1" - match: { found : true} - match: { _id: "my_watch1" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/30_put_watch_with_action_throttle_period.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/30_put_watch_with_action_throttle_period.yml index a48d667066ef3..bf44433ebf31a 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/30_put_watch_with_action_throttle_period.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/30_put_watch_with_action_throttle_period.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch1" ignore: 404 --- "Test put watch api with action level throttle period": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch1" body: > { @@ -47,7 +47,7 @@ teardown: - match: { _id: "my_watch1" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch1" - match: { found : true} - match: { _id: "my_watch1" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/40_put_watch_as_inactive.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/40_put_watch_as_inactive.yml index 47b27d6b9be3e..18e2829993955 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/40_put_watch_as_inactive.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/40_put_watch_as_inactive.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" 
ignore: 404 --- "Test put inactive watch": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" active: false body: > @@ -47,7 +47,7 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/50_email_attachment_validation.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/50_email_attachment_validation.yml index e76ab7fd71508..d86b496fde116 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/50_email_attachment_validation.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/50_email_attachment_validation.yml @@ -6,7 +6,7 @@ - do: catch: /Configured URL is empty/ - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -45,7 +45,7 @@ - do: catch: /Malformed URL/ - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/60_put_watch_with_action_condition.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/60_put_watch_with_action_condition.yml index bc26a60e4702f..517bb68dbec96 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/60_put_watch_with_action_condition.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/60_put_watch_with_action_condition.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch1" ignore: 404 --- "Test put watch api with action level condition": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch1" body: > { @@ -51,7 +51,7 @@ teardown: - match: { _id: "my_watch1" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch1" - match: { found : true } - match: { _id: "my_watch1" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/70_put_watch_with_index_action_using_id.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/70_put_watch_with_index_action_using_id.yml index 7bad6c8f1eebf..23d1b1057cfa0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/70_put_watch_with_index_action_using_id.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/70_put_watch_with_index_action_using_id.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" ignore: 404 --- "Test put watch api with index action using doc_id": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -45,7 +45,7 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true } - match: { _id: "my_watch" } @@ -53,7 +53,7 @@ teardown: - match: { watch.actions.test_index.index.doc_id: "test_id1" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.state: "executed" } - match: { watch_record.status.execution_state: "executed" } @@ -62,7 +62,7 @@ teardown: --- "Test put watch api with index action using _id field": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -93,7 +93,7 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true } - match: { _id: 
"my_watch" } @@ -101,7 +101,7 @@ teardown: - match: { watch.input.simple.value: 20 } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.state: "executed" } - match: { watch_record.status.execution_state: "executed" } @@ -110,7 +110,7 @@ teardown: --- "Test put watch api with bulk index action using _id field": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -149,7 +149,7 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true } - match: { _id: "my_watch" } @@ -159,7 +159,7 @@ teardown: - match: { watch.input.simple._doc.1.value: 40 } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.state: "executed" } - match: { watch_record.status.execution_state: "executed" } @@ -169,7 +169,7 @@ teardown: --- "Test put watch api with bulk index action using _id field in one document": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -207,7 +207,7 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true } - match: { _id: "my_watch" } @@ -216,7 +216,7 @@ teardown: - match: { watch.input.simple._doc.1.value: 60 } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.state: "executed" } - match: { watch_record.status.execution_state: "executed" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/80_put_get_watch_with_passwords.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/80_put_get_watch_with_passwords.yml index ebef6c87d7022..077ddd2d30825 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/80_put_get_watch_with_passwords.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/80_put_get_watch_with_passwords.yml @@ -8,7 +8,7 @@ setup: "Test getting a watch does not contain the original password": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "watch_with_password" body: > { @@ -40,7 +40,7 @@ setup: } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "watch_with_password" - match: { _id: "watch_with_password" } - match: { watch.input.http.request.auth.basic.password: "::es_redacted::" } @@ -50,7 +50,7 @@ setup: # version 1 - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "watch_without_version_test" body: > { @@ -83,7 +83,7 @@ setup: - do: catch: bad_request - xpack.watcher.put_watch: + watcher.put_watch: id: "watch_without_version_test" body: > { @@ -123,7 +123,7 @@ setup: # version 1 - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "watch_with_seq_no" body: > { @@ -161,7 +161,7 @@ setup: # as if two users in the watch UI tried to update the same watch - do: catch: conflict - xpack.watcher.put_watch: + watcher.put_watch: id: "watch_with_seq_no" if_seq_no: 123034 if_primary_term: $primaryTerm @@ -196,7 +196,7 @@ setup: - do: catch: conflict - xpack.watcher.put_watch: + watcher.put_watch: id: "watch_with_seq_no" if_seq_no: $seqNo if_primary_term: 234242423 @@ -230,7 +230,7 @@ setup: } - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "watch_with_seq_no" if_seq_no: $seqNo if_primary_term: $primaryTerm diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/90_ensure_watch_gets_overwritten_without_version.yml 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/90_ensure_watch_gets_overwritten_without_version.yml index 4bea2f655e624..8a9ceb04dc90e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/90_ensure_watch_gets_overwritten_without_version.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/90_ensure_watch_gets_overwritten_without_version.yml @@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -32,14 +32,14 @@ - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { watch.input.simple.foo: "bar" } # change the simple input fields, then ensure the old # field does not exist on get - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -66,7 +66,7 @@ - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { watch.input.simple.spam: "eggs" } - is_false: watch.input.simple.foo diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/91_search_total_hits_as_int.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/91_search_total_hits_as_int.yml index eba7f75a75968..c427f634a604e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/91_search_total_hits_as_int.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/91_search_total_hits_as_int.yml @@ -18,7 +18,7 @@ setup: "Test search input includes hits by default": - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { @@ -61,7 +61,7 @@ setup: "Test search transform includes hits by default": - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/start_watcher/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/start_watcher/10_basic.yml index 575d01fcee767..d3d25ae1ea7a6 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/start_watcher/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/start_watcher/10_basic.yml @@ -4,5 +4,5 @@ cluster.health: wait_for_status: yellow - - do: {xpack.watcher.start: {}} + - do: {watcher.start: {}} - match: { acknowledged: true } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml index 5a90af3725294..f5a8b149fe952 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml @@ -1,13 +1,13 @@ --- "Test watcher stats output": - - do: {xpack.watcher.stats: {}} + - do: {watcher.stats: {}} - match: { "manually_stopped": false } - match: { "stats.0.watcher_state": "started" } --- "Test watcher stats supports emit_stacktraces parameter": - do: - xpack.watcher.stats: + watcher.stats: metric: "all" emit_stacktraces: "true" - match: { "manually_stopped": false } @@ -20,7 +20,7 @@ reason: metrics were fixed in 7.0.0 - do: - xpack.watcher.stats: + watcher.stats: metric: "current_watches" - is_false: stats.0.queued_watches @@ -33,7 +33,7 @@ reason: metrics were fixed in 7.0.0 - do: - xpack.watcher.stats: + watcher.stats: metric: "queued_watches" - is_false: stats.0.current_watches @@ -50,7 +50,7 @@ warnings: - 'The 
pending_watches parameter is deprecated, use queued_watches instead' - xpack.watcher.stats: + watcher.stats: metric: "pending_watches" - is_false: stats.0.current_watches @@ -59,7 +59,7 @@ --- "Test watcher stats all watches": - do: - xpack.watcher.stats: + watcher.stats: metric: "_all" - is_true: stats.0.current_watches diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stop_watcher/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stop_watcher/10_basic.yml index 518714c57ab3f..a4bac7b78a10f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stop_watcher/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stop_watcher/10_basic.yml @@ -4,8 +4,8 @@ cluster.health: wait_for_status: yellow - - do: {xpack.watcher.stop: {}} + - do: {watcher.stop: {}} - match: { acknowledged: true } - - do: {xpack.watcher.start: {}} + - do: {watcher.start: {}} - match: { acknowledged: true } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml index 432308581f6f0..754f5281d8535 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml @@ -2,7 +2,7 @@ "Test watcher usage stats output": - do: catch: missing - xpack.watcher.delete_watch: + watcher.delete_watch: id: "usage_stats_watch" - do: {xpack.usage: {}} @@ -10,7 +10,7 @@ - set: { "watcher.count.total": watch_count_total } - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "usage_stats_watch" body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml index 5e61f98bbc297..213d935a5c52f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml @@ -7,7 +7,7 @@ wait_for_status: yellow - do: - xpack.license.delete: {} + license.delete: {} - match: { acknowledged: true } # we don't have a license now @@ -30,7 +30,7 @@ # - is_false: features.monitoring.available TODO fix once licensing is fixed - do: - xpack.license.post: + license.post: body: > { "license": { @@ -49,7 +49,7 @@ - match: { license_status: "valid" } - do: - xpack.license.get: {} + license.get: {} - match: { license.uid: "893361dc-9749-4997-93cb-802e3dofh7aa" } - match: { license.type: "internal" } - match: { license.status: "active" } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java index b03d75af113af..6bd9da794cdfc 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.watcher.test.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; +import 
org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.xpack.core.watcher.watch.Watch; import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; @@ -28,7 +28,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/36782") +@TestLogging("org.elasticsearch.xpack.watcher.execution:DEBUG") @ClusterScope(scope = SUITE, numClientNodes = 0, transportClientRatio = 0, maxNumDataNodes = 1, supportsDedicatedMasters = false) public class SingleNodeTests extends AbstractWatcherIntegrationTestCase { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStatusTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStatusTests.java index fce9f8cb05df5..85728f62b8a92 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStatusTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStatusTests.java @@ -88,7 +88,7 @@ public void testHeadersSerialization() throws IOException { BytesStreamOutput out = new BytesStreamOutput(); status.writeTo(out); BytesReference bytesReference = out.bytes(); - WatchStatus readStatus = WatchStatus.read(bytesReference.streamInput()); + WatchStatus readStatus = new WatchStatus(bytesReference.streamInput()); assertThat(readStatus, is(status)); assertThat(readStatus.getHeaders(), is(headers)); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index c7bea7b108804..ae5f3f8a12cc2 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -16,8 +16,6 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.rest.action.document.RestGetAction; -import org.elasticsearch.rest.action.document.RestIndexAction; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.test.StreamsUtils; import org.elasticsearch.test.rest.ESRestTestCase; @@ -28,15 +26,12 @@ import org.elasticsearch.xpack.watcher.actions.logging.LoggingAction; import org.elasticsearch.xpack.watcher.common.text.TextTemplate; import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; -import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger; import org.hamcrest.Matcher; -import org.junit.Before; import java.io.IOException; import java.nio.charset.StandardCharsets; -import java.util.ArrayList; import java.util.Base64; import java.util.List; import java.util.Locale; @@ -45,9 +40,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; -import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; import static org.hamcrest.Matchers.anyOf; -import static 
org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -61,13 +54,6 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { - private String type; - - @Before - public void setType() { - type = getOldClusterVersion().before(Version.V_6_7_0) ? "doc" : "_doc"; - } - @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); @@ -84,7 +70,7 @@ protected Settings restClientSettings() { * Tests that a single document survives. Super basic smoke test. */ public void testSingleDoc() throws IOException { - String docLocation = "/testsingledoc/" + type + "/1"; + String docLocation = "/testsingledoc/_doc/1"; String doc = "{\"test\": \"test\"}"; if (isRunningAgainstOldCluster()) { @@ -95,9 +81,6 @@ public void testSingleDoc() throws IOException { } Request getRequest = new Request("GET", docLocation); - if (getOldClusterVersion().before(Version.V_6_7_0)) { - getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); - } assertThat(toStr(client().performRequest(getRequest)), containsString(doc)); } @@ -318,149 +301,6 @@ public void testRollupAfterRestart() throws Exception { } } - public void testRollupIDSchemeAfterRestart() throws Exception { - assumeTrue("Rollup can be tested with 6.3.0 and onwards", getOldClusterVersion().onOrAfter(Version.V_6_3_0)); - assumeTrue("Rollup ID scheme changed in 6.4", getOldClusterVersion().before(Version.V_6_4_0)); - if (isRunningAgainstOldCluster()) { - - final Request indexRequest = new Request("POST", "/id-test-rollup" + type + "/1"); - indexRequest.setJsonEntity("{\"timestamp\":\"2018-01-01T00:00:01\",\"value\":123}"); - client().performRequest(indexRequest); - - // create the rollup job - final Request createRollupJobRequest = new Request("PUT", getRollupEndpoint() + "/job/rollup-id-test"); - createRollupJobRequest.setJsonEntity("{" - + "\"index_pattern\":\"id-test-rollup\"," - + "\"rollup_index\":\"id-test-results-rollup\"," - + "\"cron\":\"*/1 * * * * ?\"," - + "\"page_size\":100," - + "\"groups\":{" - + " \"date_histogram\":{" - + " \"field\":\"timestamp\"," - + " \"interval\":\"5m\"" - + " }," - + "\"histogram\":{" - + " \"fields\": [\"value\"]," - + " \"interval\":1" - + " }," - + "\"terms\":{" - + " \"fields\": [\"value\"]" - + " }" - + "}," - + "\"metrics\":[" - + " {\"field\":\"value\",\"metrics\":[\"min\",\"max\",\"sum\"]}" - + "]" - + "}"); - - Map createRollupJobResponse = entityAsMap(client().performRequest(createRollupJobRequest)); - assertThat(createRollupJobResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - - // start the rollup job - final Request startRollupJobRequest = new Request("POST", getRollupEndpoint() + "/job/rollup-id-test/_start"); - Map startRollupJobResponse = entityAsMap(client().performRequest(startRollupJobRequest)); - assertThat(startRollupJobResponse.get("started"), equalTo(Boolean.TRUE)); - - assertRollUpJob("rollup-id-test"); - - assertBusy(() -> { - client().performRequest(new Request("POST", "id-test-results-rollup/_refresh")); - final Request searchRequest = new Request("GET", "id-test-results-rollup/_search"); - if (isRunningAgainstOldCluster() == false) { - searchRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - } - try { - Map searchResponse = entityAsMap(client().performRequest(searchRequest)); - 
assertNotNull(ObjectPath.eval("hits.total", searchResponse));
-                    assertThat(ObjectPath.eval("hits.total", searchResponse), equalTo(1));
-                    assertThat(ObjectPath.eval("hits.hits.0._id", searchResponse), equalTo("3310683722"));
-                } catch (IOException e) {
-                    fail();
-                }
-            });
-
-            // After we've confirmed the doc, wait until we move back to STARTED so that we know the
-            // state was saved at the end
-            waitForRollUpJob("rollup-id-test", equalTo("started"));
-
-        } else {
-
-            final Request indexRequest = new Request("POST", "/id-test-rollup/" + type + "/2");
-            indexRequest.setJsonEntity("{\"timestamp\":\"2018-01-02T00:00:01\",\"value\":345}");
-            if (getOldClusterVersion().before(Version.V_6_7_0)) {
-                indexRequest.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE));
-            }
-            client().performRequest(indexRequest);
-
-            assertRollUpJob("rollup-id-test");
-
-            // stop the rollup job to force a state save, which will upgrade the ID
-            final Request stopRollupJobRequest = new Request("POST", "/_rollup/job/rollup-id-test/_stop");
-            Map<String, Object> stopRollupJobResponse = entityAsMap(client().performRequest(stopRollupJobRequest));
-            assertThat(stopRollupJobResponse.get("stopped"), equalTo(Boolean.TRUE));
-
-            waitForRollUpJob("rollup-id-test", equalTo("stopped"));
-
-            // start the rollup job again
-            final Request startRollupJobRequest = new Request("POST", "/_rollup/job/rollup-id-test/_start");
-            Map<String, Object> startRollupJobResponse = entityAsMap(client().performRequest(startRollupJobRequest));
-            assertThat(startRollupJobResponse.get("started"), equalTo(Boolean.TRUE));
-
-            waitForRollUpJob("rollup-id-test", anyOf(equalTo("indexing"), equalTo("started")));
-
-            assertBusy(() -> {
-                client().performRequest(new Request("POST", "id-test-results-rollup/_refresh"));
-                final Request searchRequest = new Request("GET", "id-test-results-rollup/_search");
-                if (isRunningAgainstOldCluster() == false) {
-                    searchRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true");
-                }
-                try {
-                    Map<String, Object> searchResponse = entityAsMap(client().performRequest(searchRequest));
-                    assertNotNull(ObjectPath.eval("hits.total", searchResponse));
-                    assertThat(ObjectPath.eval("hits.total", searchResponse), equalTo(2));
-                    List<String> ids = new ArrayList<>(2);
-                    ids.add(ObjectPath.eval("hits.hits.0._id", searchResponse));
-                    ids.add(ObjectPath.eval("hits.hits.1._id", searchResponse));
-
-                    // should have both old and new ID formats
-                    assertThat(ids, containsInAnyOrder("3310683722", "rollup-id-test$ehY4NAyVSy8xxUDZrNXXIA"));
-
-                    List<Double> values = new ArrayList<>(2);
-                    Map<String, Object> doc = ObjectPath.eval("hits.hits.0._source", searchResponse);
-                    values.add((Double) doc.get("value.min.value"));
-                    doc = ObjectPath.eval("hits.hits.1._source", searchResponse);
-                    values.add((Double) doc.get("value.min.value"));
-
-                    assertThat(values, containsInAnyOrder(123.0, 345.0));
-                } catch (IOException e) {
-                    fail();
-                }
-            });
-        }
-    }
-
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/34774")
-    public void testSqlFailsOnIndexWithTwoTypes() throws IOException {
-        // TODO this isn't going to trigger until we backport to 6.1
-        assumeTrue("It is only possible to build an index that sql doesn't like before 6.0.0",
-            getOldClusterVersion().before(Version.V_6_0_0_alpha1));
-        if (isRunningAgainstOldCluster()) {
-            Request doc1 = new Request("POST", "/testsqlfailsonindexwithtwotypes/type1");
-            doc1.setJsonEntity("{}");
-            client().performRequest(doc1);
-            Request doc2 = new Request("POST", "/testsqlfailsonindexwithtwotypes/type2");
-            doc2.setJsonEntity("{}");
-            client().performRequest(doc2);
-
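The assertions deleted above encode the rollup upgrade invariant: documents rolled up before the upgrade keep the old numeric ID (for example "3310683722"), while documents rolled up afterwards use the new "<job-id>$<hash>" form (for example "rollup-id-test$ehY4NAyVSy8xxUDZrNXXIA"). A standalone sketch of that format check; the helper name and the digits-only heuristic are assumptions for illustration:

```java
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;

final class RollupIdFormat {
    // Old-scheme rollup IDs were decimal hash strings; new-scheme IDs are
    // prefixed with the job id followed by '$' and an encoded 128-bit hash.
    static void assertOldOrNewScheme(String id, String jobId) {
        boolean newScheme = id.startsWith(jobId + "$");
        boolean oldScheme = id.isEmpty() == false && id.chars().allMatch(Character::isDigit);
        assertThat("unexpected rollup id format: " + id, newScheme || oldScheme, is(true));
    }
}
```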
return; - } - final Request sqlRequest = new Request("POST", getSQLEndpoint()); - - sqlRequest.setJsonEntity("{\"query\":\"SELECT * FROM testsqlfailsonindexwithtwotypes\"}"); - ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(sqlRequest)); - assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); - assertThat(e.getMessage(), containsString( - "[testsqlfailsonindexwithtwotypes] contains more than one type [type1, type2] so it is incompatible with sql")); - } - private String loadWatch(String watch) throws IOException { return StreamsUtils.copyToStringFromClasspath("/org/elasticsearch/xpack/restart/" + watch); } @@ -475,20 +315,11 @@ private void assertOldTemplatesAreDeleted() throws IOException { private void assertWatchIndexContentsWork() throws Exception { // Fetch a basic watch Request getRequest = new Request("GET", "_watcher/watch/bwc_watch"); - if (getOldClusterVersion().before(Version.V_7_0_0)) { - getRequest.setOptions( - expectWarnings( - IndexAction.TYPES_DEPRECATION_MESSAGE, - WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE - ) - ); - } else { - getRequest.setOptions( - expectWarnings( - IndexAction.TYPES_DEPRECATION_MESSAGE - ) - ); - } + getRequest.setOptions( + expectWarnings( + IndexAction.TYPES_DEPRECATION_MESSAGE + ) + ); Map bwcWatch = entityAsMap(client().performRequest(getRequest)); @@ -505,20 +336,12 @@ private void assertWatchIndexContentsWork() throws Exception { // Fetch a watch with "fun" throttle periods getRequest = new Request("GET", "_watcher/watch/bwc_throttle_period"); - if (getOldClusterVersion().before(Version.V_7_0_0)) { - getRequest.setOptions( - expectWarnings( - IndexAction.TYPES_DEPRECATION_MESSAGE, - WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE - ) - ); - } else { - getRequest.setOptions( - expectWarnings( - IndexAction.TYPES_DEPRECATION_MESSAGE - ) - ); - } + getRequest.setOptions( + expectWarnings( + IndexAction.TYPES_DEPRECATION_MESSAGE + ) + ); + bwcWatch = entityAsMap(client().performRequest(getRequest)); assertThat(bwcWatch.get("found"), equalTo(true)); source = (Map) bwcWatch.get("watch"); @@ -753,12 +576,7 @@ private void assertRollUpJob(final String rollupJob) throws Exception { for (Map task : rollupJobTasks) { if (ObjectPath.eval("id", task).equals(rollupJob)) { hasRollupTask = true; - - // Persistent task state field has been renamed in 6.4.0 from "status" to "state" - final String stateFieldName - = (isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_6_4_0)) ? "status" : "state"; - - final String jobStateField = "task.xpack/rollup/job." 
+ stateFieldName + ".job_state"; + final String jobStateField = "task.xpack/rollup/job.state.job_state"; assertThat("Expected field [" + jobStateField + "] to be started or indexing in " + task.get("id"), ObjectPath.eval(jobStateField, task), expectedStates); break; @@ -767,7 +585,6 @@ private void assertRollUpJob(final String rollupJob) throws Exception { if (hasRollupTask == false) { fail("Expected persistent task for [" + rollupJob + "] but none found."); } - } private void waitForRollUpJob(final String rollupJob, final Matcher expectedStates) throws Exception { diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java index e8bce60ae0bbb..2161ea1fd2aa0 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java @@ -53,13 +53,6 @@ protected Settings restClientSettings() { @Before public void waitForMlTemplates() throws Exception { List templatesToWaitFor = XPackRestTestHelper.ML_POST_V660_TEMPLATES; - - // If upgrading from a version prior to v6.6.0 the set of templates - // to wait for is different - if (isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_6_6_0) ) { - templatesToWaitFor = XPackRestTestHelper.ML_PRE_V660_TEMPLATES; - } - XPackRestTestHelper.waitForTemplates(client(), templatesToWaitFor); } @@ -100,9 +93,6 @@ private void oldClusterTests() throws IOException { client().performRequest(putClosedJob); DatafeedConfig.Builder stoppedDfBuilder = new DatafeedConfig.Builder(OLD_CLUSTER_STOPPED_DATAFEED_ID, OLD_CLUSTER_CLOSED_JOB_ID); - if (getOldClusterVersion().before(Version.V_6_6_0)) { - stoppedDfBuilder.setDelayedDataCheckConfig(null); - } stoppedDfBuilder.setIndices(Collections.singletonList("airline-data")); Request putStoppedDatafeed = new Request("PUT", "/_xpack/ml/datafeeds/" + OLD_CLUSTER_STOPPED_DATAFEED_ID); @@ -121,9 +111,6 @@ private void oldClusterTests() throws IOException { client().performRequest(openOpenJob); DatafeedConfig.Builder dfBuilder = new DatafeedConfig.Builder(OLD_CLUSTER_STARTED_DATAFEED_ID, OLD_CLUSTER_OPEN_JOB_ID); - if (getOldClusterVersion().before(Version.V_6_6_0)) { - dfBuilder.setDelayedDataCheckConfig(null); - } dfBuilder.setIndices(Collections.singletonList("airline-data")); Request putDatafeed = new Request("PUT", "_xpack/ml/datafeeds/" + OLD_CLUSTER_STARTED_DATAFEED_ID); diff --git a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java index b142d5bafaa48..aaa28d140201b 100644 --- a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java @@ -22,7 +22,7 @@ public class CcrRollingUpgradeIT extends AbstractMultiClusterUpgradeTestCase { - public void testIndexFollowing() throws Exception { + public void testUniDirectionalIndexFollowing() throws Exception { logger.info("clusterName={}, upgradeState={}", clusterName, upgradeState); if (clusterName == ClusterName.LEADER) { @@ -230,6 +230,63 @@ public void 
testCannotFollowLeaderInUpgradedCluster() throws Exception {
         }
     }

+    public void testBiDirectionalIndexFollowing() throws Exception {
+        logger.info("clusterName={}, upgradeState={}", clusterName, upgradeState);
+
+        if (clusterName == ClusterName.FOLLOWER) {
+            switch (upgradeState) {
+                case NONE:
+                    createLeaderIndex(leaderClient(), "leader_index5");
+                    index(leaderClient(), "leader_index5", 128);
+
+                    followIndex(followerClient(), "leader", "leader_index5", "follower_index5");
+                    followIndex(leaderClient(), "follower", "follower_index5", "follower_index6");
+                    assertTotalHitCount("follower_index5", 128, followerClient());
+                    assertTotalHitCount("follower_index6", 128, leaderClient());
+
+                    index(leaderClient(), "leader_index5", 128);
+                    pauseIndexFollowing(followerClient(), "follower_index5");
+                    pauseIndexFollowing(leaderClient(), "follower_index6");
+                    break;
+                case ONE_THIRD:
+                    index(leaderClient(), "leader_index5", 128);
+                    break;
+                case TWO_THIRD:
+                    index(leaderClient(), "leader_index5", 128);
+                    break;
+                case ALL:
+                    index(leaderClient(), "leader_index5", 128);
+                    break;
+                default:
+                    throw new AssertionError("unexpected upgrade_state [" + upgradeState + "]");
+            }
+        } else if (clusterName == ClusterName.LEADER) {
+            switch (upgradeState) {
+                case NONE:
+                    break;
+                case ONE_THIRD:
+                    index(leaderClient(), "leader_index5", 128);
+                    break;
+                case TWO_THIRD:
+                    index(leaderClient(), "leader_index5", 128);
+                    break;
+                case ALL:
+                    ensureGreen(followerClient(), "follower_index5");
+                    resumeIndexFollowing(followerClient(), "follower_index5");
+                    ensureGreen(leaderClient(), "follower_index6");
+                    resumeIndexFollowing(leaderClient(), "follower_index6");
+
+                    assertTotalHitCount("follower_index5", 896, followerClient());
+                    assertTotalHitCount("follower_index6", 896, leaderClient());
+                    break;
+                default:
+                    throw new AssertionError("unexpected upgrade_state [" + upgradeState + "]");
+            }
+        } else {
+            throw new AssertionError("unexpected cluster_name [" + clusterName + "]");
+        }
+    }
+
     private static void createLeaderIndex(RestClient client, String indexName) throws IOException {
         Settings.Builder indexSettings = Settings.builder()
             .put("index.number_of_shards", 1)
@@ -305,9 +362,26 @@ private static void verifyTotalHitCount(final String index,
     }

     private static void stopIndexFollowing(RestClient client, String followerIndex) throws IOException {
-        assertOK(client.performRequest(new Request("POST", "/" + followerIndex + "/_ccr/pause_follow")));
+        pauseIndexFollowing(client, followerIndex);
         assertOK(client.performRequest(new Request("POST", "/" + followerIndex + "/_close")));
         assertOK(client.performRequest(new Request("POST", "/" + followerIndex + "/_ccr/unfollow")));
     }

+    private static void pauseIndexFollowing(RestClient client, String followerIndex) throws IOException {
+        assertOK(client.performRequest(new Request("POST", "/" + followerIndex + "/_ccr/pause_follow")));
+    }
+
+    private static void resumeIndexFollowing(RestClient client, String followerIndex) throws IOException {
+        assertOK(client.performRequest(new Request("POST", "/" + followerIndex + "/_ccr/resume_follow")));
+    }
+
+    private static void ensureGreen(RestClient client, String index) throws IOException {
+        Request request = new Request("GET", "/_cluster/health/" + index);
+        request.addParameter("wait_for_status", "green");
+        request.addParameter("wait_for_no_relocating_shards", "true");
+        request.addParameter("timeout", "70s");
+        request.addParameter("level", "shards");
+        client.performRequest(request);
+    }
+
 }
diff --git
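The new testBiDirectionalIndexFollowing above indexes seven batches of 128 documents into leader_index5 over the course of the upgrade (two in NONE, one in each of ONE_THIRD, TWO_THIRD and ALL on the follower-cluster branch, and one in each of ONE_THIRD and TWO_THIRD on the leader-cluster branch), so the final assertions expect 7 x 128 = 896 hits on both followers. The pause and resume helpers are thin wrappers over the CCR REST endpoints; a self-contained sketch with imports, mirroring the helpers added above:

```java
import java.io.IOException;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

final class CcrFollowControl {
    // POST /<follower-index>/_ccr/pause_follow stops replication but keeps the follower index.
    static void pauseFollow(RestClient client, String followerIndex) throws IOException {
        client.performRequest(new Request("POST", "/" + followerIndex + "/_ccr/pause_follow"));
    }

    // POST /<follower-index>/_ccr/resume_follow resumes replication from where it stopped.
    static void resumeFollow(RestClient client, String followerIndex) throws IOException {
        client.performRequest(new Request("POST", "/" + followerIndex + "/_ccr/resume_follow"));
    }
}
```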
a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupIDUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupIDUpgradeIT.java index 2f653c2bbf1ca..d986f79eb396f 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupIDUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupIDUpgradeIT.java @@ -5,297 +5,6 @@ */ package org.elasticsearch.upgrades; -import org.elasticsearch.Version; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.common.Booleans; -import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.common.xcontent.ObjectPath; -import org.hamcrest.Matcher; - -import java.io.IOException; -import java.time.Instant; -import java.time.OffsetDateTime; -import java.time.ZoneOffset; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; -import static org.hamcrest.Matchers.anyOf; -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.equalTo; - - public class RollupIDUpgradeIT extends AbstractUpgradeTestCase { - private static final Version UPGRADE_FROM_VERSION = - Version.fromString(System.getProperty("tests.upgrade_from_version")); - - /** - * This test verifies that as a cluster is upgraded incrementally, new documents eventually switch - * over to the "new" form of ID (128 bit Murmur3 ids). - * - * Rollup IDs are essentially the hashed concatenation of keys returned by the composite aggregation, - * so the field values that are being indexed (timestamp, value, etc) directly affect the - * ID that is generated. - * - * We don't know which node will get the Rollup task to start, so we don't know when it will migrate. - * The first doc is guaranteed to be the "old" style since all nodes are un-upgraded. The second - * and third phase will have a mixed cluster, and the rollup task may or may not migrate. In those - * phases we have two options (old and new) for the document added in the phase. - * - * The last phase is guaranteed to be new as it's a fully upgraded cluster. 
- */ - public void testIDsUpgradeCorrectly() throws Exception { - assumeTrue("Rollup became available in 6.3", UPGRADE_FROM_VERSION.onOrAfter(Version.V_6_3_0)); - assumeTrue("Rollup ID change happened in 6.4", UPGRADE_FROM_VERSION.before(Version.V_6_4_0)); - switch (CLUSTER_TYPE) { - case OLD: - break; - case MIXED: - Request waitForYellow = new Request("GET", "/_cluster/health"); - waitForYellow.addParameter("wait_for_nodes", "3"); - waitForYellow.addParameter("wait_for_status", "yellow"); - client().performRequest(waitForYellow); - break; - case UPGRADED: - Request waitForGreen = new Request("GET", "/_cluster/health/target,rollup"); - waitForGreen.addParameter("wait_for_nodes", "3"); - waitForGreen.addParameter("wait_for_status", "green"); - // wait for long enough that we give delayed unassigned shards to stop being delayed - waitForGreen.addParameter("timeout", "70s"); - waitForGreen.addParameter("level", "shards"); - client().performRequest(waitForGreen); - break; - default: - throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); - } - - OffsetDateTime timestamp = Instant.parse("2018-01-01T00:00:01.000Z").atOffset(ZoneOffset.UTC); - - if (CLUSTER_TYPE == ClusterType.OLD) { - String recoverQuickly = "{\"settings\": {\"index.unassigned.node_left.delayed_timeout\": \"100ms\"}}"; - - Request createTargetIndex = new Request("PUT", "/target"); - createTargetIndex.setJsonEntity(recoverQuickly); - client().performRequest(createTargetIndex); - - final Request indexRequest = new Request("POST", "/target/_doc/1"); - indexRequest.setJsonEntity("{\"timestamp\":\"" + timestamp.toString() + "\",\"value\":123}"); - client().performRequest(indexRequest); - - // create the rollup job - final Request createRollupJobRequest = new Request("PUT", "/_rollup/job/rollup-id-test"); - createRollupJobRequest.setJsonEntity("{" - + "\"index_pattern\":\"target\"," - + "\"rollup_index\":\"rollup\"," - + "\"cron\":\"*/1 * * * * ?\"," - + "\"page_size\":100," - + "\"groups\":{" - + " \"date_histogram\":{" - + " \"field\":\"timestamp\"," - + " \"interval\":\"5m\"" - + " }," - + "\"histogram\":{" - + " \"fields\": [\"value\"]," - + " \"interval\":1" - + " }," - + "\"terms\":{" - + " \"fields\": [\"value\"]" - + " }" - + "}," - + "\"metrics\":[" - + " {\"field\":\"value\",\"metrics\":[\"min\",\"max\",\"sum\"]}" - + "]" - + "}"); - - Map createRollupJobResponse = entityAsMap(client().performRequest(createRollupJobRequest)); - assertThat(createRollupJobResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - - Request updateSettings = new Request("PUT", "/rollup/_settings"); - updateSettings.setJsonEntity(recoverQuickly); - client().performRequest(updateSettings); - - // start the rollup job - final Request startRollupJobRequest = new Request("POST", "_rollup/job/rollup-id-test/_start"); - Map startRollupJobResponse = entityAsMap(client().performRequest(startRollupJobRequest)); - assertThat(startRollupJobResponse.get("started"), equalTo(Boolean.TRUE)); - - assertRollUpJob("rollup-id-test"); - List ids = getSearchResults(1); - assertThat(ids, containsInAnyOrder("3310683722")); - - } - - if (CLUSTER_TYPE == ClusterType.MIXED && Booleans.parseBoolean(System.getProperty("tests.first_round"))) { - final Request indexRequest = new Request("POST", "/target/_doc/2"); - indexRequest.setJsonEntity("{\"timestamp\":\"" + timestamp.plusDays(1).toString() + "\",\"value\":345}"); - client().performRequest(indexRequest); - - assertRollUpJob("rollup-id-test"); - client().performRequest(new Request("POST", 
"rollup/_refresh")); - - List ids = getSearchResults(2); - // first doc, guaranteed old style - ids.remove("3310683722"); - - // next doc may be either style - ids.removeAll(Arrays.asList("621059582", "rollup-id-test$ehY4NAyVSy8xxUDZrNXXIA")); - assertThat(ids.toString(),ids.size(), equalTo(0)); - } - - if (CLUSTER_TYPE == ClusterType.MIXED && Booleans.parseBoolean(System.getProperty("tests.first_round")) == false) { - final Request indexRequest = new Request("POST", "/target/_doc/3"); - indexRequest.setJsonEntity("{\"timestamp\":\"" + timestamp.plusDays(2).toString() + "\",\"value\":456}"); - client().performRequest(indexRequest); - - assertRollUpJob("rollup-id-test"); - client().performRequest(new Request("POST", "rollup/_refresh")); - - List ids = getSearchResults(3); - - // first doc, guaranteed old style - ids.remove("3310683722"); - - // next two docs may be either style - ids.removeAll(Arrays.asList("621059582", "4288019978", - "rollup-id-test$ehY4NAyVSy8xxUDZrNXXIA", "rollup-id-test$60RGDSb92YI5LH4_Fnq_1g")); - assertThat(ids.toString(), ids.size(), equalTo(0)); - - } - - if (CLUSTER_TYPE == ClusterType.UPGRADED) { - final Request indexRequest = new Request("POST", "/target/_doc/4"); - indexRequest.setJsonEntity("{\"timestamp\":\"" + timestamp.plusDays(3).toString() + "\",\"value\":567}"); - client().performRequest(indexRequest); - - assertRollUpJob("rollup-id-test"); - client().performRequest(new Request("POST", "rollup/_refresh")); - - List ids = getSearchResults(4); - // first doc, guaranteed old style - ids.remove("3310683722"); - - // next two docs may be either style - ids.removeAll(Arrays.asList("621059582", "4288019978", - "rollup-id-test$ehY4NAyVSy8xxUDZrNXXIA", "rollup-id-test$60RGDSb92YI5LH4_Fnq_1g")); - - // Last is guaranteed to be new - ids.remove("rollup-id-test$LAKZftDeQwsUtdPixrkkzQ"); - assertThat(ids.toString(), ids.size(), equalTo(0)); - } - - } - - private List getSearchResults(int expectedCount) throws Exception { - final List collectedIDs = new ArrayList<>(); - assertBusy(() -> { - collectedIDs.clear(); - client().performRequest(new Request("POST", "rollup/_refresh")); - final Request searchRequest = new Request("GET", "rollup/_search"); - searchRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - try { - Map searchResponse = entityAsMap(client().performRequest(searchRequest)); - assertNotNull(ObjectPath.eval("hits.total", searchResponse)); - assertThat(ObjectPath.eval("hits.total", searchResponse), equalTo(expectedCount)); - - for (int i = 0; i < expectedCount; i++) { - String id = ObjectPath.eval("hits.hits." + i + "._id", searchResponse); - collectedIDs.add(id); - Map doc = ObjectPath.eval("hits.hits." 
+ i + "._source", searchResponse); - assertNotNull(doc); - if (id.startsWith("rollup-id-test")) { - assertThat(doc.get("_rollup.version"), equalTo(2)); - } else { - assertThat(doc.get("_rollup.version"), equalTo(1)); - } - } - } catch (IOException e) { - fail(); - } - }); - return collectedIDs; - } - - @SuppressWarnings("unchecked") - private void assertRollUpJob(final String rollupJob) throws Exception { - final Matcher expectedStates = anyOf(equalTo("indexing"), equalTo("started")); - waitForRollUpJob(rollupJob, expectedStates); - - // check that the rollup job is started using the RollUp API - final Request getRollupJobRequest = new Request("GET", "_rollup/job/" + rollupJob); - Map getRollupJobResponse = entityAsMap(client().performRequest(getRollupJobRequest)); - Map job = getJob(getRollupJobResponse, rollupJob); - if (job != null) { - assertThat(ObjectPath.eval("status.job_state", job), expectedStates); - } - - // check that the rollup job is started using the Tasks API - final Request taskRequest = new Request("GET", "_tasks"); - taskRequest.addParameter("detailed", "true"); - taskRequest.addParameter("actions", "xpack/rollup/*"); - Map taskResponse = entityAsMap(client().performRequest(taskRequest)); - Map taskResponseNodes = (Map) taskResponse.get("nodes"); - Map taskResponseNode = (Map) taskResponseNodes.values().iterator().next(); - Map taskResponseTasks = (Map) taskResponseNode.get("tasks"); - Map taskResponseStatus = (Map) taskResponseTasks.values().iterator().next(); - assertThat(ObjectPath.eval("status.job_state", taskResponseStatus), expectedStates); - - // check that the rollup job is started using the Cluster State API - final Request clusterStateRequest = new Request("GET", "_cluster/state/metadata"); - Map clusterStateResponse = entityAsMap(client().performRequest(clusterStateRequest)); - List> rollupJobTasks = ObjectPath.eval("metadata.persistent_tasks.tasks", clusterStateResponse); - - boolean hasRollupTask = false; - for (Map task : rollupJobTasks) { - if (ObjectPath.eval("id", task).equals(rollupJob)) { - hasRollupTask = true; - break; - } - } - if (hasRollupTask == false) { - fail("Expected persistent task for [" + rollupJob + "] but none found."); - } - - } - - private void waitForRollUpJob(final String rollupJob, final Matcher expectedStates) throws Exception { - assertBusy(() -> { - final Request getRollupJobRequest = new Request("GET", "_rollup/job/" + rollupJob); - Response getRollupJobResponse = client().performRequest(getRollupJobRequest); - assertThat(getRollupJobResponse.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); - - Map job = getJob(getRollupJobResponse, rollupJob); - if (job != null) { - assertThat(ObjectPath.eval("status.job_state", job), expectedStates); - } - }, 30L, TimeUnit.SECONDS); - } - - private static Map getJob(Response response, String targetJobId) throws IOException { - return getJob(ESRestTestCase.entityAsMap(response), targetJobId); - } - - @SuppressWarnings("unchecked") - private static Map getJob(Map jobsMap, String targetJobId) throws IOException { - - List> jobs = - (List>) XContentMapValues.extractValue("jobs", jobsMap); - - if (jobs == null) { - return null; - } - for (Map job : jobs) { - String jobId = (String) ((Map) job.get("config")).get("id"); - if (jobId.equals(targetJobId)) { - return job; - } - } - return null; - } } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_watcher.yml 
b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_watcher.yml index 2952e649c76c8..af6f9c47ed559 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_watcher.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_watcher.yml @@ -3,7 +3,7 @@ # no need to put watch, exists already - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } @@ -11,7 +11,7 @@ # execute watch - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" body: > { @@ -41,12 +41,12 @@ # deactivate watch, check with GET API as well - do: - xpack.watcher.deactivate_watch: + watcher.deactivate_watch: watch_id: "my_watch" - match: { status.state.active : false } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } @@ -55,12 +55,12 @@ # activate watch again, check with GET API as well - do: - xpack.watcher.activate_watch: + watcher.activate_watch: watch_id: "my_watch" - match: { status.state.active : true } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } @@ -70,6 +70,6 @@ --- "Test watcher stats output": - do: - xpack.watcher.stats: {} + watcher.stats: {} - match: { "manually_stopped": false } - match: { "stats.0.watcher_state": "started" } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_watcher.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_watcher.yml index 810307bbb2846..3cfc3f2c461d9 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_watcher.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_watcher.yml @@ -2,7 +2,7 @@ "CRUD watch APIs": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -23,7 +23,7 @@ - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } @@ -31,7 +31,7 @@ # execute watch - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" body: > { @@ -61,12 +61,12 @@ # deactivate watch, check with GET API as well - do: - xpack.watcher.deactivate_watch: + watcher.deactivate_watch: watch_id: "my_watch" - match: { status.state.active : false } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } @@ -75,12 +75,12 @@ # activate watch again, check with GET API as well - do: - xpack.watcher.activate_watch: + watcher.activate_watch: watch_id: "my_watch" - match: { status.state.active : true } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } @@ -90,6 +90,6 @@ --- "Test watcher stats output": - do: - xpack.watcher.stats: {} + watcher.stats: {} - match: { "manually_stopped": false } - match: { "stats.0.watcher_state": "started" } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_watcher.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_watcher.yml index 3828db6128f91..6a7a225912e3e 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_watcher.yml +++ 
b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_watcher.yml @@ -3,7 +3,7 @@ # no need to put watch, exists already - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } @@ -11,7 +11,7 @@ # execute watch - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" body: > { @@ -41,12 +41,12 @@ # deactivate watch, check with GET API as well - do: - xpack.watcher.deactivate_watch: + watcher.deactivate_watch: watch_id: "my_watch" - match: { status.state.active : false } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } @@ -55,12 +55,12 @@ # activate watch again, check with GET API as well - do: - xpack.watcher.activate_watch: + watcher.activate_watch: watch_id: "my_watch" - match: { status.state.active : true } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } @@ -69,6 +69,6 @@ --- "Test watcher stats output": - do: - xpack.watcher.stats: {} + watcher.stats: {} - match: { "manually_stopped": false } - match: { "stats.0.watcher_state": "started" } diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java index eb92bd29cd78d..5eabd512dc525 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java @@ -53,13 +53,13 @@ public void startWatcher() throws Exception { assertBusy(() -> { ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); switch (state) { case "stopped": ClientYamlTestResponse startResponse = - getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.start", emptyMap(), emptyList(), emptyMap()); boolean isAcknowledged = (boolean) startResponse.evaluate("acknowledged"); assertThat(isAcknowledged, is(true)); throw new AssertionError("waiting until stopped state reached started state"); @@ -88,7 +88,7 @@ public void startWatcher() throws Exception { public void stopWatcher() throws Exception { assertBusy(() -> { ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); switch (state) { @@ -101,7 +101,7 @@ public void stopWatcher() throws Exception { throw new AssertionError("waiting until starting state reached started state to stop"); case "started": ClientYamlTestResponse stopResponse = - getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stop", emptyMap(), emptyList(), emptyMap()); boolean isAcknowledged = (boolean) 
stopResponse.evaluate("acknowledged"); assertThat(isAcknowledged, is(true)); throw new AssertionError("waiting until started state reached stopped state"); diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java index 9ea73d5b1e09f..da8eca37d5476 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.smoketest; import org.apache.http.util.EntityUtils; +import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; @@ -31,6 +32,7 @@ import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.is; +@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35361") public class SmokeTestWatcherWithSecurityIT extends ESRestTestCase { private static final String TEST_ADMIN_USERNAME = "test_admin"; diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/10_insufficient_privs.yml b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/10_insufficient_privs.yml index e3c512560a992..3cd7a43e69c08 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/10_insufficient_privs.yml +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/10_insufficient_privs.yml @@ -5,10 +5,10 @@ - do: headers: { es-security-runas-user: powerless_user } catch: forbidden - xpack.watcher.stats: {} + watcher.stats: {} # there seems to be a bug in the yaml parser we use, where a single element list # has the END_LIST token skipped...so here we just rerun the same request without # the impersonation to show it works - do: - xpack.watcher.stats: {} + watcher.stats: {} - match: { stats.0.watcher_state: started } diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_test_run_as_execute_watch.yml b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_test_run_as_execute_watch.yml index ec0be2532a6ee..b50f20afd0358 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_test_run_as_execute_watch.yml +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_test_run_as_execute_watch.yml @@ -18,7 +18,7 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" ignore: 404 @@ -27,7 +27,7 @@ teardown: - skip: features: headers - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -63,13 +63,13 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { _id: "my_watch" } - is_false: watch.status.headers - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.watch_id: 
"my_watch" } - match: { watch_record.state: "executed" } @@ -83,7 +83,7 @@ teardown: - skip: features: headers - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -119,14 +119,14 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { _id: "my_watch" } - is_false: watch.status.headers - do: headers: { es-security-runas-user: x_pack_rest_user } - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.watch_id: "my_watch" } - match: { watch_record.state: "executed" } @@ -140,7 +140,7 @@ teardown: - do: # by impersonating this request as powerless user we cannot query the my_test_index # headers: { es-security-runas-user: powerless_user } - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -176,13 +176,13 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { _id: "my_watch" } - is_false: watch.status.headers - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.watch_id: "my_watch" } # because we are not allowed to read the index, there wont be any data @@ -193,7 +193,7 @@ teardown: --- "Test watch search transform is run as user who added the watch": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -227,7 +227,7 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.watch_id: "my_watch" } @@ -242,7 +242,7 @@ teardown: --- "Test watch search transform does not work without permissions": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -277,7 +277,7 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.watch_id: "my_watch" } @@ -294,7 +294,7 @@ teardown: - skip: features: headers - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -318,13 +318,13 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { _id: "my_watch" } - is_false: watch.status.headers - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.watch_id: "my_watch" } - match: { watch_record.state: "executed" } @@ -343,7 +343,7 @@ teardown: - skip: features: headers - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -367,13 +367,13 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { _id: "my_watch" } - is_false: watch.status.headers - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.watch_id: "my_watch" } - match: { watch_record.state: "executed" } diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java index 771302a99bbfb..1d7759b28b9fe 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java +++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java @@ -35,13 +35,13 @@ public static Iterable parameters() throws Exception { public void startWatcher() throws Exception { assertBusy(() -> { ClientYamlTestResponse response = 
- getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); switch (state) { case "stopped": ClientYamlTestResponse startResponse = - getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.start", emptyMap(), emptyList(), emptyMap()); boolean isAcknowledged = (boolean) startResponse.evaluate("acknowledged"); assertThat(isAcknowledged, is(true)); throw new AssertionError("waiting until stopped state reached started state"); @@ -70,7 +70,7 @@ public void startWatcher() throws Exception { public void stopWatcher() throws Exception { assertBusy(() -> { ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); switch (state) { @@ -83,7 +83,7 @@ public void stopWatcher() throws Exception { throw new AssertionError("waiting until starting state reached started state to stop"); case "started": ClientYamlTestResponse stopResponse = - getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stop", emptyMap(), emptyList(), emptyMap()); boolean isAcknowledged = (boolean) stopResponse.evaluate("acknowledged"); assertThat(isAcknowledged, is(true)); throw new AssertionError("waiting until started state reached stopped state"); diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/10_webhook.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/10_webhook.yml index 50ee1f6eafdb9..5a5a4dfe46205 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/10_webhook.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/10_webhook.yml @@ -26,7 +26,7 @@ - set: { docs.0.doc._source.port: port } - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "test_watch" body: trigger: @@ -53,7 +53,7 @@ - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: test_watch body: record_execution: true diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/20_array_access.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/20_array_access.yml index bb52ee7f8d176..6371e743821ab 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/20_array_access.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/20_array_access.yml @@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch": { diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/25_array_compare.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/25_array_compare.yml index e252932393262..9371040a0ff50 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/25_array_compare.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/25_array_compare.yml @@ -36,7 +36,7 @@ indices.refresh: {} - do: - 
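Two suite-level annotations recur in this change set: @AwaitsFix mutes an entire suite pending the linked issue (added to SmokeTestWatcherWithSecurityIT above), and @TestLogging raises a logger to DEBUG for the duration of a suite (swapped onto SingleNodeTests earlier). They are shown together on one hypothetical class purely for illustration; the PR applies them to separate classes:

```java
import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase;

// Hypothetical suite; the annotation values are the ones used in this PR.
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35361")
@TestLogging("org.elasticsearch.xpack.watcher.execution:DEBUG")
public class ExampleWatcherIT extends AbstractWatcherIntegrationTestCase {
}
```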
xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/30_search_input.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/30_search_input.yml index 074b8d0fea7ca..d712ddba3a498 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/30_search_input.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/30_search_input.yml @@ -3,7 +3,7 @@ setup: - do: cluster.health: wait_for_status: yellow - - do: {xpack.watcher.stats:{}} + - do: {watcher.stats:{}} - do: index: index: idx @@ -51,7 +51,7 @@ setup: --- "Test search input mustache integration (using request body)": - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "trigger_data" : { @@ -126,7 +126,7 @@ setup: - match: { acknowledged: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "trigger_data" : { @@ -172,7 +172,7 @@ setup: version: " - 6.99.99" reason: "rest_total_hits_as_int support was added in 7.0" - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "trigger_data" : { @@ -250,7 +250,7 @@ setup: - match: { acknowledged: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "trigger_data" : { diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/40_search_transform.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/40_search_transform.yml index abad97ae944f8..08ff0fae5ba7b 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/40_search_transform.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/40_search_transform.yml @@ -3,7 +3,7 @@ setup: - do: cluster.health: wait_for_status: yellow - - do: {xpack.watcher.stats:{}} + - do: {watcher.stats:{}} - do: index: index: idx @@ -51,7 +51,7 @@ setup: --- "Test search transform mustache integration (using request body)": - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "trigger_data" : { @@ -110,7 +110,7 @@ setup: --- "Test search transform mustache integration (using request template)": - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "trigger_data" : { diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml index 713dbb65b3db6..01326f9764fa1 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml @@ -12,7 +12,7 @@ refresh: true body: { foo: bar } - - do: {xpack.watcher.stats:{}} + - do: {watcher.stats:{}} - match: { "stats.0.watcher_state": "started" } - match: { "stats.0.watch_count": 0 } @@ -38,7 +38,7 @@ - set: { docs.0.doc._source.port: port } - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "test_watch" body: metadata: @@ -73,7 +73,7 @@ - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "test_watch" - do: diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml index 4ea2a8dc7ab74..7f6db2a6d6614 
100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml @@ -5,7 +5,7 @@ wait_for_status: green - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_exe_watch" body: > { @@ -80,7 +80,7 @@ - match: { _id: "my_exe_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_exe_watch" - match: { _id: "my_exe_watch" } @@ -88,7 +88,7 @@ - match: { watch.input.chain.inputs.1.second.transform.script.source: "return [ 'hits' : [ 'total' : ctx.payload.first.hits.total ]]" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_exe_watch" body: > { @@ -132,7 +132,7 @@ wait_for_status: green - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_exe_watch" body: > { @@ -207,7 +207,7 @@ - match: { _id: "my_exe_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_exe_watch" - match: { _id: "my_exe_watch" } @@ -215,7 +215,7 @@ - match: { watch.input.chain.inputs.1.second.transform.script.source: "return [ 'hits' : [ 'total' : ctx.payload.first.hits.total ]]" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_exe_watch" body: > { diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/20_minimal_body.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/20_minimal_body.yml index f5f0a6cd04b0d..b95d2843aeb88 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/20_minimal_body.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/20_minimal_body.yml @@ -5,7 +5,7 @@ wait_for_status: green - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_logging_watch" body: > { @@ -34,7 +34,7 @@ - match: { _id: "my_logging_watch" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_logging_watch" - match: { "watch_record.watch_id": "my_logging_watch" } diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/30_inline_watch.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/30_inline_watch.yml index f9ad2a42414bd..d4ff40b9b084a 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/30_inline_watch.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/30_inline_watch.yml @@ -5,7 +5,7 @@ wait_for_status: green - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "trigger_data" : { diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/40_exception.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/40_exception.yml index b8839ea364e45..411ef8426552a 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/40_exception.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/40_exception.yml @@ -6,7 +6,7 @@ - do: catch: bad_request - xpack.watcher.put_watch: + watcher.put_watch: id: "my_exe_watch" body: > { @@ -42,7 +42,7 @@ wait_for_status: green - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { @@ -80,7 +80,7 @@ - do: catch: bad_request - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch": { diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/50_update_scripts.yml 
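The painless suites in these hunks call watcher.execute_watch both with a stored watch id and with an inline watch definition in the request body. Over HTTP that maps to a POST against the _watcher execute endpoint; a minimal low-level-client sketch, where the endpoint path is assumed from the 7.x REST layout and the inline body is a trimmed illustration rather than one of the test watches:

```java
import java.io.IOException;
import java.util.Map;

import org.elasticsearch.client.Request;
import org.elasticsearch.test.rest.ESRestTestCase;

public class InlineWatchExecuteIT extends ESRestTestCase {
    public void testExecuteInlineWatch() throws IOException {
        // Execute an ad-hoc watch without storing it first.
        Request exec = new Request("POST", "/_watcher/watch/_execute");
        exec.setJsonEntity(
            "{ \"watch\": {"
            + "  \"trigger\": { \"schedule\": { \"interval\": \"10s\" } },"
            + "  \"input\": { \"simple\": { \"foo\": \"bar\" } },"
            + "  \"actions\": { \"log\": { \"logging\": { \"text\": \"payload was {{ctx.payload.foo}}\" } } }"
            + "} }");
        Map<String, Object> response = entityAsMap(client().performRequest(exec));
        // Mirrors the YAML assertions: the watch record should report "executed".
        assertEquals("executed", ((Map<?, ?>) response.get("watch_record")).get("state"));
    }
}
```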
b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/50_update_scripts.yml index 89e6602035c2a..e764505f9c058 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/50_update_scripts.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/50_update_scripts.yml @@ -21,7 +21,7 @@ } - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -50,7 +50,7 @@ - match: { _id: "my_watch" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { "watch_record.watch_id": "my_watch" } @@ -69,7 +69,7 @@ } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { "watch_record.watch_id": "my_watch" } @@ -97,7 +97,7 @@ } - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -126,7 +126,7 @@ - match: { _id: "my_watch" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { "watch_record.watch_id": "my_watch" } @@ -144,7 +144,7 @@ } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { "watch_record.watch_id": "my_watch" } diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/60_chain_input_with_transform.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/60_chain_input_with_transform.yml index 69fd7b4d575ee..d8545110c024e 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/60_chain_input_with_transform.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/60_chain_input_with_transform.yml @@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { diff --git a/x-pack/qa/third-party/hipchat/src/test/java/org/elasticsearch/smoketest/WatcherHipchatYamlTestSuiteIT.java b/x-pack/qa/third-party/hipchat/src/test/java/org/elasticsearch/smoketest/WatcherHipchatYamlTestSuiteIT.java index 785b9d3a89249..b5cb43ebc282b 100644 --- a/x-pack/qa/third-party/hipchat/src/test/java/org/elasticsearch/smoketest/WatcherHipchatYamlTestSuiteIT.java +++ b/x-pack/qa/third-party/hipchat/src/test/java/org/elasticsearch/smoketest/WatcherHipchatYamlTestSuiteIT.java @@ -40,7 +40,7 @@ public void startWatcher() throws Exception { final List watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES); assertBusy(() -> { try { - getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.start", emptyMap(), emptyList(), emptyMap()); for (String template : watcherTemplates) { ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template", @@ -49,7 +49,7 @@ public void startWatcher() throws Exception { } ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); assertThat(state, is("started")); } catch (IOException e) { @@ -62,9 +62,9 @@ public void startWatcher() throws Exception { public void stopWatcher() throws Exception { assertBusy(() -> { try { - getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stop", 
diff --git a/x-pack/qa/third-party/hipchat/src/test/java/org/elasticsearch/smoketest/WatcherHipchatYamlTestSuiteIT.java b/x-pack/qa/third-party/hipchat/src/test/java/org/elasticsearch/smoketest/WatcherHipchatYamlTestSuiteIT.java
index 785b9d3a89249..b5cb43ebc282b 100644
--- a/x-pack/qa/third-party/hipchat/src/test/java/org/elasticsearch/smoketest/WatcherHipchatYamlTestSuiteIT.java
+++ b/x-pack/qa/third-party/hipchat/src/test/java/org/elasticsearch/smoketest/WatcherHipchatYamlTestSuiteIT.java
@@ -40,7 +40,7 @@ public void startWatcher() throws Exception {
         final List<String> watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES);
         assertBusy(() -> {
             try {
-                getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap());
+                getAdminExecutionContext().callApi("watcher.start", emptyMap(), emptyList(), emptyMap());

                 for (String template : watcherTemplates) {
                     ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template",
@@ -49,7 +49,7 @@ public void startWatcher() throws Exception {
                 }

                 ClientYamlTestResponse response =
-                    getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap());
+                    getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap());
                 String state = (String) response.evaluate("stats.0.watcher_state");
                 assertThat(state, is("started"));
             } catch (IOException e) {
@@ -62,9 +62,9 @@ public void stopWatcher() throws Exception {
         assertBusy(() -> {
             try {
-                getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap());
+                getAdminExecutionContext().callApi("watcher.stop", emptyMap(), emptyList(), emptyMap());

                 ClientYamlTestResponse response =
-                    getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap());
+                    getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap());
                 String state = (String) response.evaluate("stats.0.watcher_state");
                 assertThat(state, is("stopped"));
             } catch (IOException e) {
diff --git a/x-pack/qa/third-party/hipchat/src/test/resources/rest-api-spec/test/hipchat/10_hipchat.yml b/x-pack/qa/third-party/hipchat/src/test/resources/rest-api-spec/test/hipchat/10_hipchat.yml
index bd4751cac4b78..be4874b8d42e5 100644
--- a/x-pack/qa/third-party/hipchat/src/test/resources/rest-api-spec/test/hipchat/10_hipchat.yml
+++ b/x-pack/qa/third-party/hipchat/src/test/resources/rest-api-spec/test/hipchat/10_hipchat.yml
@@ -5,7 +5,7 @@
         wait_for_status: yellow

   - do:
-      xpack.watcher.put_watch:
+      watcher.put_watch:
        id: "hipchat_v1_watch"
        body: >
          {
@@ -37,7 +37,7 @@
      }

   - do:
-      xpack.watcher.execute_watch:
+      watcher.execute_watch:
        id: "hipchat_v1_watch"
        body: >
          {
@@ -101,7 +101,7 @@
 # custom rooms, custom users and custom from are not allowed for this account type to be configured

   - do:
-      xpack.watcher.put_watch:
+      watcher.put_watch:
        id: "hipchat_integration_account_watch"
        body: >
          {
@@ -131,7 +131,7 @@
      }

   - do:
-      xpack.watcher.execute_watch:
+      watcher.execute_watch:
        id: "hipchat_integration_account_watch"
        body: >
          {
@@ -186,7 +186,7 @@
         wait_for_status: yellow

   - do:
-      xpack.watcher.put_watch:
+      watcher.put_watch:
        id: "hipchat_user_account_watch"
        body: >
          {
@@ -216,7 +216,7 @@
      }

   - do:
-      xpack.watcher.execute_watch:
+      watcher.execute_watch:
        id: "hipchat_user_account_watch"
        body: >
          {
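The Java change is identical across the hipchat suite above and the jira, pagerduty, and slack suites below: only the API names passed to callApi change. Consolidated from the hunks above for readability, the post-rename stop-and-poll method reads as follows; the body of the catch block sits outside the visible hunks, so the rethrow here is illustrative only, not the suite's actual handling.

    // Consolidated sketch of the @After method after the rename (all four suites match).
    public void stopWatcher() throws Exception {
        assertBusy(() -> {
            try {
                getAdminExecutionContext().callApi("watcher.stop", emptyMap(), emptyList(), emptyMap());

                ClientYamlTestResponse response =
                    getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap());
                String state = (String) response.evaluate("stats.0.watcher_state");
                assertThat(state, is("stopped"));
            } catch (IOException e) {
                throw new AssertionError("failed to stop watcher", e); // illustrative
            }
        });
    }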
emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); assertThat(state, is("stopped")); } catch (IOException e) { diff --git a/x-pack/qa/third-party/jira/src/test/resources/rest-api-spec/test/jira/10_jira.yml b/x-pack/qa/third-party/jira/src/test/resources/rest-api-spec/test/jira/10_jira.yml index 55573f0c0f0c7..d914b63391146 100644 --- a/x-pack/qa/third-party/jira/src/test/resources/rest-api-spec/test/jira/10_jira.yml +++ b/x-pack/qa/third-party/jira/src/test/resources/rest-api-spec/test/jira/10_jira.yml @@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "jira_watch" body: > { @@ -43,7 +43,7 @@ - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "jira_watch" body: > { @@ -126,7 +126,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "wrong_jira_watch" body: > { @@ -161,7 +161,7 @@ - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "wrong_jira_watch" body: > { @@ -226,7 +226,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "jira_watch_with_custom_field_one" body: > { @@ -262,7 +262,7 @@ - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "jira_watch_with_custom_field_one" body: > { @@ -277,7 +277,7 @@ - match: { watch_record.state: "executed" } - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "jira_watch_with_custom_field_two" body: > { @@ -318,7 +318,7 @@ - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "jira_watch_with_custom_field_two" body: > { diff --git a/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java b/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java index 019609793e38c..e111bbd10696b 100644 --- a/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java +++ b/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java @@ -40,7 +40,7 @@ public void startWatcher() throws Exception { final List watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES); assertBusy(() -> { try { - getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.start", emptyMap(), emptyList(), emptyMap()); for (String template : watcherTemplates) { ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template", @@ -49,7 +49,7 @@ public void startWatcher() throws Exception { } ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); assertThat(state, is("started")); } catch (IOException e) { @@ -62,9 +62,9 @@ public void startWatcher() throws Exception { public void stopWatcher() throws Exception { assertBusy(() -> { try { - getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stop", emptyMap(), emptyList(), 
diff --git a/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java b/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java
index 019609793e38c..e111bbd10696b 100644
--- a/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java
+++ b/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java
@@ -40,7 +40,7 @@ public void startWatcher() throws Exception {
         final List<String> watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES);
         assertBusy(() -> {
             try {
-                getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap());
+                getAdminExecutionContext().callApi("watcher.start", emptyMap(), emptyList(), emptyMap());

                 for (String template : watcherTemplates) {
                     ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template",
@@ -49,7 +49,7 @@ public void startWatcher() throws Exception {
                 }

                 ClientYamlTestResponse response =
-                    getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap());
+                    getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap());
                 String state = (String) response.evaluate("stats.0.watcher_state");
                 assertThat(state, is("started"));
             } catch (IOException e) {
@@ -62,9 +62,9 @@ public void stopWatcher() throws Exception {
         assertBusy(() -> {
             try {
-                getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap());
+                getAdminExecutionContext().callApi("watcher.stop", emptyMap(), emptyList(), emptyMap());

                 ClientYamlTestResponse response =
-                    getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap());
+                    getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap());
                 String state = (String) response.evaluate("stats.0.watcher_state");
                 assertThat(state, is("stopped"));
             } catch (IOException e) {
diff --git a/x-pack/qa/third-party/pagerduty/src/test/resources/rest-api-spec/test/pagerduty/10_pagerduty.yml b/x-pack/qa/third-party/pagerduty/src/test/resources/rest-api-spec/test/pagerduty/10_pagerduty.yml
index fa83c8e8e8ce1..82fb47e4bc14c 100644
--- a/x-pack/qa/third-party/pagerduty/src/test/resources/rest-api-spec/test/pagerduty/10_pagerduty.yml
+++ b/x-pack/qa/third-party/pagerduty/src/test/resources/rest-api-spec/test/pagerduty/10_pagerduty.yml
@@ -5,7 +5,7 @@
         wait_for_status: yellow

   - do:
-      xpack.watcher.put_watch:
+      watcher.put_watch:
        id: "pagerduty_watch"
        body: >
          {
@@ -44,7 +44,7 @@
      }

   - do:
-      xpack.watcher.execute_watch:
+      watcher.execute_watch:
        id: "pagerduty_watch"
        body: >
          {
diff --git a/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java b/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java
index f6e8222ea73a0..7021548109fd5 100644
--- a/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java
+++ b/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java
@@ -40,7 +40,7 @@ public void startWatcher() throws Exception {
         final List<String> watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES);
         assertBusy(() -> {
             try {
-                getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap());
+                getAdminExecutionContext().callApi("watcher.start", emptyMap(), emptyList(), emptyMap());

                 for (String template : watcherTemplates) {
                     ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template",
@@ -49,7 +49,7 @@ public void startWatcher() throws Exception {
                 }

                 ClientYamlTestResponse response =
-                    getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap());
+                    getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap());
                 String state = (String) response.evaluate("stats.0.watcher_state");
                 assertThat(state, is("started"));
             } catch (IOException e) {
@@ -62,9 +62,9 @@ public void stopWatcher() throws Exception {
         assertBusy(() -> {
             try {
-                getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap());
+                getAdminExecutionContext().callApi("watcher.stop", emptyMap(), emptyList(), emptyMap());

                 ClientYamlTestResponse response =
-                    getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap());
+                    getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap());
                 String state = (String) response.evaluate("stats.0.watcher_state");
                 assertThat(state, is("stopped"));
             } catch (IOException e) {
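With four suites and a dozen YAML files touched by the same rename, a stale reference is easy to miss in review. A hypothetical guard — not part of this change, names invented for illustration — could scan the test resources for any leftover xpack.watcher. spec names:

    import java.io.IOException;
    import java.io.UncheckedIOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    // Returns every YAML test file under testRoot still referencing the removed prefix.
    static List<Path> findStaleReferences(Path testRoot) throws IOException {
        try (Stream<Path> files = Files.walk(testRoot)) {
            return files
                .filter(p -> p.toString().endsWith(".yml"))
                .filter(p -> {
                    try {
                        String yaml = new String(Files.readAllBytes(p), StandardCharsets.UTF_8);
                        return yaml.contains("xpack.watcher.");
                    } catch (IOException e) {
                        throw new UncheckedIOException(e);
                    }
                })
                .collect(Collectors.toList());
        }
    }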
diff --git a/x-pack/qa/third-party/slack/src/test/resources/rest-api-spec/test/slack/10_slack.yml b/x-pack/qa/third-party/slack/src/test/resources/rest-api-spec/test/slack/10_slack.yml
index f5719ddbfc00c..6da232fc2ffb4 100644
--- a/x-pack/qa/third-party/slack/src/test/resources/rest-api-spec/test/slack/10_slack.yml
+++ b/x-pack/qa/third-party/slack/src/test/resources/rest-api-spec/test/slack/10_slack.yml
@@ -5,7 +5,7 @@
         wait_for_status: yellow

   - do:
-      xpack.watcher.put_watch:
+      watcher.put_watch:
        id: "slack_watch"
        body: >
          {
@@ -76,7 +76,7 @@
      }

   - do:
-      xpack.watcher.execute_watch:
+      watcher.execute_watch:
        id: "slack_watch"
        body: >
          {